linux/debian/patches-rt/0180-block-mq-don-t-complet...

From 42c577ea7a4354ce8a63cfc11579a99a9a8d60c2 Mon Sep 17 00:00:00 2001
Message-Id: <42c577ea7a4354ce8a63cfc11579a99a9a8d60c2.1601675152.git.zanussi@kernel.org>
In-Reply-To: <5b5a156f9808b1acf1205606e03da117214549ea.1601675151.git.zanussi@kernel.org>
References: <5b5a156f9808b1acf1205606e03da117214549ea.1601675151.git.zanussi@kernel.org>
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 29 Jan 2015 15:10:08 +0100
Subject: [PATCH 180/333] block/mq: don't complete requests via IPI
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.148-rt64.tar.xz

The IPI callback runs in hardirq context, but the completion function
takes sleeping locks (on PREEMPT_RT_FULL), which must not be acquired
in hardirq context. This patch moves the completion into a workqueue
instead.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
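
As a standalone illustration of the pattern applied here (not part of
the patch itself), deferring a completion from a context that must not
sleep to a workqueue item pinned to a specific CPU could look like the
sketch below; struct my_req, my_complete() and my_queue_completion()
are made-up example names.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_req {
	struct work_struct work;
	int cpu;		/* CPU the completion should run on */
};

/* Runs later in kworker (process) context, where sleeping locks are fine. */
static void my_complete(struct work_struct *work)
{
	struct my_req *rq = container_of(work, struct my_req, work);

	pr_info("completed on CPU %d\n", rq->cpu);
	kfree(rq);
}

/*
 * Called from a context that must not sleep (e.g. hardirq). Instead of
 * raising an IPI with smp_call_function_single_async(), queue the
 * completion on the system workqueue of the target CPU.
 */
static void my_queue_completion(struct my_req *rq)
{
	INIT_WORK(&rq->work, my_complete);
	schedule_work_on(rq->cpu, &rq->work);
}

The trade-off is latency: the completion now incurs a kworker wakeup
and scheduling delay, where the IPI ran it immediately in hardirq
context, which is exactly what PREEMPT_RT_FULL cannot allow when the
completion path takes sleeping locks.
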
 block/blk-core.c       |  3 +++
 block/blk-mq.c         | 23 +++++++++++++++++++++++
 include/linux/blk-mq.h |  2 +-
 include/linux/blkdev.h |  3 +++
 4 files changed, 30 insertions(+), 1 deletion(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index ce3710404544..8926b5d998fa 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -189,6 +189,9 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 
 	INIT_LIST_HEAD(&rq->queuelist);
 	INIT_LIST_HEAD(&rq->timeout_list);
+#ifdef CONFIG_PREEMPT_RT_FULL
+	INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
+#endif
 	rq->cpu = -1;
 	rq->q = q;
 	rq->__sector = (sector_t) -1;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 830944372d51..f73b111a4d8f 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -320,6 +320,9 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	rq->extra_len = 0;
 	rq->__deadline = 0;
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+	INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
+#endif
 	INIT_LIST_HEAD(&rq->timeout_list);
 	rq->timeout = 0;
 
@@ -547,12 +550,24 @@ void blk_mq_end_request(struct request *rq, blk_status_t error)
 }
 EXPORT_SYMBOL(blk_mq_end_request);
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+
+void __blk_mq_complete_request_remote_work(struct work_struct *work)
+{
+	struct request *rq = container_of(work, struct request, work);
+
+	rq->q->softirq_done_fn(rq);
+}
+
+#else
+
 static void __blk_mq_complete_request_remote(void *data)
 {
 	struct request *rq = data;
 
 	rq->q->softirq_done_fn(rq);
 }
+#endif
 
 static void __blk_mq_complete_request(struct request *rq)
 {
@@ -575,10 +590,18 @@ static void __blk_mq_complete_request(struct request *rq)
 		shared = cpus_share_cache(cpu, ctx->cpu);
 
 	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
+#ifdef CONFIG_PREEMPT_RT_FULL
+		/*
+		 * We could force QUEUE_FLAG_SAME_FORCE, then we would not get
+		 * in here. But we can also invoke it on the CPU like this.
+		 */
+		schedule_work_on(ctx->cpu, &rq->work);
+#else
 		rq->csd.func = __blk_mq_complete_request_remote;
 		rq->csd.info = rq;
 		rq->csd.flags = 0;
 		smp_call_function_single_async(ctx->cpu, &rq->csd);
+#endif
 	} else {
 		rq->q->softirq_done_fn(rq);
 	}
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 2885dce1ad49..8dbb9ecf9993 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -256,7 +256,7 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
 	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
 }
 
-
+void __blk_mq_complete_request_remote_work(struct work_struct *work);
 int blk_mq_request_started(struct request *rq);
 void blk_mq_start_request(struct request *rq);
 void blk_mq_end_request(struct request *rq, blk_status_t error);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 745b2d0dcf78..7cd627f2a016 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -149,6 +149,9 @@ enum mq_rq_state {
  */
 struct request {
 	struct request_queue *q;
+#ifdef CONFIG_PREEMPT_RT_FULL
+	struct work_struct work;
+#endif
 	struct blk_mq_ctx *mq_ctx;
 
 	int cpu;
--
2.17.1