From 9ec5d3b932b407e0b6780392ddb1f7f2fe1251e4 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 29 Jan 2015 15:10:08 +0100
Subject: [PATCH 181/269] block/mq: don't complete requests via IPI
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.37-rt20.tar.xz

The IPI runs in hardirq context and there are sleeping locks. This patch
moves the completion into a workqueue.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 block/blk-core.c       |  3 +++
 block/blk-mq.c         | 23 +++++++++++++++++++++++
 include/linux/blk-mq.h |  2 +-
 include/linux/blkdev.h |  3 +++
 4 files changed, 30 insertions(+), 1 deletion(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index eb8b52241453..581bf704154a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -189,6 +189,9 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 
 	INIT_LIST_HEAD(&rq->queuelist);
 	INIT_LIST_HEAD(&rq->timeout_list);
+#ifdef CONFIG_PREEMPT_RT_FULL
+	INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
+#endif
 	rq->cpu = -1;
 	rq->q = q;
 	rq->__sector = (sector_t) -1;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 430037cda971..9560ebae322d 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -320,6 +320,9 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	rq->extra_len = 0;
 	rq->__deadline = 0;
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+	INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
+#endif
 	INIT_LIST_HEAD(&rq->timeout_list);
 	rq->timeout = 0;
 
@@ -547,12 +550,24 @@ void blk_mq_end_request(struct request *rq, blk_status_t error)
 }
 EXPORT_SYMBOL(blk_mq_end_request);
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+
+void __blk_mq_complete_request_remote_work(struct work_struct *work)
+{
+	struct request *rq = container_of(work, struct request, work);
+
+	rq->q->softirq_done_fn(rq);
+}
+
+#else
+
 static void __blk_mq_complete_request_remote(void *data)
 {
 	struct request *rq = data;
 
 	rq->q->softirq_done_fn(rq);
 }
+#endif
 
 static void __blk_mq_complete_request(struct request *rq)
 {
@@ -575,10 +590,18 @@ static void __blk_mq_complete_request(struct request *rq)
 	shared = cpus_share_cache(cpu, ctx->cpu);
 
 	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
+#ifdef CONFIG_PREEMPT_RT_FULL
+		/*
+		 * We could force QUEUE_FLAG_SAME_FORCE then we would not get in
+		 * here. But we could try to invoke it one the CPU like this.
+		 */
+		schedule_work_on(ctx->cpu, &rq->work);
+#else
 		rq->csd.func = __blk_mq_complete_request_remote;
 		rq->csd.info = rq;
 		rq->csd.flags = 0;
 		smp_call_function_single_async(ctx->cpu, &rq->csd);
+#endif
 	} else {
 		rq->q->softirq_done_fn(rq);
 	}
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 1da59c16f637..04c15b5ca76c 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -249,7 +249,7 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
 	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
 }
 
-
+void __blk_mq_complete_request_remote_work(struct work_struct *work);
 int blk_mq_request_started(struct request *rq);
 void blk_mq_start_request(struct request *rq);
 void blk_mq_end_request(struct request *rq, blk_status_t error);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 6980014357d4..f93ae914abda 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -149,6 +149,9 @@ enum mq_rq_state {
  */
 struct request {
 	struct request_queue *q;
+#ifdef CONFIG_PREEMPT_RT_FULL
+	struct work_struct work;
+#endif
 	struct blk_mq_ctx *mq_ctx;
 
 	int cpu;
-- 
2.20.1