Subject: block: Shorten interrupt disabled regions
|
|
From: Thomas Gleixner <tglx@linutronix.de>
|
|
Date: Wed, 22 Jun 2011 19:47:02 +0200
|
|
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.16/older/patches-4.16.8-rt3.tar.xz
|
|
|
|
Commit 9c40cef2b799 ("sched: Move blk_schedule_flush_plug() out of
|
|
__schedule()") moved the blk_schedule_flush_plug() call out of the
|
|
interrupt/preempt disabled region in the scheduler. This allows to replace
|
|
local_irq_save/restore(flags) by local_irq_disable/enable() in
|
|
blk_flush_plug_list().
|
|
|
|
But it makes more sense to disable interrupts explicitly when the request
|
|
queue is locked and reenable them when the request is unlocked. This
|
|
shortens the interrupt disabled section which is important when the plug
|
|
list contains requests for more than one queue. The comment which claims
|
|
that interrupts are disabled around the loop is misleading, as the called
|
|
functions can reenable interrupts unconditionally anyway and obfuscates the
|
|
scope badly:
|
|
|
|
local_irq_save(flags);
|
|
spin_lock(q->queue_lock);
|
|
...
|
|
queue_unplugged(q...);
|
|
scsi_request_fn();
|
|
spin_unlock_irq(q->queue_lock);
|
|
|
|
-------------------^^^ ????
|
|
|
|
spin_lock_irq(q->queue_lock);
|
|
spin_unlock(q->queue_lock);
|
|
local_irq_restore(flags);
|
|
|
|
Aside of that the detached interrupt disabling is a constant pain for
|
|
PREEMPT_RT as it requires patching and special casing when RT is enabled
|
|
while with the spin_*_irq() variants this happens automatically.
|
|
|
|
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
|
Cc: Peter Zijlstra <peterz@infradead.org>
|
|
Cc: Tejun Heo <tj@kernel.org>
|
|
Cc: Jens Axboe <axboe@kernel.dk>
|
|
Cc: Linus Torvalds <torvalds@linux-foundation.org>
|
|
Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
|
|
---
|
|
block/blk-core.c | 12 ++----------
|
|
1 file changed, 2 insertions(+), 10 deletions(-)
|
|
|
|
--- a/block/blk-core.c
|
|
+++ b/block/blk-core.c
|
|
@@ -3520,7 +3520,7 @@ static void queue_unplugged(struct reque
|
|
blk_run_queue_async(q);
|
|
else
|
|
__blk_run_queue(q);
|
|
- spin_unlock(q->queue_lock);
|
|
+ spin_unlock_irq(q->queue_lock);
|
|
}
|
|
|
|
static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
|
|
@@ -3568,7 +3568,6 @@ EXPORT_SYMBOL(blk_check_plugged);
|
|
void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
|
|
{
|
|
struct request_queue *q;
|
|
- unsigned long flags;
|
|
struct request *rq;
|
|
LIST_HEAD(list);
|
|
unsigned int depth;
|
|
@@ -3588,11 +3587,6 @@ void blk_flush_plug_list(struct blk_plug
|
|
q = NULL;
|
|
depth = 0;
|
|
|
|
- /*
|
|
- * Save and disable interrupts here, to avoid doing it for every
|
|
- * queue lock we have to take.
|
|
- */
|
|
- local_irq_save(flags);
|
|
while (!list_empty(&list)) {
|
|
rq = list_entry_rq(list.next);
|
|
list_del_init(&rq->queuelist);
|
|
@@ -3605,7 +3599,7 @@ void blk_flush_plug_list(struct blk_plug
|
|
queue_unplugged(q, depth, from_schedule);
|
|
q = rq->q;
|
|
depth = 0;
|
|
- spin_lock(q->queue_lock);
|
|
+ spin_lock_irq(q->queue_lock);
|
|
}
|
|
|
|
/*
|
|
@@ -3632,8 +3626,6 @@ void blk_flush_plug_list(struct blk_plug
|
|
*/
|
|
if (q)
|
|
queue_unplugged(q, depth, from_schedule);
|
|
-
|
|
- local_irq_restore(flags);
|
|
}
|
|
|
|
void blk_finish_plug(struct blk_plug *plug)
|