path: root/block
author    Thomas Gleixner <tglx@linutronix.de>    2011-06-22 19:47:02 +0200
committer Steven Rostedt <rostedt@goodmis.org>    2012-06-12 14:39:21 -0400
commit    4c4eaef1d39d975749f3c7da11aca53749b99052 (patch)
tree      c8f5083064f8ac32600566bac63cde364f5b2c0b /block
parent    7fcd0c26f510056502801b98c8aac381a5041813 (diff)
block: Shorten interrupt disabled regions
Moving the blk_sched_flush_plug() call out of the interrupt/preempt
disabled region in the scheduler allows us to replace
local_irq_save/restore(flags) by local_irq_disable/enable() in
blk_flush_plug().

Now instead of doing this we disable interrupts explicitly when we lock
the request_queue and reenable them when we drop the lock. That allows
interrupts to be handled when the plug list contains requests for more
than one queue.

Aside of that this change makes the scope of the irq disabled region
more obvious. The current code confused the hell out of me when looking
at:

    local_irq_save(flags);
      spin_lock(q->queue_lock);
      ...
      queue_unplugged(q...);
        scsi_request_fn();
          spin_unlock(q->queue_lock);
          spin_lock(shost->host_lock);
          spin_unlock_irq(shost->host_lock);
          -------------------^^^ ????
          spin_lock_irq(q->queue_lock);
          spin_unlock(q->lock);
    local_irq_restore(flags);

Also add a comment to __blk_run_queue() documenting that q->request_fn()
can drop q->queue_lock and reenable interrupts, but must return with
q->queue_lock held and interrupts disabled.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
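The essence of the change is the lock/irq pattern in blk_flush_plug_list().
A condensed before/after sketch (kernel-style C; the loop body is
abbreviated, and the final unlock, which in the real code happens inside
queue_unplugged(), is folded inline here purely for illustration):

    /* Before: one long irq-off region spanning every queue on the plug list. */
    unsigned long flags;

    local_irq_save(flags);
    while (!list_empty(&list)) {
            struct request *rq = list_entry_rq(list.next);

            if (rq->q != q) {                       /* switching to another queue */
                    if (q)
                            spin_unlock(q->queue_lock);
                    q = rq->q;
                    spin_lock(q->queue_lock);       /* irqs are already off */
            }
            /* ... add rq to q ... */
    }
    if (q)
            spin_unlock(q->queue_lock);
    local_irq_restore(flags);

    /* After: interrupts are off only while a queue_lock is actually held. */
    while (!list_empty(&list)) {
            struct request *rq = list_entry_rq(list.next);

            if (rq->q != q) {
                    if (q)
                            spin_unlock_irq(q->queue_lock); /* irqs back on here */
                    q = rq->q;
                    spin_lock_irq(q->queue_lock);           /* irqs off again */
            }
            /* ... add rq to q ... */
    }
    if (q)
            spin_unlock_irq(q->queue_lock);

With the second form the CPU can service interrupts between queues, which
is what allows a plug list spanning several devices to be flushed without
keeping interrupts disabled for the whole walk.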
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c | 20
1 file changed, 8 insertions(+), 12 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 1f61b74867e4..0d947d0f4717 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -302,7 +302,11 @@ void __blk_run_queue(struct request_queue *q)
{
if (unlikely(blk_queue_stopped(q)))
return;
-
+ /*
+ * q->request_fn() can drop q->queue_lock and reenable
+ * interrupts, but must return with q->queue_lock held and
+ * interrupts disabled.
+ */
q->request_fn(q);
}
EXPORT_SYMBOL(__blk_run_queue);
@@ -2779,11 +2783,11 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
* this lock).
*/
if (from_schedule) {
- spin_unlock(q->queue_lock);
+ spin_unlock_irq(q->queue_lock);
blk_run_queue_async(q);
} else {
__blk_run_queue(q);
- spin_unlock(q->queue_lock);
+ spin_unlock_irq(q->queue_lock);
}
}
@@ -2809,7 +2813,6 @@ static void flush_plug_callbacks(struct blk_plug *plug)
void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
struct request_queue *q;
- unsigned long flags;
struct request *rq;
LIST_HEAD(list);
unsigned int depth;
@@ -2830,11 +2833,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
q = NULL;
depth = 0;
- /*
- * Save and disable interrupts here, to avoid doing it for every
- * queue lock we have to take.
- */
- local_irq_save(flags);
while (!list_empty(&list)) {
rq = list_entry_rq(list.next);
list_del_init(&rq->queuelist);
@@ -2847,7 +2845,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
queue_unplugged(q, depth, from_schedule);
q = rq->q;
depth = 0;
- spin_lock(q->queue_lock);
+ spin_lock_irq(q->queue_lock);
}
/*
@@ -2874,8 +2872,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
*/
if (q)
queue_unplugged(q, depth, from_schedule);
-
- local_irq_restore(flags);
}
void blk_finish_plug(struct blk_plug *plug)