From 4fafe4ab112ed15ebd2ba11b319dda038fbe2ac9 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 22 Jun 2011 19:47:02 +0200
Subject: [PATCH 006/366] block: Shorten interrupt disabled regions

Moving the blk_sched_flush_plug() call out of the interrupt/preempt
disabled region in the scheduler allows us to replace
local_irq_save/restore(flags) by local_irq_disable/enable() in
blk_flush_plug().

Now instead of doing this we disable interrupts explicitly when we
lock the request_queue and reenable them when we drop the lock. That
allows interrupts to be handled when the plug list contains requests
for more than one queue.

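A condensed sketch of the pattern, before and after (illustrative
only; in the real blk_flush_plug_list() loop the unlock happens via
queue_unplugged(), see the diff below):

  /* Before: one irq-off region spans the whole plug list */
  local_irq_save(flags);
  while (!list_empty(&list)) {
          ...
          spin_lock(q->queue_lock);        /* irqs are already off */
          ...
          spin_unlock(q->queue_lock);      /* irqs stay off */
  }
  local_irq_restore(flags);

  /* After: irqs are off only while a queue lock is held */
  while (!list_empty(&list)) {
          ...
          spin_lock_irq(q->queue_lock);    /* disables irqs */
          ...
          spin_unlock_irq(q->queue_lock);  /* irqs can be serviced here */
  }
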
Aside from that, this change makes the scope of the irq disabled
region more obvious. The current code confused the hell out of me when
looking at:

 local_irq_save(flags);
   spin_lock(q->queue_lock);
   ...
   queue_unplugged(q...);
     scsi_request_fn();
       spin_unlock(q->queue_lock);
       spin_lock(shost->host_lock);
       spin_unlock_irq(shost->host_lock);

-------------------^^^ ????

       spin_lock_irq(q->queue_lock);
       spin_unlock(q->queue_lock);
 local_irq_restore(flags);

Also add a comment to __blk_run_queue() documenting that
q->request_fn() can drop q->queue_lock and reenable interrupts, but
must return with q->queue_lock held and interrupts disabled.

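Restated as a sketch (illustrative only; __blk_run_queue_sketch is a
made-up name, not the comment or code the patch adds):

  /* Caller must hold q->queue_lock with interrupts disabled. */
  static void __blk_run_queue_sketch(struct request_queue *q)
  {
          /*
           * q->request_fn() may drop q->queue_lock and re-enable
           * interrupts internally (as scsi_request_fn() does), but it
           * must return with q->queue_lock held and interrupts
           * disabled again.
           */
          q->request_fn(q);
  }
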
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
---
 block/blk-core.c | 12 ++----------
 1 file changed, 2 insertions(+), 10 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index ec3be22..b83af44 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -3200,7 +3200,7 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
 		blk_run_queue_async(q);
 	else
 		__blk_run_queue(q);
-	spin_unlock(q->queue_lock);
+	spin_unlock_irq(q->queue_lock);
 }
 
 static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
@@ -3248,7 +3248,6 @@ EXPORT_SYMBOL(blk_check_plugged);
 void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
 	struct request_queue *q;
-	unsigned long flags;
 	struct request *rq;
 	LIST_HEAD(list);
 	unsigned int depth;
@@ -3268,11 +3267,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 	q = NULL;
 	depth = 0;
 
-	/*
-	 * Save and disable interrupts here, to avoid doing it for every
-	 * queue lock we have to take.
-	 */
-	local_irq_save(flags);
 	while (!list_empty(&list)) {
 		rq = list_entry_rq(list.next);
 		list_del_init(&rq->queuelist);
@@ -3285,7 +3279,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 				queue_unplugged(q, depth, from_schedule);
 			q = rq->q;
 			depth = 0;
-			spin_lock(q->queue_lock);
+			spin_lock_irq(q->queue_lock);
 		}
 
 		/*
@@ -3312,8 +3306,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 	 */
 	if (q)
 		queue_unplugged(q, depth, from_schedule);
-
-	local_irq_restore(flags);
 }
 
 void blk_finish_plug(struct blk_plug *plug)
-- 
1.9.1
