rt-patches/0109-softirq-Check-preemption-after-reenabling-interrupts.patch
From f3697f0972f10a5c91aefa7f48cbc6a66388610d Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 13 Nov 2011 17:17:09 +0100
Subject: [PATCH 109/366] softirq: Check preemption after reenabling interrupts

raise_softirq_irqoff() disables interrupts and wakes the softirq
daemon, but after reenabling interrupts there is no preemption check,
so the execution of the softirq thread might be delayed arbitrarily.

In principle we could add that check to local_irq_enable/restore, but
that's overkill as the raise_softirq_irqoff() sections are the only
ones which show this behaviour.

Reported-by: Carsten Emde <cbe@osadl.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 block/blk-iopoll.c      | 3 +++
 block/blk-softirq.c     | 3 +++
 include/linux/preempt.h | 3 +++
 net/core/dev.c          | 7 +++++++
 4 files changed, 16 insertions(+)

diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
index 0736729..3e21e31 100644
--- a/block/blk-iopoll.c
+++ b/block/blk-iopoll.c
@@ -35,6 +35,7 @@ void blk_iopoll_sched(struct blk_iopoll *iop)
        list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
        __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
        local_irq_restore(flags);
+       preempt_check_resched_rt();
 }
 EXPORT_SYMBOL(blk_iopoll_sched);
 
@@ -132,6 +133,7 @@ static void blk_iopoll_softirq(struct softirq_action *h)
                __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
 
        local_irq_enable();
+       preempt_check_resched_rt();
 }
 
 /**
@@ -201,6 +203,7 @@ static int blk_iopoll_cpu_notify(struct notifier_block *self,
                                 this_cpu_ptr(&blk_cpu_iopoll));
                __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
                local_irq_enable();
+               preempt_check_resched_rt();
        }
 
        return NOTIFY_OK;
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 53b1737..81c3c0a 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -51,6 +51,7 @@ static void trigger_softirq(void *data)
                raise_softirq_irqoff(BLOCK_SOFTIRQ);
 
        local_irq_restore(flags);
+       preempt_check_resched_rt();
 }
 
 /*
@@ -93,6 +94,7 @@ static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
                                 this_cpu_ptr(&blk_cpu_done));
                raise_softirq_irqoff(BLOCK_SOFTIRQ);
                local_irq_enable();
+               preempt_check_resched_rt();
        }
 
        return NOTIFY_OK;
@@ -150,6 +152,7 @@ do_local:
                goto do_local;
 
        local_irq_restore(flags);
+       preempt_check_resched_rt();
 }
 
 /**
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 210bd26..2b761d0 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -160,8 +160,10 @@ do { \
 
 #ifdef CONFIG_PREEMPT_RT_BASE
 # define preempt_enable_no_resched() sched_preempt_enable_no_resched()
+# define preempt_check_resched_rt() preempt_check_resched()
 #else
 # define preempt_enable_no_resched() preempt_enable()
+# define preempt_check_resched_rt() barrier();
 #endif
 
 #define preemptible()  (preempt_count() == 0 && !irqs_disabled())
@@ -232,6 +234,7 @@ do { \
 #define preempt_disable_notrace()              barrier()
 #define preempt_enable_no_resched_notrace()    barrier()
 #define preempt_enable_notrace()               barrier()
+#define preempt_check_resched_rt()             barrier()
 #define preemptible()                          0
 
 #endif /* CONFIG_PREEMPT_COUNT */
diff --git a/net/core/dev.c b/net/core/dev.c
index 145731a..98997de 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2246,6 +2246,7 @@ static inline void __netif_reschedule(struct Qdisc *q)
        sd->output_queue_tailp = &q->next_sched;
        raise_softirq_irqoff(NET_TX_SOFTIRQ);
        local_irq_restore(flags);
+       preempt_check_resched_rt();
 }
 
 void __netif_schedule(struct Qdisc *q)
@@ -2327,6 +2328,7 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
        __this_cpu_write(softnet_data.completion_queue, skb);
        raise_softirq_irqoff(NET_TX_SOFTIRQ);
        local_irq_restore(flags);
+       preempt_check_resched_rt();
 }
 EXPORT_SYMBOL(__dev_kfree_skb_irq);
 
@@ -3525,6 +3527,7 @@ drop:
        rps_unlock(sd);
 
        local_irq_restore(flags);
+       preempt_check_resched_rt();
 
        atomic_long_inc(&skb->dev->rx_dropped);
        kfree_skb(skb);
@@ -4535,6 +4538,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
                sd->rps_ipi_list = NULL;
 
                local_irq_enable();
+               preempt_check_resched_rt();
 
                /* Send pending IPI's to kick RPS processing on remote cpus. */
                while (remsd) {
@@ -4548,6 +4552,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
        } else
 #endif
                local_irq_enable();
+       preempt_check_resched_rt();
 }
 
 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
@@ -4629,6 +4634,7 @@ void __napi_schedule(struct napi_struct *n)
        local_irq_save(flags);
        ____napi_schedule(this_cpu_ptr(&softnet_data), n);
        local_irq_restore(flags);
+       preempt_check_resched_rt();
 }
 EXPORT_SYMBOL(__napi_schedule);
 
@@ -7485,6 +7491,7 @@ static int dev_cpu_callback(struct notifier_block *nfb,
 
        raise_softirq_irqoff(NET_TX_SOFTIRQ);
        local_irq_enable();
+       preempt_check_resched_rt();
 
        /* Process offline CPU's input_pkt_queue */
        while ((skb = __skb_dequeue(&oldsd->process_queue))) {
-- 
1.9.1
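
Every hunk above adds the same pattern: raise a softirq while interrupts are
off, re-enable interrupts, then explicitly check whether the wakeup made a
reschedule necessary. The sketch below is a minimal userspace model of that
pattern for readers outside the kernel tree, not kernel code: the names
mirror the kernel's, but the bodies are stand-in assumptions that only
simulate the interrupt-disable state and the need-resched flag (the real
local_irq_save/restore also take a flags argument, omitted here).

#include <stdbool.h>
#include <stdio.h>

static bool irqs_disabled_flag; /* models the CPU's interrupt-disable state */
static bool need_resched_flag;  /* models the need-resched flag set by a wakeup */

static void local_irq_save(void)    { irqs_disabled_flag = true; }
static void local_irq_restore(void) { irqs_disabled_flag = false; }

/* Waking the softirq thread marks a reschedule as pending, but with
 * interrupts off nothing can act on it yet. */
static void raise_softirq_irqoff(void)
{
	need_resched_flag = true;
}

/* The helper the patch introduces: once interrupts are back on, let the
 * freshly woken softirq thread run now rather than at whatever later
 * preemption point happens to come next. */
static void preempt_check_resched_rt(void)
{
	if (!irqs_disabled_flag && need_resched_flag) {
		need_resched_flag = false;
		printf("resched: softirq thread runs immediately\n");
	}
}

int main(void)
{
	local_irq_save();
	raise_softirq_irqoff();
	local_irq_restore();
	/* Without this call the wakeup would sit until the next
	 * preemption check -- the arbitrary delay the patch removes. */
	preempt_check_resched_rt();
	return 0;
}

As the include/linux/preempt.h hunk shows, on configurations without
CONFIG_PREEMPT_RT_BASE the helper compiles down to barrier(), so the added
call sites cost nothing on non-RT kernels.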