From 8b6abf9fe6a4c8e764e434812cc9053bfb6ee191 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 22 Jun 2011 19:47:03 +0200
Subject: [PATCH 173/366] sched: Distangle worker accounting from rqlock

The worker accounting for cpu bound workers is plugged into the core
scheduler code and the wakeup code. This is not a hard requirement and
can be avoided by keeping track of the state in the workqueue code
itself.

Keep track of the sleeping state in the worker itself and call the
notifier before entering the core scheduler. There might be false
positives when the task is woken between that call and actually
scheduling, but that's not really different from scheduling and being
woken immediately after switching away. There is also no harm from
updating nr_running when the task returns from scheduling instead of
accounting it in the wakeup code.
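
In code, the change boils down to bracketing the context switch with
two plain hooks instead of hooking into ttwu_activate() and
__schedule() under rq->lock. A simplified sketch of the resulting flow
(the need_resched() loop, the io-plug flush and the WORKER_NOT_RUNNING
checks are elided here; the real code is in the diff below):

	asmlinkage __visible void __sched schedule(void)
	{
		struct task_struct *tsk = current;

		/* Going to sleep: notify the workqueue before rq->lock is taken. */
		if (tsk->flags & PF_WQ_WORKER)
			wq_worker_sleeping(tsk);  /* sets worker->sleeping, may wake an idle worker */

		__schedule(false);                /* the actual context switch */

		/* Running again: redo the accounting without holding rq->lock. */
		if (tsk->flags & PF_WQ_WORKER)
			wq_worker_running(tsk);   /* clears worker->sleeping, restores nr_running */
	}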

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110622174919.135236139@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 kernel/sched/core.c         | 80 +++++++++------------------------------------
 kernel/workqueue.c          | 55 +++++++++++++------------------
 kernel/workqueue_internal.h |  5 +--
 3 files changed, 41 insertions(+), 99 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 061effa..41814f6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1748,10 +1748,6 @@ static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_fl
 {
        activate_task(rq, p, en_flags);
        p->on_rq = TASK_ON_RQ_QUEUED;
-
-       /* if a worker is waking up, notify workqueue */
-       if (p->flags & PF_WQ_WORKER)
-               wq_worker_waking_up(p, cpu_of(rq));
 }
 
 /*
@@ -2090,52 +2086,6 @@ out:
 }
 
 /**
- * try_to_wake_up_local - try to wake up a local task with rq lock held
- * @p: the thread to be awakened
- *
- * Put @p on the run-queue if it's not already there. The caller must
- * ensure that this_rq() is locked, @p is bound to this_rq() and not
- * the current task.
- */
-static void try_to_wake_up_local(struct task_struct *p)
-{
-       struct rq *rq = task_rq(p);
-
-       if (WARN_ON_ONCE(rq != this_rq()) ||
-           WARN_ON_ONCE(p == current))
-               return;
-
-       lockdep_assert_held(&rq->lock);
-
-       if (!raw_spin_trylock(&p->pi_lock)) {
-               /*
-                * This is OK, because current is on_cpu, which avoids it being
-                * picked for load-balance and preemption/IRQs are still
-                * disabled avoiding further scheduler activity on it and we've
-                * not yet picked a replacement task.
-                */
-               lockdep_unpin_lock(&rq->lock);
-               raw_spin_unlock(&rq->lock);
-               raw_spin_lock(&p->pi_lock);
-               raw_spin_lock(&rq->lock);
-               lockdep_pin_lock(&rq->lock);
-       }
-
-       if (!(p->state & TASK_NORMAL))
-               goto out;
-
-       trace_sched_waking(p);
-
-       if (!task_on_rq_queued(p))
-               ttwu_activate(rq, p, ENQUEUE_WAKEUP);
-
-       ttwu_do_wakeup(rq, p, 0);
-       ttwu_stat(p, smp_processor_id(), 0);
-out:
-       raw_spin_unlock(&p->pi_lock);
-}
-
-/**
  * wake_up_process - Wake up a specific process
  * @p: The process to be woken up.
  *
@@ -3380,21 +3330,6 @@ static void __sched notrace __schedule(bool preempt)
                } else {
                        deactivate_task(rq, prev, DEQUEUE_SLEEP);
                        prev->on_rq = 0;
-
-                       /*
-                        * If a worker went to sleep, notify and ask workqueue
-                        * whether it wants to wake up a task to maintain
-                        * concurrency.
-                        * Only call wake up if prev isn't blocked on a sleeping
-                        * spin lock.
-                        */
-                       if (prev->flags & PF_WQ_WORKER && !prev->saved_state) {
-                               struct task_struct *to_wakeup;
-
-                               to_wakeup = wq_worker_sleeping(prev, cpu);
-                               if (to_wakeup)
-                                       try_to_wake_up_local(to_wakeup);
-                       }
                }
                switch_count = &prev->nvcsw;
        }
@@ -3427,6 +3362,14 @@ static inline void sched_submit_work(struct task_struct *tsk)
 {
        if (!tsk->state || tsk_is_pi_blocked(tsk))
                return;
+
+       /*
+        * If a worker went to sleep, notify and ask workqueue whether
+        * it wants to wake up a task to maintain concurrency.
+        */
+       if (tsk->flags & PF_WQ_WORKER)
+               wq_worker_sleeping(tsk);
+
       /*
        * If we are going to sleep and we have plugged IO queued,
        * make sure to submit it to avoid deadlocks.
@@ -3435,6 +3378,12 @@ static inline void sched_submit_work(struct task_struct *tsk)
               blk_schedule_flush_plug(tsk);
 }
 
+static void sched_update_worker(struct task_struct *tsk)
+{
+       if (tsk->flags & PF_WQ_WORKER)
+               wq_worker_running(tsk);
+}
+
 asmlinkage __visible void __sched schedule(void)
 {
        struct task_struct *tsk = current;
@@ -3445,6 +3394,7 @@ asmlinkage __visible void __sched schedule(void)
                __schedule(false);
                sched_preempt_enable_no_resched();
        } while (need_resched());
+       sched_update_worker(tsk);
 }
 EXPORT_SYMBOL(schedule);
 
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 492968a..71f6fe8 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -850,44 +850,31 @@ static void wake_up_worker(struct worker_pool *pool)
 }
 
 /**
- * wq_worker_waking_up - a worker is waking up
- * @task: task waking up
- * @cpu: CPU @task is waking up to
+ * wq_worker_running - a worker is running again
+ * @task: task returning from sleep
  *
- * This function is called during try_to_wake_up() when a worker is
- * being awoken.
- *
- * CONTEXT:
- * spin_lock_irq(rq->lock)
+ * This function is called when a worker returns from schedule()
  */
-void wq_worker_waking_up(struct task_struct *task, int cpu)
+void wq_worker_running(struct task_struct *task)
 {
        struct worker *worker = kthread_data(task);
 
-       if (!(worker->flags & WORKER_NOT_RUNNING)) {
-               WARN_ON_ONCE(worker->pool->cpu != cpu);
+       if (!worker->sleeping)
+               return;
+       if (!(worker->flags & WORKER_NOT_RUNNING))
                atomic_inc(&worker->pool->nr_running);
-       }
+       worker->sleeping = 0;
 }
 
 /**
  * wq_worker_sleeping - a worker is going to sleep
  * @task: task going to sleep
- * @cpu: CPU in question, must be the current CPU number
- *
- * This function is called during schedule() when a busy worker is
- * going to sleep.  Worker on the same cpu can be woken up by
- * returning pointer to its task.
- *
- * CONTEXT:
- * spin_lock_irq(rq->lock)
- *
- * Return:
- * Worker task on @cpu to wake up, %NULL if none.
+ * This function is called from schedule() when a busy worker is
+ * going to sleep.
  */
-struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
+void wq_worker_sleeping(struct task_struct *task)
 {
-       struct worker *worker = kthread_data(task), *to_wakeup = NULL;
+       struct worker *next, *worker = kthread_data(task);
        struct worker_pool *pool;
 
        /*
@@ -896,14 +883,15 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
        * checking NOT_RUNNING.
        */
       if (worker->flags & WORKER_NOT_RUNNING)
-               return NULL;
+               return;
 
        pool = worker->pool;
 
-       /* this can only happen on the local cpu */
-       if (WARN_ON_ONCE(cpu != raw_smp_processor_id() || pool->cpu != cpu))
-               return NULL;
+       if (WARN_ON_ONCE(worker->sleeping))
+               return;
 
+       worker->sleeping = 1;
+       spin_lock_irq(&pool->lock);
       /*
        * The counterpart of the following dec_and_test, implied mb,
        * worklist not empty test sequence is in insert_work().
@@ -916,9 +904,12 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
        * lock is safe.
        */
       if (atomic_dec_and_test(&pool->nr_running) &&
-           !list_empty(&pool->worklist))
-               to_wakeup = first_idle_worker(pool);
-       return to_wakeup ? to_wakeup->task : NULL;
+           !list_empty(&pool->worklist)) {
+               next = first_idle_worker(pool);
+               if (next)
+                       wake_up_process(next->task);
+       }
+       spin_unlock_irq(&pool->lock);
 }
 
 /**
diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
index 4521587..f000c4d 100644
--- a/kernel/workqueue_internal.h
+++ b/kernel/workqueue_internal.h
@@ -43,6 +43,7 @@ struct worker {
        unsigned long           last_active;    /* L: last active timestamp */
        unsigned int            flags;          /* X: flags */
        int                     id;             /* I: worker id */
+       int                     sleeping;       /* None */
 
        /*
        * Opaque string set with work_set_desc().  Printed out with task
@@ -68,7 +69,7 @@ static inline struct worker *current_wq_worker(void)
  * Scheduler hooks for concurrency managed workqueue.  Only to be used from
  * sched/core.c and workqueue.c.
  */
-void wq_worker_waking_up(struct task_struct *task, int cpu);
-struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu);
+void wq_worker_running(struct task_struct *task);
+void wq_worker_sleeping(struct task_struct *task);
 
 #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
-- 
1.9.1
