From 94a0002a838d2813d37919398a2964ca32ada13e Mon Sep 17 00:00:00 2001
From: Luiz Capitulino <lcapitulino@redhat.com>
Date: Fri, 27 May 2016 15:03:28 +0200
Subject: [PATCH 348/365] mm: perform lru_add_drain_all() remotely

lru_add_drain_all() works by scheduling lru_add_drain_cpu() to run
on all CPUs that have non-empty LRU pagevecs and then waiting for
the scheduled work to complete. However, workqueue threads may never
have the chance to run on a CPU that's running a SCHED_FIFO task.
This causes lru_add_drain_all() to block forever.
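
For illustration, the starvation can be provoked from userspace. The
following is a hypothetical reproducer sketch, not part of this patch:
it assumes >= 2 CPUs, CAP_SYS_NICE, and RT throttling disabled
("sysctl kernel.sched_rt_runtime_us=-1"). The parent leaves pages in
CPU 1's LRU pagevec and then hogs CPU 1 as SCHED_FIFO; the child's
mlock() should then block in lru_add_drain_all() on a pre-patch
kernel.

  #define _GNU_SOURCE
  #include <sched.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>
  #include <sys/mman.h>
  #include <unistd.h>

  #define SZ (1 << 20)

  int main(void)
  {
  	cpu_set_t set;
  	struct sched_param sp = { .sched_priority = 50 };
  	void *buf;

  	if (fork() == 0) {
  		/* Child: default policy, any CPU. Give the parent
  		 * time to set up, then trigger a global drain;
  		 * mlock() ends up in lru_add_drain_all(). */
  		sleep(2);
  		buf = malloc(SZ);
  		memset(buf, 0, SZ);
  		mlock(buf, SZ);		/* hangs on a pre-patch kernel */
  		printf("drain finished\n");
  		_exit(0);
  	}

  	/* Parent: pin to CPU 1 and fault in anonymous pages so
  	 * CPU 1's lru_add_pvec is left non-empty. */
  	CPU_ZERO(&set);
  	CPU_SET(1, &set);
  	sched_setaffinity(0, sizeof(set), &set);
  	buf = malloc(SZ);
  	memset(buf, 0, SZ);

  	/* Become a SCHED_FIFO hog: the workqueue thread on CPU 1
  	 * never runs, so a workqueue-based drain never finishes. */
  	sched_setscheduler(0, SCHED_FIFO, &sp);
  	for (;;)
  		;
  }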

This commit solves this problem by changing lru_add_drain_all()
to drain the LRU pagevecs of remote CPUs. This is done by grabbing
swapvec_lock and calling lru_add_drain_cpu().
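
On PREEMPT_RT, swapvec_lock is a local lock backed by a per-CPU
sleeping spinlock rather than a preempt-disable section, which is
what makes taking another CPU's instance legitimate. As a rough
userspace analogy of the resulting scheme (illustrative only; the
structures and names below are made up, not kernel code), each
per-CPU queue gets its own lock and the draining thread flushes
remote queues itself instead of waiting on per-CPU worker threads:

  #include <pthread.h>
  #include <stdio.h>

  #define NCPU	4

  struct pvec {
  	pthread_mutex_t lock;	/* plays the role of swapvec_lock */
  	int count;		/* pending pages in this "CPU" queue */
  };

  static struct pvec pvecs[NCPU] = {
  	[0 ... NCPU - 1] = { .lock = PTHREAD_MUTEX_INITIALIZER }
  };

  static void drain_cpu(int cpu)	/* role of lru_add_drain_cpu() */
  {
  	pvecs[cpu].count = 0;		/* "move pages to the LRU" */
  }

  static void drain_all(void)		/* role of lru_add_drain_all() */
  {
  	for (int cpu = 0; cpu < NCPU; cpu++) {
  		if (!pvecs[cpu].count)
  			continue;
  		/* Remote drain: correctness comes from the lock, not
  		 * from running on the target CPU, so nothing depends
  		 * on that CPU's threads ever being scheduled. */
  		pthread_mutex_lock(&pvecs[cpu].lock);
  		drain_cpu(cpu);
  		pthread_mutex_unlock(&pvecs[cpu].lock);
  	}
  }

  int main(void)
  {
  	pvecs[1].count = 3;	/* pretend CPU 1 has pending pages */
  	drain_all();
  	printf("cpu1 pending after drain: %d\n", pvecs[1].count);
  	return 0;
  }

The trade-off mirrors the patch: the drain runs in the caller's
context, so the per-CPU state must be protected by a lock that remote
CPUs are allowed to take, not by preemption or IRQ disabling alone.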

PS: This is based on an idea and initial implementation by
    Rik van Riel.

Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Luiz Capitulino <lcapitulino@redhat.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
 mm/swap.c | 37 ++++++++++++++++++++++++++++++-------
 1 file changed, 30 insertions(+), 7 deletions(-)

diff --git a/mm/swap.c b/mm/swap.c
index ca194ae..ad16649 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -821,9 +821,15 @@ void lru_add_drain_cpu(int cpu)
 		unsigned long flags;
 
 		/* No harm done if a racing interrupt already did this */
+#ifdef CONFIG_PREEMPT_RT_BASE
+		local_lock_irqsave_on(rotate_lock, flags, cpu);
+		pagevec_move_tail(pvec);
+		local_unlock_irqrestore_on(rotate_lock, flags, cpu);
+#else
 		local_lock_irqsave(rotate_lock, flags);
 		pagevec_move_tail(pvec);
 		local_unlock_irqrestore(rotate_lock, flags);
+#endif
 	}
 
 	pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
@@ -866,12 +872,32 @@ void lru_add_drain(void)
 	local_unlock_cpu(swapvec_lock);
 }
 
+
+#ifdef CONFIG_PREEMPT_RT_BASE
+static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
+{
+	local_lock_on(swapvec_lock, cpu);
+	lru_add_drain_cpu(cpu);
+	local_unlock_on(swapvec_lock, cpu);
+}
+
+#else
+
 static void lru_add_drain_per_cpu(struct work_struct *dummy)
 {
 	lru_add_drain();
 }
 
 static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
+static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
+{
+	struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
+
+	INIT_WORK(work, lru_add_drain_per_cpu);
+	schedule_work_on(cpu, work);
+	cpumask_set_cpu(cpu, has_work);
+}
+#endif
 
 void lru_add_drain_all(void)
 {
@@ -884,20 +910,17 @@ void lru_add_drain_all(void)
 	cpumask_clear(&has_work);
 
 	for_each_online_cpu(cpu) {
-		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
-
 		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
 		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
 		    pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
-		    need_activate_page_drain(cpu)) {
-			INIT_WORK(work, lru_add_drain_per_cpu);
-			schedule_work_on(cpu, work);
-			cpumask_set_cpu(cpu, &has_work);
-		}
+		    need_activate_page_drain(cpu))
+			remote_lru_add_drain(cpu, &has_work);
 	}
 
+#ifndef CONFIG_PREEMPT_RT_BASE
 	for_each_cpu(cpu, &has_work)
 		flush_work(&per_cpu(lru_add_drain_work, cpu));
+#endif
 
 	put_online_cpus();
 	mutex_unlock(&lock);
-- 
2.7.4