From 317acc8dec1c5c92f87c0636019b3ccffca9a740 Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:29:51 -0500
Subject: [PATCH 061/366] mm/swap: Convert to percpu locked

Replace global locks (get_cpu + local_irq_save) with "local_locks()".
Currently there is one for "rotate" and one for "swap".

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 mm/swap.c | 34 ++++++++++++++++++++--------------
 1 file changed, 20 insertions(+), 14 deletions(-)

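[ Note, not part of the patch: a minimal sketch of the conversion pattern
  applied below, assuming the -rt local-lock API from <linux/locallock.h>
  that the patch starts including. On non-RT kernels local_lock_irqsave()
  and get_locked_var() are expected to fall back to local_irq_save() and
  get_cpu_var(), so behaviour there should be unchanged; on PREEMPT_RT the
  per-CPU local lock is a sleeping lock, which keeps the critical section
  preemptible. The pagevec and lock names below are made up for
  illustration only.

      /* One per-CPU pagevec and the local lock that protects it
       * (hypothetical names, mirroring lru_rotate_pvecs/rotate_lock). */
      static DEFINE_PER_CPU(struct pagevec, example_pvecs);
      static DEFINE_LOCAL_IRQ_LOCK(example_lock);

      static void example_add(struct page *page)
      {
              unsigned long flags;

              /* Was: local_irq_save(flags); */
              local_lock_irqsave(example_lock, flags);
              pagevec_add(this_cpu_ptr(&example_pvecs), page);
              /* Was: local_irq_restore(flags); */
              local_unlock_irqrestore(example_lock, flags);
      }
]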
diff --git a/mm/swap.c b/mm/swap.c
index 39395fb..93657a6 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -31,6 +31,7 @@
 #include <linux/memcontrol.h>
 #include <linux/gfp.h>
 #include <linux/uio.h>
+#include <linux/locallock.h>
 #include <linux/hugetlb.h>
 #include <linux/page_idle.h>
 
@@ -46,6 +47,9 @@ static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
 static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
 static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
 
+static DEFINE_LOCAL_IRQ_LOCK(rotate_lock);
+static DEFINE_LOCAL_IRQ_LOCK(swapvec_lock);
+
 /*
  * This path almost never happens for VM activity - pages are normally
  * freed via pagevecs.  But it gets used by networking.
@@ -481,11 +485,11 @@ void rotate_reclaimable_page(struct page *page)
                unsigned long flags;
 
                page_cache_get(page);
-               local_irq_save(flags);
+               local_lock_irqsave(rotate_lock, flags);
                pvec = this_cpu_ptr(&lru_rotate_pvecs);
                if (!pagevec_add(pvec, page))
                        pagevec_move_tail(pvec);
-               local_irq_restore(flags);
+               local_unlock_irqrestore(rotate_lock, flags);
        }
 }
 
@@ -536,12 +540,13 @@ static bool need_activate_page_drain(int cpu)
 void activate_page(struct page *page)
 {
        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
-               struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
+               struct pagevec *pvec = &get_locked_var(swapvec_lock,
+                                                      activate_page_pvecs);
 
                page_cache_get(page);
                if (!pagevec_add(pvec, page))
                        pagevec_lru_move_fn(pvec, __activate_page, NULL);
-               put_cpu_var(activate_page_pvecs);
+               put_locked_var(swapvec_lock, activate_page_pvecs);
        }
 }
 
@@ -567,7 +572,7 @@ void activate_page(struct page *page)
 
 static void __lru_cache_activate_page(struct page *page)
 {
-       struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
+       struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
        int i;
 
        /*
@@ -589,7 +594,7 @@ static void __lru_cache_activate_page(struct page *page)
                }
        }
 
-       put_cpu_var(lru_add_pvec);
+       put_locked_var(swapvec_lock, lru_add_pvec);
 }
 
 /*
@@ -630,13 +635,13 @@ EXPORT_SYMBOL(mark_page_accessed);
 
 static void __lru_cache_add(struct page *page)
 {
-       struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
+       struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
 
        page_cache_get(page);
        if (!pagevec_space(pvec))
                __pagevec_lru_add(pvec);
        pagevec_add(pvec, page);
-       put_cpu_var(lru_add_pvec);
+       put_locked_var(swapvec_lock, lru_add_pvec);
 }
 
 /**
@@ -816,9 +821,9 @@ void lru_add_drain_cpu(int cpu)
                unsigned long flags;
 
                /* No harm done if a racing interrupt already did this */
-               local_irq_save(flags);
+               local_lock_irqsave(rotate_lock, flags);
                pagevec_move_tail(pvec);
-               local_irq_restore(flags);
+               local_unlock_irqrestore(rotate_lock, flags);
        }
 
        pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
@@ -846,18 +851,19 @@ void deactivate_file_page(struct page *page)
                return;
 
        if (likely(get_page_unless_zero(page))) {
-               struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);
+               struct pagevec *pvec = &get_locked_var(swapvec_lock,
+                                                      lru_deactivate_file_pvecs);
 
                if (!pagevec_add(pvec, page))
                        pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
-               put_cpu_var(lru_deactivate_file_pvecs);
+               put_locked_var(swapvec_lock, lru_deactivate_file_pvecs);
        }
 }
 
 void lru_add_drain(void)
 {
-       lru_add_drain_cpu(get_cpu());
-       put_cpu();
+       lru_add_drain_cpu(local_lock_cpu(swapvec_lock));
+       local_unlock_cpu(swapvec_lock);
 }
 
 static void lru_add_drain_per_cpu(struct work_struct *dummy)
-- 
1.9.1