1 From 317acc8dec1c5c92f87c0636019b3ccffca9a740 Mon Sep 17 00:00:00 2001
2 From: Ingo Molnar <mingo@elte.hu>
3 Date: Fri, 3 Jul 2009 08:29:51 -0500
4 Subject: [PATCH 061/366] mm/swap: Convert to percpu locked
6 Replace global locks (get_cpu + local_irq_save) with "local_locks()".
7 Currently there is one for "rotate" and one for "swap".
9 Signed-off-by: Ingo Molnar <mingo@elte.hu>
10 Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
12 mm/swap.c | 34 ++++++++++++++++++++--------------
13 1 file changed, 20 insertions(+), 14 deletions(-)
15 diff --git a/mm/swap.c b/mm/swap.c
16 index 39395fb..93657a6 100644
20 #include <linux/memcontrol.h>
21 #include <linux/gfp.h>
22 #include <linux/uio.h>
23 +#include <linux/locallock.h>
24 #include <linux/hugetlb.h>
25 #include <linux/page_idle.h>
27 @@ -46,6 +47,9 @@ static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
28 static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
29 static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
31 +static DEFINE_LOCAL_IRQ_LOCK(rotate_lock);
32 +static DEFINE_LOCAL_IRQ_LOCK(swapvec_lock);
35 * This path almost never happens for VM activity - pages are normally
36 * freed via pagevecs. But it gets used by networking.
37 @@ -481,11 +485,11 @@ void rotate_reclaimable_page(struct page *page)
41 - local_irq_save(flags);
42 + local_lock_irqsave(rotate_lock, flags);
43 pvec = this_cpu_ptr(&lru_rotate_pvecs);
44 if (!pagevec_add(pvec, page))
45 pagevec_move_tail(pvec);
46 - local_irq_restore(flags);
47 + local_unlock_irqrestore(rotate_lock, flags);
51 @@ -536,12 +540,13 @@ static bool need_activate_page_drain(int cpu)
52 void activate_page(struct page *page)
54 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
55 - struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
56 + struct pagevec *pvec = &get_locked_var(swapvec_lock,
57 + activate_page_pvecs);
60 if (!pagevec_add(pvec, page))
61 pagevec_lru_move_fn(pvec, __activate_page, NULL);
62 - put_cpu_var(activate_page_pvecs);
63 + put_locked_var(swapvec_lock, activate_page_pvecs);
67 @@ -567,7 +572,7 @@ void activate_page(struct page *page)
69 static void __lru_cache_activate_page(struct page *page)
71 - struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
72 + struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
76 @@ -589,7 +594,7 @@ static void __lru_cache_activate_page(struct page *page)
80 - put_cpu_var(lru_add_pvec);
81 + put_locked_var(swapvec_lock, lru_add_pvec);
85 @@ -630,13 +635,13 @@ EXPORT_SYMBOL(mark_page_accessed);
87 static void __lru_cache_add(struct page *page)
89 - struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
90 + struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
93 if (!pagevec_space(pvec))
94 __pagevec_lru_add(pvec);
95 pagevec_add(pvec, page);
96 - put_cpu_var(lru_add_pvec);
97 + put_locked_var(swapvec_lock, lru_add_pvec);
101 @@ -816,9 +821,9 @@ void lru_add_drain_cpu(int cpu)
104 /* No harm done if a racing interrupt already did this */
105 - local_irq_save(flags);
106 + local_lock_irqsave(rotate_lock, flags);
107 pagevec_move_tail(pvec);
108 - local_irq_restore(flags);
109 + local_unlock_irqrestore(rotate_lock, flags);
112 pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
113 @@ -846,18 +851,19 @@ void deactivate_file_page(struct page *page)
116 if (likely(get_page_unless_zero(page))) {
117 - struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);
118 + struct pagevec *pvec = &get_locked_var(swapvec_lock,
119 + lru_deactivate_file_pvecs);
121 if (!pagevec_add(pvec, page))
122 pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
123 - put_cpu_var(lru_deactivate_file_pvecs);
124 + put_locked_var(swapvec_lock, lru_deactivate_file_pvecs);
128 void lru_add_drain(void)
130 - lru_add_drain_cpu(get_cpu());
132 + lru_add_drain_cpu(local_lock_cpu(swapvec_lock));
133 + local_unlock_cpu(swapvec_lock);
136 static void lru_add_drain_per_cpu(struct work_struct *dummy)