From 7f536bd95b67f43531a225ed4e4f538f08eb2493 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 14 Jul 2015 14:26:34 +0200
Subject: [PATCH 174/366] idr: Use local lock instead of preempt enable/disable

We need to protect the per-CPU preload variables and prevent migration
while they are manipulated.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/idr.h |  4 ++++
 lib/idr.c           | 43 +++++++++++++++++++++++++++++++++++++------
 2 files changed, 41 insertions(+), 6 deletions(-)

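[ Editor's note: on a !RT kernel the local lock helpers still compile down
  to preempt_disable()/preempt_enable(), so behaviour there is unchanged.
  On PREEMPT_RT_FULL the local lock is a per-CPU sleeping lock: the preload
  section becomes preemptible, but migration is still prevented and access
  to the per-CPU preload data stays serialized. A minimal sketch of the
  pattern, using the locallock API this patch relies on (my_counter,
  my_lock and my_update are hypothetical names, not part of the patch):

	#include <linux/percpu.h>
	#include <linux/locallock.h>

	static DEFINE_PER_CPU(int, my_counter);
	static DEFINE_LOCAL_IRQ_LOCK(my_lock);

	static void my_update(void)
	{
		/* preempt_disable() on !RT, per-CPU sleeping lock on RT */
		local_lock(my_lock);
		__this_cpu_inc(my_counter);	/* no migration, serialized */
		local_unlock(my_lock);
	}
]
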
diff --git a/include/linux/idr.h b/include/linux/idr.h
index 013fd9b..f62be0a 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -95,10 +95,14 @@ bool idr_is_empty(struct idr *idp);
  * Each idr_preload() should be matched with an invocation of this
  * function.  See idr_preload() for details.
  */
+#ifdef CONFIG_PREEMPT_RT_FULL
+void idr_preload_end(void);
+#else
 static inline void idr_preload_end(void)
 {
        preempt_enable();
 }
+#endif
 
 /**
  * idr_find - return pointer for given id
diff --git a/lib/idr.c b/lib/idr.c
index 6098336..9decbe9 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -30,6 +30,7 @@
 #include <linux/idr.h>
 #include <linux/spinlock.h>
 #include <linux/percpu.h>
+#include <linux/locallock.h>
 
 #define MAX_IDR_SHIFT          (sizeof(int) * 8 - 1)
 #define MAX_IDR_BIT            (1U << MAX_IDR_SHIFT)
@@ -45,6 +46,37 @@ static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head);
 static DEFINE_PER_CPU(int, idr_preload_cnt);
 static DEFINE_SPINLOCK(simple_ida_lock);
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+static DEFINE_LOCAL_IRQ_LOCK(idr_lock);
+
+static inline void idr_preload_lock(void)
+{
+       local_lock(idr_lock);
+}
+
+static inline void idr_preload_unlock(void)
+{
+       local_unlock(idr_lock);
+}
+
+void idr_preload_end(void)
+{
+       idr_preload_unlock();
+}
+EXPORT_SYMBOL(idr_preload_end);
+#else
+static inline void idr_preload_lock(void)
+{
+       preempt_disable();
+}
+
+static inline void idr_preload_unlock(void)
+{
+       preempt_enable();
+}
+#endif
+
+
 /* the maximum ID which can be allocated given idr->layers */
 static int idr_max(int layers)
 {
@@ -115,14 +147,14 @@ static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
         * context.  See idr_preload() for details.
         */
        if (!in_interrupt()) {
-               preempt_disable();
+               idr_preload_lock();
                new = __this_cpu_read(idr_preload_head);
                if (new) {
                        __this_cpu_write(idr_preload_head, new->ary[0]);
                        __this_cpu_dec(idr_preload_cnt);
                        new->ary[0] = NULL;
                }
-               preempt_enable();
+               idr_preload_unlock();
                if (new)
                        return new;
        }
@@ -366,7 +398,6 @@ static void idr_fill_slot(struct idr *idr, void *ptr, int id,
        idr_mark_full(pa, id);
 }
 
-
 /**
  * idr_preload - preload for idr_alloc()
  * @gfp_mask: allocation mask to use for preloading
@@ -401,7 +432,7 @@ void idr_preload(gfp_t gfp_mask)
        WARN_ON_ONCE(in_interrupt());
        might_sleep_if(gfpflags_allow_blocking(gfp_mask));
 
-       preempt_disable();
+       idr_preload_lock();
 
        /*
         * idr_alloc() is likely to succeed w/o full idr_layer buffer and
@@ -413,9 +444,9 @@ void idr_preload(gfp_t gfp_mask)
        while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
                struct idr_layer *new;
 
-               preempt_enable();
+               idr_preload_unlock();
                new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
-               preempt_disable();
+               idr_preload_lock();
                if (!new)
                        break;
 
-- 
1.9.1
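
[ Editor's note: the change is transparent to idr users; the documented
  preload pattern is unchanged. A sketch of a typical caller (my_idr,
  my_lock and my_install are hypothetical names, not part of the patch):

	#include <linux/gfp.h>
	#include <linux/idr.h>
	#include <linux/spinlock.h>

	static DEFINE_IDR(my_idr);
	static DEFINE_SPINLOCK(my_lock);	/* caller's own idr protection */

	static int my_install(void *ptr)
	{
		int id;

		idr_preload(GFP_KERNEL);	/* now takes the local lock on RT */
		spin_lock(&my_lock);
		id = idr_alloc(&my_idr, ptr, 0, 0, GFP_NOWAIT);
		spin_unlock(&my_lock);
		idr_preload_end();		/* drops the local lock on RT */

		return id;
	}
]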