From 55c12e696baa5f6e39f8bf8999f8d69e0a21f5c8 Mon Sep 17 00:00:00 2001
From: Mike Galbraith <umgwanakikbuti@gmail.com>
Date: Thu, 31 Mar 2016 04:08:28 +0200
Subject: [PATCH 331/366] drivers/block/zram: Replace bit spinlocks with
 rtmutex for -rt

They're nondeterministic, and lead to ___might_sleep() splats in -rt.
OTOH, they're a lot less wasteful than an rtmutex per page.

Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 drivers/block/zram/zram_drv.c | 30 ++++++++++++++++--------------
 drivers/block/zram/zram_drv.h | 41 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 57 insertions(+), 14 deletions(-)

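Note: everything below the "---" marker is dropped by git am, so this
annotation never becomes part of the commit. The patch keeps the cheap
bit spinlock for !RT builds and gives PREEMPT_RT a per-entry spinlock_t,
which is an rtmutex-backed sleeping lock there; the ZRAM_ACCESS bit in
table->value is still set and cleared under the lock, so it stays
consistent with the lock state. A minimal sketch of how a caller uses
the new helpers; example_free_slot() is hypothetical, modeled on the
zram_slot_free_notify() hunk below:

	static void example_free_slot(struct zram *zram, u32 index)
	{
		struct zram_meta *meta = zram->meta;

		/* Spins on !RT; may sleep on -rt, hence no bit_spin_lock(). */
		zram_lock_table(&meta->table[index]);
		zram_free_page(zram, index);
		zram_unlock_table(&meta->table[index]);
	}
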
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 1770c45..39d639c 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -520,6 +520,8 @@ static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
                goto out_error;
        }
 
+       zram_meta_init_table_locks(meta, disksize);
+
        return meta;
 
 out_error:
@@ -568,12 +570,12 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
        unsigned long handle;
        size_t size;
 
-       bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+       zram_lock_table(&meta->table[index]);
        handle = meta->table[index].handle;
        size = zram_get_obj_size(meta, index);
 
        if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
-               bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+               zram_unlock_table(&meta->table[index]);
                clear_page(mem);
                return 0;
        }
@@ -584,7 +586,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
        else
                ret = zcomp_decompress(zram->comp, cmem, size, mem);
        zs_unmap_object(meta->mem_pool, handle);
-       bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+       zram_unlock_table(&meta->table[index]);
 
        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret)) {
@@ -604,14 +606,14 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
        struct zram_meta *meta = zram->meta;
        page = bvec->bv_page;
 
-       bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+       zram_lock_table(&meta->table[index]);
        if (unlikely(!meta->table[index].handle) ||
                        zram_test_flag(meta, index, ZRAM_ZERO)) {
-               bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+               zram_unlock_table(&meta->table[index]);
                handle_zero_page(bvec);
                return 0;
        }
-       bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+       zram_unlock_table(&meta->table[index]);
 
        if (is_partial_io(bvec))
                /* Use  a temporary buffer to decompress the page */
@@ -689,10 +691,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                if (user_mem)
                        kunmap_atomic(user_mem);
                /* Free memory associated with this sector now. */
-               bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+               zram_lock_table(&meta->table[index]);
                zram_free_page(zram, index);
                zram_set_flag(meta, index, ZRAM_ZERO);
-               bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+               zram_unlock_table(&meta->table[index]);
 
                atomic64_inc(&zram->stats.zero_pages);
                ret = 0;
@@ -752,12 +754,12 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
         * Free memory associated with this sector
         * before overwriting unused sectors.
         */
-       bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+       zram_lock_table(&meta->table[index]);
        zram_free_page(zram, index);
 
        meta->table[index].handle = handle;
        zram_set_obj_size(meta, index, clen);
-       bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+       zram_unlock_table(&meta->table[index]);
 
        /* Update stats */
        atomic64_add(clen, &zram->stats.compr_data_size);
@@ -800,9 +802,9 @@ static void zram_bio_discard(struct zram *zram, u32 index,
        }
 
        while (n >= PAGE_SIZE) {
-               bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+               zram_lock_table(&meta->table[index]);
                zram_free_page(zram, index);
-               bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+               zram_unlock_table(&meta->table[index]);
                atomic64_inc(&zram->stats.notify_free);
                index++;
                n -= PAGE_SIZE;
@@ -928,9 +930,9 @@ static void zram_slot_free_notify(struct block_device *bdev,
        zram = bdev->bd_disk->private_data;
        meta = zram->meta;
 
-       bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+       zram_lock_table(&meta->table[index]);
        zram_free_page(zram, index);
-       bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+       zram_unlock_table(&meta->table[index]);
        atomic64_inc(&zram->stats.notify_free);
 }
 
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 8e92339..1e4a3c6 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -72,6 +72,9 @@ enum zram_pageflags {
 struct zram_table_entry {
        unsigned long handle;
        unsigned long value;
+#ifdef CONFIG_PREEMPT_RT_BASE
+       spinlock_t lock;
+#endif
 };
 
 struct zram_stats {
@@ -119,4 +122,42 @@ struct zram {
         */
        bool claim; /* Protected by bdev->bd_mutex */
 };
+
+#ifndef CONFIG_PREEMPT_RT_BASE
+static inline void zram_lock_table(struct zram_table_entry *table)
+{
+       bit_spin_lock(ZRAM_ACCESS, &table->value);
+}
+
+static inline void zram_unlock_table(struct zram_table_entry *table)
+{
+       bit_spin_unlock(ZRAM_ACCESS, &table->value);
+}
+
+static inline void zram_meta_init_table_locks(struct zram_meta *meta, u64 disksize) { }
+#else /* CONFIG_PREEMPT_RT_BASE */
+static inline void zram_lock_table(struct zram_table_entry *table)
+{
+       spin_lock(&table->lock);
+       __set_bit(ZRAM_ACCESS, &table->value);
+}
+
+static inline void zram_unlock_table(struct zram_table_entry *table)
+{
+       __clear_bit(ZRAM_ACCESS, &table->value);
+       spin_unlock(&table->lock);
+}
+
+static inline void zram_meta_init_table_locks(struct zram_meta *meta, u64 disksize)
+{
+       size_t num_pages = disksize >> PAGE_SHIFT;
+       size_t index;
+
+       for (index = 0; index < num_pages; index++) {
+               spinlock_t *lock = &meta->table[index].lock;
+               spin_lock_init(lock);
+       }
+}
+#endif /* CONFIG_PREEMPT_RT_BASE */
+
 #endif
-- 
1.9.1
