1 From 55c12e696baa5f6e39f8bf8999f8d69e0a21f5c8 Mon Sep 17 00:00:00 2001
2 From: Mike Galbraith <umgwanakikbuti@gmail.com>
3 Date: Thu, 31 Mar 2016 04:08:28 +0200
4 Subject: [PATCH 331/366] drivers/block/zram: Replace bit spinlocks with rtmutex for -rt
7 They're nondeterministic, and lead to ___might_sleep() splats in -rt.
8 OTOH, they're a lot less wasteful than an rtmutex per page.
10 Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
11 Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
13 drivers/block/zram/zram_drv.c | 30 ++++++++++++++++--------------
14 drivers/block/zram/zram_drv.h | 41 +++++++++++++++++++++++++++++++++++++++++
15 2 files changed, 57 insertions(+), 14 deletions(-)
17 diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
18 index 1770c45..39d639c 100644
19 --- a/drivers/block/zram/zram_drv.c
20 +++ b/drivers/block/zram/zram_drv.c
21 @@ -520,6 +520,8 @@ static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
25 + zram_meta_init_table_locks(meta, disksize);
30 @@ -568,12 +570,12 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
34 - bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
35 + zram_lock_table(&meta->table[index]);
36 handle = meta->table[index].handle;
37 size = zram_get_obj_size(meta, index);
39 if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
40 - bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
41 + zram_unlock_table(&meta->table[index]);
45 @@ -584,7 +586,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
47 ret = zcomp_decompress(zram->comp, cmem, size, mem);
48 zs_unmap_object(meta->mem_pool, handle);
49 - bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
50 + zram_unlock_table(&meta->table[index]);
52 /* Should NEVER happen. Return bio error if it does. */
54 @@ -604,14 +606,14 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
55 struct zram_meta *meta = zram->meta;
58 - bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
59 + zram_lock_table(&meta->table[index]);
60 if (unlikely(!meta->table[index].handle) ||
61 zram_test_flag(meta, index, ZRAM_ZERO)) {
62 - bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
63 + zram_unlock_table(&meta->table[index]);
64 handle_zero_page(bvec);
67 - bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
68 + zram_unlock_table(&meta->table[index]);
70 if (is_partial_io(bvec))
71 /* Use a temporary buffer to decompress the page */
72 @@ -689,10 +691,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
74 kunmap_atomic(user_mem);
75 /* Free memory associated with this sector now. */
76 - bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
77 + zram_lock_table(&meta->table[index]);
78 zram_free_page(zram, index);
79 zram_set_flag(meta, index, ZRAM_ZERO);
80 - bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
81 + zram_unlock_table(&meta->table[index]);
83 atomic64_inc(&zram->stats.zero_pages);
85 @@ -752,12 +754,12 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
86 * Free memory associated with this sector
87 * before overwriting unused sectors.
89 - bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
90 + zram_lock_table(&meta->table[index]);
91 zram_free_page(zram, index);
93 meta->table[index].handle = handle;
94 zram_set_obj_size(meta, index, clen);
95 - bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
96 + zram_unlock_table(&meta->table[index]);
99 atomic64_add(clen, &zram->stats.compr_data_size);
100 @@ -800,9 +802,9 @@ static void zram_bio_discard(struct zram *zram, u32 index,
103 while (n >= PAGE_SIZE) {
104 - bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
105 + zram_lock_table(&meta->table[index]);
106 zram_free_page(zram, index);
107 - bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
108 + zram_unlock_table(&meta->table[index]);
109 atomic64_inc(&zram->stats.notify_free);
112 @@ -928,9 +930,9 @@ static void zram_slot_free_notify(struct block_device *bdev,
113 zram = bdev->bd_disk->private_data;
116 - bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
117 + zram_lock_table(&meta->table[index]);
118 zram_free_page(zram, index);
119 - bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
120 + zram_unlock_table(&meta->table[index]);
121 atomic64_inc(&zram->stats.notify_free);
124 diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
125 index 8e92339..1e4a3c6 100644
126 --- a/drivers/block/zram/zram_drv.h
127 +++ b/drivers/block/zram/zram_drv.h
128 @@ -72,6 +72,9 @@ enum zram_pageflags {
129 struct zram_table_entry {
130 unsigned long handle;
132 +#ifdef CONFIG_PREEMPT_RT_BASE
138 @@ -119,4 +122,42 @@ struct zram {
140 bool claim; /* Protected by bdev->bd_mutex */
143 +#ifndef CONFIG_PREEMPT_RT_BASE
144 +static inline void zram_lock_table(struct zram_table_entry *table)
146 + bit_spin_lock(ZRAM_ACCESS, &table->value);
149 +static inline void zram_unlock_table(struct zram_table_entry *table)
151 + bit_spin_unlock(ZRAM_ACCESS, &table->value);
154 +static inline void zram_meta_init_table_locks(struct zram_meta *meta, u64 disksize) { }
155 +#else /* CONFIG_PREEMPT_RT_BASE */
156 +static inline void zram_lock_table(struct zram_table_entry *table)
158 + spin_lock(&table->lock);
159 + __set_bit(ZRAM_ACCESS, &table->value);
162 +static inline void zram_unlock_table(struct zram_table_entry *table)
164 + __clear_bit(ZRAM_ACCESS, &table->value);
165 + spin_unlock(&table->lock);
168 +static inline void zram_meta_init_table_locks(struct zram_meta *meta, u64 disksize)
170 + size_t num_pages = disksize >> PAGE_SHIFT;
173 + for (index = 0; index < num_pages; index++) {
174 + spinlock_t *lock = &meta->table[index].lock;
175 + spin_lock_init(lock);
178 +#endif /* CONFIG_PREEMPT_RT_BASE */