/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2006
 *
 * Author: Paul McKenney <paulmck@us.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 */

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/srcu.h>

static int init_srcu_struct_fields(struct srcu_struct *sp)
{
	sp->completed = 0;
	mutex_init(&sp->mutex);
	sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array);
	return sp->per_cpu_ref ? 0 : -ENOMEM;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
	lockdep_init_map(&sp->dep_map, name, key, 0);
	return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @sp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *sp)
{
	return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/*
 * Returns approximate number of readers active on the specified rank
 * of per-CPU counters.  Also snapshots each counter's value in the
 * corresponding element of sp->snap[] for later use validating
 * the sum.
 */
static unsigned long srcu_readers_active_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;
	unsigned long t;

	for_each_possible_cpu(cpu) {
		t = ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx]);
		sum += t;
		sp->snap[cpu] = t;
	}
	return sum & SRCU_REF_MASK;
}
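
/*
 * Worked example of the counter encoding (an editorial sketch; the real
 * constants live in <linux/srcu.h>, and the narrow 8-bit values below
 * are hypothetical, chosen only for readability).  Suppose
 * SRCU_USAGE_COUNT were 0x100 and SRCU_REF_MASK were 0xff:
 *
 *	c[idx] == 0x000				initial state
 *	__srcu_read_lock():  c[idx] += 0x101;	now 0x101
 *	__srcu_read_unlock(): c[idx] -= 1;	now 0x100
 *
 * The low bits (c[idx] & SRCU_REF_MASK) once again report zero active
 * readers, but the upper "usage" bits record that a lock/unlock pair
 * occurred, so the counter's full value differs from any snapshot taken
 * before the pair.  That difference is what the ->snap recheck in
 * srcu_readers_active_idx_check() below relies on.
 */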

/*
 * To be called from the update side after an index flip.  Returns true
 * if the modulo sum of the counters is stably zero, false if there is
 * some possibility of non-zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
{
	int cpu;

	/*
	 * Note that srcu_readers_active_idx() can incorrectly return
	 * zero even though there is a pre-existing reader throughout.
	 * To see this, suppose that task A is in a very long SRCU
	 * read-side critical section that started on CPU 0, and that
	 * no other reader exists, so that the modulo sum of the counters
	 * is equal to one.  Then suppose that task B starts executing
	 * srcu_readers_active_idx(), summing up to CPU 1, and then that
	 * task C starts reading on CPU 0, so that its increment is not
	 * summed, but finishes reading on CPU 2, so that its decrement
	 * -is- summed.  Then when task B completes its sum, it will
	 * incorrectly get zero, despite the fact that task A has been
	 * in its SRCU read-side critical section the whole time.
	 *
	 * We therefore do a validation step should srcu_readers_active_idx()
	 * return zero.
	 */
	if (srcu_readers_active_idx(sp, idx) != 0)
		return false;

	/*
	 * Since the caller recently flipped ->completed, we can see at
	 * most one increment of each CPU's counter from this point
	 * forward.  The reason for this is that the reader CPU must have
	 * fetched the index before srcu_readers_active_idx() checked
	 * that CPU's counter, but not yet incremented its counter.
	 * Its eventual counter increment will follow the read in
	 * srcu_readers_active_idx(), and that increment is immediately
	 * followed by smp_mb() B.  Because smp_mb() D is between
	 * the ->completed flip and srcu_readers_active_idx()'s read,
	 * that CPU's subsequent load of ->completed must see the new
	 * value, and therefore increment the counter in the other rank.
	 */
	smp_mb(); /* A */

	/*
	 * Now, we check the ->snap array that srcu_readers_active_idx()
	 * filled in from the per-CPU counter values.  Since
	 * __srcu_read_lock() increments the upper bits of the per-CPU
	 * counter, an increment/decrement pair will change the value
	 * of the counter.  Since there is only one possible increment,
	 * the only way to wrap the counter is to have a huge number of
	 * counter decrements, which requires a huge number of tasks and
	 * huge SRCU read-side critical-section nesting levels, even on
	 * 32-bit systems.
	 *
	 * All of the ways of confusing the readings require that the scan
	 * in srcu_readers_active_idx() see the read-side task's decrement,
	 * but not its increment.  However, between that decrement and
	 * increment are smp_mb() B and C.  Either or both of these pair
	 * with smp_mb() A above to ensure that the scan below will see
	 * the read-side task's increment, thus noting a difference in
	 * the counter values between the two passes.
	 *
	 * Therefore, if srcu_readers_active_idx() returned zero, and
	 * none of the counters changed, we know that the zero was the
	 * correct sum.
	 *
	 * Of course, it is possible that a task might be delayed
	 * for a very long time in __srcu_read_lock() after fetching
	 * the index but before incrementing its counter.  This
	 * possibility will be dealt with in __synchronize_srcu().
	 */
	for_each_possible_cpu(cpu)
		if (sp->snap[cpu] !=
		    ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx]))
			return false;  /* False zero reading! */
	return true;
}

/**
 * srcu_readers_active - returns approximate number of readers.
 * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static int srcu_readers_active(struct srcu_struct *sp)
{
	return srcu_readers_active_idx(sp, 0) + srcu_readers_active_idx(sp, 1);
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @sp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *sp)
{
	int sum;

	sum = srcu_readers_active(sp);
	WARN_ON(sum);  /* Leakage unless caller handles error. */
	if (sum != 0)
		return;
	free_percpu(sp->per_cpu_ref);
	sp->per_cpu_ref = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
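
/*
 * Lifecycle sketch (editorial illustration; "my_srcu", my_init(), and
 * my_exit() are hypothetical names, not part of this file).  A typical
 * user initializes the structure once, and tears it down only after all
 * readers and updaters are done with it:
 *
 *	static struct srcu_struct my_srcu;
 *
 *	static int __init my_init(void)
 *	{
 *		return init_srcu_struct(&my_srcu);
 *	}
 *
 *	static void my_exit(void)
 *	{
 *		cleanup_srcu_struct(&my_srcu);	caller must first ensure
 *						that no readers remain
 *	}
 */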

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.  Must be called from process context.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *sp)
{
	int idx;

	preempt_disable();
	idx = rcu_dereference_index_check(sp->completed,
					  rcu_read_lock_sched_held()) & 0x1;
	ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) +=
		SRCU_USAGE_COUNT + 1;
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	preempt_enable();
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 * Must be called from process context.
 */
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
{
	preempt_disable();
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1;
	preempt_enable();
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
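
/*
 * Reader-side sketch (editorial illustration; my_srcu, my_data, and
 * struct foo are hypothetical).  Readers normally use the
 * srcu_read_lock()/srcu_read_unlock() wrappers from <linux/srcu.h>,
 * which funnel into the two functions above:
 *
 *	int idx;
 *	struct foo *p;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(my_data, &my_srcu);
 *	do_something_with(p);		may block: SRCU readers can sleep
 *	srcu_read_unlock(&my_srcu, idx);
 *
 * The idx returned by srcu_read_lock() names the rank of per-CPU
 * counters charged for this critical section, and must be passed
 * unchanged to the matching srcu_read_unlock().
 */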

/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after the initial
 * 5-microsecond delay, we repeatedly block for 1-millisecond time
 * periods.  This approach has done well in testing, so there is no
 * need for a config parameter.
 */
#define SYNCHRONIZE_SRCU_READER_DELAY 5

/*
 * Wait until all pre-existing readers complete.  Such readers
 * will have used the index specified by "idx".
 */
static void wait_idx(struct srcu_struct *sp, int idx, bool expedited)
{
	int trycount = 0;

	/*
	 * If a reader fetches the index before the ->completed increment,
	 * but increments its counter after srcu_readers_active_idx_check()
	 * sums it, then smp_mb() D will pair with __srcu_read_lock()'s
	 * smp_mb() B to ensure that the SRCU read-side critical section
	 * will see any updates that the current task performed before its
	 * call to synchronize_srcu(), or to synchronize_srcu_expedited(),
	 * as the case may be.
	 */
	smp_mb(); /* D */

	/*
	 * SRCU read-side critical sections are normally short, so wait
	 * a small amount of time before possibly blocking.
	 */
	if (!srcu_readers_active_idx_check(sp, idx)) {
		udelay(SYNCHRONIZE_SRCU_READER_DELAY);
		while (!srcu_readers_active_idx_check(sp, idx)) {
			if (expedited && ++trycount < 10)
				udelay(SYNCHRONIZE_SRCU_READER_DELAY);
			else
				schedule_timeout_interruptible(1);
		}
	}

	/*
	 * The following smp_mb() E pairs with srcu_read_unlock()'s
	 * smp_mb() C to ensure that if srcu_readers_active_idx_check()
	 * sees srcu_read_unlock()'s counter decrement, then any
	 * of the current task's subsequent code will happen after
	 * that SRCU read-side critical section.
	 *
	 * It also ensures the order between the above waiting and
	 * the next flipping.
	 */
	smp_mb(); /* E */
}

static void srcu_flip(struct srcu_struct *sp)
{
	sp->completed++;
}

/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *sp, bool expedited)
{
	int busy_idx;

	rcu_lockdep_assert(!lock_is_held(&sp->dep_map) &&
			   !lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_srcu() in same-type SRCU (or RCU) read-side critical section");

	mutex_lock(&sp->mutex);
	busy_idx = sp->completed & 0x1UL;

	/*
	 * If we recently flipped the index, there will be some readers
	 * using idx=0 and others using idx=1.  Therefore, two calls to
	 * wait_idx() suffice to ensure that all pre-existing readers
	 * have completed:
	 *
	 * __synchronize_srcu() {
	 *	wait_idx(sp, 0, expedited);
	 *	wait_idx(sp, 1, expedited);
	 * }
	 *
	 * Starvation is prevented by the fact that we flip the index.
	 * While we wait on one index to clear out, almost all new readers
	 * will be using the other index.  The number of new readers using
	 * the index we are waiting on is sharply bounded by roughly the
	 * number of CPUs.
	 *
	 * How can new readers possibly be using the old pre-flip value of
	 * the index?  Consider the following sequence of events:
	 *
	 * Suppose that during the previous grace period, a reader
	 * picked up the old value of the index, but did not increment
	 * its counter until after the previous instance of
	 * __synchronize_srcu() did the counter summation and recheck.
	 * That previous grace period was OK because the reader did
	 * not start until after the grace period started, so the grace
	 * period was not obligated to wait for that reader.
	 *
	 * However, this sequence of events is quite improbable, so
	 * this call to wait_idx(), which waits on the really old readers
	 * described in the comment above, will almost never need to wait.
	 */
	wait_idx(sp, 1 - busy_idx, expedited);

	/* Flip the index to avoid reader-induced starvation. */
	srcu_flip(sp);

	/* Wait for recent pre-existing readers. */
	wait_idx(sp, busy_idx, expedited);

	mutex_unlock(&sp->mutex);
}

/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @sp: srcu_struct with which to synchronize.
 *
 * Flip the completed counter, and wait for the old count to drain to zero.
 * As with classic RCU, the updater must use some separate means of
 * synchronizing concurrent updates.  Can block; must be called from
 * process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section.
 */
void synchronize_srcu(struct srcu_struct *sp)
{
	__synchronize_srcu(sp, 0);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
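
/*
 * Updater-side sketch (editorial illustration; my_srcu, my_data, and
 * struct foo are hypothetical).  The classic publish-then-reclaim
 * pattern looks like:
 *
 *	struct foo *newp, *oldp;
 *
 *	newp = kmalloc(sizeof(*newp), GFP_KERNEL);
 *	newp->field = 42;			initialize the new version
 *	oldp = rcu_dereference_protected(my_data, 1);
 *	rcu_assign_pointer(my_data, newp);	publish it to readers
 *	synchronize_srcu(&my_srcu);		wait out pre-existing readers
 *	kfree(oldp);				no reader can still see oldp
 *
 * Concurrent updaters still need their own mutual exclusion;
 * synchronize_srcu() only orders updaters against readers.
 */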

/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that it is illegal to call this function while holding any lock
 * that is acquired by a CPU-hotplug notifier.  It is also illegal to call
 * synchronize_srcu_expedited() from the corresponding SRCU read-side
 * critical section; doing so will result in deadlock.  However, it is
 * perfectly legal to call synchronize_srcu_expedited() on one srcu_struct
 * from some other srcu_struct's read-side critical section, as long as
 * the resulting graph of srcu_structs is acyclic.
 */
void synchronize_srcu_expedited(struct srcu_struct *sp)
{
	__synchronize_srcu(sp, 1);
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

/**
 * srcu_batches_completed - return batches completed.
 * @sp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
long srcu_batches_completed(struct srcu_struct *sp)
{
	return sp->completed;
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);