#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
/*
 * Reader/writer consistent mechanism without starving writers. This type of
 * lock is for data where the reader wants a consistent set of information
 * and is willing to retry if the information changes. There are two types
 * of readers:
 * 1. Sequence readers which never block a writer but they may have to retry
 *    if a writer is in progress by detecting change in sequence number.
 *    Writers do not wait for a sequence reader.
 * 2. Locking readers which will wait if a writer or another locking reader
 *    is in progress. A locking reader in progress will also block a writer
 *    from going forward. Unlike the regular rwlock, the read lock here is
 *    exclusive so that only one locking reader can get it.
 *
 * This is not as cache friendly as brlock. Also, this may not work well
 * for data that contains pointers, because any writer could
 * invalidate a pointer that a reader was following.
 *
 * Expected non-blocking reader usage:
 *	do {
 *		seq = read_seqbegin(&foo);
 *		...
 *	} while (read_seqretry(&foo, seq));
 *
 * On non-SMP the spin locks disappear but the writer still needs
 * to increment the sequence variables because an interrupt routine could
 * change the state of the data.
 *
 * Based on x86_64 vsyscall gettimeofday
 * by Keith Owens and Andrea Arcangeli
 */
#include <linux/spinlock.h>
#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/compiler.h>
#include <asm/processor.h>
/*
 * Version using sequence counter only.
 * This can be used when code has its own mutex protecting the
 * updating starting before the write_seqcount_begin() and ending
 * after the write_seqcount_end(). A usage sketch follows the
 * SEQCNT_ZERO() definition below.
 */
typedef struct seqcount {
	unsigned sequence;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} seqcount_t;
static inline void __seqcount_init(seqcount_t *s, const char *name,
				   struct lock_class_key *key)
{
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	lockdep_init_map(&s->dep_map, name, key, 0);
	s->sequence = 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define SEQCOUNT_DEP_MAP_INIT(lockname) \
		.dep_map = { .name = #lockname } \

# define seqcount_init(s)				\
	do {						\
		static struct lock_class_key __key;	\
		__seqcount_init((s), #s, &__key);	\
	} while (0)

static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
{
	seqcount_t *l = (seqcount_t *)s;
	unsigned long flags;

	local_irq_save(flags);
	seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
	seqcount_release(&l->dep_map, 1, _RET_IP_);
	local_irq_restore(flags);
}

#else
# define SEQCOUNT_DEP_MAP_INIT(lockname)
# define seqcount_init(s) __seqcount_init(s, NULL, NULL)
# define seqcount_lockdep_reader_access(x)
#endif

#define SEQCNT_ZERO(lockname) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(lockname)}
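
/*
 * A minimal usage sketch of a bare seqcount_t whose updates are serialized
 * by the caller's own mutex (all foo_* names below are hypothetical):
 *
 *	static DEFINE_MUTEX(foo_mutex);
 *	static seqcount_t foo_seq = SEQCNT_ZERO(foo_seq);
 *	static int foo_a, foo_b;
 *
 *	void foo_update(int a, int b)
 *	{
 *		mutex_lock(&foo_mutex);
 *		write_seqcount_begin(&foo_seq);
 *		foo_a = a;
 *		foo_b = b;
 *		write_seqcount_end(&foo_seq);
 *		mutex_unlock(&foo_mutex);
 *	}
 *
 *	void foo_read(int *a, int *b)
 *	{
 *		unsigned seq;
 *
 *		do {
 *			seq = read_seqcount_begin(&foo_seq);
 *			*a = foo_a;
 *			*b = foo_b;
 *		} while (read_seqcount_retry(&foo_seq, seq));
 *	}
 */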
/**
 * __read_seqcount_begin - begin a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline unsigned __read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret;

repeat:
	ret = READ_ONCE(s->sequence);
	if (unlikely(ret & 1)) {
		cpu_relax();
		goto repeat;
	}
	return ret;
}
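
/*
 * A minimal sketch of a caller providing the ordering itself ("foo_seq"
 * and "foo_a" are hypothetical; raw_read_seqcount_begin() below does
 * exactly this by issuing smp_rmb() right after the counter load):
 *
 *	seq = __read_seqcount_begin(&foo_seq);
 *	smp_rmb();	<- order the counter load before the data loads
 *	a = foo_a;
 *	...
 *	smp_rmb();	<- order the data loads before the re-check
 *	if (__read_seqcount_retry(&foo_seq, seq))
 *		goto retry;
 */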
/**
 * raw_read_seqcount - Read the raw seqcount
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_read_seqcount opens a read critical section of the given
 * seqcount without any lockdep checking and without checking or
 * masking the sequence counter LSB. Calling code is responsible for
 * handling that.
 */
static inline unsigned raw_read_seqcount(const seqcount_t *s)
{
	unsigned ret = READ_ONCE(s->sequence);
	smp_rmb();
	return ret;
}
/**
 * raw_read_seqcount_begin - start seq-read critical section w/o lockdep
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_read_seqcount_begin opens a read critical section of the given
 * seqcount, but without any lockdep checking. Validity of the critical
 * section is tested by calling read_seqcount_retry().
 */
static inline unsigned raw_read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = __read_seqcount_begin(s);
	smp_rmb();
	return ret;
}
/**
 * read_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * read_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested by calling read_seqcount_retry().
 */
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
	seqcount_lockdep_reader_access(s);
	return raw_read_seqcount_begin(s);
}
/**
 * raw_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested by calling read_seqcount_retry().
 *
 * Unlike read_seqcount_begin(), this function will not wait for the count
 * to stabilize. If a writer is active when we begin, we will fail the
 * read_seqcount_retry() instead of stabilizing at the beginning of the
 * critical section.
 */
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = READ_ONCE(s->sequence);
	smp_rmb();
	return ret & ~1;
}
/**
 * __read_seqcount_retry - end a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	return unlikely(s->sequence != start);
}
/**
 * read_seqcount_retry - end a seq-read critical section
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * read_seqcount_retry closes a read critical section of the given seqcount.
 * If the critical section was invalid, it must be ignored (and typically
 * retried).
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	smp_rmb();
	return __read_seqcount_retry(s, start);
}
static inline void __raw_write_seqcount_begin(seqcount_t *s)
{
	s->sequence++;
	smp_wmb();
}

static inline void raw_write_seqcount_begin(seqcount_t *s)
{
	preempt_disable_rt();
	__raw_write_seqcount_begin(s);
}

static inline void __raw_write_seqcount_end(seqcount_t *s)
{
	smp_wmb();
	s->sequence++;
}

static inline void raw_write_seqcount_end(seqcount_t *s)
{
	__raw_write_seqcount_end(s);
	preempt_enable_rt();
}
/**
 * raw_write_seqcount_barrier - do a seq write barrier
 * @s: pointer to seqcount_t
 *
 * This can be used to provide an ordering guarantee instead of the
 * usual consistency guarantee. It is one wmb cheaper, because we can
 * collapse the two back-to-back wmb()s.
 *
 *	seqcount_t seq;
 *	bool X = true, Y = false;
 *
 *	void read(void)
 *	{
 *		bool x, y;
 *
 *		do {
 *			int s = read_seqcount_begin(&seq);
 *			x = X; y = Y;
 *		} while (read_seqcount_retry(&seq, s));
 *
 *		BUG_ON(!x && !y);
 *	}
 *
 *	void write(void)
 *	{
 *		Y = true;
 *		raw_write_seqcount_barrier(&seq);
 *		X = false;
 *	}
 */
static inline void raw_write_seqcount_barrier(seqcount_t *s)
{
	s->sequence++;
	smp_wmb();
	s->sequence++;
}
static inline int raw_read_seqcount_latch(seqcount_t *s)
{
	int seq = READ_ONCE(s->sequence);
	/* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */
	smp_read_barrier_depends();
	return seq;
}
/**
 * raw_write_seqcount_latch - redirect readers to even/odd copy
 * @s: pointer to seqcount_t
 *
 * The latch technique is a multiversion concurrency control method that allows
 * queries during non-atomic modifications. If you can guarantee queries never
 * interrupt the modification -- e.g. the concurrency is strictly between CPUs
 * -- you most likely do not need this.
 *
 * Where the traditional RCU/lockless data structures rely on atomic
 * modifications to ensure queries observe either the old or the new state the
 * latch allows the same for non-atomic updates. The trade-off is doubling the
 * cost of storage; we have to maintain two copies of the entire data
 * structure.
 *
 * Very simply put: we first modify one copy and then the other. This ensures
 * there is always one copy in a stable state, ready to give us an answer.
 *
 * The basic form is a data structure like:
 *
 * struct latch_struct {
 *	seqcount_t		seq;
 *	struct data_struct	data[2];
 * };
 *
 * Where a modification, which is assumed to be externally serialized, does the
 * following:
 *
 * void latch_modify(struct latch_struct *latch, ...)
 * {
 *	smp_wmb();	<- Ensure that the last data[1] update is visible
 *	latch->seq++;
 *	smp_wmb();	<- Ensure that the seqcount update is visible
 *
 *	modify(latch->data[0], ...);
 *
 *	smp_wmb();	<- Ensure that the data[0] update is visible
 *	latch->seq++;
 *	smp_wmb();	<- Ensure that the seqcount update is visible
 *
 *	modify(latch->data[1], ...);
 * }
 *
 * The query will have a form like:
 *
 * struct entry *latch_query(struct latch_struct *latch, ...)
 * {
 *	struct entry *entry;
 *	unsigned seq, idx;
 *
 *	do {
 *		seq = raw_read_seqcount_latch(&latch->seq);
 *
 *		idx = seq & 0x01;
 *		entry = data_query(latch->data[idx], ...);
 *
 *		smp_rmb();
 *	} while (seq != latch->seq);
 *
 *	return entry;
 * }
 *
 * So during the modification, queries are first redirected to data[1]. Then we
 * modify data[0]. When that is complete, we redirect queries back to data[0]
 * and we can modify data[1].
 *
 * NOTE: The non-requirement for atomic modifications does _NOT_ include
 *       the publishing of new entries in the case where data is a dynamic
 *       data structure.
 *
 *       An iteration might start in data[0] and get suspended long enough
 *       to miss an entire modification sequence, once it resumes it might
 *       observe the new entry.
 *
 * NOTE: When data is a dynamic data structure, one should use regular RCU
 *       patterns to manage the lifetimes of the objects within.
 */
static inline void raw_write_seqcount_latch(seqcount_t *s)
{
	smp_wmb();	/* prior stores before incrementing "sequence" */
	s->sequence++;
	smp_wmb();	/* increment "sequence" before following stores */
}
/*
 * Sequence counter only version assumes that callers are using their
 * own mutexing.
 */
static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
	raw_write_seqcount_begin(s);
	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
}

static inline void write_seqcount_begin(seqcount_t *s)
{
	write_seqcount_begin_nested(s, 0);
}

static inline void write_seqcount_end(seqcount_t *s)
{
	seqcount_release(&s->dep_map, 1, _RET_IP_);
	raw_write_seqcount_end(s);
}
/**
 * write_seqcount_invalidate - invalidate in-progress read-side seq operations
 * @s: pointer to seqcount_t
 *
 * After write_seqcount_invalidate, no read-side seq operations will complete
 * successfully and see data older than this.
 */
static inline void write_seqcount_invalidate(seqcount_t *s)
{
	smp_wmb();
	s->sequence += 2;
}

typedef struct {
	struct seqcount seqcount;
	spinlock_t lock;
} seqlock_t;
/*
 * These macros triggered gcc-3.x compile-time problems. We think these are
 * OK now. Be cautious.
 */
#define __SEQLOCK_UNLOCKED(lockname)			\
	{						\
		.seqcount = SEQCNT_ZERO(lockname),	\
		.lock = __SPIN_LOCK_UNLOCKED(lockname)	\
	}

#define seqlock_init(x)					\
	do {						\
		seqcount_init(&(x)->seqcount);		\
		spin_lock_init(&(x)->lock);		\
	} while (0)

#define DEFINE_SEQLOCK(x) \
		seqlock_t x = __SEQLOCK_UNLOCKED(x)
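
/*
 * Initialization sketch ("foo_lock" is a hypothetical name): either
 * statically,
 *
 *	static DEFINE_SEQLOCK(foo_lock);
 *
 * or dynamically,
 *
 *	seqlock_t foo_lock;
 *	seqlock_init(&foo_lock);
 */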
/*
 * Read side functions for starting and finalizing a read side section.
 */
#ifndef CONFIG_PREEMPT_RT_FULL
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
	return read_seqcount_begin(&sl->seqcount);
}
#else
/*
 * Starvation safe read side for RT
 */
static inline unsigned read_seqbegin(seqlock_t *sl)
{
	unsigned ret;

repeat:
	ret = ACCESS_ONCE(sl->seqcount.sequence);
	if (unlikely(ret & 1)) {
		/*
		 * Take the lock and let the writer proceed (i.e. possibly
		 * boost it), otherwise we could loop here forever.
		 */
		spin_unlock_wait(&sl->lock);
		goto repeat;
	}
	return ret;
}
#endif

static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
	return read_seqcount_retry(&sl->seqcount, start);
}
/*
 * Lock out other writers and update the count.
 * Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void write_seqlock(seqlock_t *sl)
{
	spin_lock(&sl->lock);
	__raw_write_seqcount_begin(&sl->seqcount);
}

static inline int try_write_seqlock(seqlock_t *sl)
{
	if (spin_trylock(&sl->lock)) {
		__raw_write_seqcount_begin(&sl->seqcount);
		return 1;
	}
	return 0;
}

static inline void write_sequnlock(seqlock_t *sl)
{
	__raw_write_seqcount_end(&sl->seqcount);
	spin_unlock(&sl->lock);
}
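
/*
 * Writer-side sketch ("foo_lock" and foo_a/foo_b are hypothetical); the
 * corresponding sequence readers pair with read_seqbegin()/read_seqretry()
 * as shown in the comment at the top of this file:
 *
 *	write_seqlock(&foo_lock);
 *	foo_a = new_a;
 *	foo_b = new_b;
 *	write_sequnlock(&foo_lock);
 */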
static inline void write_seqlock_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
	__raw_write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_bh(seqlock_t *sl)
{
	__raw_write_seqcount_end(&sl->seqcount);
	spin_unlock_bh(&sl->lock);
}

static inline void write_seqlock_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
	__raw_write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_irq(seqlock_t *sl)
{
	__raw_write_seqcount_end(&sl->seqcount);
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	__raw_write_seqcount_begin(&sl->seqcount);
	return flags;
}

#define write_seqlock_irqsave(lock, flags)				\
	do { flags = __write_seqlock_irqsave(lock); } while (0)

static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
	__raw_write_seqcount_end(&sl->seqcount);
	spin_unlock_irqrestore(&sl->lock, flags);
}
/*
 * A locking reader exclusively locks out other writers and locking readers,
 * but doesn't update the sequence number. Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void read_seqlock_excl(seqlock_t *sl)
{
	spin_lock(&sl->lock);
}

static inline void read_sequnlock_excl(seqlock_t *sl)
{
	spin_unlock(&sl->lock);
}
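
/*
 * Locking-reader sketch ("foo_lock" and foo_a/foo_b are hypothetical).
 * Unlike a read_seqbegin()/read_seqretry() loop this never retries, but
 * it excludes writers and other locking readers while it runs:
 *
 *	read_seqlock_excl(&foo_lock);
 *	a = foo_a;
 *	b = foo_b;
 *	read_sequnlock_excl(&foo_lock);
 */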
/**
 * read_seqbegin_or_lock - begin a sequence number check or locking block
 * @lock: sequence lock
 * @seq : sequence number to be checked
 *
 * First try it once optimistically without taking the lock. If that fails,
 * take the lock. The sequence number is also used as a marker for deciding
 * whether to be a reader (even) or writer (odd).
 * N.B. seq must be initialized to an even number to begin with.
 */
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
{
	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl(lock);
}

static inline int need_seqretry(seqlock_t *lock, int seq)
{
	return !(seq & 1) && read_seqretry(lock, seq);
}

static inline void done_seqretry(seqlock_t *lock, int seq)
{
	if (seq & 1)
		read_sequnlock_excl(lock);
}
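
/*
 * Usage sketch ("foo_lock" and foo_a/foo_b are hypothetical): attempt a
 * lockless pass first, then switch to the locking reader by making the
 * sequence number odd if a retry is needed:
 *
 *	int seq, nextseq = 0;	<- even: first pass is lockless
 *
 *	do {
 *		seq = nextseq;
 *		read_seqbegin_or_lock(&foo_lock, &seq);
 *		a = foo_a;
 *		b = foo_b;
 *		nextseq = 1;	<- odd: take the lock if we must retry
 *	} while (need_seqretry(&foo_lock, seq));
 *	done_seqretry(&foo_lock, seq);
 */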
static inline void read_seqlock_excl_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
}

static inline void read_sequnlock_excl_bh(seqlock_t *sl)
{
	spin_unlock_bh(&sl->lock);
}

static inline void read_seqlock_excl_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
}

static inline void read_sequnlock_excl_irq(seqlock_t *sl)
{
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	return flags;
}

#define read_seqlock_excl_irqsave(lock, flags)				\
	do { flags = __read_seqlock_excl_irqsave(lock); } while (0)

static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
{
	spin_unlock_irqrestore(&sl->lock, flags);
}
static inline unsigned long
read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
{
	unsigned long flags = 0;

	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl_irqsave(lock, flags);

	return flags;
}

static inline void
done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
{
	if (seq & 1)
		read_sequnlock_excl_irqrestore(lock, flags);
}
#endif /* __LINUX_SEQLOCK_H */