/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H

/*
 * seqcount_t / seqlock_t - a reader-writer consistency mechanism with
 * lockless readers (read-only retry loops), and no writer starvation.
 *
 * See Documentation/locking/seqlock.rst
 *
 * Copyrights:
 * - Based on x86_64 vsyscall gettimeofday: Keith Owens, Andrea Arcangeli
 * - Sequence counters with associated locks, (C) 2020 Linutronix GmbH
 */

#include <linux/compiler.h>
#include <linux/kcsan-checks.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>

#include <asm/processor.h>

/*
 * The seqlock seqcount_t interface does not prescribe a precise sequence of
 * read begin/retry/end. For readers, typically there is a call to
 * read_seqcount_begin() and read_seqcount_retry(), however, there are more
 * esoteric cases which do not follow this pattern.
 *
 * As a consequence, we take the following best-effort approach for raw usage
 * via seqcount_t under KCSAN: upon beginning a seq-reader critical section,
 * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as
 * atomics; if there is a matching read_seqcount_retry() call, no following
 * memory operations are considered atomic. Usage of the seqlock_t interface
 * is not affected.
 */
#define KCSAN_SEQLOCK_REGION_MAX 1000

/*
 * Sequence counters (seqcount_t)
 *
 * This is the raw counting mechanism, without any writer protection.
 *
 * Write side critical sections must be serialized and non-preemptible.
 *
 * If readers can be invoked from hardirq or softirq contexts,
 * interrupts or bottom halves must also be respectively disabled before
 * entering the write section.
 *
 * This mechanism can't be used if the protected data contains pointers,
 * as the writer can invalidate a pointer that a reader is following.
 *
 * If the write serialization mechanism is one of the common kernel
 * locking primitives, use a sequence counter with associated lock
 * (seqcount_LOCKNAME_t) instead.
 *
 * If it's desired to automatically handle the sequence counter writer
 * serialization and non-preemptibility requirements, use a sequential
 * lock (seqlock_t) instead.
 *
 * See Documentation/locking/seqlock.rst
 */
typedef struct seqcount {
	unsigned sequence;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} seqcount_t;

static inline void __seqcount_init(seqcount_t *s, const char *name,
				   struct lock_class_key *key)
{
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	lockdep_init_map(&s->dep_map, name, key, 0);
	s->sequence = 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

# define SEQCOUNT_DEP_MAP_INIT(lockname)				\
		.dep_map = { .name = #lockname }

/**
 * seqcount_init() - runtime initializer for seqcount_t
 * @s: Pointer to the seqcount_t instance
 */
# define seqcount_init(s)						\
	do {								\
		static struct lock_class_key __key;			\
		__seqcount_init((s), #s, &__key);			\
	} while (0)

static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
{
	seqcount_t *l = (seqcount_t *)s;
	unsigned long flags;

	local_irq_save(flags);
	seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
	seqcount_release(&l->dep_map, _RET_IP_);
	local_irq_restore(flags);
}

#else
# define SEQCOUNT_DEP_MAP_INIT(lockname)
# define seqcount_init(s) __seqcount_init(s, NULL, NULL)
# define seqcount_lockdep_reader_access(x)
#endif

/**
 * SEQCNT_ZERO() - static initializer for seqcount_t
 * @name: Name of the seqcount_t instance
 */
#define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) }
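
/*
 * Example: a minimal sketch of declaring and initializing a seqcount_t,
 * statically and at runtime ("struct foo" is hypothetical)::
 *
 *	static seqcount_t foo_seq = SEQCNT_ZERO(foo_seq);
 *
 *	void foo_init(struct foo *f)
 *	{
 *		seqcount_init(&f->seq);
 *	}
 *
 * Writer serialization and non-preemptibility remain the caller's
 * responsibility, as described above.
 */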

/*
 * Sequence counters with associated locks (seqcount_LOCKNAME_t)
 *
 * A sequence counter which associates the lock used for writer
 * serialization at initialization time. This enables lockdep to validate
 * that the write side critical section is properly serialized.
 *
 * For associated locks which do not implicitly disable preemption,
 * preemption protection is enforced in the write side function.
 *
 * Lockdep is never used in any of the raw write variants.
 *
 * See Documentation/locking/seqlock.rst
 */

/*
 * For PREEMPT_RT, seqcount_LOCKNAME_t write side critical sections cannot
 * disable preemption. It can lead to higher latencies, and the write side
 * sections will not be able to acquire locks which become sleeping locks
 * on RT (e.g. spinlock_t).
 *
 * To remain preemptible while avoiding a possible livelock caused by the
 * reader preempting the writer, use a different technique: let the reader
 * detect if a seqcount_LOCKNAME_t writer is in progress. If that is the
 * case, acquire then release the associated LOCKNAME writer serialization
 * lock. This will allow any possibly-preempted writer to make progress
 * until the end of its writer serialization lock critical section.
 *
 * This lock-unlock technique must be implemented for all of PREEMPT_RT
 * sleeping locks. See Documentation/locking/locktypes.rst
 */
#if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT)
#define __SEQ_LOCK(expr)	expr
#else
#define __SEQ_LOCK(expr)
#endif

/*
 * typedef seqcount_LOCKNAME_t - sequence counter with LOCKNAME associated
 * @seqcount:	The real sequence counter
 * @lock:	Pointer to the associated lock
 *
 * A plain sequence counter with external writer synchronization by
 * LOCKNAME @lock. The lock is associated to the sequence counter in the
 * static initializer or init function. This enables lockdep to validate
 * that the write side critical section is properly serialized.
 *
 * LOCKNAME:	raw_spinlock, spinlock, rwlock or mutex
 */

/*
 * seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t
 * @s:		Pointer to the seqcount_LOCKNAME_t instance
 * @lock:	Pointer to the associated lock
 */

#define seqcount_LOCKNAME_init(s, _lock, lockname)			\
	do {								\
		seqcount_##lockname##_t *____s = (s);			\
		seqcount_init(&____s->seqcount);			\
		__SEQ_LOCK(____s->lock = (_lock));			\
	} while (0)

#define seqcount_raw_spinlock_init(s, lock)	seqcount_LOCKNAME_init(s, lock, raw_spinlock)
#define seqcount_spinlock_init(s, lock)		seqcount_LOCKNAME_init(s, lock, spinlock)
#define seqcount_rwlock_init(s, lock)		seqcount_LOCKNAME_init(s, lock, rwlock)
#define seqcount_mutex_init(s, lock)		seqcount_LOCKNAME_init(s, lock, mutex)
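
/*
 * Example: a sketch of associating a seqcount_spinlock_t with the
 * spinlock that serializes its writers ("struct foo" is hypothetical)::
 *
 *	struct foo {
 *		spinlock_t		lock;
 *		seqcount_spinlock_t	seq;
 *	};
 *
 *	void foo_init(struct foo *f)
 *	{
 *		spin_lock_init(&f->lock);
 *		seqcount_spinlock_init(&f->seq, &f->lock);
 *	}
 *
 * With the association in place, lockdep can verify that f->lock is
 * actually held around every write side section of f->seq.
 */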

/*
 * SEQCOUNT_LOCKNAME()	- Instantiate seqcount_LOCKNAME_t and helpers
 * seqprop_LOCKNAME_*()	- Property accessors for seqcount_LOCKNAME_t
 *
 * @lockname:		"LOCKNAME" part of seqcount_LOCKNAME_t
 * @locktype:		LOCKNAME canonical C data type
 * @preemptible:	preemptibility of above locktype
 * @lockmember:		argument for lockdep_assert_held()
 * @lockbase:		associated lock release function (prefix only)
 * @lock_acquire:	associated lock acquisition function (full call)
 */
#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockmember, lockbase, lock_acquire) \
typedef struct seqcount_##lockname {					\
	seqcount_t		seqcount;				\
	__SEQ_LOCK(locktype	*lock);					\
} seqcount_##lockname##_t;						\
									\
static __always_inline seqcount_t *					\
__seqprop_##lockname##_ptr(seqcount_##lockname##_t *s)			\
{									\
	return &s->seqcount;						\
}									\
									\
static __always_inline unsigned						\
__seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s)	\
{									\
	unsigned seq = READ_ONCE(s->seqcount.sequence);			\
									\
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))				\
		return seq;						\
									\
	if (preemptible && unlikely(seq & 1)) {				\
		__SEQ_LOCK(lock_acquire);				\
		__SEQ_LOCK(lockbase##_unlock(s->lock));			\
									\
		/*							\
		 * Re-read the sequence counter since the (possibly	\
		 * preempted) writer made progress.			\
		 */							\
		seq = READ_ONCE(s->seqcount.sequence);			\
	}								\
									\
	return seq;							\
}									\
									\
static __always_inline bool						\
__seqprop_##lockname##_preemptible(const seqcount_##lockname##_t *s)	\
{									\
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))				\
		return preemptible;					\
									\
	/* PREEMPT_RT relies on the above LOCK+UNLOCK */		\
	return false;							\
}									\
									\
static __always_inline void						\
__seqprop_##lockname##_assert(const seqcount_##lockname##_t *s)		\
{									\
	__SEQ_LOCK(lockdep_assert_held(lockmember));			\
}

/*
 * __seqprop() for seqcount_t
 */

static inline seqcount_t *__seqprop_ptr(seqcount_t *s)
{
	return s;
}

static inline unsigned __seqprop_sequence(const seqcount_t *s)
{
	return READ_ONCE(s->sequence);
}

static inline bool __seqprop_preemptible(const seqcount_t *s)
{
	return false;
}

static inline void __seqprop_assert(const seqcount_t *s)
{
	lockdep_assert_preemption_disabled();
}

#define __SEQ_RT	IS_ENABLED(CONFIG_PREEMPT_RT)

SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t,  false,    s->lock,        raw_spin, raw_spin_lock(s->lock))
SEQCOUNT_LOCKNAME(spinlock,     spinlock_t,      __SEQ_RT, s->lock,        spin,     spin_lock(s->lock))
SEQCOUNT_LOCKNAME(rwlock,       rwlock_t,        __SEQ_RT, s->lock,        read,     read_lock(s->lock))
SEQCOUNT_LOCKNAME(mutex,        struct mutex,    true,     s->lock,        mutex,    mutex_lock(s->lock))

/*
 * SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
 * @name:	Name of the seqcount_LOCKNAME_t instance
 * @lock:	Pointer to the associated LOCKNAME
 */

#define SEQCOUNT_LOCKNAME_ZERO(seq_name, assoc_lock) {			\
	.seqcount		= SEQCNT_ZERO(seq_name.seqcount),	\
	__SEQ_LOCK(.lock	= (assoc_lock))				\
}

#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock)	SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_SPINLOCK_ZERO(name, lock)	SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_RWLOCK_ZERO(name, lock)		SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_MUTEX_ZERO(name, lock)		SEQCOUNT_LOCKNAME_ZERO(name, lock)

#define __seqprop_case(s, lockname, prop)				\
	seqcount_##lockname##_t: __seqprop_##lockname##_##prop((void *)(s))

#define __seqprop(s, prop) _Generic(*(s),				\
	seqcount_t:		__seqprop_##prop((void *)(s)),		\
	__seqprop_case((s),	raw_spinlock,	prop),			\
	__seqprop_case((s),	spinlock,	prop),			\
	__seqprop_case((s),	rwlock,		prop),			\
	__seqprop_case((s),	mutex,		prop))

#define seqprop_ptr(s)			__seqprop(s, ptr)
#define seqprop_sequence(s)		__seqprop(s, sequence)
#define seqprop_preemptible(s)		__seqprop(s, preemptible)
#define seqprop_assert(s)		__seqprop(s, assert)
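
/*
 * Illustration: given the _Generic() selection above, an invocation like
 * seqprop_sequence(&f->seq) resolves at compile time to
 * __seqprop_spinlock_sequence() when f->seq is a seqcount_spinlock_t,
 * and to __seqprop_sequence() when it is a plain seqcount_t. This is
 * what lets the read/write APIs below accept either type transparently.
 */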

/**
 * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define __read_seqcount_begin(s)					\
({									\
	unsigned __seq;							\
									\
	while ((__seq = seqprop_sequence(s)) & 1)			\
		cpu_relax();						\
									\
	kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);			\
	__seq;								\
})

/**
 * raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define raw_read_seqcount_begin(s)					\
({									\
	unsigned _seq = __read_seqcount_begin(s);			\
									\
	smp_rmb();							\
	_seq;								\
})

/**
 * read_seqcount_begin() - begin a seqcount_t read critical section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define read_seqcount_begin(s)						\
({									\
	seqcount_lockdep_reader_access(seqprop_ptr(s));			\
	raw_read_seqcount_begin(s);					\
})

/**
 * raw_read_seqcount() - read the raw seqcount_t counter value
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * raw_read_seqcount opens a read critical section of the given
 * seqcount_t, without any lockdep checking, and without checking or
 * masking the sequence counter LSB. Calling code is responsible for
 * handling that.
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define raw_read_seqcount(s)						\
({									\
	unsigned __seq = seqprop_sequence(s);				\
									\
	smp_rmb();							\
	kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);			\
	__seq;								\
})

/**
 * raw_seqcount_begin() - begin a seqcount_t read critical section w/o
 *                        lockdep and w/o counter stabilization
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * raw_seqcount_begin opens a read critical section of the given
 * seqcount_t. Unlike read_seqcount_begin(), this function will not wait
 * for the count to stabilize. If a writer is active when it begins, it
 * will fail the read_seqcount_retry() at the end of the read critical
 * section instead of stabilizing at the beginning of it.
 *
 * Use this only in special kernel hot paths where the read section is
 * small and has a high probability of success through other external
 * means. It will save a single branching instruction.
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define raw_seqcount_begin(s)						\
({									\
	/*								\
	 * If the counter is odd, let read_seqcount_retry() fail	\
	 * by decrementing the counter.					\
	 */								\
	raw_read_seqcount(s) & ~1;					\
})

/**
 * __read_seqcount_retry() - end a seqcount_t read section w/o barrier
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 * @start: count, from read_seqcount_begin()
 *
 * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 *
 * Return: true if a read section retry is required, else false
 */
#define __read_seqcount_retry(s, start)					\
	do___read_seqcount_retry(seqprop_ptr(s), start)

static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	kcsan_atomic_next(0);
	return unlikely(READ_ONCE(s->sequence) != start);
}

/**
 * read_seqcount_retry() - end a seqcount_t read critical section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 * @start: count, from read_seqcount_begin()
 *
 * read_seqcount_retry closes the read critical section of given
 * seqcount_t. If the critical section was invalid, it must be ignored
 * (and typically retried).
 *
 * Return: true if a read section retry is required, else false
 */
#define read_seqcount_retry(s, start)					\
	do_read_seqcount_retry(seqprop_ptr(s), start)

static inline int do_read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	smp_rmb();
	return do___read_seqcount_retry(s, start);
}
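
/*
 * Example: the canonical lockless read loop (a sketch; "struct foo" is
 * hypothetical). The reader retries until it observes a stable, even
 * sequence count across its loads::
 *
 *	struct foo {
 *		seqcount_t	seq;
 *		int		a, b;
 *	};
 *
 *	int foo_read(struct foo *f)
 *	{
 *		unsigned seq;
 *		int a, b;
 *
 *		do {
 *			seq = read_seqcount_begin(&f->seq);
 *			a = f->a;
 *			b = f->b;
 *		} while (read_seqcount_retry(&f->seq, seq));
 *
 *		return a + b;
 *	}
 */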

/**
 * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Context: check write_seqcount_begin()
 */
#define raw_write_seqcount_begin(s)					\
do {									\
	if (seqprop_preemptible(s))					\
		preempt_disable();					\
									\
	do_raw_write_seqcount_begin(seqprop_ptr(s));			\
} while (0)

static inline void do_raw_write_seqcount_begin(seqcount_t *s)
{
	kcsan_nestable_atomic_begin();
	s->sequence++;
	smp_wmb();
}

/**
 * raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Context: check write_seqcount_end()
 */
#define raw_write_seqcount_end(s)					\
do {									\
	do_raw_write_seqcount_end(seqprop_ptr(s));			\
									\
	if (seqprop_preemptible(s))					\
		preempt_enable();					\
} while (0)

static inline void do_raw_write_seqcount_end(seqcount_t *s)
{
	smp_wmb();
	s->sequence++;
	kcsan_nestable_atomic_end();
}

/**
 * write_seqcount_begin_nested() - start a seqcount_t write section with
 *                                 custom lockdep nesting level
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 * @subclass: lockdep nesting level
 *
 * See Documentation/locking/lockdep-design.rst
 * Context: check write_seqcount_begin()
 */
#define write_seqcount_begin_nested(s, subclass)			\
do {									\
	seqprop_assert(s);						\
									\
	if (seqprop_preemptible(s))					\
		preempt_disable();					\
									\
	do_write_seqcount_begin_nested(seqprop_ptr(s), subclass);	\
} while (0)

static inline void do_write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
	do_raw_write_seqcount_begin(s);
}

/**
 * write_seqcount_begin() - start a seqcount_t write side critical section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Context: sequence counter write side sections must be serialized and
 * non-preemptible. Preemption will be automatically disabled if and
 * only if the seqcount write serialization lock is associated, and
 * preemptible. If readers can be invoked from hardirq or softirq
 * context, interrupts or bottom halves must be respectively disabled.
 */
#define write_seqcount_begin(s)						\
do {									\
	seqprop_assert(s);						\
									\
	if (seqprop_preemptible(s))					\
		preempt_disable();					\
									\
	do_write_seqcount_begin(seqprop_ptr(s));			\
} while (0)

static inline void do_write_seqcount_begin(seqcount_t *s)
{
	do_write_seqcount_begin_nested(s, 0);
}

/**
 * write_seqcount_end() - end a seqcount_t write side critical section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Context: Preemption will be automatically re-enabled if and only if
 * the seqcount write serialization lock is associated, and preemptible.
 */
#define write_seqcount_end(s)						\
do {									\
	do_write_seqcount_end(seqprop_ptr(s));				\
									\
	if (seqprop_preemptible(s))					\
		preempt_enable();					\
} while (0)

static inline void do_write_seqcount_end(seqcount_t *s)
{
	seqcount_release(&s->dep_map, _RET_IP_);
	do_raw_write_seqcount_end(s);
}
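
/*
 * Example: the write side matching the read loop shown after
 * read_seqcount_retry() (a sketch; "struct foo" is hypothetical). The
 * caller is assumed to provide writer serialization and, for a plain
 * seqcount_t, non-preemptibility::
 *
 *	void foo_update(struct foo *f, int a, int b)
 *	{
 *		write_seqcount_begin(&f->seq);
 *		f->a = a;
 *		f->b = b;
 *		write_seqcount_end(&f->seq);
 *	}
 */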

/**
 * raw_write_seqcount_barrier() - do a seqcount_t write barrier
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * This can be used to provide an ordering guarantee instead of the usual
 * consistency guarantee. It is one wmb cheaper, because it can collapse
 * the two back-to-back wmb()s.
 *
 * Note that writes surrounding the barrier should be declared atomic (e.g.
 * via WRITE_ONCE): a) to ensure the writes become visible to other threads
 * atomically, avoiding compiler optimizations; b) to document which writes are
 * meant to propagate to the reader critical section. This is necessary because
 * neither the writes before nor the writes after the barrier are enclosed in
 * a seq-writer critical section that would ensure readers are aware of
 * ongoing writes::
 *
 *	seqcount_t seq;
 *	bool X = true, Y = false;
 *
 *	void read(void)
 *	{
 *		bool x, y;
 *		int s;
 *
 *		do {
 *			s = read_seqcount_begin(&seq);
 *
 *			x = READ_ONCE(X); y = READ_ONCE(Y);
 *
 *		} while (read_seqcount_retry(&seq, s));
 *
 *		BUG_ON(!x && !y);
 *	}
 *
 *	void write(void)
 *	{
 *		WRITE_ONCE(Y, true);
 *
 *		raw_write_seqcount_barrier(&seq);
 *
 *		WRITE_ONCE(X, false);
 *	}
 */
#define raw_write_seqcount_barrier(s)					\
	do_raw_write_seqcount_barrier(seqprop_ptr(s))

static inline void do_raw_write_seqcount_barrier(seqcount_t *s)
{
	kcsan_nestable_atomic_begin();
	s->sequence++;
	smp_wmb();
	s->sequence++;
	kcsan_nestable_atomic_end();
}

/**
 * write_seqcount_invalidate() - invalidate in-progress seqcount_t read
 *                               side operations
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * After write_seqcount_invalidate, no seqcount_t read side operations
 * will complete successfully and see data older than this.
 */
#define write_seqcount_invalidate(s)					\
	do_write_seqcount_invalidate(seqprop_ptr(s))

static inline void do_write_seqcount_invalidate(seqcount_t *s)
{
	smp_wmb();
	kcsan_nestable_atomic_begin();
	s->sequence += 2;
	kcsan_nestable_atomic_end();
}

/*
 * Latch sequence counters (seqcount_latch_t)
 *
 * A sequence counter variant where the counter even/odd value is used to
 * switch between two copies of protected data. This allows the read path,
 * typically NMIs, to safely interrupt the write side critical section.
 *
 * As the write sections are fully preemptible, no special handling for
 * PREEMPT_RT is needed.
 */
typedef struct {
	seqcount_t seqcount;
} seqcount_latch_t;

/**
 * SEQCNT_LATCH_ZERO() - static initializer for seqcount_latch_t
 * @seq_name: Name of the seqcount_latch_t instance
 */
#define SEQCNT_LATCH_ZERO(seq_name) {					\
	.seqcount		= SEQCNT_ZERO(seq_name.seqcount),	\
}

/**
 * seqcount_latch_init() - runtime initializer for seqcount_latch_t
 * @s: Pointer to the seqcount_latch_t instance
 */
#define seqcount_latch_init(s) seqcount_init(&(s)->seqcount)

/**
 * raw_read_seqcount_latch() - pick even/odd latch data copy
 * @s: Pointer to seqcount_latch_t
 *
 * See raw_write_seqcount_latch() for details and a full reader/writer
 * usage example.
 *
 * Return: sequence counter raw value. Use the lowest bit as an index for
 * picking which data copy to read. The full counter must then be checked
 * with raw_read_seqcount_latch_retry().
 */
static __always_inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
{
	/*
	 * Pairs with the first smp_wmb() in raw_write_seqcount_latch().
	 * Due to the dependent load, a full smp_rmb() is not needed.
	 */
	return READ_ONCE(s->seqcount.sequence);
}

/**
 * raw_read_seqcount_latch_retry() - end a seqcount_latch_t read section
 * @s:		Pointer to seqcount_latch_t
 * @start:	count, from raw_read_seqcount_latch()
 *
 * Return: true if a read section retry is required, else false
 */
static __always_inline int
raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
{
	smp_rmb();
	return unlikely(READ_ONCE(s->seqcount.sequence) != start);
}

/**
 * raw_write_seqcount_latch() - redirect latch readers to even/odd copy
 * @s: Pointer to seqcount_latch_t
 *
 * The latch technique is a multiversion concurrency control method that allows
 * queries during non-atomic modifications. If you can guarantee queries never
 * interrupt the modification -- e.g. the concurrency is strictly between CPUs
 * -- you most likely do not need this.
 *
 * Where the traditional RCU/lockless data structures rely on atomic
 * modifications to ensure queries observe either the old or the new state the
 * latch allows the same for non-atomic updates. The trade-off is doubling the
 * cost of storage; we have to maintain two copies of the entire data
 * structure.
 *
 * Very simply put: we first modify one copy and then the other. This ensures
 * there is always one copy in a stable state, ready to give us an answer.
 *
 * The basic form is a data structure like::
 *
 *	struct latch_struct {
 *		seqcount_latch_t	seq;
 *		struct data_struct	data[2];
 *	};
 *
 * Where a modification, which is assumed to be externally serialized, does the
 * following::
 *
 *	void latch_modify(struct latch_struct *latch, ...)
 *	{
 *		smp_wmb();	// Ensure that the last data[1] update is visible
 *		latch->seq.sequence++;
 *		smp_wmb();	// Ensure that the seqcount update is visible
 *
 *		modify(latch->data[0], ...);
 *
 *		smp_wmb();	// Ensure that the data[0] update is visible
 *		latch->seq.sequence++;
 *		smp_wmb();	// Ensure that the seqcount update is visible
 *
 *		modify(latch->data[1], ...);
 *	}
 *
 * The query will have a form like::
 *
 *	struct entry *latch_query(struct latch_struct *latch, ...)
 *	{
 *		struct entry *entry;
 *		unsigned seq, idx;
 *
 *		do {
 *			seq = raw_read_seqcount_latch(&latch->seq);
 *
 *			idx = seq & 0x01;
 *			entry = data_query(latch->data[idx], ...);
 *
 *		// This includes needed smp_rmb()
 *		} while (raw_read_seqcount_latch_retry(&latch->seq, seq));
 *
 *		return entry;
 *	}
 *
 * So during the modification, queries are first redirected to data[1]. Then we
 * modify data[0]. When that is complete, we redirect queries back to data[0]
 * and we can modify data[1].
 *
 * NOTE:
 *
 *	The non-requirement for atomic modifications does _NOT_ include
 *	the publishing of new entries in the case where data is a dynamic
 *	data structure.
 *
 *	An iteration might start in data[0] and get suspended long enough
 *	to miss an entire modification sequence; once it resumes, it might
 *	observe the new entry.
 *
 * NOTE2:
 *
 *	When data is a dynamic data structure, one should use regular RCU
 *	patterns to manage the lifetimes of the objects within.
 */
static inline void raw_write_seqcount_latch(seqcount_latch_t *s)
{
	smp_wmb();	/* prior stores before incrementing "sequence" */
	s->seqcount.sequence++;
	smp_wmb();	/* increment "sequence" before following stores */
}

/*
 * Sequential locks (seqlock_t)
 *
 * Sequence counters with an embedded spinlock for writer serialization
 * and non-preemptibility.
 *
 * For more info, see:
 *    - Comments on top of seqcount_t
 *    - Documentation/locking/seqlock.rst
 */
typedef struct {
	/*
	 * Make sure that readers don't starve writers on PREEMPT_RT: use
	 * seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK().
	 */
	seqcount_spinlock_t seqcount;
	spinlock_t lock;
} seqlock_t;

#define __SEQLOCK_UNLOCKED(lockname)					\
	{								\
		.seqcount = SEQCNT_SPINLOCK_ZERO(lockname, &(lockname).lock), \
		.lock =	__SPIN_LOCK_UNLOCKED(lockname)			\
	}

/**
 * seqlock_init() - dynamic initializer for seqlock_t
 * @sl: Pointer to the seqlock_t instance
 */
#define seqlock_init(sl)						\
	do {								\
		spin_lock_init(&(sl)->lock);				\
		seqcount_spinlock_init(&(sl)->seqcount, &(sl)->lock);	\
	} while (0)

/**
 * DEFINE_SEQLOCK(sl) - Define a statically allocated seqlock_t
 * @sl: Name of the seqlock_t instance
 */
#define DEFINE_SEQLOCK(sl) \
	seqlock_t sl = __SEQLOCK_UNLOCKED(sl)

/**
 * read_seqbegin() - start a seqlock_t read side critical section
 * @sl: Pointer to seqlock_t
 *
 * Return: count, to be passed to read_seqretry()
 */
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
	unsigned ret = read_seqcount_begin(&sl->seqcount);

	kcsan_atomic_next(0);  /* non-raw usage, assume closing read_seqretry() */
	kcsan_flat_atomic_begin();
	return ret;
}

/**
 * read_seqretry() - end a seqlock_t read side section
 * @sl: Pointer to seqlock_t
 * @start: count, from read_seqbegin()
 *
 * read_seqretry closes the read side critical section of given seqlock_t.
 * If the critical section was invalid, it must be ignored (and typically
 * retried).
 *
 * Return: true if a read section retry is required, else false
 */
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
	/*
	 * Assume not nested: read_seqretry() may be called multiple times when
	 * completing the read critical section.
	 */
	kcsan_flat_atomic_end();

	return read_seqcount_retry(&sl->seqcount, start);
}

/*
 * For all seqlock_t write side functions, use the internal
 * do_write_seqcount_begin() instead of generic write_seqcount_begin().
 * This way, no redundant lockdep_assert_held() checks are added.
 */

/**
 * write_seqlock() - start a seqlock_t write side critical section
 * @sl: Pointer to seqlock_t
 *
 * write_seqlock opens a write side critical section for the given
 * seqlock_t. It also implicitly acquires the spinlock_t embedded inside
 * that sequential lock. All seqlock_t write side sections are thus
 * automatically serialized and non-preemptible.
 *
 * Context: if the seqlock_t read section, or other write side critical
 * sections, can be invoked from hardirq or softirq contexts, use the
 * _irqsave or _bh variants of this function instead.
 */
static inline void write_seqlock(seqlock_t *sl)
{
	spin_lock(&sl->lock);
	do_write_seqcount_begin(&sl->seqcount.seqcount);
}

/**
 * write_sequnlock() - end a seqlock_t write side critical section
 * @sl: Pointer to seqlock_t
 *
 * write_sequnlock closes the (serialized and non-preemptible) write side
 * critical section of given seqlock_t.
 */
static inline void write_sequnlock(seqlock_t *sl)
{
	do_write_seqcount_end(&sl->seqcount.seqcount);
	spin_unlock(&sl->lock);
}
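
/*
 * Example: a minimal seqlock_t usage sketch, protecting a 64-bit value
 * that would otherwise tear on 32-bit hardware (names are hypothetical)::
 *
 *	static DEFINE_SEQLOCK(foo_seqlock);
 *	static u64 foo_value;
 *
 *	void foo_set(u64 val)
 *	{
 *		write_seqlock(&foo_seqlock);
 *		foo_value = val;
 *		write_sequnlock(&foo_seqlock);
 *	}
 *
 *	u64 foo_get(void)
 *	{
 *		unsigned seq;
 *		u64 val;
 *
 *		do {
 *			seq = read_seqbegin(&foo_seqlock);
 *			val = foo_value;
 *		} while (read_seqretry(&foo_seqlock, seq));
 *
 *		return val;
 *	}
 */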

/**
 * write_seqlock_bh() - start a softirqs-disabled seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * _bh variant of write_seqlock(). Use only if the read side section, or
 * other write side sections, can be invoked from softirq contexts.
 */
static inline void write_seqlock_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
	do_write_seqcount_begin(&sl->seqcount.seqcount);
}

/**
 * write_sequnlock_bh() - end a softirqs-disabled seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * write_sequnlock_bh closes the serialized, non-preemptible, and
 * softirqs-disabled, seqlock_t write side critical section opened with
 * write_seqlock_bh().
 */
static inline void write_sequnlock_bh(seqlock_t *sl)
{
	do_write_seqcount_end(&sl->seqcount.seqcount);
	spin_unlock_bh(&sl->lock);
}

/**
 * write_seqlock_irq() - start a non-interruptible seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * _irq variant of write_seqlock(). Use only if the read side section, or
 * other write sections, can be invoked from hardirq contexts.
 */
static inline void write_seqlock_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
	do_write_seqcount_begin(&sl->seqcount.seqcount);
}

/**
 * write_sequnlock_irq() - end a non-interruptible seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * write_sequnlock_irq closes the serialized and non-interruptible
 * seqlock_t write side section opened with write_seqlock_irq().
 */
static inline void write_sequnlock_irq(seqlock_t *sl)
{
	do_write_seqcount_end(&sl->seqcount.seqcount);
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	do_write_seqcount_begin(&sl->seqcount.seqcount);
	return flags;
}

/**
 * write_seqlock_irqsave() - start a non-interruptible seqlock_t write
 *                           section
 * @lock:  Pointer to seqlock_t
 * @flags: Stack-allocated storage for saving caller's local interrupt
 *         state, to be passed to write_sequnlock_irqrestore().
 *
 * _irqsave variant of write_seqlock(). Use it only if the read side
 * section, or other write sections, can be invoked from hardirq context.
 */
#define write_seqlock_irqsave(lock, flags)				\
	do { flags = __write_seqlock_irqsave(lock); } while (0)

/**
 * write_sequnlock_irqrestore() - end non-interruptible seqlock_t write
 *                                section
 * @sl:    Pointer to seqlock_t
 * @flags: Caller's saved interrupt state, from write_seqlock_irqsave()
 *
 * write_sequnlock_irqrestore closes the serialized and non-interruptible
 * seqlock_t write section previously opened with write_seqlock_irqsave().
 */
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
	do_write_seqcount_end(&sl->seqcount.seqcount);
	spin_unlock_irqrestore(&sl->lock, flags);
}
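
/*
 * Example: continuing the hypothetical foo_seqlock sketch above, the
 * _irqsave form for a value that is also written from hardirq context::
 *
 *	void foo_set_any_context(u64 val)
 *	{
 *		unsigned long flags;
 *
 *		write_seqlock_irqsave(&foo_seqlock, flags);
 *		foo_value = val;
 *		write_sequnlock_irqrestore(&foo_seqlock, flags);
 *	}
 */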

/**
 * read_seqlock_excl() - begin a seqlock_t locking reader section
 * @sl: Pointer to seqlock_t
 *
 * read_seqlock_excl opens a seqlock_t locking reader critical section. A
 * locking reader exclusively locks out *both* other writers *and* other
 * locking readers, but it does not update the embedded sequence number.
 *
 * Locking readers act like a normal spin_lock()/spin_unlock().
 *
 * Context: if the seqlock_t write section, *or other read sections*, can
 * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
 * variant of this function instead.
 *
 * The opened read section must be closed with read_sequnlock_excl().
 */
static inline void read_seqlock_excl(seqlock_t *sl)
{
	spin_lock(&sl->lock);
}

/**
 * read_sequnlock_excl() - end a seqlock_t locking reader critical section
 * @sl: Pointer to seqlock_t
 */
static inline void read_sequnlock_excl(seqlock_t *sl)
{
	spin_unlock(&sl->lock);
}
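
/*
 * Example: a locking reader sketch, continuing the hypothetical
 * foo_seqlock above. Unlike a lockless reader, it never retries, and it
 * may safely follow pointers in the protected data::
 *
 *	void foo_dump(void)
 *	{
 *		read_seqlock_excl(&foo_seqlock);
 *		pr_info("foo: %llu\n", (unsigned long long)foo_value);
 *		read_sequnlock_excl(&foo_seqlock);
 *	}
 */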

/**
 * read_seqlock_excl_bh() - start a seqlock_t locking reader section with
 *			    softirqs disabled
 * @sl: Pointer to seqlock_t
 *
 * _bh variant of read_seqlock_excl(). Use this variant only if the
 * seqlock_t write side section, *or other read sections*, can be invoked
 * from softirq contexts.
 */
static inline void read_seqlock_excl_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
}

/**
 * read_sequnlock_excl_bh() - stop a seqlock_t softirq-disabled locking
 *			      reader section
 * @sl: Pointer to seqlock_t
 */
static inline void read_sequnlock_excl_bh(seqlock_t *sl)
{
	spin_unlock_bh(&sl->lock);
}

/**
 * read_seqlock_excl_irq() - start a non-interruptible seqlock_t locking
 *			     reader section
 * @sl: Pointer to seqlock_t
 *
 * _irq variant of read_seqlock_excl(). Use this only if the seqlock_t
 * write side section, *or other read sections*, can be invoked from a
 * hardirq context.
 */
static inline void read_seqlock_excl_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
}

/**
 * read_sequnlock_excl_irq() - end an interrupts-disabled seqlock_t
 *			       locking reader section
 * @sl: Pointer to seqlock_t
 */
static inline void read_sequnlock_excl_irq(seqlock_t *sl)
{
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	return flags;
}

/**
 * read_seqlock_excl_irqsave() - start a non-interruptible seqlock_t
 *				 locking reader section
 * @lock:  Pointer to seqlock_t
 * @flags: Stack-allocated storage for saving caller's local interrupt
 *         state, to be passed to read_sequnlock_excl_irqrestore().
 *
 * _irqsave variant of read_seqlock_excl(). Use this only if the seqlock_t
 * write side section, *or other read sections*, can be invoked from a
 * hardirq context.
 */
#define read_seqlock_excl_irqsave(lock, flags)				\
	do { flags = __read_seqlock_excl_irqsave(lock); } while (0)

/**
 * read_sequnlock_excl_irqrestore() - end non-interruptible seqlock_t
 *				      locking reader section
 * @sl:    Pointer to seqlock_t
 * @flags: Caller saved interrupt state, from read_seqlock_excl_irqsave()
 */
static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
{
	spin_unlock_irqrestore(&sl->lock, flags);
}

/**
 * read_seqbegin_or_lock() - begin a seqlock_t lockless or locking reader
 * @lock: Pointer to seqlock_t
 * @seq: Marker and return parameter. If the passed value is even, the
 * reader will become a *lockless* seqlock_t reader as in read_seqbegin().
 * If the passed value is odd, the reader will become a *locking* reader
 * as in read_seqlock_excl(). In the first call to this function, the
 * caller *must* initialize and pass an even value to @seq; this way, a
 * lockless read can be optimistically tried first.
 *
 * read_seqbegin_or_lock is an API designed to optimistically try a normal
 * lockless seqlock_t read section first. If an odd counter is found, the
 * lockless read trial has failed, and the next read iteration transforms
 * itself into a full seqlock_t locking reader.
 *
 * This is typically used to avoid lockless seqlock_t readers starvation
 * (too many retry loops) in the case of a sharp spike in write side
 * activity.
 *
 * Context: if the seqlock_t write section, *or other read sections*, can
 * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
 * variant of this function instead.
 *
 * Check Documentation/locking/seqlock.rst for template example code.
 *
 * Return: the encountered sequence counter value, through the @seq
 * parameter, which is overloaded as a return parameter. This returned
 * value must be checked with need_seqretry(). If the read section needs to
 * be retried, this returned value must also be passed as the @seq
 * parameter of the next read_seqbegin_or_lock() iteration.
 */
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
{
	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl(lock);
}

/**
 * need_seqretry() - validate seqlock_t "locking or lockless" read section
 * @lock: Pointer to seqlock_t
 * @seq: sequence count, from read_seqbegin_or_lock()
 *
 * Return: true if a read section retry is required, false otherwise
 */
static inline int need_seqretry(seqlock_t *lock, int seq)
{
	return !(seq & 1) && read_seqretry(lock, seq);
}

/**
 * done_seqretry() - end seqlock_t "locking or lockless" reader section
 * @lock: Pointer to seqlock_t
 * @seq: count, from read_seqbegin_or_lock()
 *
 * done_seqretry finishes the seqlock_t read side critical section started
 * with read_seqbegin_or_lock() and validated by need_seqretry().
 */
static inline void done_seqretry(seqlock_t *lock, int seq)
{
	if (seq & 1)
		read_sequnlock_excl(lock);
}
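
/*
 * Example: a sketch of the conditional lockless/locking reader pattern,
 * similar to in-tree callers such as fs/d_path.c (foo_seqlock is
 * hypothetical). @seq starts even, so a lockless pass is tried first;
 * on retry it is forced odd, so the next pass takes the lock::
 *
 *	int seq = 0;
 *
 *	retry:
 *	read_seqbegin_or_lock(&foo_seqlock, &seq);
 *
 *	// ... [read-side critical section] ...
 *
 *	if (need_seqretry(&foo_seqlock, seq)) {
 *		seq = 1;	// odd: become a locking reader
 *		goto retry;
 *	}
 *	done_seqretry(&foo_seqlock, seq);
 */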

/**
 * read_seqbegin_or_lock_irqsave() - begin a seqlock_t lockless reader, or
 *                                   a non-interruptible locking reader
 * @lock: Pointer to seqlock_t
 * @seq:  Marker and return parameter. Check read_seqbegin_or_lock().
 *
 * This is the _irqsave variant of read_seqbegin_or_lock(). Use it only if
 * the seqlock_t write section, *or other read sections*, can be invoked
 * from hardirq context.
 *
 * Note: Interrupts will be disabled only for "locking reader" mode.
 *
 * Return:
 *
 *   1. The saved local interrupts state in case of a locking reader, to
 *      be passed to done_seqretry_irqrestore().
 *
 *   2. The encountered sequence counter value, returned through @seq
 *      overloaded as a return parameter. Check read_seqbegin_or_lock().
 */
static inline unsigned long
read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
{
	unsigned long flags = 0;

	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl_irqsave(lock, flags);

	return flags;
}

/**
 * done_seqretry_irqrestore() - end a seqlock_t lockless reader, or a
 *                              non-interruptible locking reader section
 * @lock:  Pointer to seqlock_t
 * @seq:   Count, from read_seqbegin_or_lock_irqsave()
 * @flags: Caller's saved local interrupt state in case of a locking
 *         reader, also from read_seqbegin_or_lock_irqsave()
 *
 * This is the _irqrestore variant of done_seqretry(). The read section
 * must have been opened with read_seqbegin_or_lock_irqsave(), and
 * validated by need_seqretry().
 */
static inline void
done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
{
	if (seq & 1)
		read_sequnlock_excl_irqrestore(lock, flags);
}
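
/*
 * Example: the same conditional pattern in its _irqsave form (a sketch;
 * foo_seqlock is hypothetical). Interrupts get disabled only if the
 * locking-reader pass is taken::
 *
 *	unsigned long flags = 0;
 *	int seq = 0;
 *
 *	retry:
 *	flags = read_seqbegin_or_lock_irqsave(&foo_seqlock, &seq);
 *
 *	// ... [read-side critical section] ...
 *
 *	if (need_seqretry(&foo_seqlock, seq)) {
 *		seq = 1;	// odd: locking reader, IRQs disabled
 *		goto retry;
 *	}
 *	done_seqretry_irqrestore(&foo_seqlock, seq, flags);
 */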

#endif /* __LINUX_SEQLOCK_H */