#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
/*
 * Reader/writer consistent mechanism without starving writers. This type of
 * lock is for data where the reader wants a consistent set of information
 * and is willing to retry if the information changes. Readers never
 * block but they may have to retry if a writer is in
 * progress. Writers do not wait for readers.
 *
 * This is not as cache friendly as brlock. Also, this will not work
 * for data that contains pointers, because any writer could
 * invalidate a pointer that a reader was following.
 *
 * Expected reader usage:
 *	do {
 *		seq = read_seqbegin(&foo);
 *		...
 *	} while (read_seqretry(&foo, seq));
 *
 * On non-SMP the spin locks disappear but the writer still needs
 * to increment the sequence variables because an interrupt routine could
 * change the state of the data.
 *
 * Based on x86_64 vsyscall gettimeofday
 * by Keith Owens and Andrea Arcangeli
 */
#include <linux/spinlock.h>
#include <linux/preempt.h>
#include <asm/processor.h>

typedef struct {
	unsigned sequence;
	spinlock_t lock;
} seqlock_t;

/*
 * These macros triggered gcc-3.x compile-time problems.  We think these are
 * OK now.  Be cautious.
 */
#define __SEQLOCK_UNLOCKED(lockname) \
		 { 0, __SPIN_LOCK_UNLOCKED(lockname) }
#define seqlock_init(x)					\
	do {						\
		(x)->sequence = 0;			\
		spin_lock_init(&(x)->lock);		\
	} while (0)

#define DEFINE_SEQLOCK(x) \
		seqlock_t x = __SEQLOCK_UNLOCKED(x)
/* Lock out other writers and update the count.
 * Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void write_seqlock(seqlock_t *sl)
{
	spin_lock(&sl->lock);
	++sl->sequence;		/* odd count: writer in progress */
	smp_wmb();
}

static inline void write_sequnlock(seqlock_t *sl)
{
	smp_wmb();
	sl->sequence++;		/* even count: write completed */
	spin_unlock(&sl->lock);
}

static inline int write_tryseqlock(seqlock_t *sl)
{
	int ret = spin_trylock(&sl->lock);

	if (ret) {
		++sl->sequence;
		smp_wmb();
	}
	return ret;
}
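
/*
 * Example writer usage (a sketch, not part of the original header;
 * "foo" and its fields are hypothetical):
 *
 *	static void foo_update(struct foo *f, u64 a, u64 b)
 *	{
 *		write_seqlock(&f->lock);
 *		f->a = a;
 *		f->b = b;
 *		write_sequnlock(&f->lock);
 *	}
 */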
/* Start of read calculation -- fetch last complete writer token */
static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
{
	unsigned ret;

repeat:
	ret = ACCESS_ONCE(sl->sequence);
	if (unlikely(ret & 1)) {
		/* writer active (odd count): wait for it to finish */
		cpu_relax();
		goto repeat;
	}
	smp_rmb();

	return ret;
}
/*
 * Test if reader processed invalid data.
 *
 * If the sequence value changed then a writer changed the data while the
 * reader was in the critical section.
 */
static __always_inline int read_seqretry(const seqlock_t *sl, unsigned start)
{
	smp_rmb();

	return unlikely(sl->sequence != start);
}
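
/*
 * Example reader usage (a sketch; "foo" and its fields are hypothetical).
 * The loop retries until it has copied both fields without racing a
 * writer, yielding a consistent snapshot:
 *
 *	static void foo_read(struct foo *f, u64 *a, u64 *b)
 *	{
 *		unsigned seq;
 *
 *		do {
 *			seq = read_seqbegin(&f->lock);
 *			*a = f->a;
 *			*b = f->b;
 *		} while (read_seqretry(&f->lock, seq));
 *	}
 */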
/*
 * Version using a sequence counter only.
 * This can be used when the code has its own mutex protecting the
 * update, starting before the write_seqcount_begin() and ending
 * after the write_seqcount_end().
 */

typedef struct seqcount {
	unsigned sequence;
} seqcount_t;

#define SEQCNT_ZERO { 0 }
#define seqcount_init(x)	do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0)
/**
 * __read_seqcount_begin - begin a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline unsigned __read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret;

repeat:
	ret = ACCESS_ONCE(s->sequence);
	if (unlikely(ret & 1)) {
		cpu_relax();
		goto repeat;
	}
	return ret;
}
/**
 * read_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * read_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested by calling read_seqcount_retry().
 */
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = __read_seqcount_begin(s);
	smp_rmb();
	return ret;
}
/**
 * raw_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested by calling read_seqcount_retry().
 *
 * Unlike read_seqcount_begin(), this function will not wait for the count
 * to stabilize. If a writer is active when we begin, we will fail the
 * read_seqcount_retry() instead of stabilizing at the beginning of the
 * critical section.
 */
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = ACCESS_ONCE(s->sequence);
	smp_rmb();
	/* clear the low bit so a concurrent writer always forces a retry */
	return ret & ~1;
}
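
/*
 * Example raw_seqcount_begin usage (a sketch; "foo" is hypothetical).
 * Useful when the reader can safely process a possibly inconsistent
 * snapshot and would rather discard it and retry than spin waiting
 * for the writer to finish:
 *
 *	seq = raw_seqcount_begin(&f->seq);
 *	v = f->value;
 *	if (read_seqcount_retry(&f->seq, seq))
 *		goto retry;
 */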
/**
 * __read_seqcount_retry - end a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided after loading the variables protected by this critical section,
 * before the sequence count is re-checked.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	return unlikely(s->sequence != start);
}
/**
 * read_seqcount_retry - end a seq-read critical section
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * read_seqcount_retry closes a read critical section of the given seqcount.
 * If the critical section was invalid, it must be ignored (and typically
 * retried).
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	smp_rmb();

	return __read_seqcount_retry(s, start);
}
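
/*
 * Example seqcount reader (a sketch; "foo" and its fields are
 * hypothetical):
 *
 *	do {
 *		seq = read_seqcount_begin(&f->seq);
 *		copy = f->data;
 *	} while (read_seqcount_retry(&f->seq, seq));
 */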
/*
 * The sequence counter only version assumes that callers are using their
 * own mutexing.
 */
static inline void write_seqcount_begin(seqcount_t *s)
{
	s->sequence++;
	smp_wmb();
}

static inline void write_seqcount_end(seqcount_t *s)
{
	smp_wmb();
	s->sequence++;
}
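
/*
 * Example seqcount writer (a sketch; "foo", its mutex and fields are
 * hypothetical). The caller's own lock serializes writers; the seqcount
 * only tells readers that an update is in flight:
 *
 *	static void foo_update(struct foo *f, u64 data)
 *	{
 *		mutex_lock(&f->mutex);
 *		write_seqcount_begin(&f->seq);
 *		f->data = data;
 *		write_seqcount_end(&f->seq);
 *		mutex_unlock(&f->mutex);
 *	}
 */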
/**
 * write_seqcount_barrier - invalidate in-progress read-side seq operations
 * @s: pointer to seqcount_t
 *
 * After write_seqcount_barrier, no read-side seq operations will complete
 * successfully and see data older than this.
 */
static inline void write_seqcount_barrier(seqcount_t *s)
{
	smp_wmb();
	s->sequence += 2;	/* bump the count (keeping it even) so readers retry */
}
/*
 * Possible sw/hw IRQ protected versions of the interfaces.
 */
#define write_seqlock_irqsave(lock, flags)				\
	do { local_irq_save(flags); write_seqlock(lock); } while (0)
#define write_seqlock_irq(lock)						\
	do { local_irq_disable(); write_seqlock(lock); } while (0)
#define write_seqlock_bh(lock)						\
	do { local_bh_disable(); write_seqlock(lock); } while (0)

#define write_sequnlock_irqrestore(lock, flags)				\
	do { write_sequnlock(lock); local_irq_restore(flags); } while (0)
#define write_sequnlock_irq(lock)					\
	do { write_sequnlock(lock); local_irq_enable(); } while (0)
#define write_sequnlock_bh(lock)					\
	do { write_sequnlock(lock); local_bh_enable(); } while (0)
#define read_seqbegin_irqsave(lock, flags)				\
	({ local_irq_save(flags); read_seqbegin(lock); })

#define read_seqretry_irqrestore(lock, iv, flags)			\
	({								\
		int ret = read_seqretry(lock, iv);			\
		local_irq_restore(flags);				\
		ret;							\
	})
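
/*
 * Example IRQ-safe reader (a sketch; "foo" is hypothetical). Interrupts
 * stay disabled for the whole read-side critical section:
 *
 *	do {
 *		seq = read_seqbegin_irqsave(&f->lock, flags);
 *		v = f->value;
 *	} while (read_seqretry_irqrestore(&f->lock, seq, flags));
 */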
#endif /* __LINUX_SEQLOCK_H */