#ifndef _LINUX_PERCPU_RWSEM_H
#define _LINUX_PERCPU_RWSEM_H

#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
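
/*
 * A simple per-CPU reader/writer semaphore.  Readers bump a per-CPU
 * counter inside an RCU read-side critical section, so the uncontended
 * read path performs no atomic operations on shared cache lines.  A
 * writer takes the mutex, sets "locked", waits an RCU grace period so
 * every new reader sees the flag, and then waits for the per-CPU
 * counters to drain to zero.
 *
 * Typical usage (sketch):
 *
 *	percpu_init_rwsem(&sem);
 *
 *	percpu_down_read(&sem);
 *	... read-side critical section ...
 *	percpu_up_read(&sem);
 *
 *	percpu_down_write(&sem);
 *	... write-side critical section ...
 *	percpu_up_write(&sem);
 */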
struct percpu_rw_semaphore {
	unsigned __percpu *counters;
	bool locked;
	struct mutex mtx;
};
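
/*
 * Acquire the lock for reading.  The fast path only increments this
 * CPU's counter.  If a writer has already set p->locked, fall back to
 * the slow path: block on p->mtx until the writer is done, then take
 * the reader count while still serialized against the writer.
 */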
static inline void percpu_down_read(struct percpu_rw_semaphore *p)
{
	rcu_read_lock();
	if (unlikely(p->locked)) {
		rcu_read_unlock();
		mutex_lock(&p->mtx);
		this_cpu_inc(*p->counters);
		mutex_unlock(&p->mtx);
		return;
	}
	this_cpu_inc(*p->counters);
	rcu_read_unlock();
}
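
/*
 * Release the lock for reading: drop this CPU's counter.  The barrier
 * keeps the read-side critical section from leaking past the decrement
 * that percpu_down_write() polls for.
 */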
static inline void percpu_up_read(struct percpu_rw_semaphore *p)
{
	/*
	 * On X86, the write operation in this_cpu_dec serves as a memory
	 * unlock barrier (i.e. memory accesses may be moved before the
	 * write, but no memory accesses are moved past the write).
	 * On other architectures this may not be the case, so we need
	 * smp_mb() there.
	 */
#if defined(CONFIG_X86) && (!defined(CONFIG_X86_PPRO_FENCE) && !defined(CONFIG_X86_OOSTORE))
	barrier();
#else
	smp_mb();
#endif
	this_cpu_dec(*p->counters);
}
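
/*
 * Sum of all per-CPU reader counts; the total reaches zero once every
 * reader has released the lock, even if the increment and the decrement
 * happened on different CPUs.
 */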
static inline unsigned __percpu_count(unsigned __percpu *counters)
{
	unsigned total = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		total += ACCESS_ONCE(*per_cpu_ptr(counters, cpu));

	return total;
}
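
/*
 * Acquire the lock for writing: take the mutex to exclude other writers
 * and slow-path readers, set p->locked, wait an RCU grace period so all
 * new readers see the flag, then poll until the existing readers drain.
 */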
static inline void percpu_down_write(struct percpu_rw_semaphore *p)
{
	mutex_lock(&p->mtx);
	p->locked = true;
	synchronize_rcu();
	while (__percpu_count(p->counters))
		msleep(1);
	smp_rmb(); /* paired with smp_mb() in percpu_up_read() */
}
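
/* Release the lock for writing: clear the flag and let readers back in. */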
static inline void percpu_up_write(struct percpu_rw_semaphore *p)
{
	p->locked = false;
	mutex_unlock(&p->mtx);
}
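
/* Initialize the semaphore; returns -ENOMEM if the counters cannot be allocated. */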
static inline int percpu_init_rwsem(struct percpu_rw_semaphore *p)
{
	p->counters = alloc_percpu(unsigned);
	if (unlikely(!p->counters))
		return -ENOMEM;
	p->locked = false;
	mutex_init(&p->mtx);
	return 0;
}
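
/* Free the per-CPU counters once the semaphore can no longer be used. */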
static inline void percpu_free_rwsem(struct percpu_rw_semaphore *p)
{
	free_percpu(p->counters);
	p->counters = NULL; /* catch use after free bugs */
}

#endif