/*
 * Define a per-CPU rw_semaphore named @name with static (file-local)
 * linkage; simply forwards the "static" qualifier to __DEFINE_PERCPU_RWSEM.
 */
#define DEFINE_STATIC_PERCPU_RWSEM(name) \
__DEFINE_PERCPU_RWSEM(name, static)
-extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
+extern bool __percpu_down_read(struct percpu_rw_semaphore *, bool);
extern void __percpu_up_read(struct percpu_rw_semaphore *);
static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
preempt_enable();
}
-static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
+static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
- int ret = 1;
+ bool ret = true;
preempt_disable();
/*
}
EXPORT_SYMBOL_GPL(percpu_free_rwsem);
-int __percpu_down_read(struct percpu_rw_semaphore *sem, int try)
+bool __percpu_down_read(struct percpu_rw_semaphore *sem, bool try)
{
/*
* Due to having preemption disabled the decrement happens on
* release in percpu_up_write().
*/
if (likely(!smp_load_acquire(&sem->readers_block)))
- return 1;
+ return true;
/*
* Per the above comment; we still have preemption disabled and
__percpu_up_read(sem);
if (try)
- return 0;
+ return false;
/*
* We either call schedule() in the wait, or we'll fall through
__up_read(&sem->rw_sem);
preempt_disable();
- return 1;
+ return true;
}
EXPORT_SYMBOL_GPL(__percpu_down_read);