/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_LOCKREF_H
#define __LINUX_LOCKREF_H

/*
 * Locked reference counts.
 *
 * These are different from just plain atomic refcounts in that they
 * are atomic with respect to the spinlock that goes with them.  In
 * particular, there can be implementations that don't actually get
 * the spinlock for the common decrement/increment operations, but they
 * still have to check that the operation is done semantically as if
 * the spinlock had been taken (using a cmpxchg operation that covers
 * both the lock and the count word, or using memory transactions, for
 * example).
 */

#include <linux/spinlock.h>
#include <generated/bounds.h>

#define USE_CMPXCHG_LOCKREF \
	(IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
	 IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4)

struct lockref {
	union {
#if USE_CMPXCHG_LOCKREF
		aligned_u64 lock_count;
#endif
		struct {
			spinlock_t lock;
			int count;
		};
	};
};
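
/*
 * Illustrative sketch only, not the lib/lockref.c implementation: on
 * USE_CMPXCHG_LOCKREF configurations the common get/put operations may
 * avoid taking the spinlock by updating the whole { lock, count } word
 * with a single 64-bit cmpxchg, conceptually along these lines:
 *
 *	struct lockref old, new;
 *
 *	old.lock_count = READ_ONCE(lockref->lock_count);
 *	while (arch_spin_value_unlocked(old.lock.rlock.raw_lock)) {
 *		new = old;
 *		new.count++;	// or a decrement, or a "not zero" test
 *		if (cmpxchg64_relaxed(&lockref->lock_count, old.lock_count,
 *				      new.lock_count) == old.lock_count)
 *			return;	// count updated without touching the lock
 *		old.lock_count = READ_ONCE(lockref->lock_count);
 *	}
 *	// otherwise (lock held, or too many retries) fall back to
 *	// spin_lock() and a plain update of ->count under the lock
 */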

extern void lockref_get(struct lockref *);
extern int lockref_put_return(struct lockref *);
extern int lockref_get_not_zero(struct lockref *);
extern int lockref_put_not_zero(struct lockref *);
extern int lockref_put_or_lock(struct lockref *);

extern void lockref_mark_dead(struct lockref *);
extern int lockref_get_not_dead(struct lockref *);
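
/*
 * Illustrative usage sketch with a hypothetical caller ("obj" and its
 * embedded "ref" lockref are not part of this header): grab a reference
 * locklessly first, and only fall back to the spinlock once the lockref
 * has been marked dead.
 *
 *	if (!lockref_get_not_dead(&obj->ref)) {
 *		spin_lock(&obj->ref.lock);
 *		// object is on its way out; decide what to do under the lock
 *		spin_unlock(&obj->ref.lock);
 *	}
 */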

/* Must be called under spinlock for reliable results */
static inline bool __lockref_is_dead(const struct lockref *l)
{
	return ((int)l->count < 0);
}
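
/*
 * Illustrative sketch with the same hypothetical "obj": marking a lockref
 * dead, like testing it with __lockref_is_dead(), only gives reliable
 * results with the embedded spinlock held.
 *
 *	spin_lock(&obj->ref.lock);
 *	if (!__lockref_is_dead(&obj->ref))
 *		lockref_mark_dead(&obj->ref);	// count goes negative
 *	spin_unlock(&obj->ref.lock);
 */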

#endif /* __LINUX_LOCKREF_H */