1 #include <linux/export.h>
2 #include <linux/lockref.h>
#ifdef CONFIG_CMPXCHG_LOCKREF

/*
 * Allow weakly-ordered memory architectures to provide barrier-less
 * cmpxchg semantics for lockref updates.
 */
#ifndef cmpxchg64_relaxed
# define cmpxchg64_relaxed cmpxchg64
#endif

/*
 * Allow architectures to override the default cpu_relax() within CMPXCHG_LOOP.
 * This is useful for architectures with an expensive cpu_relax().
 */
#ifndef arch_mutex_cpu_relax
# define arch_mutex_cpu_relax() cpu_relax()
#endif

/*
 * CMPXCHG_LOOP is the lockless fast path: as long as the embedded
 * spinlock is observed unlocked, CODE is applied to a private copy
 * and published with a single 64-bit cmpxchg of the combined
 * lock+count word, running SUCCESS on commit.
 *
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case, so the loop retries on contention until the
 * spinlock is seen taken (then we fall back to the locked path).
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
	struct lockref old;							\
	BUILD_BUG_ON(sizeof(old) != 8);						\
	old.lock_count = ACCESS_ONCE(lockref->lock_count);			\
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
		struct lockref new = old, prev = old;				\
		CODE								\
		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,	\
						   old.lock_count,		\
						   new.lock_count);		\
		if (likely(old.lock_count == prev.lock_count)) {		\
			SUCCESS;						\
		}								\
		arch_mutex_cpu_relax();						\
	}									\
} while (0)

#else

/* No 64-bit cmpxchg support: the fast path compiles to nothing and	*/
/* every caller falls through to its spinlock-protected slow path.	*/
#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
50 * lockref_get - Increments reference count unconditionally
51 * @lockref: pointer to lockref structure
53 * This operation is only valid if you already hold a reference
54 * to the object, so you know the count cannot be zero.
56 void lockref_get(struct lockref *lockref)
64 spin_lock(&lockref->lock);
66 spin_unlock(&lockref->lock);
68 EXPORT_SYMBOL(lockref_get);
71 * lockref_get_not_zero - Increments count unless the count is 0
72 * @lockref: pointer to lockref structure
73 * Return: 1 if count updated successfully or 0 if count was zero
75 int lockref_get_not_zero(struct lockref *lockref)
87 spin_lock(&lockref->lock);
93 spin_unlock(&lockref->lock);
96 EXPORT_SYMBOL(lockref_get_not_zero);
99 * lockref_get_or_lock - Increments count unless the count is 0
100 * @lockref: pointer to lockref structure
101 * Return: 1 if count updated successfully or 0 if count was zero
102 * and we got the lock instead.
104 int lockref_get_or_lock(struct lockref *lockref)
114 spin_lock(&lockref->lock);
118 spin_unlock(&lockref->lock);
121 EXPORT_SYMBOL(lockref_get_or_lock);
124 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
125 * @lockref: pointer to lockref structure
126 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
128 int lockref_put_or_lock(struct lockref *lockref)
138 spin_lock(&lockref->lock);
139 if (lockref->count <= 1)
142 spin_unlock(&lockref->lock);
145 EXPORT_SYMBOL(lockref_put_or_lock);
148 * lockref_mark_dead - mark lockref dead
149 * @lockref: pointer to lockref structure
151 void lockref_mark_dead(struct lockref *lockref)
153 assert_spin_locked(&lockref->lock);
154 lockref->count = -128;
158 * lockref_get_not_dead - Increments count unless the ref is dead
159 * @lockref: pointer to lockref structure
160 * Return: 1 if count updated successfully or 0 if lockref was dead
162 int lockref_get_not_dead(struct lockref *lockref)
168 if ((int)old.count < 0)
174 spin_lock(&lockref->lock);
176 if ((int) lockref->count >= 0) {
180 spin_unlock(&lockref->lock);
183 EXPORT_SYMBOL(lockref_get_not_dead);