/* lib/lockref.c */
#include <linux/export.h>
#include <linux/lockref.h>
#include <linux/mutex.h>

#if USE_CMPXCHG_LOCKREF

/*
 * Allow weakly-ordered memory architectures to provide barrier-less
 * cmpxchg semantics for lockref updates.
 */
#ifndef cmpxchg64_relaxed
# define cmpxchg64_relaxed cmpxchg64
#endif

/*
 * CODE runs on a private copy ("new") of the lockref while the spinlock
 * is observed to be unlocked; SUCCESS runs if the cmpxchg commits that
 * copy. Note that cmpxchg64_relaxed() returns the value it found in
 * memory, so in the failure case "old" is reloaded for the next pass.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {                                        \
        struct lockref old;                                                     \
        BUILD_BUG_ON(sizeof(old) != 8);                                         \
        old.lock_count = ACCESS_ONCE(lockref->lock_count);                      \
        while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {     \
                struct lockref new = old, prev = old;                           \
                CODE                                                            \
                old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,        \
                                                   old.lock_count,              \
                                                   new.lock_count);             \
                if (likely(old.lock_count == prev.lock_count)) {                \
                        SUCCESS;                                                \
                }                                                               \
                arch_mutex_cpu_relax();                                         \
        }                                                                       \
} while (0)

#else

#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
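
/*
 * For reference: the fast path above relies on struct lockref (see
 * <linux/lockref.h>) packing the spinlock and the count into a single
 * 64-bit word, so both can be updated by one cmpxchg64. A minimal
 * sketch of that layout (the real header may differ in detail):
 *
 *      struct lockref {
 *              union {
 *                      __aligned_u64 lock_count;
 *                      struct {
 *                              spinlock_t lock;
 *                              int count;
 *                      };
 *              };
 *      };
 */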

/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
        CMPXCHG_LOOP(
                new.count++;
        ,
                return;
        );

        spin_lock(&lockref->lock);
        lockref->count++;
        spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);
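
/*
 * Example usage (an illustrative sketch; "foo" and foo_get() are
 * hypothetical, not part of this API):
 *
 *      struct foo {
 *              struct lockref ref;
 *      };
 *
 *      static void foo_get(struct foo *f)
 *      {
 *              lockref_get(&f->ref);   // we already hold a reference
 *      }
 */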

/**
 * lockref_get_not_zero - Increments count unless the count is 0
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 */
int lockref_get_not_zero(struct lockref *lockref)
{
        int retval;

        CMPXCHG_LOOP(
                new.count++;
                if (!old.count)
                        return 0;
        ,
                return 1;
        );

        spin_lock(&lockref->lock);
        retval = 0;
        if (lockref->count) {
                lockref->count++;
                retval = 1;
        }
        spin_unlock(&lockref->lock);
        return retval;
}
EXPORT_SYMBOL(lockref_get_not_zero);
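
/*
 * Example usage (sketch; names are hypothetical): opportunistically
 * take a reference to an object whose count may concurrently be
 * dropping to zero:
 *
 *      if (!lockref_get_not_zero(&f->ref))
 *              return NULL;    // already on its way out
 *      return f;
 */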

/**
 * lockref_get_or_lock - Increments count unless the count is 0
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 * and we got the lock instead.
 */
int lockref_get_or_lock(struct lockref *lockref)
{
        CMPXCHG_LOOP(
                new.count++;
                if (!old.count)
                        break;
        ,
                return 1;
        );

        spin_lock(&lockref->lock);
        if (!lockref->count)
                return 0;
        lockref->count++;
        spin_unlock(&lockref->lock);
        return 1;
}
EXPORT_SYMBOL(lockref_get_or_lock);
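
/*
 * Example usage (sketch; names are hypothetical). Note that on a
 * 0 return the spinlock is held, so the caller must drop it:
 *
 *      if (!lockref_get_or_lock(&f->ref)) {
 *              // count was zero; we hold f->ref.lock here
 *              begin_teardown(f);
 *              spin_unlock(&f->ref.lock);
 *              return NULL;
 *      }
 */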

/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 */
int lockref_put_or_lock(struct lockref *lockref)
{
        CMPXCHG_LOOP(
                new.count--;
                if (old.count <= 1)
                        break;
        ,
                return 1;
        );

        spin_lock(&lockref->lock);
        if (lockref->count <= 1)
                return 0;
        lockref->count--;
        spin_unlock(&lockref->lock);
        return 1;
}
EXPORT_SYMBOL(lockref_put_or_lock);
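
/*
 * Example usage (sketch, loosely modeled on the dcache dput() pattern;
 * names are hypothetical). A 0 return means the count was <= 1 and is
 * unchanged, with the lock held, so teardown can be decided under it:
 *
 *      if (lockref_put_or_lock(&f->ref))
 *              return;                 // fast path: count was > 1
 *      // count is 1 and f->ref.lock is held
 *      lockref_mark_dead(&f->ref);
 *      spin_unlock(&f->ref.lock);
 *      free_foo(f);
 */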

/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 */
void lockref_mark_dead(struct lockref *lockref)
{
        assert_spin_locked(&lockref->lock);
        lockref->count = -128;
}
EXPORT_SYMBOL(lockref_mark_dead);
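
/*
 * Example usage (sketch; names are hypothetical). Marking dead must
 * happen under the lock; the negative count then fails the cmpxchg
 * fast path of lockref_get_not_dead():
 *
 *      spin_lock(&f->ref.lock);
 *      lockref_mark_dead(&f->ref);
 *      spin_unlock(&f->ref.lock);
 */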

/**
 * lockref_get_not_dead - Increments count unless the ref is dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if lockref was dead
 */
int lockref_get_not_dead(struct lockref *lockref)
{
        int retval;

        CMPXCHG_LOOP(
                new.count++;
                if ((int)old.count < 0)
                        return 0;
        ,
                return 1;
        );

        spin_lock(&lockref->lock);
        retval = 0;
        if ((int)lockref->count >= 0) {
                lockref->count++;
                retval = 1;
        }
        spin_unlock(&lockref->lock);
        return retval;
}
EXPORT_SYMBOL(lockref_get_not_dead);
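
/*
 * Example usage (sketch; names are hypothetical). The typical caller
 * is a lockless (e.g. RCU-protected) lookup that may find an object
 * which has been marked dead but not yet freed:
 *
 *      rcu_read_lock();
 *      f = lookup_foo(key);
 *      if (f && !lockref_get_not_dead(&f->ref))
 *              f = NULL;       // dying object; treat as not found
 *      rcu_read_unlock();
 */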