/*
 * lib/lockref.c - reference counts protected by (or combined with) a spinlock
 *
 * (Header reconstructed: the original extraction had an unrelated commit
 * subject, "mm/mempolicy: rename check_*range to queue_pages_*range", and
 * a gitweb path line, "platform/adaptation/renesas_rcar/renesas_kernel.git
 * / lib / lockref.c", fused here.)
 */
1 #include <linux/export.h>
2 #include <linux/lockref.h>
3
#ifdef CONFIG_CMPXCHG_LOCKREF

/*
 * Lockless fast path: the lock and the count share a single 64-bit word
 * (enforced by the BUILD_BUG_ON below), so as long as the spinlock half is
 * observed unlocked we can update the count half with a single cmpxchg of
 * the whole word.
 *
 * CODE runs with "old" (the snapshot), "new" (the candidate value to
 * store) and "prev" in scope; it may "return"/"break" to bail out of the
 * loop. SUCCESS runs when the cmpxchg committed "new".
 *
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
	struct lockref old;							\
	BUILD_BUG_ON(sizeof(old) != 8);						\
	old.lock_count = ACCESS_ONCE(lockref->lock_count);			\
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
		struct lockref new = old, prev = old;				\
		CODE								\
		old.lock_count = cmpxchg(&lockref->lock_count,			\
					 old.lock_count, new.lock_count);	\
		if (likely(old.lock_count == prev.lock_count)) {		\
			SUCCESS;						\
		}								\
		cpu_relax();							\
	}									\
} while (0)

#else

/* No cmpxchg support: expand to nothing so callers fall through to the
 * plain spin_lock() slow path. */
#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
31
/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
	/* Fast path: bump the count locklessly while the lock is free. */
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	/* Slow path: lock was held (or CONFIG_CMPXCHG_LOCKREF is off). */
	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);
52
/**
 * lockref_get_not_zero - Increments count unless the count is 0
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 */
int lockref_get_not_zero(struct lockref *lockref)
{
	int retval;

	/* Fast path: refuse a zero count before committing the increment. */
	CMPXCHG_LOOP(
		new.count++;
		if (!old.count)
			return 0;
	,
		return 1;
	);

	/* Slow path: recheck the count under the lock. */
	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_zero);
80
/**
 * lockref_get_or_lock - Increments count unless the count is 0
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 * and we got the lock instead.
 */
int lockref_get_or_lock(struct lockref *lockref)
{
	/* Fast path: "break" falls through to take the lock on zero. */
	CMPXCHG_LOOP(
		new.count++;
		if (!old.count)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (!lockref->count)
		return 0;	/* NOTE: returns with the lock HELD - caller must unlock */
	lockref->count++;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_get_or_lock);
105
/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 */
int lockref_put_or_lock(struct lockref *lockref)
{
	/* Fast path: never drop the last reference locklessly. */
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 1)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (lockref->count <= 1)
		return 0;	/* NOTE: returns with the lock HELD - caller must unlock */
	lockref->count--;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_put_or_lock);
129
/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 *
 * Must be called with the spinlock held. Sets the count to a large
 * negative sentinel so lockref_get_not_dead() refuses new references.
 */
void lockref_mark_dead(struct lockref *lockref)
{
	assert_spin_locked(&lockref->lock);
	lockref->count = -128;	/* dead sentinel, tested as (int)count < 0 */
}
/**
 * lockref_get_not_dead - Increments count unless the ref is dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if lockref was dead
 *
 * "Dead" means the count is negative (see lockref_mark_dead()), hence
 * the signed casts on an otherwise unsigned count.
 */
int lockref_get_not_dead(struct lockref *lockref)
{
	int retval;

	/* Fast path: bail out if the snapshot shows a dead (negative) count. */
	CMPXCHG_LOOP(
		new.count++;
		if ((int)old.count < 0)
			return 0;
	,
		return 1;
	);

	/* Slow path: recheck for death under the lock. */
	spin_lock(&lockref->lock);
	retval = 0;
	if ((int) lockref->count >= 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_dead);