arch/ia64/include/asm/spinlock.h
#ifndef _ASM_IA64_SPINLOCK_H
#define _ASM_IA64_SPINLOCK_H

/*
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * This file is used for SMP configurations only.
 */

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/bitops.h>

#include <asm/atomic.h>
#include <asm/intrinsics.h>
#include <asm/system.h>

#define __raw_spin_lock_init(x)                 ((x)->lock = 0)

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 *   63                     32  31                      0
 *  +----------------------------------------------------+
 *  |  next_ticket_number      |     now_serving         |
 *  +----------------------------------------------------+
 */

#define TICKET_SHIFT    32

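/*
 * For illustration only (not part of this header): a minimal user-space
 * sketch of the same ticket-lock idea using C11 atomics.  The names
 * ticket_lock_t, ticket_lock() and ticket_unlock() are hypothetical and
 * exist only in this example.
 */
#if 0
#include <stdatomic.h>

typedef struct {
        atomic_uint next;       /* next ticket to hand out (tail)        */
        atomic_uint serving;    /* ticket currently being served (head)  */
} ticket_lock_t;

static void ticket_lock(ticket_lock_t *l)
{
        /* atomically note the tail and advance it: this is our ticket */
        unsigned int turn = atomic_fetch_add_explicit(&l->next, 1,
                                                      memory_order_acquire);
        /* spin until the head reaches our ticket */
        while (atomic_load_explicit(&l->serving, memory_order_acquire) != turn)
                ;       /* a cpu_relax()-style pause would go here */
}

static void ticket_unlock(ticket_lock_t *l)
{
        /* advance the head, handing the lock to the next ticket holder */
        atomic_fetch_add_explicit(&l->serving, 1, memory_order_release);
}
#endif
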
/*
 * Acquire: snapshot now_serving, take a ticket by atomically incrementing
 * the next_ticket half, then spin until now_serving reaches our ticket.
 */
static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
{
        int     *p = (int *)&lock->lock, turn, now_serving;

        now_serving = *p;
        turn = ia64_fetchadd(1, p+1, acq);

        if (turn == now_serving)
                return;

        do {
                cpu_relax();
        } while (ACCESS_ONCE(*p) != turn);
}

/*
 * Trylock: if head equals tail the lock is free; try to take the next
 * ticket with a single 64-bit cmpxchg so we either acquire it or change
 * nothing.
 */
static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
{
        long tmp = ACCESS_ONCE(lock->lock), try;

        if (!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1))) {
                try = tmp + (1L << TICKET_SHIFT);

                return ia64_cmpxchg(acq, &lock->lock, tmp, try, sizeof (tmp)) == tmp;
        }
        return 0;
}

/* Release: advance now_serving, handing the lock to the next ticket. */
static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
{
        int     *p = (int *)&lock->lock;

        (void)ia64_fetchadd(1, p, rel);
}

/* The lock is held whenever the two halves (head and tail) differ. */
static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
{
        long tmp = ACCESS_ONCE(lock->lock);

        return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1));
}

/* Contended when more than one ticket is outstanding (holder plus waiter). */
static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
{
        long tmp = ACCESS_ONCE(lock->lock);

        return (((tmp >> TICKET_SHIFT) - tmp) & ((1L << TICKET_SHIFT) - 1)) > 1;
}

static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
        return __ticket_spin_is_locked(lock);
}

static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
{
        return __ticket_spin_is_contended(lock);
}
#define __raw_spin_is_contended __raw_spin_is_contended

static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
{
        __ticket_spin_lock(lock);
}

static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
        return __ticket_spin_trylock(lock);
}

static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
        __ticket_spin_unlock(lock);
}

/* The ticket path does not re-enable interrupts while spinning, so flags is unused. */
static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
                                                  unsigned long flags)
{
        __raw_spin_lock(lock);
}

static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
        while (__raw_spin_is_locked(lock))
                cpu_relax();
}

/*
 * rwlock layout: bits 0..30 hold the reader count, bit 31 is the write
 * lock.  A negative word means a writer holds the lock; zero means free.
 */
#define __raw_read_can_lock(rw)         (*(volatile int *)(rw) >= 0)
#define __raw_write_can_lock(rw)        (*(volatile int *)(rw) == 0)

#ifdef ASM_SUPPORTED

/*
 * Take a read lock.  If interrupts were enabled in @flags (PSR.I set), they
 * are re-enabled while we spin waiting for a writer to go away, and disabled
 * again before retrying the fetchadd.
 */
static __always_inline void
__raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
{
        __asm__ __volatile__ (
                "tbit.nz p6, p0 = %1,%2\n"
                "br.few 3f\n"
                "1:\n"
                "fetchadd4.rel r2 = [%0], -1;;\n"
                "(p6) ssm psr.i\n"
                "2:\n"
                "hint @pause\n"
                "ld4 r2 = [%0];;\n"
                "cmp4.lt p7,p0 = r2, r0\n"
                "(p7) br.cond.spnt.few 2b\n"
                "(p6) rsm psr.i\n"
                ";;\n"
                "3:\n"
                "fetchadd4.acq r2 = [%0], 1;;\n"
                "cmp4.lt p7,p0 = r2, r0\n"
                "(p7) br.cond.spnt.few 1b\n"
                : : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
                : "p6", "p7", "r2", "memory");
}

#define __raw_read_lock(lock) __raw_read_lock_flags(lock, 0)

#else /* !ASM_SUPPORTED */

#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)

#define __raw_read_lock(rw)                                                             \
do {                                                                                    \
        raw_rwlock_t *__read_lock_ptr = (rw);                                           \
                                                                                        \
        while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) {          \
                ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);                        \
                while (*(volatile int *)__read_lock_ptr < 0)                            \
                        cpu_relax();                                                    \
        }                                                                               \
} while (0)

#endif /* !ASM_SUPPORTED */

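/*
 * For illustration only (not part of this header): a user-space analogue of
 * the read-lock fallback above, written with C11 atomics.  A reader
 * optimistically bumps the counter; if the result is negative a writer holds
 * the lock, so the reader backs out and waits.  The name read_lock_sketch()
 * is hypothetical.
 */
#if 0
#include <stdatomic.h>

static void read_lock_sketch(_Atomic int *rw_word)
{
        /* optimistic fast path: add ourselves as a reader */
        while (atomic_fetch_add_explicit(rw_word, 1,
                                         memory_order_acquire) < 0) {
                /* a writer is active: undo our increment ... */
                atomic_fetch_add_explicit(rw_word, -1, memory_order_release);
                /* ... and wait for the writer bit (bit 31) to clear */
                while (atomic_load_explicit(rw_word,
                                            memory_order_relaxed) < 0)
                        ;       /* a cpu_relax()-style pause would go here */
        }
}
#endif
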
#define __raw_read_unlock(rw)                                   \
do {                                                            \
        raw_rwlock_t *__read_lock_ptr = (rw);                   \
        ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);        \
} while (0)

#ifdef ASM_SUPPORTED

/*
 * Take the write lock by cmpxchg'ing the whole word from 0 to a value with
 * bit 31 set.  As in the read path, interrupts are re-enabled (if they were
 * enabled in @flags) while spinning for the word to drain to zero.
 */
static __always_inline void
__raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
{
        __asm__ __volatile__ (
                "tbit.nz p6, p0 = %1, %2\n"
                "mov ar.ccv = r0\n"
                "dep r29 = -1, r0, 31, 1\n"
                "br.few 3f;;\n"
                "1:\n"
                "(p6) ssm psr.i\n"
                "2:\n"
                "hint @pause\n"
                "ld4 r2 = [%0];;\n"
                "cmp4.eq p0,p7 = r0, r2\n"
                "(p7) br.cond.spnt.few 2b\n"
                "(p6) rsm psr.i\n"
                ";;\n"
                "3:\n"
                "cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n"
                "cmp4.eq p0,p7 = r0, r2\n"
                "(p7) br.cond.spnt.few 1b;;\n"
                : : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
                : "ar.ccv", "p6", "p7", "r2", "r29", "memory");
}

#define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0)

#define __raw_write_trylock(rw)                                                 \
({                                                                              \
        register long result;                                                   \
                                                                                \
        __asm__ __volatile__ (                                                  \
                "mov ar.ccv = r0\n"                                             \
                "dep r29 = -1, r0, 31, 1;;\n"                                   \
                "cmpxchg4.acq %0 = [%1], r29, ar.ccv\n"                         \
                : "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory");          \
        (result == 0);                                                          \
})

static inline void __raw_write_unlock(raw_rwlock_t *x)
{
        u8 *y = (u8 *)x;
        barrier();
        /* store 0 to the byte holding the write_lock bit, with release semantics */
        asm volatile ("st1.rel.nta [%0] = r0\n\t" :: "r"(y+3) : "memory" );
}

#else /* !ASM_SUPPORTED */

#define __raw_write_lock_flags(l, flags) __raw_write_lock(l)

#define __raw_write_lock(l)                                                             \
({                                                                                      \
        __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);                       \
        __u32 *ia64_write_lock_ptr = (__u32 *) (l);                                     \
        do {                                                                            \
                while (*ia64_write_lock_ptr)                                            \
                        ia64_barrier();                                                 \
                ia64_val = ia64_cmpxchg4_acq(ia64_write_lock_ptr, ia64_set_val, 0);     \
        } while (ia64_val);                                                             \
})

#define __raw_write_trylock(rw)                                         \
({                                                                      \
        __u64 ia64_val;                                                 \
        __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);                 \
        ia64_val = ia64_cmpxchg4_acq((__u32 *)(rw), ia64_set_val, 0);   \
        (ia64_val == 0);                                                \
})

static inline void __raw_write_unlock(raw_rwlock_t *x)
{
        barrier();
        x->write_lock = 0;
}

#endif /* !ASM_SUPPORTED */

/*
 * Read trylock: snapshot the lock, require that no writer holds it, and
 * atomically bump the reader count.  Fails if a writer is present or the
 * word changed under us.
 */
static inline int __raw_read_trylock(raw_rwlock_t *x)
{
        union {
                raw_rwlock_t lock;
                __u32 word;
        } old, new;
        old.lock = new.lock = *x;
        old.lock.write_lock = new.lock.write_lock = 0;
        ++new.lock.read_counter;
        return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
}

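/*
 * For illustration only (not part of this header): the same read-trylock
 * idea in portable C11, treating the lock word as a single 32-bit atomic
 * with bit 31 = writer and bits 0..30 = reader count.  The name
 * read_trylock_sketch() is hypothetical.
 */
#if 0
#include <stdatomic.h>
#include <stdint.h>

static int read_trylock_sketch(_Atomic uint32_t *rw_word)
{
        /* expect "no writer" together with the reader count we observed */
        uint32_t old = atomic_load_explicit(rw_word, memory_order_relaxed)
                       & 0x7fffffffu;
        uint32_t new = old + 1;         /* one more reader */

        /* succeeds only if no writer appeared and the count is unchanged */
        return atomic_compare_exchange_strong_explicit(rw_word, &old, new,
                                                       memory_order_acquire,
                                                       memory_order_relaxed);
}
#endif
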
#define _raw_spin_relax(lock)   cpu_relax()
#define _raw_read_relax(lock)   cpu_relax()
#define _raw_write_relax(lock)  cpu_relax()

#endif /*  _ASM_IA64_SPINLOCK_H */