#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

#include <linux/jump_label.h>
#include <linux/atomic.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
#include <asm/bitops.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which support up to 2^16 CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
#else
# define LOCK_PTR_REG "D"
#endif
#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PPRO_FENCE)
/*
 * On PPro SMP, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif
/* How long a lock should spin before we consider blocking */
#define SPIN_THRESHOLD	(1 << 15)
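/*
 * Note (added, not in the original source): the threshold only changes
 * behaviour when paravirt ticketlocks are in use - after SPIN_THRESHOLD
 * iterations the slow path calls __ticket_lock_spinning(), which is an
 * empty stub unless CONFIG_PARAVIRT_SPINLOCKS is enabled.
 */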
extern struct static_key paravirt_ticketlocks_enabled;
static __always_inline bool static_key_false(struct static_key *key);
#ifdef CONFIG_PARAVIRT_SPINLOCKS

static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
{
	set_bit(0, (volatile unsigned long *)&lock->tickets.tail);
}

#else	/* !CONFIG_PARAVIRT_SPINLOCKS */
static __always_inline void __ticket_lock_spinning(arch_spinlock_t *lock,
						    __ticket_t ticket)
{
}
static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
					__ticket_t ticket)
{
}

#endif /* CONFIG_PARAVIRT_SPINLOCKS */
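/*
 * Note (added, not in the original source): under paravirt, a waiter that
 * spins past SPIN_THRESHOLD enters __ticket_lock_spinning(), whose backend
 * sets TICKET_SLOWPATH_FLAG via __ticket_enter_slowpath() and halts the
 * vCPU until the unlocker kicks it with __ticket_unlock_kick().
 */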
static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.tickets.head == lock.tickets.tail;
}
/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 */
static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	register struct __raw_tickets inc = { .tail = TICKET_LOCK_INC };

	inc = xadd(&lock->tickets, inc);
	if (likely(inc.head == inc.tail))
		goto out;

	inc.tail &= ~TICKET_SLOWPATH_FLAG;
	for (;;) {
		unsigned count = SPIN_THRESHOLD;

		do {
			if (ACCESS_ONCE(lock->tickets.head) == inc.tail)
				goto out;
			cpu_relax();
		} while (--count);
		__ticket_lock_spinning(lock, inc.tail);
	}
out:	barrier();	/* make sure nothing creeps before the lock is taken */
}
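/*
 * Worked example (added, not in the original source), assuming
 * TICKET_LOCK_INC == 1: with head == tail == 5 the lock is free; the xadd
 * returns { head = 5, tail = 5 } and stores tail = 6, so this CPU owns
 * ticket 5 and takes the fast path. A second CPU then gets ticket 6 from
 * { head = 5, tail = 6 } and spins until unlock advances head to 6.
 */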
static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	arch_spinlock_t old, new;

	old.tickets = ACCESS_ONCE(lock->tickets);
	if (old.tickets.head != (old.tickets.tail & ~TICKET_SLOWPATH_FLAG))
		return 0;

	new.head_tail = old.head_tail + (TICKET_LOCK_INC << TICKET_SHIFT);

	/* cmpxchg is a full barrier, so nothing can move before it */
	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
}
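/*
 * Note (added, not in the original source): head_tail is the whole lock word
 * with the tail in the high half, so adding TICKET_LOCK_INC shifted left by
 * TICKET_SHIFT claims the next ticket in a single cmpxchg once the snapshot
 * showed the lock free.
 */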
static inline void __ticket_unlock_slowpath(arch_spinlock_t *lock,
					    arch_spinlock_t old)
{
	arch_spinlock_t new;

	BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);

	/* Perform the unlock on the "before" copy */
	old.tickets.head += TICKET_LOCK_INC;

	/* Clear the slowpath flag */
	new.head_tail = old.head_tail & ~(TICKET_SLOWPATH_FLAG << TICKET_SHIFT);

	/*
	 * If the lock is uncontended, clear the flag - use cmpxchg in
	 * case it changes behind our back though.
	 */
	if (new.tickets.head != new.tickets.tail ||
	    cmpxchg(&lock->head_tail, old.head_tail,
		    new.head_tail) != old.head_tail) {
		/*
		 * Lock still has someone queued for it, so wake up an
		 * appropriate waiter.
		 */
		__ticket_unlock_kick(lock, old.tickets.head);
	}
}
static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	if (TICKET_SLOWPATH_FLAG &&
	    static_key_false(&paravirt_ticketlocks_enabled)) {
		arch_spinlock_t prev;

		prev = *lock;
		add_smp(&lock->tickets.head, TICKET_LOCK_INC);

		/* add_smp() is a full mb() */

		if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG))
			__ticket_unlock_slowpath(lock, prev);
	} else
		__add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
}
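/*
 * Note (added, not in the original source): the common case is the plain
 * __add() of TICKET_LOCK_INC to head. Only with paravirt ticketlocks does
 * unlock snapshot the lock first, release it with a full barrier via
 * add_smp(), and then kick a blocked waiter if the slowpath flag was set.
 */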
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	return tmp.tail != tmp.head;
}
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
}
#define arch_spin_is_contended	arch_spin_is_contended
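/*
 * Note (added, not in the original source): tail - head == 0 means unlocked
 * and == TICKET_LOCK_INC means held with no waiters, so anything larger
 * means at least one CPU is queued behind the owner.
 */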
static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
						 unsigned long flags)
{
	arch_spin_lock(lock);
}
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		cpu_relax();
}
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
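/*
 * Worked example (added, not in the original source): the counter starts at
 * RW_LOCK_BIAS. Each reader decrements it by one and may proceed while the
 * result stays non-negative; a writer subtracts the whole bias and may
 * proceed only if the result is zero, i.e. no readers and no other writer.
 */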
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
	return lock->lock > 0;
}
/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
	return lock->write == WRITE_LOCK_CMP;
}
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (&rw->write), "i" (RW_LOCK_BIAS)
		     : "memory");
}
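/*
 * Note (added, not in the original source): the jns/jz branches take the
 * fast path when the locked arithmetic leaves the counter in range;
 * otherwise the out-of-line __read_lock_failed/__write_lock_failed helpers
 * spin, expecting the lock address in the register named by LOCK_PTR_REG.
 */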
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	READ_LOCK_ATOMIC(t) *count = (READ_LOCK_ATOMIC(t) *)lock;

	if (READ_LOCK_ATOMIC(dec_return)(count) >= 0)
		return 1;
	READ_LOCK_ATOMIC(inc)(count);
	return 0;
}
static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)&lock->write;

	if (atomic_sub_and_test(WRITE_LOCK_CMP, count))
		return 1;
	atomic_add(WRITE_LOCK_CMP, count);
	return 0;
}
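/*
 * Note (added, not in the original source): both trylocks are optimistic -
 * they do the locked decrement/subtraction first and, if the result shows
 * the lock was not actually free, immediately add the value back instead
 * of spinning.
 */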
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
		     :"+m" (rw->lock) : : "memory");
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
		     : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
}
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#undef READ_LOCK_SIZE
#undef READ_LOCK_ATOMIC
#undef WRITE_LOCK_ADD
#undef WRITE_LOCK_SUB
#undef WRITE_LOCK_CMP

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()
#endif /* _ASM_X86_SPINLOCK_H */