#ifndef __LINUX_SPINLOCK_UP_H
#define __LINUX_SPINLOCK_UP_H

#ifndef __LINUX_SPINLOCK_H
# error "please don't include this file directly"
#endif

#include <asm/processor.h>	/* for cpu_relax() */
#include <asm/barrier.h>

/*
 * include/linux/spinlock_up.h - UP-debug version of spinlocks.
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 *
 * In the debug case, 1 means unlocked, 0 means locked. (the values
 * are inverted, to catch initialization bugs)
 *
 * No atomicity anywhere, we are on UP. However, we still need
 * the compiler barriers, because we do not want the compiler to
 * move potentially faulting instructions (notably user accesses)
 * into the locked sequence, resulting in non-atomic execution.
 */

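/*
 * Illustrative sketch (not part of the original header): without the
 * barrier() calls below, the plain stores to ->slock give the compiler
 * nothing to order against, so other memory accesses could be moved
 * across them.  For a hypothetical caller such as:
 *
 *	arch_spin_lock(&lock);		// lock->slock = 0; barrier();
 *	shared_counter++;		// must not leak outside the lock
 *	arch_spin_unlock(&lock);	// barrier(); lock->slock = 1;
 *
 * the barrier() on each side acts as a compiler-level fence keeping
 * the access inside the locked sequence; the same reasoning covers the
 * potentially faulting user accesses mentioned above.  "lock" and
 * "shared_counter" are made-up names for the example only.
 */
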
#ifdef CONFIG_DEBUG_SPINLOCK
#define arch_spin_is_locked(x)		((x)->slock == 0)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	lock->slock = 0;
	barrier();
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	char oldval = lock->slock;

	lock->slock = 0;
	barrier();

	return oldval > 0;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	barrier();
	lock->slock = 1;
}
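
/*
 * Illustrative sketch (not part of the original header): because the
 * debug encoding is inverted, a lock whose storage is still zero,
 * i.e. one that was never set up with __ARCH_SPIN_LOCK_UNLOCKED,
 * already reads as "locked", so the mistake surfaces immediately:
 *
 *	arch_spinlock_t lock = { 0 };		// initializer forgotten
 *	WARN_ON(arch_spin_is_locked(&lock));	// fires: slock == 0
 *	if (!arch_spin_trylock(&lock))		// fails: oldval is 0
 *		pr_warn("spinlock was never initialised\n");
 */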

/*
 * Read-write spinlocks. No debug version.
 */
#define arch_read_lock(lock)		do { barrier(); (void)(lock); } while (0)
#define arch_write_lock(lock)		do { barrier(); (void)(lock); } while (0)
#define arch_read_trylock(lock)	({ barrier(); (void)(lock); 1; })
#define arch_write_trylock(lock)	({ barrier(); (void)(lock); 1; })
#define arch_read_unlock(lock)		do { barrier(); (void)(lock); } while (0)
#define arch_write_unlock(lock)	do { barrier(); (void)(lock); } while (0)

#else /* DEBUG_SPINLOCK */
#define arch_spin_is_locked(lock)	((void)(lock), 0)
/* for sched/core.c and kernel_lock.c: */
# define arch_spin_lock(lock)		do { barrier(); (void)(lock); } while (0)
# define arch_spin_lock_flags(lock, flags)	do { barrier(); (void)(lock); } while (0)
# define arch_spin_unlock(lock)	do { barrier(); (void)(lock); } while (0)
# define arch_spin_trylock(lock)	({ barrier(); (void)(lock); 1; })
#endif /* DEBUG_SPINLOCK */
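
/*
 * Illustrative note (not part of the original header): in the
 * !CONFIG_DEBUG_SPINLOCK case every hook above is only a compiler
 * barrier plus "(void)(lock)" to evaluate the argument, so a direct
 * caller (hypothetical sketch, "my_lock" is a made-up name) provides
 * its own exclusion on UP by disabling preemption:
 *
 *	preempt_disable();		// real UP exclusion comes from here
 *	arch_spin_lock(&my_lock);	// barrier(); (void)(&my_lock);
 *	...critical section...
 *	arch_spin_unlock(&my_lock);	// barrier(); (void)(&my_lock);
 *	preempt_enable();
 *
 * The generic spin_lock()/spin_unlock() wrappers handle the preemption
 * side themselves; the stubs here only stop the compiler from
 * reordering across the critical section.
 */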

#define arch_spin_is_contended(lock)	(((void)(lock), 0))

#endif /* __LINUX_SPINLOCK_UP_H */