/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/processor.h>
#include <asm/alternative.h>

/* The lock value of the current CPU (cpu + 1, see arch_spin_lockval()), cached in the lowcore. */
#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

extern int spin_retry;

bool arch_vcpu_is_preempted(int cpu);

#define vcpu_is_preempted arch_vcpu_is_preempted

/*
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, the other does not.
 *
 * We make no fairness guarantees; fairness would have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
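
/*
 * A minimal usage sketch (illustrative only; kernel code normally goes
 * through the generic spin_lock()/spin_unlock() wrappers, which end up
 * in the arch_* functions below).  The names "example_lock" and
 * "example_counter" are hypothetical:
 *
 *      static arch_spinlock_t example_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 *      static unsigned long example_counter;
 *
 *      arch_spin_lock(&example_lock);     fast path: cmpxchg of 0 to
 *                                         SPINLOCK_LOCKVAL, else spin
 *                                         in arch_spin_lock_wait()
 *      example_counter++;                 critical section
 *      arch_spin_unlock(&example_lock);   a store of 0 releases the lock
 */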

void arch_spin_relax(arch_spinlock_t *lock);
#define arch_spin_relax arch_spin_relax

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_setup(int cpu);

/* A lock value of 0 means unlocked, so CPU numbers are biased by one. */
static inline u32 arch_spin_lockval(int cpu)
{
        return cpu + 1;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
        return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
        return READ_ONCE(lp->lock) != 0;
}

static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
        barrier();
        return likely(__atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
        if (!arch_spin_trylock_once(lp))
                arch_spin_lock_wait(lp);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
        if (!arch_spin_trylock_once(lp))
                return arch_spin_trylock_retry(lp);
        return 1;
}

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
        typecheck(int, lp->lock);
        kcsan_release();
        /*
         * Release the lock with a store of 0 to the lower (owner)
         * halfword only; the upper halfword holds wait-queue state
         * maintained by arch/s390/lib/spinlock.c and must survive the
         * unlock.  When facility 49 (execution hint) is installed, the
         * alternative patches in NIAI 7, hinting that this CPU is done
         * with the cache line so it can be handed to a waiter sooner.
         */
        asm_inline volatile(
                ALTERNATIVE("nop", ".insn rre,0xb2fa0000,7,0", 49) /* NIAI 7 */
                "       sth     %1,%0\n"
                : "=R" (((unsigned short *) &lp->lock)[1])
                : "d" (0) : "cc", "memory");
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks: any writer needs to take an
 * irq-safe write-lock, but readers may take non-irqsafe
 * read-locks. A writer that left interrupts enabled could
 * otherwise deadlock against a reader interrupting it on
 * the same CPU.
 */
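
/*
 * A minimal sketch of that mixing (illustrative only; real code uses
 * the generic read_lock()/write_lock_irqsave() wrappers).
 * "example_rwlock" is a hypothetical name:
 *
 *      static arch_rwlock_t example_rwlock = __ARCH_RW_LOCK_UNLOCKED;
 *
 *      A reader in interrupt context may keep interrupts enabled:
 *
 *              arch_read_lock(&example_rwlock);
 *              ... read the shared data ...
 *              arch_read_unlock(&example_rwlock);
 *
 *      while every writer disables interrupts around the write-lock:
 *
 *              local_irq_save(flags);
 *              arch_write_lock(&example_rwlock);
 *              ... modify the shared data ...
 *              arch_write_unlock(&example_rwlock);
 *              local_irq_restore(flags);
 */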

#define arch_read_relax(rw) barrier()
#define arch_write_relax(rw) barrier()

void arch_read_lock_wait(arch_rwlock_t *lp);
void arch_write_lock_wait(arch_rwlock_t *lp);

/*
 * Layout of rw->cnts: the lower 16 bits count the readers, the upper
 * 16 bits hold the writer state (a writer atomically replaces 0 with
 * 0x30000).
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
        int old;

        old = __atomic_add(1, &rw->cnts);
        if (old & 0xffff0000)
                arch_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        __atomic_add_const_barrier(-1, &rw->cnts);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        if (!__atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000))
                arch_write_lock_wait(rw);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        __atomic_add_barrier(-0x30000, &rw->cnts);
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        int old;

        old = READ_ONCE(rw->cnts);
        return (!(old & 0xffff0000) &&
                __atomic_cmpxchg_bool(&rw->cnts, old, old + 1));
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        int old;

        old = READ_ONCE(rw->cnts);
        return !old && __atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000);
}

#endif /* __ASM_SPINLOCK_H */