arch/arm/include/asm/proc-armv/system.h
/*
 *  linux/include/asm-arm/proc-armv/system.h
 *
 *  Copyright (C) 1996 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_PROC_SYSTEM_H
#define __ASM_PROC_SYSTEM_H

/*
 * Save the current interrupt enable state & disable IRQs
 */
#ifdef CONFIG_ARM64

/*
 * Save the current interrupt enable state
 * and disable IRQs/FIQs
 */
#define local_irq_save(flags)                                   \
        ({                                                      \
        asm volatile(                                           \
        "mrs    %0, daif\n"                                     \
        "msr    daifset, #3"                                    \
        : "=r" (flags)                                          \
        :                                                       \
        : "memory");                                            \
        })
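
/*
 * Note on the DAIFSet/DAIFClr immediate used above and below: bit 3 = D
 * (debug exceptions), bit 2 = A (SError), bit 1 = I (IRQ), bit 0 = F (FIQ).
 * The #3 therefore masks or unmasks IRQs and FIQs together while leaving
 * the D and A masks untouched.
 */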

/*
 * restore saved IRQ & FIQ state
 */
#define local_irq_restore(flags)                                \
        ({                                                      \
        asm volatile(                                           \
        "msr    daif, %0"                                       \
        :                                                       \
        : "r" (flags)                                           \
        : "memory");                                            \
        })
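
/*
 * Typical caller pattern (illustrative sketch): save the current state,
 * run the code that must not be interrupted, then restore the saved state.
 *
 *      unsigned long flags;
 *
 *      local_irq_save(flags);
 *      ... code that must not be interrupted ...
 *      local_irq_restore(flags);
 */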

/*
 * Enable IRQs/FIQs
 */
#define local_irq_enable()                                      \
        ({                                                      \
        asm volatile(                                           \
        "msr    daifclr, #3"                                    \
        :                                                       \
        :                                                       \
        : "memory");                                            \
        })

/*
 * Disable IRQs/FIQs
 */
#define local_irq_disable()                                     \
        ({                                                      \
        asm volatile(                                           \
        "msr    daifset, #3"                                    \
        :                                                       \
        :                                                       \
        : "memory");                                            \
        })

#else   /* CONFIG_ARM64 */

#define local_irq_save(x)                                       \
        ({                                                      \
                unsigned long temp;                             \
        __asm__ __volatile__(                                   \
        "mrs    %0, cpsr                @ local_irq_save\n"     \
"       orr     %1, %0, #128\n"                                 \
"       msr     cpsr_c, %1"                                     \
        : "=r" (x), "=r" (temp)                                 \
        :                                                       \
        : "memory");                                            \
        })
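
/*
 * On 32-bit ARM the immediates above and below refer to CPSR bits:
 * #128 (0x80) is the I bit (IRQ mask) and #64 (0x40) is the F bit
 * (FIQ mask). Writing cpsr_c updates only the control field, i.e. the
 * mode, T, F and I bits, leaving the condition flags untouched.
 */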

/*
 * Enable IRQs
 */
#define local_irq_enable()                                      \
        ({                                                      \
                unsigned long temp;                             \
        __asm__ __volatile__(                                   \
        "mrs    %0, cpsr                @ local_irq_enable\n"   \
"       bic     %0, %0, #128\n"                                 \
"       msr     cpsr_c, %0"                                     \
        : "=r" (temp)                                           \
        :                                                       \
        : "memory");                                            \
        })

/*
 * Disable IRQs
 */
#define local_irq_disable()                                     \
        ({                                                      \
                unsigned long temp;                             \
        __asm__ __volatile__(                                   \
        "mrs    %0, cpsr                @ local_irq_disable\n"  \
"       orr     %0, %0, #128\n"                                 \
"       msr     cpsr_c, %0"                                     \
        : "=r" (temp)                                           \
        :                                                       \
        : "memory");                                            \
        })

/*
 * Enable FIQs
 */
#define __stf()                                                 \
        ({                                                      \
                unsigned long temp;                             \
        __asm__ __volatile__(                                   \
        "mrs    %0, cpsr                @ stf\n"                \
"       bic     %0, %0, #64\n"                                  \
"       msr     cpsr_c, %0"                                     \
        : "=r" (temp)                                           \
        :                                                       \
        : "memory");                                            \
        })

/*
 * Disable FIQs
 */
#define __clf()                                                 \
        ({                                                      \
                unsigned long temp;                             \
        __asm__ __volatile__(                                   \
        "mrs    %0, cpsr                @ clf\n"                \
"       orr     %0, %0, #64\n"                                  \
"       msr     cpsr_c, %0"                                     \
        : "=r" (temp)                                           \
        :                                                       \
        : "memory");                                            \
        })

/*
 * Save the current interrupt enable state.
 */
#define local_save_flags(x)                                     \
        ({                                                      \
        __asm__ __volatile__(                                   \
        "mrs    %0, cpsr                @ local_save_flags\n"   \
          : "=r" (x)                                            \
          :                                                     \
          : "memory");                                          \
        })
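
/*
 * Unlike local_irq_save(), local_save_flags() only records the current
 * CPSR without masking anything; pair it with local_irq_restore() below
 * if the saved state is to be written back later.
 */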

/*
 * restore saved IRQ & FIQ state
 */
#define local_irq_restore(x)                                    \
        __asm__ __volatile__(                                   \
        "msr    cpsr_c, %0              @ local_irq_restore\n"  \
        :                                                       \
        : "r" (x)                                               \
        : "memory")

#endif  /* CONFIG_ARM64 */

#if defined(CONFIG_ARM64)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
        extern void __bad_xchg(volatile void *, int);
        unsigned long ret;
#ifdef swp_is_buggy
        unsigned long flags;
#endif

        switch (size) {
#ifdef swp_is_buggy
                case 1:
                        local_irq_save(flags);
                        ret = *(volatile unsigned char *)ptr;
                        *(volatile unsigned char *)ptr = x;
                        local_irq_restore(flags);
                        break;

                case 4:
                        local_irq_save(flags);
                        ret = *(volatile unsigned long *)ptr;
                        *(volatile unsigned long *)ptr = x;
                        local_irq_restore(flags);
                        break;
#else
                case 1: __asm__ __volatile__ ("swpb %0, %1, [%2]"
                                        : "=&r" (ret)
                                        : "r" (x), "r" (ptr)
                                        : "memory");
                        break;
                case 4: __asm__ __volatile__ ("swp %0, %1, [%2]"
                                        : "=&r" (ret)
                                        : "r" (x), "r" (ptr)
                                        : "memory");
                        break;
#endif
                default: __bad_xchg(ptr, size), ret = 0;
        }

        return ret;
}
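
/*
 * __xchg() is not normally called directly.  A wrapper along the usual
 * Linux-style convention (shown here only as an illustrative sketch)
 * lets callers write xchg(&var, new) for any 1- or 4-byte object:
 *
 *      #define xchg(ptr, x) \
 *              ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
 */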

#endif  /* __ASM_PROC_SYSTEM_H */