/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/uaccess.h
 */
#ifndef _ASMARM_UACCESS_H
#define _ASMARM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/string.h>
#include <asm/memory.h>
#include <asm/domain.h>
#include <asm/unaligned.h>
#include <asm/unified.h>
#include <asm/compiler.h>

#include <asm/extable.h>

/*
 * These two functions allow hooking accesses to userspace to increase
 * system integrity by ensuring that the kernel can not inadvertently
 * perform such accesses (e.g. via list poison values) which could then
 * be exploited for privilege escalation.
 */
static __always_inline unsigned int uaccess_save_and_enable(void)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	unsigned int old_domain = get_domain();

	/* Set the current domain access to permit user accesses */
	set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
		   domain_val(DOMAIN_USER, DOMAIN_CLIENT));

	return old_domain;
#else
	return 0;
#endif
}

static __always_inline void uaccess_restore(unsigned int flags)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/* Restore the user access mask */
	set_domain(flags);
#endif
}
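
/*
 * Illustrative use (a sketch of the pattern, mirroring what the
 * raw_copy_*_user() helpers later in this header do):
 *
 *	unsigned int ua_flags = uaccess_save_and_enable();
 *	n = arm_copy_from_user(to, from, n);	- raw access while open
 *	uaccess_restore(ua_flags);
 */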

/*
 * These two are intentionally not defined anywhere - if the kernel
 * code generates any references to them, that's a bug.
 */
extern int __get_user_bad(void);
extern int __put_user_bad(void);

#ifdef CONFIG_MMU

/*
 * We use 33-bit arithmetic here.  Success returns zero, failure returns
 * addr_limit.  We take advantage that addr_limit will be zero for KERNEL_DS,
 * so this will always return success in that case.
 */
#define __range_ok(addr, size) ({ \
	unsigned long flag, roksum; \
	__chk_user_ptr(addr); \
	__asm__(".syntax unified\n" \
		"adds %1, %2, %3; sbcscc %1, %1, %0; movcc %0, #0" \
		: "=&r" (flag), "=&r" (roksum) \
		: "r" (addr), "Ir" (size), "0" (TASK_SIZE) \
		: "cc"); \
	flag; })

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
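
/*
 * For example, __inttype(char) and __inttype(int) evaluate to
 * unsigned long, while __inttype(long long) evaluates to unsigned
 * long long, so a 64-bit get_user() result can be staged in a wide
 * enough temporary.
 */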

/*
 * Sanitise a uaccess pointer such that it becomes NULL if addr+size
 * is above the current addr_limit.
 */
#define uaccess_mask_range_ptr(ptr, size)			\
	((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
						    size_t size)
{
	void __user *safe_ptr = (void __user *)ptr;
	unsigned long tmp;

	asm volatile(
	"	.syntax unified\n"
	"	sub	%1, %3, #1\n"
	"	subs	%1, %1, %0\n"
	"	addhs	%1, %1, #1\n"
	"	subshs	%1, %1, %2\n"
	"	movlo	%0, #0\n"
	: "+r" (safe_ptr), "=&r" (tmp)
	: "r" (size), "r" (TASK_SIZE)
	: "cc");

	csdb();
	return safe_ptr;
}

/*
 * Single-value transfer routines.  They automatically use the right
 * size if we just have the right pointer type.  Note that the functions
 * which read from user space (*get_*) need to take care not to leak
 * kernel data even if the calling code is buggy and fails to check
 * the return value.  This means zeroing out the destination variable
 * or buffer on error.  Normally this is done out of line by the
 * fixup code, but there are a few places where it intrudes on the
 * main code path.  When we only write to user space, there is no
 * problem.
 */
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
extern int __get_user_32t_8(void *);
extern int __get_user_8(void *);
extern int __get_user_64t_1(void *);
extern int __get_user_64t_2(void *);
extern int __get_user_64t_4(void *);

#define __GUP_CLOBBER_1	"lr", "cc"
#ifdef CONFIG_CPU_USE_DOMAINS
#define __GUP_CLOBBER_2	"ip", "lr", "cc"
#else
#define __GUP_CLOBBER_2 "lr", "cc"
#endif
#define __GUP_CLOBBER_4	"lr", "cc"
#define __GUP_CLOBBER_32t_8 "lr", "cc"
#define __GUP_CLOBBER_8	"lr", "cc"

#define __get_user_x(__r2, __p, __e, __l, __s)				\
	   __asm__ __volatile__ (					\
		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
		__asmeq("%3", "r1")					\
		"bl	__get_user_" #__s				\
		: "=&r" (__e), "=r" (__r2)				\
		: "0" (__p), "r" (__l)					\
		: __GUP_CLOBBER_##__s)

/* narrowing a double-word get into a single 32bit word register: */
#ifdef __ARMEB__
#define __get_user_x_32t(__r2, __p, __e, __l, __s)			\
	__get_user_x(__r2, __p, __e, __l, 32t_8)
#else
#define __get_user_x_32t __get_user_x
#endif

/*
 * storing result into proper least significant word of 64bit target var,
 * different only for big endian case where 64 bit __r2 lsw is r3:
 */
#ifdef __ARMEB__
#define __get_user_x_64t(__r2, __p, __e, __l, __s)			\
	   __asm__ __volatile__ (					\
		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
		__asmeq("%3", "r1")					\
		"bl	__get_user_64t_" #__s				\
		: "=&r" (__e), "=r" (__r2)				\
		: "0" (__p), "r" (__l)					\
		: __GUP_CLOBBER_##__s)
#else
#define __get_user_x_64t __get_user_x
#endif

#define __get_user_check(x, p)						\
	({								\
		unsigned long __limit = TASK_SIZE - 1;			\
		register typeof(*(p)) __user *__p asm("r0") = (p);	\
		register __inttype(x) __r2 asm("r2");			\
		register unsigned long __l asm("r1") = __limit;		\
		register int __e asm("r0");				\
		unsigned int __ua_flags = uaccess_save_and_enable();	\
									\
		switch (sizeof(*(__p))) {				\
		case 1:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 1); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 1);	\
			break;						\
		case 2:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 2); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 2);	\
			break;						\
		case 4:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 4); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 4);	\
			break;						\
		case 8:							\
			if (sizeof((x)) < 8)				\
				__get_user_x_32t(__r2, __p, __e, __l, 4); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 8);	\
			break;						\
		default: __e = __get_user_bad(); break;			\
		}							\
									\
		uaccess_restore(__ua_flags);				\
		x = (typeof(*(p))) __r2;				\
		__e;							\
	})

#define get_user(x, p)							\
	({								\
		might_fault();						\
		__get_user_check(x, p);					\
	 })
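
/*
 * Illustrative use (sketch; 'arg' is a hypothetical user pointer):
 *
 *	u32 val;
 *
 *	if (get_user(val, (u32 __user *)arg))
 *		return -EFAULT;
 */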

extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);

#define __put_user_check(__pu_val, __ptr, __err, __s)			\
	({								\
		unsigned long __limit = TASK_SIZE - 1;			\
		register typeof(__pu_val) __r2 asm("r2") = __pu_val;	\
		register const void __user *__p asm("r0") = __ptr;	\
		register unsigned long __l asm("r1") = __limit;		\
		register int __e asm("r0");				\
		__asm__ __volatile__ (					\
			__asmeq("%0", "r0") __asmeq("%2", "r2")		\
			__asmeq("%3", "r1")				\
			"bl	__put_user_" #__s			\
			: "=&r" (__e)					\
			: "0" (__p), "r" (__r2), "r" (__l)		\
			: "ip", "lr", "cc");				\
		__err = __e;						\
	})

#else /* CONFIG_MMU */

#define __addr_ok(addr)		((void)(addr), 1)
#define __range_ok(addr, size)	((void)(addr), 0)

#define get_user(x, p)	__get_user(x, p)
#define __put_user_check __put_user_nocheck

#endif /* CONFIG_MMU */

#define access_ok(addr, size)	(__range_ok(addr, size) == 0)

#ifdef CONFIG_CPU_SPECTRE
/*
 * When mitigating Spectre variant 1, it is not worth fixing the non-
 * verifying accessors, because we need to add verification of the
 * address space there.  Force these to use the standard get_user()
 * entry points.
 */
#define __get_user(x, ptr) get_user(x, ptr)
#else

/*
 * The "__xxx" versions of the user access functions do not verify the
 * address space - it must have been done previously with a separate
 * "access_ok()" call.
 *
 * The "xxx_error" versions set the third argument to EFAULT if an
 * error occurs, and leave it unchanged on success.  Note that these
 * versions are void (i.e., don't return a value as such).
 */
#define __get_user(x, ptr)						\
	({								\
		long __gu_err = 0;					\
		__get_user_err((x), (ptr), __gu_err, TUSER());		\
		__gu_err;						\
	})
#endif
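
/*
 * Illustrative pattern (sketch with hypothetical variables): verify
 * once, then use the unchecked accessor:
 *
 *	if (!access_ok(uaddr, n * sizeof(u32)))
 *		return -EFAULT;
 *	for (i = 0; i < n; i++)
 *		if (__get_user(tmp[i], uaddr + i))
 *			return -EFAULT;
 *
 * Under CONFIG_CPU_SPECTRE, __get_user() above simply aliases to
 * get_user(), so per-access verification is kept.
 */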

#define __get_user_err(x, ptr, err, __t)				\
do {									\
	unsigned long __gu_addr = (unsigned long)(ptr);			\
	unsigned long __gu_val;						\
	unsigned int __ua_flags;					\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	__ua_flags = uaccess_save_and_enable();				\
	switch (sizeof(*(ptr))) {					\
	case 1:	__get_user_asm_byte(__gu_val, __gu_addr, err, __t); break;	\
	case 2:	__get_user_asm_half(__gu_val, __gu_addr, err, __t); break;	\
	case 4:	__get_user_asm_word(__gu_val, __gu_addr, err, __t); break;	\
	default: (__gu_val) = __get_user_bad();				\
	}								\
	uaccess_restore(__ua_flags);					\
	(x) = (__typeof__(*(ptr)))__gu_val;				\
} while (0)

#define __get_user_asm(x, addr, err, instr)			\
	__asm__ __volatile__(					\
	"1:	" instr " %1, [%2], #0\n"			\
	"2:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	mov	%1, #0\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err), "=&r" (x)					\
	: "r" (addr), "i" (-EFAULT)				\
	: "cc")

#define __get_user_asm_byte(x, addr, err, __t)			\
	__get_user_asm(x, addr, err, "ldrb" __t)

#if __LINUX_ARM_ARCH__ >= 6

#define __get_user_asm_half(x, addr, err, __t)			\
	__get_user_asm(x, addr, err, "ldrh" __t)

#else

#ifndef __ARMEB__
#define __get_user_asm_half(x, __gu_addr, err, __t)		\
({								\
	unsigned long __b1, __b2;				\
	__get_user_asm_byte(__b1, __gu_addr, err, __t);		\
	__get_user_asm_byte(__b2, __gu_addr + 1, err, __t);	\
	(x) = __b1 | (__b2 << 8);				\
})
#else
#define __get_user_asm_half(x, __gu_addr, err, __t)		\
({								\
	unsigned long __b1, __b2;				\
	__get_user_asm_byte(__b1, __gu_addr, err, __t);		\
	__get_user_asm_byte(__b2, __gu_addr + 1, err, __t);	\
	(x) = (__b1 << 8) | __b2;				\
})
#endif

#endif /* __LINUX_ARM_ARCH__ >= 6 */
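
/*
 * Worked example (illustrative): reading the halfword 0x1234.
 * Little-endian memory holds the bytes {0x34, 0x12}, so
 * __b1 | (__b2 << 8) = 0x34 | 0x1200 = 0x1234; big-endian memory
 * holds {0x12, 0x34}, and (__b1 << 8) | __b2 reconstructs the same
 * value.
 */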

#define __get_user_asm_word(x, addr, err, __t)			\
	__get_user_asm(x, addr, err, "ldr" __t)

#define __put_user_switch(x, ptr, __err, __fn)				\
	do {								\
		const __typeof__(*(ptr)) __user *__pu_ptr = (ptr);	\
		__typeof__(*(ptr)) __pu_val = (x);			\
		unsigned int __ua_flags;				\
		might_fault();						\
		__ua_flags = uaccess_save_and_enable();			\
		switch (sizeof(*(ptr))) {				\
		case 1: __fn(__pu_val, __pu_ptr, __err, 1); break;	\
		case 2:	__fn(__pu_val, __pu_ptr, __err, 2); break;	\
		case 4:	__fn(__pu_val, __pu_ptr, __err, 4); break;	\
		case 8:	__fn(__pu_val, __pu_ptr, __err, 8); break;	\
		default: __err = __put_user_bad(); break;		\
		}							\
		uaccess_restore(__ua_flags);				\
	} while (0)

#define put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_switch((x), (ptr), __pu_err, __put_user_check);	\
	__pu_err;							\
})
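
/*
 * Illustrative use (sketch; 'argp' is a hypothetical user pointer):
 *
 *	if (put_user(status, (int __user *)argp))
 *		return -EFAULT;
 */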

#ifdef CONFIG_CPU_SPECTRE
/*
 * When mitigating Spectre variant 1.1, all accessors need to include
 * verification of the address space.
 */
#define __put_user(x, ptr) put_user(x, ptr)

#else
#define __put_user(x, ptr)						\
({									\
	long __pu_err = 0;						\
	__put_user_switch((x), (ptr), __pu_err, __put_user_nocheck);	\
	__pu_err;							\
})

#define __put_user_nocheck(x, __pu_ptr, __err, __size)			\
	do {								\
		unsigned long __pu_addr = (unsigned long)__pu_ptr;	\
		__put_user_nocheck_##__size(x, __pu_addr, __err, TUSER());\
	} while (0)

#define __put_user_nocheck_1 __put_user_asm_byte
#define __put_user_nocheck_2 __put_user_asm_half
#define __put_user_nocheck_4 __put_user_asm_word
#define __put_user_nocheck_8 __put_user_asm_dword

#endif /* !CONFIG_CPU_SPECTRE */

#define __put_user_asm(x, __pu_addr, err, instr)		\
	__asm__ __volatile__(					\
	"1:	" instr " %1, [%2], #0\n"			\
	"2:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err)						\
	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)		\
	: "cc")

#define __put_user_asm_byte(x, __pu_addr, err, __t)		\
	__put_user_asm(x, __pu_addr, err, "strb" __t)

#if __LINUX_ARM_ARCH__ >= 6

#define __put_user_asm_half(x, __pu_addr, err, __t)		\
	__put_user_asm(x, __pu_addr, err, "strh" __t)

#else

#ifndef __ARMEB__
#define __put_user_asm_half(x, __pu_addr, err, __t)		\
({								\
	unsigned long __temp = (__force unsigned long)(x);	\
	__put_user_asm_byte(__temp, __pu_addr, err, __t);	\
	__put_user_asm_byte(__temp >> 8, __pu_addr + 1, err, __t);\
})
#else
#define __put_user_asm_half(x, __pu_addr, err, __t)		\
({								\
	unsigned long __temp = (__force unsigned long)(x);	\
	__put_user_asm_byte(__temp >> 8, __pu_addr, err, __t);	\
	__put_user_asm_byte(__temp, __pu_addr + 1, err, __t);	\
})
#endif

#endif /* __LINUX_ARM_ARCH__ >= 6 */

#define __put_user_asm_word(x, __pu_addr, err, __t)		\
	__put_user_asm(x, __pu_addr, err, "str" __t)

#ifndef __ARMEB__
#define	__reg_oper0	"%R2"
#define	__reg_oper1	"%Q2"
#else
#define	__reg_oper0	"%Q2"
#define	__reg_oper1	"%R2"
#endif

#define __put_user_asm_dword(x, __pu_addr, err, __t)		\
	__asm__ __volatile__(					\
 ARM(	"1:	str" __t "	" __reg_oper1 ", [%1], #4\n"  ) \
 ARM(	"2:	str" __t "	" __reg_oper0 ", [%1]\n"      ) \
 THUMB(	"1:	str" __t "	" __reg_oper1 ", [%1]\n"      ) \
 THUMB(	"2:	str" __t "	" __reg_oper0 ", [%1, #4]\n"  ) \
	"3:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"4:	mov	%0, %3\n"				\
	"	b	3b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 4b\n"				\
	"	.long	2b, 4b\n"				\
	"	.popsection"					\
	: "+r" (err), "+r" (__pu_addr)				\
	: "r" (x), "i" (-EFAULT)				\
	: "cc")

#define HAVE_GET_KERNEL_NOFAULT

#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	const type *__pk_ptr = (src);					\
	unsigned long __src = (unsigned long)(__pk_ptr);		\
	type __val;							\
	int __err = 0;							\
	switch (sizeof(type)) {						\
	case 1:	__get_user_asm_byte(__val, __src, __err, ""); break;	\
	case 2: __get_user_asm_half(__val, __src, __err, ""); break;	\
	case 4: __get_user_asm_word(__val, __src, __err, ""); break;	\
	case 8: {							\
		u32 *__v32 = (u32*)&__val;				\
		__get_user_asm_word(__v32[0], __src, __err, "");	\
		if (__err)						\
			break;						\
		__get_user_asm_word(__v32[1], __src+4, __err, "");	\
		break;							\
	}								\
	default: __err = __get_user_bad(); break;			\
	}								\
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))	\
		put_unaligned(__val, (type *)(dst));			\
	else								\
		*(type *)(dst) = __val; /* aligned by caller */		\
	if (__err)							\
		goto err_label;						\
} while (0)

#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	const type *__pk_ptr = (dst);					\
	unsigned long __dst = (unsigned long)__pk_ptr;			\
	int __err = 0;							\
	type __val = IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)	\
		     ? get_unaligned((type *)(src))			\
		     : *(type *)(src);	/* aligned by caller */		\
	switch (sizeof(type)) {						\
	case 1: __put_user_asm_byte(__val, __dst, __err, ""); break;	\
	case 2:	__put_user_asm_half(__val, __dst, __err, ""); break;	\
	case 4:	__put_user_asm_word(__val, __dst, __err, ""); break;	\
	case 8:	__put_user_asm_dword(__val, __dst, __err, ""); break;	\
	default: __err = __put_user_bad(); break;			\
	}								\
	if (__err)							\
		goto err_label;						\
} while (0)
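
/*
 * Illustrative use (sketch; probe_kernel_word() is a hypothetical
 * helper, not part of this header): the *_nofault accessors branch to
 * the caller-supplied label on a fault instead of returning an error:
 *
 *	static long probe_kernel_word(u32 *dst, const u32 *src)
 *	{
 *		__get_kernel_nofault(dst, src, u32, efault);
 *		return 0;
 *	efault:
 *		return -EFAULT;
 *	}
 */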

#ifdef CONFIG_MMU
extern unsigned long __must_check
arm_copy_from_user(void *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned int __ua_flags;

	__ua_flags = uaccess_save_and_enable();
	n = arm_copy_from_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
}

extern unsigned long __must_check
arm_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check
__copy_to_user_std(void __user *to, const void *from, unsigned long n);

static inline unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
#ifndef CONFIG_UACCESS_WITH_MEMCPY
	unsigned int __ua_flags;
	__ua_flags = uaccess_save_and_enable();
	n = arm_copy_to_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
#else
	return arm_copy_to_user(to, from, n);
#endif
}

extern unsigned long __must_check
arm_clear_user(void __user *addr, unsigned long n);
extern unsigned long __must_check
__clear_user_std(void __user *addr, unsigned long n);

static inline unsigned long __must_check
__clear_user(void __user *addr, unsigned long n)
{
	unsigned int __ua_flags = uaccess_save_and_enable();
	n = arm_clear_user(addr, n);
	uaccess_restore(__ua_flags);
	return n;
}

#else
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	memcpy(to, (const void __force *)from, n);
	return 0;
}
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	memcpy((void __force *)to, from, n);
	return 0;
}
#define __clear_user(addr, n)		(memset((void __force *)addr, 0, n), 0)
#endif

#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n))
		n = __clear_user(to, n);
	return n;
}

/* These are from lib/ code, and use __get_user() and friends */
extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#endif /* _ASMARM_UACCESS_H */