/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/instrumented.h>
#include <linux/kasan-checks.h>
#include <linux/mm_types.h>
#include <linux/string.h>
#include <linux/mmap_lock.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline bool pagefault_disabled(void);
# define WARN_ON_IN_IRQ()	\
	WARN_ON_ONCE(!in_task() && !pagefault_disabled())
#else
# define WARN_ON_IN_IRQ()
#endif

#ifdef CONFIG_ADDRESS_MASKING
/*
 * Mask out tag bits from the address.
 *
 * Magic with the 'sign' allows untagging a userspace pointer without any
 * branches while leaving kernel addresses intact.
 */
static inline unsigned long __untagged_addr(unsigned long addr)
{
	long sign;

	/*
	 * Refer to tlbstate_untag_mask directly to avoid a RIP-relative
	 * relocation in the alternative instructions: the relocation would
	 * be wrong once the code is copied to its target place.
	 */
	asm (ALTERNATIVE("",
			 "sar $63, %[sign]\n\t" /* user_ptr ? 0 : -1UL */
			 "or %%gs:tlbstate_untag_mask, %[sign]\n\t"
			 "and %[sign], %[addr]\n\t", X86_FEATURE_LAM)
	     : [addr] "+r" (addr), [sign] "=r" (sign)
	     : "m" (tlbstate_untag_mask), "[sign]" (addr));

	return addr;
}

#define untagged_addr(addr)	({					\
	unsigned long __addr = (__force unsigned long)(addr);		\
	(__force __typeof__(addr))__untagged_addr(__addr);		\
})

static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
						   unsigned long addr)
{
	long sign = addr >> 63;

	mmap_assert_locked(mm);
	addr &= (mm)->context.untag_mask | sign;

	return addr;
}

#define untagged_addr_remote(mm, addr)	({				\
	unsigned long __addr = (__force unsigned long)(addr);		\
	(__force __typeof__(addr))__untagged_addr_remote(mm, __addr);	\
})

#else
#define untagged_addr(addr)	(addr)
#endif

/**
 * access_ok - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 *
 * Return: true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 */
#define access_ok(addr, size)						\
({									\
	WARN_ON_IN_IRQ();						\
	likely(__access_ok(untagged_addr(addr), size));			\
})
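
/*
 * Illustrative sketch (not part of this header): a common pattern is to
 * validate a whole user buffer once, then use the __-prefixed accessors
 * that skip the range check:
 *
 *	u8 first;
 *
 *	if (!access_ok(ubuf, len))
 *		return -EFAULT;
 *	if (__get_user(first, (const u8 __user *)ubuf))
 *		return -EFAULT;
 */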

#include <asm-generic/access_ok.h>

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_nocheck_1(void);
extern int __get_user_nocheck_2(void);
extern int __get_user_nocheck_4(void);
extern int __get_user_nocheck_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()
#define __uaccess_begin_nospec()	\
({					\
	stac();				\
	barrier_nospec();		\
})

/*
 * This is the smallest unsigned integer type that can fit a value
 * (up to 'long long')
 */
#define __inttype(x) __typeof__(		\
	__typefits(x,char,			\
	  __typefits(x,short,			\
	    __typefits(x,int,			\
	      __typefits(x,long,0ULL)))))

#define __typefits(x,type,not) \
	__builtin_choose_expr(sizeof(x)<=sizeof(type),(unsigned type)0,not)
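
/*
 * Illustrative note (not part of this header): __inttype() selects the
 * narrowest unsigned type the value fits in, e.g.
 *
 *	__inttype(*(u8  __user *)p)	-> unsigned char
 *	__inttype(*(u16 __user *)p)	-> unsigned short
 *	__inttype(*(u64 __user *)p)	-> unsigned long on x86-64
 *					   (unsigned long long on i386)
 *
 * so do_get_user_call() below can funnel any supported size through a
 * single correctly-sized register variable.
 */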

/*
 * This is used for both get_user() and __get_user() to expand to
 * the proper special function call that has odd calling conventions
 * due to returning both a value and an error, and that depends on
 * the size of the pointer passed in.
 *
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define do_get_user_call(fn,x,ptr)					\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	__chk_user_ptr(ptr);						\
	asm volatile("call __" #fn "_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu),		\
			ASM_CALL_CONSTRAINT				\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	instrument_get_user(__val_gu);					\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})
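
/*
 * Illustrative sketch (not part of this header): for a 4-byte target,
 * get_user(val, uptr) boils down to roughly
 *
 *	call __get_user_4	# pointer in %eax/%rax (the "0" constraint),
 *				# error code back in %eax, value in %edx
 *
 * i.e. the "%P4" modifier pastes sizeof(*(ptr)) into the symbol name to
 * select one of the __get_user_<size> helpers declared above.
 */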

/**
 * get_user - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x,ptr) ({ might_fault(); do_get_user_call(get_user,x,ptr); })
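
/*
 * Illustrative sketch (not part of this header): a typical call site in
 * a hypothetical ioctl handler, where "arg" is the user-supplied value:
 *
 *	int __user *uptr = (int __user *)arg;
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 */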

/**
 * __get_user - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr) do_get_user_call(get_user_nocheck,x,ptr)

#ifdef CONFIG_X86_32
#define __put_user_goto_u64(x, addr, label)			\
	asm_volatile_goto("\n"					\
		     "1:	movl %%eax,0(%1)\n"		\
		     "2:	movl %%edx,4(%1)\n"		\
		     _ASM_EXTABLE_UA(1b, %l2)			\
		     _ASM_EXTABLE_UA(2b, %l2)			\
		     : : "A" (x), "r" (addr)			\
		     : : label)
#else
#define __put_user_goto_u64(x, ptr, label) \
	__put_user_goto(x, ptr, "q", "er", label)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %ecx. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
extern void __put_user_nocheck_1(void);
extern void __put_user_nocheck_2(void);
extern void __put_user_nocheck_4(void);
extern void __put_user_nocheck_8(void);

/*
 * ptr must be evaluated and assigned to the temporary __ptr_pu before
 * the assignment of x to __val_pu, to avoid any function calls
 * involved in the ptr expression (possibly implicitly generated due
 * to KASAN) from clobbering %ax.
 */
#define do_put_user_call(fn,x,ptr)					\
({									\
	int __ret_pu;							\
	void __user *__ptr_pu;						\
	register __typeof__(*(ptr)) __val_pu asm("%"_ASM_AX);		\
	__typeof__(*(ptr)) __x = (x); /* eval x once */			\
	__typeof__(ptr) __ptr = (ptr); /* eval ptr once */		\
	__chk_user_ptr(__ptr);						\
	__ptr_pu = __ptr;						\
	__val_pu = __x;							\
	asm volatile("call __" #fn "_%P[size]"				\
		     : "=c" (__ret_pu),					\
			ASM_CALL_CONSTRAINT				\
		     : "0" (__ptr_pu),					\
		       "r" (__val_pu),					\
		       [size] "i" (sizeof(*(ptr)))			\
		     :"ebx");						\
	instrument_put_user(__x, __ptr, sizeof(*(ptr)));		\
	__builtin_expect(__ret_pu, 0);					\
})

/**
 * put_user - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr) ({ might_fault(); do_put_user_call(put_user,x,ptr); })
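
/*
 * Illustrative sketch (not part of this header): returning a result
 * through a hypothetical user-supplied pointer:
 *
 *	u32 __user *uresult = (u32 __user *)arg;
 *
 *	if (put_user(status, uresult))
 *		return -EFAULT;
 */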

/**
 * __put_user - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr) do_put_user_call(put_user_nocheck,x,ptr)

#define __put_user_size(x, ptr, size, label)				\
do {									\
	__typeof__(*(ptr)) __x = (x); /* eval x once */			\
	__typeof__(ptr) __ptr = (ptr); /* eval ptr once */		\
	__chk_user_ptr(__ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_goto(__x, __ptr, "b", "iq", label);		\
		break;							\
	case 2:								\
		__put_user_goto(__x, __ptr, "w", "ir", label);		\
		break;							\
	case 4:								\
		__put_user_goto(__x, __ptr, "l", "ir", label);		\
		break;							\
	case 8:								\
		__put_user_goto_u64(__x, __ptr, label);			\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
	instrument_put_user(__x, __ptr, size);				\
} while (0)

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, label) do {				\
	unsigned int __gu_low, __gu_high;				\
	const unsigned int __user *__gu_ptr;				\
	__gu_ptr = (const void __user *)(ptr);				\
	__get_user_asm(__gu_low, __gu_ptr, "l", "=r", label);		\
	__get_user_asm(__gu_high, __gu_ptr+1, "l", "=r", label);	\
	(x) = ((unsigned long long)__gu_high << 32) | __gu_low;	\
} while (0)
#else
#define __get_user_asm_u64(x, ptr, label)				\
	__get_user_asm(x, ptr, "q", "=r", label)
#endif

#define __get_user_size(x, ptr, size, label)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:	{							\
		unsigned char x_u8__;					\
		__get_user_asm(x_u8__, ptr, "b", "=q", label);		\
		(x) = x_u8__;						\
		break;							\
	}								\
	case 2:								\
		__get_user_asm(x, ptr, "w", "=r", label);		\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, "l", "=r", label);		\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, label);			\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
	instrument_get_user(x);						\
} while (0)

#define __get_user_asm(x, addr, itype, ltype, label)			\
	asm_volatile_goto("\n"						\
		     "1:	mov"itype" %[umem],%[output]\n"		\
		     _ASM_EXTABLE_UA(1b, %l2)				\
		     : [output] ltype(x)				\
		     : [umem] "m" (__m(addr))				\
		     : : label)

#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval)				\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile("\n"						\
		     "1:	movl %[lowbits],%%eax\n"		\
		     "2:	movl %[highbits],%%edx\n"		\
		     "3:\n"						\
		     _ASM_EXTABLE_TYPE_REG(1b, 3b, EX_TYPE_EFAULT_REG |	\
					   EX_FLAG_CLEAR_AX_DX,		\
					   %[errout])			\
		     _ASM_EXTABLE_TYPE_REG(2b, 3b, EX_TYPE_EFAULT_REG |	\
					   EX_FLAG_CLEAR_AX_DX,		\
					   %[errout])			\
		     : [errout] "=r" (retval),				\
		       [output] "=&A"(x)				\
		     : [lowbits] "m" (__m(__ptr)),			\
		       [highbits] "m" __m(((u32 __user *)(__ptr)) + 1),	\
		       "0" (retval));					\
})

#else
#define __get_user_asm_u64(x, ptr, retval) \
	__get_user_asm(x, ptr, retval, "q")
#endif

#define __get_user_size(x, ptr, size, retval)				\
do {									\
	unsigned char x_u8__;						\
									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x_u8__, ptr, retval, "b");		\
		(x) = x_u8__;						\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w");			\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l");			\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval);			\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype)				\
	asm volatile("\n"						\
		     "1:	mov"itype" %[umem],%[output]\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG |	\
					   EX_FLAG_CLEAR_AX,		\
					   %[errout])			\
		     : [errout] "=r" (err),				\
		       [output] "=a" (x)				\
		     : [umem] "m" (__m(addr)),				\
		       "0" (err))

#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#ifdef CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label)	({ \
	bool success;							\
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
	__typeof__(*(_ptr)) __old = *_old;				\
	__typeof__(*(_ptr)) __new = (_new);				\
	asm_volatile_goto("\n"						\
		     "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
		     _ASM_EXTABLE_UA(1b, %l[label])			\
		     : CC_OUT(z) (success),				\
		       [ptr] "+m" (*_ptr),				\
		       [old] "+a" (__old)				\
		     : [new] ltype (__new)				\
		     : "memory"						\
		     : label);						\
	if (unlikely(!success))						\
		*_old = __old;						\
	likely(success);					})

#ifdef CONFIG_X86_32
#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label)	({	\
	bool success;							\
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
	__typeof__(*(_ptr)) __old = *_old;				\
	__typeof__(*(_ptr)) __new = (_new);				\
	asm_volatile_goto("\n"						\
		     "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n"		\
		     _ASM_EXTABLE_UA(1b, %l[label])			\
		     : CC_OUT(z) (success),				\
		       "+A" (__old),					\
		       [ptr] "+m" (*_ptr)				\
		     : "b" ((u32)__new),				\
		       "c" ((u32)((u64)__new >> 32))			\
		     : "memory"						\
		     : label);						\
	if (unlikely(!success))						\
		*_old = __old;						\
	likely(success);					})
#endif // CONFIG_X86_32
#else // !CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label)	({ \
	int __err = 0;							\
	bool success;							\
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
	__typeof__(*(_ptr)) __old = *_old;				\
	__typeof__(*(_ptr)) __new = (_new);				\
	asm volatile("\n"						\
		     "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
		     CC_SET(z)						\
		     "2:\n"						\
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG,	\
					   %[errout])			\
		     : CC_OUT(z) (success),				\
		       [errout] "+r" (__err),				\
		       [ptr] "+m" (*_ptr),				\
		       [old] "+a" (__old)				\
		     : [new] ltype (__new)				\
		     : "memory");					\
	if (unlikely(__err))						\
		goto label;						\
	if (unlikely(!success))						\
		*_old = __old;						\
	likely(success);					})

#ifdef CONFIG_X86_32
/*
 * Unlike the normal CMPXCHG, use an output GPR for both success/fail and
 * error.  There are only six GPRs available and four (EAX, EBX, ECX, and
 * EDX) are hardcoded by CMPXCHG8B, leaving only ESI and EDI.  If the
 * compiler uses both ESI and EDI for the memory operand, compilation will
 * fail if the error is an input+output as there will be no register
 * available for input.
 */
#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label)	({	\
	int __result;							\
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
	__typeof__(*(_ptr)) __old = *_old;				\
	__typeof__(*(_ptr)) __new = (_new);				\
	asm volatile("\n"						\
		     "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n"		\
		     "mov $0, %[result]\n\t"				\
		     "setz %b[result]\n"				\
		     "2:\n"						\
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG,	\
					   %[result])			\
		     : [result] "=q" (__result),			\
		       "+A" (__old),					\
		       [ptr] "+m" (*_ptr)				\
		     : "b" ((u32)__new),				\
		       "c" ((u32)((u64)__new >> 32))			\
		     : "memory", "cc");					\
	if (unlikely(__result < 0))					\
		goto label;						\
	if (unlikely(!__result))					\
		*_old = __old;						\
	likely(__result);					})
#endif // CONFIG_X86_32
#endif // CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_goto(x, addr, itype, ltype, label)			\
	asm_volatile_goto("\n"						\
		"1:	mov"itype" %0,%1\n"				\
		_ASM_EXTABLE_UA(1b, %l2)				\
		: : ltype(x), "m" (__m(addr))				\
		: : label)

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#ifdef CONFIG_ARCH_HAS_COPY_MC
unsigned long __must_check
copy_mc_to_kernel(void *to, const void *from, unsigned len);
#define copy_mc_to_kernel copy_mc_to_kernel

unsigned long __must_check
copy_mc_to_user(void *to, const void *from, unsigned len);
#endif

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif
598 * The "unsafe" user accesses aren't really "unsafe", but the naming
599 * is a big fat warning: you have to not only do the access_ok()
600 * checking before using them, but you have to surround them with the
601 * user_access_begin/end() pair.
603 static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
605 if (unlikely(!access_ok(ptr,len)))
607 __uaccess_begin_nospec();
610 #define user_access_begin(a,b) user_access_begin(a,b)
611 #define user_access_end() __uaccess_end()
613 #define user_access_save() smap_save()
614 #define user_access_restore(x) smap_restore(x)

#define unsafe_put_user(x, ptr, label)	\
	__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define unsafe_get_user(x, ptr, err_label)					\
do {										\
	__inttype(*(ptr)) __gu_val;						\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), err_label);		\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
} while (0)
#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define unsafe_get_user(x, ptr, err_label)					\
do {										\
	int __gu_err;								\
	__inttype(*(ptr)) __gu_val;						\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err);		\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
	if (unlikely(__gu_err)) goto err_label;					\
} while (0)
#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT
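
/*
 * Illustrative sketch (not part of this header): the canonical pattern
 * for the unsafe accessors, amortizing one access_ok() and one STAC/CLAC
 * pair over several accesses:
 *
 *	u32 lo, hi;
 *
 *	if (!user_access_begin(uptr, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	unsafe_get_user(lo, &uptr[0], efault);
 *	unsafe_get_user(hi, &uptr[1], efault);
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */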

extern void __try_cmpxchg_user_wrong_size(void);

#ifndef CONFIG_X86_32
#define __try_cmpxchg64_user_asm(_ptr, _oldp, _nval, _label)		\
	__try_cmpxchg_user_asm("q", "r", (_ptr), (_oldp), (_nval), _label)
#endif

/*
 * Force the pointer to u<size> to match the size expected by the asm helper.
 * clang/LLVM compiles all cases and only discards the unused paths after
 * processing errors, which breaks i386 if the pointer is an 8-byte value.
 */
#define unsafe_try_cmpxchg_user(_ptr, _oldp, _nval, _label) ({			\
	bool __ret;								\
	__chk_user_ptr(_ptr);							\
	switch (sizeof(*(_ptr))) {						\
	case 1:	__ret = __try_cmpxchg_user_asm("b", "q",			\
					       (__force u8 *)(_ptr), (_oldp),	\
					       (_nval), _label);		\
		break;								\
	case 2:	__ret = __try_cmpxchg_user_asm("w", "r",			\
					       (__force u16 *)(_ptr), (_oldp),	\
					       (_nval), _label);		\
		break;								\
	case 4:	__ret = __try_cmpxchg_user_asm("l", "r",			\
					       (__force u32 *)(_ptr), (_oldp),	\
					       (_nval), _label);		\
		break;								\
	case 8:	__ret = __try_cmpxchg64_user_asm((__force u64 *)(_ptr), (_oldp),\
						 (_nval), _label);		\
		break;								\
	default: __try_cmpxchg_user_wrong_size();				\
	}									\
	__ret;						})
672 /* "Returns" 0 on success, 1 on failure, -EFAULT if the access faults. */
673 #define __try_cmpxchg_user(_ptr, _oldp, _nval, _label) ({ \
674 int __ret = -EFAULT; \
675 __uaccess_begin_nospec(); \
676 __ret = !unsafe_try_cmpxchg_user(_ptr, _oldp, _nval, _label); \
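
/*
 * Illustrative sketch (not part of this header): the label argument is
 * defined *inside* the macro expansion, so the caller only supplies a
 * unique identifier.  A hypothetical wrapper:
 *
 *	static int cmpxchg_user_u32(u32 __user *uaddr, u32 *oldp, u32 new)
 *	{
 *		if (!access_ok(uaddr, sizeof(*uaddr)))
 *			return -EFAULT;
 *		// 0: swapped, 1: compare failed (*oldp updated), -EFAULT: fault
 *		return __try_cmpxchg_user(uaddr, oldp, new, Efault);
 *	}
 */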

/*
 * We want the unsafe accessors to always be inlined and use
 * the error labels - thus the macro games.
 */
#define unsafe_copy_loop(dst, src, len, type, label)				\
	while (len >= sizeof(type)) {						\
		unsafe_put_user(*(type *)(src),(type __user *)(dst),label);	\
		dst += sizeof(type);						\
		src += sizeof(type);						\
		len -= sizeof(type);						\
	}

#define unsafe_copy_to_user(_dst,_src,_len,label)			\
do {									\
	char __user *__ucu_dst = (_dst);				\
	const char *__ucu_src = (_src);					\
	size_t __ucu_len = (_len);					\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label);	\
} while (0)
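
/*
 * Illustrative sketch (not part of this header): unsafe_copy_to_user()
 * copies in descending power-of-two chunks and, like the other unsafe
 * accessors, must sit inside a user_access_begin()/user_access_end()
 * section:
 *
 *	if (!user_access_begin(udst, len))
 *		return -EFAULT;
 *	unsafe_copy_to_user(udst, ksrc, len, efault);
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */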

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define __get_kernel_nofault(dst, src, type, err_label)			\
	__get_user_size(*((type *)(dst)), (__force type __user *)(src),	\
			sizeof(type), err_label)
#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __kr_err;							\
									\
	__get_user_size(*((type *)(dst)), (__force type __user *)(src),	\
			sizeof(type), __kr_err);			\
	if (unlikely(__kr_err))						\
		goto err_label;						\
} while (0)
#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#define __put_kernel_nofault(dst, src, type, err_label)			\
	__put_user_size(*((type *)(src)), (__force type __user *)(dst),	\
			sizeof(type), err_label)
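
/*
 * Illustrative sketch (not part of this header): these are the x86
 * backends for the generic get_kernel_nofault()/copy_from_kernel_nofault()
 * helpers, which probe possibly-bogus *kernel* addresses without
 * faulting, e.g.:
 *
 *	unsigned long word;
 *
 *	if (get_kernel_nofault(word, (unsigned long *)candidate_addr))
 *		return -EFAULT;	// address was not readable
 */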

#endif /* _ASM_X86_UACCESS_H */