From b9bd9f605c4a6f04a83e6640a7d1d6dda80f17ca Mon Sep 17 00:00:00 2001
From: Linus Torvalds <torvalds@linux-foundation.org>
Date: Tue, 2 May 2023 16:39:59 -0700
Subject: [PATCH] x86: uaccess: move 32-bit and 64-bit parts into proper
 <asm/uaccess_N.h> header

The x86 <asm/uaccess.h> file has grown features that are specific to
x86-64 like LAM support and the related access_ok() changes.  They
really should be in the <asm/uaccess_64.h> file and not pollute the
generic x86 header.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
---
 arch/x86/include/asm/uaccess.h    | 87 ++-------------------------------------
 arch/x86/include/asm/uaccess_32.h |  3 ++
 arch/x86/include/asm/uaccess_64.h | 77 +++++++++++++++++++++++++++++++++-
 3 files changed, 82 insertions(+), 85 deletions(-)

diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index cad17e1..8bae40a 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -16,83 +16,10 @@
 #include <asm/extable.h>
 #include <asm/tlbflush.h>
 
-#ifdef CONFIG_ADDRESS_MASKING
-/*
- * Mask out tag bits from the address.
- *
- * Magic with the 'sign' allows to untag userspace pointer without any branches
- * while leaving kernel addresses intact.
- */
-static inline unsigned long __untagged_addr(unsigned long addr)
-{
-	long sign;
-
-	/*
-	 * Refer tlbstate_untag_mask directly to avoid RIP-relative relocation
-	 * in alternative instructions. The relocation gets wrong when gets
-	 * copied to the target place.
-	 */
-	asm (ALTERNATIVE("",
-			 "sar $63, %[sign]\n\t" /* user_ptr ? 0 : -1UL */
-			 "or %%gs:tlbstate_untag_mask, %[sign]\n\t"
-			 "and %[sign], %[addr]\n\t", X86_FEATURE_LAM)
-	     : [addr] "+r" (addr), [sign] "=r" (sign)
-	     : "m" (tlbstate_untag_mask), "[sign]" (addr));
-
-	return addr;
-}
-
-#define untagged_addr(addr)	({					\
-	unsigned long __addr = (__force unsigned long)(addr);		\
-	(__force __typeof__(addr))__untagged_addr(__addr);		\
-})
-
-static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
-						   unsigned long addr)
-{
-	long sign = addr >> 63;
-
-	mmap_assert_locked(mm);
-	addr &= (mm)->context.untag_mask | sign;
-
-	return addr;
-}
-
-#define untagged_addr_remote(mm, addr)	({				\
-	unsigned long __addr = (__force unsigned long)(addr);		\
-	(__force __typeof__(addr))__untagged_addr_remote(mm, __addr);	\
-})
-
+#ifdef CONFIG_X86_32
+# include <asm/uaccess_32.h>
 #else
-#define untagged_addr(addr)	(addr)
-#endif
-
-#ifdef CONFIG_X86_64
-/*
- * On x86-64, we may have tag bits in the user pointer. Rather than
- * mask them off, just change the rules for __access_ok().
- *
- * Make the rule be that 'ptr+size' must not overflow, and must not
- * have the high bit set. Compilers generally understand about
- * unsigned overflow and the CF bit and generate reasonable code for
- * this. Although it looks like the combination confuses at least
- * clang (and instead of just doing an "add" followed by a test of
- * SF and CF, you'll see that unnecessary comparison).
- *
- * For the common case of small sizes that can be checked at compile
- * time, don't even bother with the addition, and just check that the
- * base pointer is ok.
- */
-static inline bool __access_ok(const void __user *ptr, unsigned long size)
-{
-	if (__builtin_constant_p(size <= PAGE_SIZE) && size <= PAGE_SIZE) {
-		return (long)ptr >= 0;
-	} else {
-		unsigned long sum = size + (unsigned long)ptr;
-		return (long) sum >= 0 && sum >= (unsigned long)ptr;
-	}
-}
-#define __access_ok __access_ok
+# include <asm/uaccess_64.h>
 #endif
 
 #include <asm-generic/access_ok.h>
@@ -583,14 +510,6 @@ extern struct movsl_mask {
 
 #define ARCH_HAS_NOCACHE_UACCESS 1
 
-#ifdef CONFIG_X86_32
-unsigned long __must_check clear_user(void __user *mem, unsigned long len);
-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
-# include <asm/uaccess_32.h>
-#else
-# include <asm/uaccess_64.h>
-#endif
-
 /*
  * The "unsafe" user accesses aren't really "unsafe", but the naming
  * is a big fat warning: you have to not only do the access_ok()
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 388a406..40379a1 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -33,4 +33,7 @@ __copy_from_user_inatomic_nocache(void *to, const void __user *from,
 	return __copy_from_user_ll_nocache_nozero(to, from, n);
 }
 
+unsigned long __must_check clear_user(void __user *mem, unsigned long len);
+unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
+
 #endif /* _ASM_X86_UACCESS_32_H */
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index c972bd2..20411e6 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -12,6 +12,81 @@
 #include <asm/cpufeatures.h>
 #include <asm/page.h>
 
+#ifdef CONFIG_ADDRESS_MASKING
+/*
+ * Mask out tag bits from the address.
+ *
+ * Magic with the 'sign' allows to untag userspace pointer without any branches
+ * while leaving kernel addresses intact.
+ */
+static inline unsigned long __untagged_addr(unsigned long addr)
+{
+	long sign;
+
+	/*
+	 * Refer tlbstate_untag_mask directly to avoid RIP-relative relocation
+	 * in alternative instructions. The relocation gets wrong when gets
+	 * copied to the target place.
+	 */
+	asm (ALTERNATIVE("",
+			 "sar $63, %[sign]\n\t" /* user_ptr ? 0 : -1UL */
+			 "or %%gs:tlbstate_untag_mask, %[sign]\n\t"
+			 "and %[sign], %[addr]\n\t", X86_FEATURE_LAM)
+	     : [addr] "+r" (addr), [sign] "=r" (sign)
+	     : "m" (tlbstate_untag_mask), "[sign]" (addr));
+
+	return addr;
+}
+
+#define untagged_addr(addr)	({					\
+	unsigned long __addr = (__force unsigned long)(addr);		\
+	(__force __typeof__(addr))__untagged_addr(__addr);		\
+})
+
+static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
+						   unsigned long addr)
+{
+	long sign = addr >> 63;
+
+	mmap_assert_locked(mm);
+	addr &= (mm)->context.untag_mask | sign;
+
+	return addr;
+}
+
+#define untagged_addr_remote(mm, addr)	({				\
+	unsigned long __addr = (__force unsigned long)(addr);		\
+	(__force __typeof__(addr))__untagged_addr_remote(mm, __addr);	\
+})
+
+#endif
+
+/*
+ * On x86-64, we may have tag bits in the user pointer. Rather than
+ * mask them off, just change the rules for __access_ok().
+ *
+ * Make the rule be that 'ptr+size' must not overflow, and must not
+ * have the high bit set. Compilers generally understand about
+ * unsigned overflow and the CF bit and generate reasonable code for
+ * this. Although it looks like the combination confuses at least
+ * clang (and instead of just doing an "add" followed by a test of
+ * SF and CF, you'll see that unnecessary comparison).
+ *
+ * For the common case of small sizes that can be checked at compile
+ * time, don't even bother with the addition, and just check that the
+ * base pointer is ok.
+ */
+static inline bool __access_ok(const void __user *ptr, unsigned long size)
+{
+	if (__builtin_constant_p(size <= PAGE_SIZE) && size <= PAGE_SIZE) {
+		return (long)ptr >= 0;
+	} else {
+		unsigned long sum = size + (unsigned long)ptr;
+		return (long) sum >= 0 && sum >= (unsigned long)ptr;
+	}
+}
+#define __access_ok __access_ok
+
 /*
  * Copy To/From Userspace
  */
@@ -106,7 +181,7 @@ static __always_inline __must_check unsigned long __clear_user(void __user *addr
 
 static __always_inline unsigned long clear_user(void __user *to, unsigned long n)
 {
-	if (access_ok(to, n))
+	if (__access_ok(to, n))
 		return __clear_user(to, n);
 	return n;
 }
-- 
2.7.4
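
A note for readers following the 'sign' magic in __untagged_addr(): the
same arithmetic can be demonstrated as a stand-alone user-space program.
This is only a sketch, not kernel code; the UNTAG_MASK value assumes a
LAM_U57-style layout (~GENMASK(62, 57)) and the sample pointers are
invented for illustration:

/*
 * User-space sketch of the branchless untagging done by __untagged_addr().
 * NOT kernel code: UNTAG_MASK is an assumed LAM_U57-style mask and the
 * test pointers are invented. Relies on gcc/clang performing an
 * arithmetic right shift of negative values, as the kernel asm does.
 */
#include <stdio.h>

#define UNTAG_MASK	0x81ffffffffffffffUL	/* clears tag bits 62:57 */

static unsigned long untag(unsigned long addr)
{
	/* 0 for user pointers (bit 63 clear), -1UL for kernel pointers */
	long sign = (long)addr >> 63;

	/*
	 * User pointer:   UNTAG_MASK | 0  == UNTAG_MASK -> tag bits cleared
	 * Kernel pointer: UNTAG_MASK | -1 == -1UL       -> address untouched
	 */
	return addr & (UNTAG_MASK | sign);
}

int main(void)
{
	unsigned long tagged = 0x3e007f1234567000UL;	/* user ptr, tag bits set */
	unsigned long kernel = 0xffff888012345678UL;	/* kernel ptr */

	printf("%016lx -> %016lx\n", tagged, untag(tagged));	/* tag cleared */
	printf("%016lx -> %016lx\n", kernel, untag(kernel));	/* unchanged */
	return 0;
}

The point of the trick is that bit 63 distinguishes kernel from user
pointers: sign-extending it yields either 0 (user, so the mask applies)
or -1UL (kernel, so the AND is a no-op), with no branch anywhere.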
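
The __access_ok() rule can be exercised the same way. Another user-space
sketch, modeling only the non-constant-size path ('ptr+size' must not
wrap and must not end up with bit 63 set); the compile-time PAGE_SIZE
shortcut and everything else around access_ok() is deliberately left out:

/*
 * User-space sketch of the pointer+size rule used by __access_ok().
 * NOT kernel code; the sample ranges below are invented.
 */
#include <stdbool.h>
#include <stdio.h>

static bool access_ok_sketch(unsigned long ptr, unsigned long size)
{
	unsigned long sum = size + ptr;

	/* (long)sum >= 0 rejects the high bit; sum >= ptr rejects wrap-around */
	return (long)sum >= 0 && sum >= ptr;
}

int main(void)
{
	/* typical user range: accepted */
	printf("%d\n", access_ok_sketch(0x00007fffffffe000UL, 0x1000));
	/* end of range crosses into the bit-63 half: rejected */
	printf("%d\n", access_ok_sketch(0x7fffffffffffff00UL, 0x200));
	/* wraps past zero: rejected */
	printf("%d\n", access_ok_sketch(0xffffffffffffff00UL, 0x200));
	return 0;
}

The two checks are sufficient because if the end of the range is below
bit 63 and the addition did not wrap, every address in [ptr, ptr+size)
is below bit 63 as well.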