arm64: mm: Remove vabits_user
author Steve Capper <steve.capper@arm.com>
Wed, 7 Aug 2019 15:55:23 +0000 (16:55 +0100)
committer Will Deacon <will@kernel.org>
Fri, 9 Aug 2019 10:17:27 +0000 (11:17 +0100)
Previous patches have enabled 52-bit kernel and user VAs, and there is no
longer any scenario where the user VA size differs from the kernel VA size.

This patch removes the now-redundant vabits_user variable and replaces its
usage with vabits_actual where appropriate.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Steve Capper <steve.capper@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
arch/arm64/include/asm/memory.h
arch/arm64/include/asm/pointer_auth.h
arch/arm64/include/asm/processor.h
arch/arm64/kernel/head.S
arch/arm64/mm/fault.c
arch/arm64/mm/mmu.c
arch/arm64/mm/proc.S

index d911d0573460463d55fc73afe23d0c557127f4c0..ecc945ba8607e1d1925bb7b54cece84d7cd22524 100644 (file)
@@ -194,9 +194,6 @@ static inline unsigned long kaslr_offset(void)
        return kimage_vaddr - KIMAGE_VADDR;
 }
 
-/* the actual size of a user virtual address */
-extern u64                     vabits_user;
-
 /*
  * Allow all memory at the discovery stage. We will clip it later.
  */
index d328540cb85edc5f272190e3d36481b4c1987694..7a24bad1a58b89a38390eabf909515082a3b9aaa 100644 (file)
@@ -69,7 +69,7 @@ extern int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg);
  * The EL0 pointer bits used by a pointer authentication code.
  * This is dependent on TBI0 being enabled, or bits 63:56 would also apply.
  */
-#define ptrauth_user_pac_mask()        GENMASK(54, vabits_user)
+#define ptrauth_user_pac_mask()        GENMASK(54, vabits_actual)
 
 /* Only valid for EL0 TTBR0 instruction pointers */
 static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
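As a rough illustration of the new mask (not part of the patch; GENMASK() is
re-derived below in simplified 64-bit form since linux/bits.h is kernel-only,
and the VA size is assumed): with 48-bit VAs, GENMASK(54, vabits_actual)
covers bits 54:48, exactly the PAC bits of a TBI0-enabled EL0 pointer.

/* Standalone sketch of the PAC mask computation; simplified 64-bit
 * GENMASK(), not the kernel's BITS_PER_LONG-aware version. */
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l) ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	uint64_t vabits_actual = 48;	/* assumed VA size for this example */
	uint64_t pac_mask = GENMASK(54, vabits_actual);

	/* With TBI0 enabled, bits 63:56 are ignored, so the PAC occupies
	 * bits 54 down to the top of the VA range: 0x007f000000000000. */
	printf("PAC mask: 0x%016llx\n", (unsigned long long)pac_mask);
	return 0;
}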
index 0e1f2770192a83d200d6e702b434b98a4f1cae3b..e4c93945e4770dd7c9fabe001186dc043bfa953c 100644 (file)
@@ -43,7 +43,7 @@
  */
 
 #define DEFAULT_MAP_WINDOW_64  (UL(1) << VA_BITS_MIN)
-#define TASK_SIZE_64           (UL(1) << vabits_user)
+#define TASK_SIZE_64           (UL(1) << vabits_actual)
 
 #ifdef CONFIG_COMPAT
 #if defined(CONFIG_ARM64_64K_PAGES) && defined(CONFIG_KUSER_HELPERS)
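With TASK_SIZE_64 now keyed off vabits_actual, the user address space is
256 TiB for a 48-bit configuration and 4 PiB for a 52-bit one. A quick
standalone check of that arithmetic (a sketch, not kernel code;
vabits_actual is just a local here):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	for (uint64_t vabits_actual = 48; vabits_actual <= 52; vabits_actual += 4) {
		uint64_t task_size_64 = 1ULL << vabits_actual;	/* TASK_SIZE_64 */

		/* 48-bit VAs -> 256 TiB; 52-bit VAs -> 4096 TiB (4 PiB) */
		printf("%2llu-bit VAs: %llu TiB\n",
		       (unsigned long long)vabits_actual,
		       (unsigned long long)(task_size_64 >> 40));
	}
	return 0;
}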
index c8446f8c81f5c32c73aad17888a1a68dd28441dd..949b001a73bb7d2f5d2afd89e8a8938c039cac42 100644 (file)
@@ -316,11 +316,6 @@ __create_page_tables:
 #endif
        mov     x5, #VA_BITS_MIN
 1:
-       adr_l   x6, vabits_user
-       str     x5, [x6]
-       dmb     sy
-       dc      ivac, x6                // Invalidate potentially stale cache line
-
        adr_l   x6, vabits_actual
        str     x5, [x6]
        dmb     sy
@@ -795,7 +790,7 @@ ENDPROC(__enable_mmu)
 
 ENTRY(__cpu_secondary_check52bitva)
 #ifdef CONFIG_ARM64_VA_BITS_52
-       ldr_l   x0, vabits_user
+       ldr_l   x0, vabits_actual
        cmp     x0, #52
        b.ne    2f
 
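A hedged C restatement of the branch logic above (illustrative only;
cpu_supports_52bit_va() is a hypothetical stand-in for the hardware feature
check that the remainder of the routine, not shown in this hunk, performs):

#include <stdbool.h>
#include <stdint.h>

extern uint64_t vabits_actual;
extern bool cpu_supports_52bit_va(void);	/* hypothetical helper */

static bool secondary_va_config_ok(void)
{
	/* Mirrors "cmp x0, #52; b.ne 2f": a secondary CPU only needs
	 * checking when the boot CPU actually settled on 52-bit VAs. */
	if (vabits_actual != 52)
		return true;

	return cpu_supports_52bit_va();
}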
index 6b195871769a00316fc713df6c541d2672c544fc..75eff57bd9efc6813c8852ab1f867a241f91a6b1 100644 (file)
@@ -140,8 +140,7 @@ static void show_pte(unsigned long addr)
 
        pr_alert("%s pgtable: %luk pages, %llu-bit VAs, pgdp=%016lx\n",
                 mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
-                mm == &init_mm ? vabits_actual : (int)vabits_user,
-                (unsigned long)virt_to_phys(mm->pgd));
+                vabits_actual, (unsigned long)virt_to_phys(mm->pgd));
        pgdp = pgd_offset(mm, addr);
        pgd = READ_ONCE(*pgdp);
        pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd));
index 07b30e6d17f8097f8c1f6f6ed89fc1b4a3f13ead..0c8f7e55f8599329e13c431605b12f967828066b 100644 (file)
@@ -40,8 +40,6 @@
 
 u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
 u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
-u64 vabits_user __ro_after_init;
-EXPORT_SYMBOL(vabits_user);
 
 u64 __section(".mmuoff.data.write") vabits_actual;
 EXPORT_SYMBOL(vabits_actual);
index 8b021c5c0884075c378f668173f4b65e872c095c..391f9cabfe600a4668d4ee4acf1cfc05d79aa39f 100644 (file)
@@ -439,7 +439,7 @@ ENTRY(__cpu_setup)
        tcr_clear_errata_bits x10, x9, x5
 
 #ifdef CONFIG_ARM64_VA_BITS_52
-       ldr_l           x9, vabits_user
+       ldr_l           x9, vabits_actual
        sub             x9, xzr, x9
        add             x9, x9, #64
        tcr_set_t1sz    x10, x9
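The two instructions after the load compute 64 - vabits_actual, the
architectural T1SZ field value programmed into TCR_EL1: 52-bit VAs give
T1SZ = 12, 48-bit VAs give 16. A small sketch of the same arithmetic
(standalone assumption: vabits_actual is set locally):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t vabits_actual = 52;	/* assumed 52-bit VA configuration */

	/* "sub x9, xzr, x9" negates, "add x9, x9, #64" yields 64 - VA bits;
	 * the unsigned wrap-around matches the register arithmetic. */
	uint64_t t1sz = (0 - vabits_actual) + 64;

	printf("T1SZ = %llu\n", (unsigned long long)t1sz);	/* prints 12 */
	return 0;
}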