return kimage_vaddr - KIMAGE_VADDR;
}
-/* the actual size of a user virtual address */
-extern u64 vabits_user;
-
/*
* Allow all memory at the discovery stage. We will clip it later.
*/
/*
* The EL0 pointer bits used by a pointer authentication code.
* This is dependent on TBI0 being enabled, or bits 63:56 would also apply.
*/
-#define ptrauth_user_pac_mask() GENMASK(54, vabits_user)
+#define ptrauth_user_pac_mask() GENMASK(54, vabits_actual)
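/*
* Illustrative, not part of the patch: with vabits_actual == 48 the
* mask is GENMASK(54, 48) == 0x007f000000000000; bit 55 selects the
* TTBR and bits 63:56 are covered by TBI0, as noted above.
*/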
/* Only valid for EL0 TTBR0 instruction pointers */
static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
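/* (body unchanged by this patch: it clears the PAC bits via ptr & ~ptrauth_user_pac_mask()) */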
*/
#define DEFAULT_MAP_WINDOW_64 (UL(1) << VA_BITS_MIN)
-#define TASK_SIZE_64 (UL(1) << vabits_user)
+#define TASK_SIZE_64 (UL(1) << vabits_actual)
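/*
* Illustrative, not part of the patch: a CONFIG_ARM64_VA_BITS_52
* kernel on hardware without LVA support boots with vabits_actual ==
* 48, so TASK_SIZE_64 becomes 1 << 48 (256 TiB) instead of 1 << 52
* (4 PiB).
*/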
#ifdef CONFIG_COMPAT
#if defined(CONFIG_ARM64_64K_PAGES) && defined(CONFIG_KUSER_HELPERS)
#endif
mov x5, #VA_BITS_MIN
1:
- adr_l x6, vabits_user
- str x5, [x6]
- dmb sy
- dc ivac, x6 // Invalidate potentially stale cache line
-
adr_l x6, vabits_actual
str x5, [x6]
dmb sy
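// The store above runs with the MMU off; the dmb orders it before the
// dc ivac that follows in the full file, so no stale cached copy of
// vabits_actual survives once the MMU is enabled.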
ENTRY(__cpu_secondary_check52bitva)
#ifdef CONFIG_ARM64_VA_BITS_52
- ldr_l x0, vabits_user
+ ldr_l x0, vabits_actual
cmp x0, #52
b.ne 2f
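// Sketch of the surrounding flow: if the boot CPU did not settle on
// 52-bit VAs, skip the check (2f); otherwise the code that follows
// verifies this secondary's ID_AA64MMFR2_EL1 also advertises LVA.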
pr_alert("%s pgtable: %luk pages, %llu-bit VAs, pgdp=%016lx\n",
mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
- mm == &init_mm ? vabits_actual : (int)vabits_user,
- (unsigned long)virt_to_phys(mm->pgd));
+ vabits_actual, (unsigned long)virt_to_phys(mm->pgd));
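/*
* Example output with hypothetical values: "user pgtable: 4k pages,
* 48-bit VAs, pgdp=0000000012345678" -- user and swapper now report
* the same (actual) VA width.
*/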
pgdp = pgd_offset(mm, addr);
pgd = READ_ONCE(*pgdp);
pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd));
u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
-u64 vabits_user __ro_after_init;
-EXPORT_SYMBOL(vabits_user);
u64 __section(".mmuoff.data.write") vabits_actual;
EXPORT_SYMBOL(vabits_actual);
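/*
* Note, not part of the patch: vabits_actual lives in
* .mmuoff.data.write because head.S stores to it before the MMU is
* up; that section is cache-line aligned so MMU-off writes do not
* share lines with normally-mapped data.
*/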
tcr_clear_errata_bits x10, x9, x5
#ifdef CONFIG_ARM64_VA_BITS_52
- ldr_l x9, vabits_user
+ ldr_l x9, vabits_actual
sub x9, xzr, x9
add x9, x9, #64
tcr_set_t1sz x10, x9
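// Worked example: vabits_actual == 52 gives x9 = 64 - 52 = 12 (the
// negate-and-add above computes 64 - x9), the T1SZ value for a
// 52-bit TTBR1 address space.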