From: Marc Zyngier
Date: Thu, 30 Jun 2022 16:04:52 +0000 (+0100)
Subject: arm64: Rename the VHE switch to "finalise_el2"
X-Git-Tag: v6.1-rc5~793^2~1^2~9
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=7ddb0c3df7881206dcd8339c8dabf0318a781f91;p=platform%2Fkernel%2Flinux-starfive.git

arm64: Rename the VHE switch to "finalise_el2"

As we are about to perform a lot more in 'mutate_to_vhe' than we
currently do, this function really becomes the point where we
finalise the basic EL2 configuration.

Reflect this in the code by renaming a bunch of things:
- HVC_VHE_RESTART -> HVC_FINALISE_EL2
- switch_to_vhe -> finalise_el2
- mutate_to_vhe -> __finalise_el2

No functional changes.

Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20220630160500.1536744-2-maz@kernel.org
Signed-off-by: Will Deacon
---

diff --git a/Documentation/virt/kvm/arm/hyp-abi.rst b/Documentation/virt/kvm/arm/hyp-abi.rst
index 4d43fbc..412b276 100644
--- a/Documentation/virt/kvm/arm/hyp-abi.rst
+++ b/Documentation/virt/kvm/arm/hyp-abi.rst
@@ -60,12 +60,13 @@ these functions (see arch/arm{,64}/include/asm/virt.h):
 
 * ::
 
-    x0 = HVC_VHE_RESTART (arm64 only)
+    x0 = HVC_FINALISE_EL2 (arm64 only)
 
-  Attempt to upgrade the kernel's exception level from EL1 to EL2 by enabling
-  the VHE mode. This is conditioned by the CPU supporting VHE, the EL2 MMU
-  being off, and VHE not being disabled by any other means (command line
-  option, for example).
+  Finish configuring EL2 depending on the command-line options,
+  including an attempt to upgrade the kernel's exception level from
+  EL1 to EL2 by enabling the VHE mode. This is conditioned by the CPU
+  supporting VHE, the EL2 MMU being off, and VHE not being disabled by
+  any other means (command line option, for example).
 
 Any other value of r0/x0 triggers a hypervisor-specific handling,
 which is not documented here.
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 0e80db4..dec6eee 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -36,9 +36,9 @@
 #define HVC_RESET_VECTORS 2
 
 /*
- * HVC_VHE_RESTART - Upgrade the CPU from EL1 to EL2, if possible
+ * HVC_FINALISE_EL2 - Upgrade the CPU from EL1 to EL2, if possible
  */
-#define HVC_VHE_RESTART 3
+#define HVC_FINALISE_EL2 3
 
 /* Max number of HYP stub hypercalls */
 #define HVC_STUB_HCALL_NR 4
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index ae0a9e4..6feac4e 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -459,7 +459,7 @@ SYM_FUNC_START_LOCAL(__primary_switched)
 	mov	x0, x22			// pass FDT address in x0
 	bl	init_feature_override	// Parse cpu feature overrides
 	mov	x0, x20
-	bl	switch_to_vhe		// Prefer VHE if possible
+	bl	finalise_el2		// Prefer VHE if possible
 	ldp	x29, x30, [sp], #16
 	bl	start_kernel
 	ASM_BUG()
@@ -542,7 +542,7 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
 	eret
 
 __cpu_stick_to_vhe:
-	mov	x0, #HVC_VHE_RESTART
+	mov	x0, #HVC_FINALISE_EL2
 	hvc	#0
 	mov	x0, #BOOT_CPU_MODE_EL2
 	ret
@@ -592,7 +592,7 @@ SYM_FUNC_START_LOCAL(secondary_startup)
 	 * Common entry point for secondary CPUs.
 	 */
 	mov	x20, x0			// preserve boot mode
-	bl	switch_to_vhe
+	bl	finalise_el2
 	bl	__cpu_secondary_check52bitva
 #if VA_BITS > 48
 	ldr_l	x0, vabits_actual
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index 5bafb53..571286e 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -51,8 +51,8 @@ SYM_CODE_START_LOCAL(elx_sync)
 	msr	vbar_el2, x1
 	b	9f
 
-1:	cmp	x0, #HVC_VHE_RESTART
-	b.eq	mutate_to_vhe
+1:	cmp	x0, #HVC_FINALISE_EL2
+	b.eq	__finalise_el2
 
 2:	cmp	x0, #HVC_SOFT_RESTART
 	b.ne	3f
@@ -73,8 +73,8 @@ SYM_CODE_START_LOCAL(elx_sync)
 	eret
SYM_CODE_END(elx_sync)
 
-// nVHE? No way! Give me the real thing!
-SYM_CODE_START_LOCAL(mutate_to_vhe)
+SYM_CODE_START_LOCAL(__finalise_el2)
+	// nVHE? No way! Give me the real thing!
 	// Sanity check: MMU *must* be off
 	mrs	x1, sctlr_el2
 	tbnz	x1, #0, 1f
@@ -140,10 +140,10 @@ SYM_CODE_START_LOCAL(mutate_to_vhe)
 	msr	spsr_el1, x0
 
 	b	enter_vhe
-SYM_CODE_END(mutate_to_vhe)
+SYM_CODE_END(__finalise_el2)
 
 	// At the point where we reach enter_vhe(), we run with
-	// the MMU off (which is enforced by mutate_to_vhe()).
+	// the MMU off (which is enforced by __finalise_el2()).
 	// We thus need to be in the idmap, or everything will
 	// explode when enabling the MMU.
@@ -222,11 +222,11 @@ SYM_FUNC_START(__hyp_reset_vectors)
 SYM_FUNC_END(__hyp_reset_vectors)
 
 /*
- * Entry point to switch to VHE if deemed capable
+ * Entry point to finalise EL2 and switch to VHE if deemed capable
  *
  * w0: boot mode, as returned by init_kernel_el()
  */
-SYM_FUNC_START(switch_to_vhe)
+SYM_FUNC_START(finalise_el2)
 	// Need to have booted at EL2
 	cmp	w0, #BOOT_CPU_MODE_EL2
 	b.ne	1f
@@ -236,9 +236,8 @@ SYM_FUNC_START(switch_to_vhe)
 	cmp	x0, #CurrentEL_EL1
 	b.ne	1f
 
-	// Turn the world upside down
-	mov	x0, #HVC_VHE_RESTART
+	mov	x0, #HVC_FINALISE_EL2
 	hvc	#0
 1:
 	ret
-SYM_FUNC_END(switch_to_vhe)
+SYM_FUNC_END(finalise_el2)
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index e36b09d..617f78a 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -100,7 +100,7 @@ SYM_FUNC_END(__cpu_suspend_enter)
 	.pushsection ".idmap.text", "awx"
 SYM_CODE_START(cpu_resume)
 	bl	init_kernel_el
-	bl	switch_to_vhe
+	bl	finalise_el2
 	bl	__cpu_setup
 	/* enable the MMU early - so we can access sleep_save_stash by va */
 	adrp	x1, swapper_pg_dir
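
The hyp-abi.rst change above boils down to a simple calling convention: a CPU
that booted at EL2 but is currently running at EL1 with the EL2 MMU off loads
the function ID into x0 and issues 'hvc #0', which traps into the stub's
elx_sync handler. A minimal sketch of such a caller, modelled on the
finalise_el2 path in this patch, might look as follows; the 'try_finalise_el2'
label is illustrative only, while HVC_FINALISE_EL2 and CurrentEL_EL1 come from
asm/virt.h and asm/ptrace.h respectively:

	// Hypothetical caller of the EL2 stub, sketched after finalise_el2.
	// Assumes the CPU booted at EL2 and the stub vectors are installed
	// in VBAR_EL2; must run with the EL2 MMU off, per hyp-abi.rst.
	SYM_FUNC_START(try_finalise_el2)
		mrs	x0, CurrentEL		// which EL are we running at?
		cmp	x0, #CurrentEL_EL1
		b.ne	1f			// at EL2 already: VHE is on,
						// nothing to ask the stub for
		mov	x0, #HVC_FINALISE_EL2	// function ID in x0
		hvc	#0			// trap to elx_sync at EL2
	1:	ret
	SYM_FUNC_END(try_finalise_el2)

As with finalise_el2 itself, the hypercall is only issued from EL1: if the CPU
is already executing at EL2, VHE is in effect and there is nothing left for
the stub to finalise.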