arm64/cpufeature: Validate hypervisor capabilities during CPU hotplug
author Anshuman Khandual <anshuman.khandual@arm.com>
Tue, 12 May 2020 01:57:27 +0000 (07:27 +0530)
committer Will Deacon <will@kernel.org>
Wed, 20 May 2020 14:59:23 +0000 (15:59 +0100)
This validates hypervisor capabilities such as the VMID width and the IPA
range for any hot-plugged CPU against the system-wide finalized values. KVM's
view of the IPA space is used when deciding whether a given CPU is allowed
to come up. While here, get_vmid_bits() is factored out for general use.
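
For reference, a minimal stand-alone sketch (user-space C, not part of the
patch; demo_get_vmid_bits() and the sample register values are invented for
illustration) of the extraction the new helper performs: the VMIDBits field
of ID_AA64MMFR1_EL1 lives in bits [7:4], the encoding 0b0010 advertises
16-bit VMIDs, and any other value, including reserved encodings, falls back
to 8 bits:

  #include <stdio.h>
  #include <stdint.h>

  /* Extract ID_AA64MMFR1_EL1.VMIDBits (bits [7:4]) and map it to a width. */
  static unsigned int demo_get_vmid_bits(uint64_t mmfr1)
  {
          unsigned int field = (mmfr1 >> 4) & 0xf;

          /* 0b0010 advertises 16-bit VMIDs; everything else defaults to 8. */
          return (field == 2) ? 16 : 8;
  }

  int main(void)
  {
          printf("%u\n", demo_get_vmid_bits(0x00));  /* prints 8  */
          printf("%u\n", demo_get_vmid_bits(0x20));  /* prints 16 */
          return 0;
  }

On the hotplug path, the corresponding mismatch check is invoked from
verify_local_cpu_capabilities(), as the cpufeature.c hunk below shows.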

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
Cc: linux-arm-kernel@lists.infradead.org
Cc: kvmarm@lists.cs.columbia.edu
Cc: linux-kernel@vger.kernel.org
Suggested-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/1589248647-22925-1-git-send-email-anshuman.khandual@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/kvm_mmu.h
arch/arm64/kernel/cpufeature.c
arch/arm64/kvm/reset.c

arch/arm64/include/asm/cpufeature.h
index f5c4672..928814d 100644 (file)
@@ -752,6 +752,24 @@ static inline bool cpu_has_hw_af(void)
 extern bool cpu_has_amu_feat(int cpu);
 #endif
 
+static inline unsigned int get_vmid_bits(u64 mmfr1)
+{
+       int vmid_bits;
+
+       vmid_bits = cpuid_feature_extract_unsigned_field(mmfr1,
+                                               ID_AA64MMFR1_VMIDBITS_SHIFT);
+       if (vmid_bits == ID_AA64MMFR1_VMIDBITS_16)
+               return 16;
+
+       /*
+        * Return the default here even if any reserved
+        * value is fetched from the system register.
+        */
+       return 8;
+}
+
+u32 get_kvm_ipa_limit(void);
+
 #endif /* __ASSEMBLY__ */
 
 #endif
arch/arm64/include/asm/kvm_mmu.h
index 30b0e8d..a7137e1 100644 (file)
@@ -416,7 +416,7 @@ static inline unsigned int kvm_get_vmid_bits(void)
 {
        int reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
 
-       return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
+       return get_vmid_bits(reg);
 }
 
 /*
arch/arm64/kernel/cpufeature.c
index 9b05843..be8a634 100644 (file)
@@ -2315,6 +2315,35 @@ static void verify_sve_features(void)
        /* Add checks on other ZCR bits here if necessary */
 }
 
+static void verify_hyp_capabilities(void)
+{
+       u64 safe_mmfr1, mmfr0, mmfr1;
+       int parange, ipa_max;
+       unsigned int safe_vmid_bits, vmid_bits;
+
+       if (!IS_ENABLED(CONFIG_KVM) || !IS_ENABLED(CONFIG_KVM_ARM_HOST))
+               return;
+
+       safe_mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
+       mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
+       mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
+
+       /* Verify VMID bits */
+       safe_vmid_bits = get_vmid_bits(safe_mmfr1);
+       vmid_bits = get_vmid_bits(mmfr1);
+       if (vmid_bits < safe_vmid_bits) {
+               pr_crit("CPU%d: VMID width mismatch\n", smp_processor_id());
+               cpu_die_early();
+       }
+
+       /* Verify IPA range */
+       parange = mmfr0 & 0x7;
+       ipa_max = id_aa64mmfr0_parange_to_phys_shift(parange);
+       if (ipa_max < get_kvm_ipa_limit()) {
+               pr_crit("CPU%d: IPA range mismatch\n", smp_processor_id());
+               cpu_die_early();
+       }
+}
 
 /*
  * Run through the enabled system capabilities and enable() it on this CPU.
@@ -2340,6 +2369,9 @@ static void verify_local_cpu_capabilities(void)
 
        if (system_supports_sve())
                verify_sve_features();
+
+       if (is_hyp_mode_available())
+               verify_hyp_capabilities();
 }
 
 void check_local_cpu_capabilities(void)
arch/arm64/kvm/reset.c
index 102e5c4..d4eb661 100644 (file)
@@ -332,6 +332,11 @@ out:
        return ret;
 }
 
+u32 get_kvm_ipa_limit(void)
+{
+       return kvm_ipa_limit;
+}
+
 void kvm_set_ipa_limit(void)
 {
        unsigned int ipa_max, pa_max, va_max, parange;