KVM: arm64: Advertise ID_AA64PFR0_EL1.CSV2/3 to protected VMs
author Fuad Tabba <tabba@google.com>
Tue, 4 Apr 2023 15:23:21 +0000 (16:23 +0100)
committer Oliver Upton <oliver.upton@linux.dev>
Tue, 4 Apr 2023 15:52:06 +0000 (15:52 +0000)
The existing pKVM code attempts to advertise CSV2/3 using values that
are initialized to 0 but never set. To advertise CSV2/3 to protected
guests, pass the system's CSV2/3 values to hyp when initializing hyp's
view of the guests' ID_AA64PFR0_EL1.

As with non-protected KVM, these values are system-wide rather than
per-CPU, for simplicity.

Fixes: 6c30bfb18d0b ("KVM: arm64: Add handlers for protected VM System Registers")
Signed-off-by: Fuad Tabba <tabba@google.com>
Link: https://lore.kernel.org/r/20230404152321.413064-1-tabba@google.com
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
arch/arm64/kvm/hyp/nvhe/sys_regs.c

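Before the diff, a brief aside for readers unfamiliar with the feature-register
helpers it uses: the following is a minimal, self-contained sketch (plain C, not
kernel code) of the bit manipulation that the new get_hyp_id_aa64pfr0_el1()
performs via ARM64_FEATURE_MASK()/FIELD_PREP(). It assumes the architectural
placement of CSV2 at bits [59:56] and CSV3 at bits [63:60] of ID_AA64PFR0_EL1,
and the booleans spectre_safe/meltdown_safe stand in for the
arm64_get_spectre_v2_state()/arm64_get_meltdown_state() checks; the helper name
hyp_id_aa64pfr0() is illustrative only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Architectural fields in ID_AA64PFR0_EL1: CSV2 = bits [59:56], CSV3 = bits [63:60]. */
#define CSV2_SHIFT 56
#define CSV3_SHIFT 60
#define CSV2_MASK (UINT64_C(0xf) << CSV2_SHIFT) /* roughly ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) */
#define CSV3_MASK (UINT64_C(0xf) << CSV3_SHIFT) /* roughly ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3) */

/* Sketch of get_hyp_id_aa64pfr0_el1(): 'sanitised' plays the role of
 * read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1). */
static uint64_t hyp_id_aa64pfr0(uint64_t sanitised, bool spectre_safe, bool meltdown_safe)
{
        uint64_t val = sanitised;

        /* Clear the CSV2/CSV3 fields of the sanitised value... */
        val &= ~(CSV2_MASK | CSV3_MASK);

        /* ...then set each field to 1 only when the system is unaffected,
         * mirroring FIELD_PREP(mask, state == SPECTRE_UNAFFECTED). */
        val |= (uint64_t)spectre_safe << CSV2_SHIFT;
        val |= (uint64_t)meltdown_safe << CSV3_SHIFT;

        return val;
}

int main(void)
{
        /* On a system unaffected by Spectre-v2 and Meltdown, protected guests
         * see CSV2 = CSV3 = 1. */
        printf("0x%016llx\n", (unsigned long long)hyp_id_aa64pfr0(0, true, true));
        return 0;
}

Calling hyp_id_aa64pfr0(0, true, true) yields 0x1100000000000000, i.e. both
CSV2 and CSV3 read as 1.
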
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 3bd732e..e850aec 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1889,9 +1889,33 @@ static int __init do_pkvm_init(u32 hyp_va_bits)
        return ret;
 }
 
+static u64 get_hyp_id_aa64pfr0_el1(void)
+{
+       /*
+        * Track whether the system isn't affected by spectre/meltdown in the
+        * hypervisor's view of id_aa64pfr0_el1, used for protected VMs.
+        * Although this is per-CPU, we make it global for simplicity, e.g., not
+        * to have to worry about vcpu migration.
+        *
+        * Unlike for non-protected VMs, userspace cannot override this for
+        * protected VMs.
+        */
+       u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+
+       val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
+                ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));
+
+       val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
+                         arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED);
+       val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
+                         arm64_get_meltdown_state() == SPECTRE_UNAFFECTED);
+
+       return val;
+}
+
 static void kvm_hyp_init_symbols(void)
 {
-       kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+       kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = get_hyp_id_aa64pfr0_el1();
        kvm_nvhe_sym(id_aa64pfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
        kvm_nvhe_sym(id_aa64isar0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR0_EL1);
        kvm_nvhe_sym(id_aa64isar1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1);
diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
index 07edfc7..37440e1 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
  * Allow for protected VMs:
  * - Floating-point and Advanced SIMD
  * - Data Independent Timing
+ * - Spectre/Meltdown Mitigation
  */
 #define PVM_ID_AA64PFR0_ALLOW (\
        ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP) | \
        ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD) | \
-       ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) \
+       ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) | \
+       ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) | \
+       ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3) \
        )
 
 /*
diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
index 08d2b00..edd969a 100644
--- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c
+++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
@@ -85,19 +85,12 @@ static u64 get_restricted_features_unsigned(u64 sys_reg_val,
 
 static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu)
 {
-       const struct kvm *kvm = (const struct kvm *)kern_hyp_va(vcpu->kvm);
        u64 set_mask = 0;
        u64 allow_mask = PVM_ID_AA64PFR0_ALLOW;
 
        set_mask |= get_restricted_features_unsigned(id_aa64pfr0_el1_sys_val,
                PVM_ID_AA64PFR0_RESTRICT_UNSIGNED);
 
-       /* Spectre and Meltdown mitigation in KVM */
-       set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
-                              (u64)kvm->arch.pfr0_csv2);
-       set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
-                              (u64)kvm->arch.pfr0_csv3);
-
        return (id_aa64pfr0_el1_sys_val & allow_mask) | set_mask;
 }
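
To see why the allow-mask change in fixed_config.h matters together with the
host-side change in arm.c, here is a similarly hedged, self-contained sketch of
the filtering that get_pvm_id_aa64pfr0() performs: the value published by the
host (id_aa64pfr0_el1_sys_val) is ANDed with PVM_ID_AA64PFR0_ALLOW and ORed
with the set_mask of restricted fields. The masks and values below are
illustrative placeholders, not the real kernel constants.

#include <stdint.h>
#include <stdio.h>

#define CSV2_MASK (UINT64_C(0xf) << 56) /* placeholder for ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) */

/* Sketch of get_pvm_id_aa64pfr0(): keep only the allowed fields of the
 * host-published value, then force in the fields pKVM pins via set_mask. */
static uint64_t pvm_id_aa64pfr0(uint64_t sys_val, uint64_t allow_mask, uint64_t set_mask)
{
        return (sys_val & allow_mask) | set_mask;
}

int main(void)
{
        uint64_t sys_val = UINT64_C(1) << 56; /* host-published value with CSV2 = 1 */

        /* Without CSV2 in the allow mask the guest reads 0; once CSV2 is part of
         * the allow mask, the host-published value passes through unchanged. */
        printf("old allow mask: 0x%016llx\n", (unsigned long long)pvm_id_aa64pfr0(sys_val, 0, 0));
        printf("new allow mask: 0x%016llx\n", (unsigned long long)pvm_id_aa64pfr0(sys_val, CSV2_MASK, 0));
        return 0;
}

The second printf reports 0x0100000000000000, matching the CSV2 value the host
published, whereas the first (old allow mask) reports 0.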