Merge branch kvm-arm64/pkvm/fixed-features into kvmarm-master/next
author     Marc Zyngier <maz@kernel.org>
           Mon, 18 Oct 2021 16:20:50 +0000 (17:20 +0100)
committer  Marc Zyngier <maz@kernel.org>
           Mon, 18 Oct 2021 16:20:50 +0000 (17:20 +0100)
* kvm-arm64/pkvm/fixed-features: (22 commits)
  : .
  : Add the pKVM fixed-feature handling, which allows a number of
  : exceptions to either be forbidden or be easily handled at EL2
  : (a short sketch follows the commit list below).
  : .
  KVM: arm64: pkvm: Give priority to standard traps over pvm handling
  KVM: arm64: pkvm: Pass vcpu instead of kvm to kvm_get_exit_handler_array()
  KVM: arm64: pkvm: Move kvm_handle_pvm_restricted around
  KVM: arm64: pkvm: Consolidate include files
  KVM: arm64: pkvm: Preserve pending SError on exit from AArch32
  KVM: arm64: pkvm: Handle GICv3 traps as required
  KVM: arm64: pkvm: Drop sysregs that should never be routed to the host
  KVM: arm64: pkvm: Drop AArch32-specific registers
  KVM: arm64: pkvm: Make the ERR/ERX*_EL1 registers RAZ/WI
  KVM: arm64: pkvm: Use a single function to expose all id-regs
  KVM: arm64: Fix early exit ptrauth handling
  KVM: arm64: Handle protected guests at 32 bits
  KVM: arm64: Trap access to pVM restricted features
  KVM: arm64: Move sanitized copies of CPU features
  KVM: arm64: Initialize trap registers for protected VMs
  KVM: arm64: Add handlers for protected VM System Registers
  KVM: arm64: Simplify masking out MTE in feature id reg
  KVM: arm64: Add missing field descriptor for MDCR_EL2
  KVM: arm64: Pass struct kvm to per-EC handlers
  KVM: arm64: Move early handlers to per-EC handlers
  ...

Signed-off-by: Marc Zyngier <maz@kernel.org>
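In code terms, the fixed-feature handling boils down to the pattern sketched below: the hypervisor derives a fixed, sanitised view of a protected VM's feature ID registers and sets trap bits for any feature that view does not expose, so the corresponding accesses are taken to EL2 and either emulated or refused there. The sketch illustrates that pattern for ID_AA64PFR0_EL1; pvm_read_id_reg() and pvm_init_traps_aa64pfr0() are used here as illustrative names and may not match the series exactly.

/*
 * Illustrative sketch, not the exact EL2 code added by the series:
 * pvm_read_id_reg() stands in for the protected-VM view of the
 * feature ID registers.
 */
static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
{
	const u64 ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1);
	u64 hcr_set = 0, hcr_clear = 0;

	/* No RAS: route error-record accesses and external aborts to EL2 */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_RAS), ids))
		hcr_set |= HCR_TERR | HCR_TEA;

	/* No AMU: keep the virtual AMU offsets disabled */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_AMU), ids))
		hcr_clear |= HCR_AMVOFFEN;

	vcpu->arch.hcr_el2 |= hcr_set;
	vcpu->arch.hcr_el2 &= ~hcr_clear;
}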
arch/arm64/include/asm/kvm_asm.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/nvhe/hyp-main.c
arch/arm64/kvm/hyp/nvhe/mem_protect.c
arch/arm64/kvm/sys_regs.c

arch/arm64/include/asm/kvm_asm.h
  
  #include <linux/mm.h>
  
 +enum __kvm_host_smccc_func {
 +      /* Hypercalls available only prior to pKVM finalisation */
 +      /* __KVM_HOST_SMCCC_FUNC___kvm_hyp_init */
 +      __KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2 = __KVM_HOST_SMCCC_FUNC___kvm_hyp_init + 1,
 +      __KVM_HOST_SMCCC_FUNC___pkvm_init,
 +      __KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping,
 +      __KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector,
 +      __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs,
 +      __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs,
 +      __KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config,
 +      __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize,
 +
 +      /* Hypercalls available after pKVM finalisation */
 +      __KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp,
 +      __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc,
 +      __KVM_HOST_SMCCC_FUNC___kvm_vcpu_run,
 +      __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context,
 +      __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa,
 +      __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid,
 +      __KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
 +      __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
 +      __KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr,
 +      __KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr,
 +      __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs,
 +      __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs,
++      __KVM_HOST_SMCCC_FUNC___pkvm_vcpu_init_traps,
 +};
 +
  #define DECLARE_KVM_VHE_SYM(sym)      extern char sym[]
  #define DECLARE_KVM_NVHE_SYM(sym)     extern char kvm_nvhe_sym(sym)[]
  
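Each entry in the enum above maps one-to-one to an SMCCC function ID (via KVM_HOST_SMCCC_FUNC()), and the host issues the call through the existing kvm_call_hyp_nvhe() helper. As a hedged sketch, assuming a hypothetical wrapper around the call site that actually lives in arch/arm64/kvm/arm.c (merged cleanly below), the new hypercall would be invoked roughly like this:

/*
 * Hedged sketch of a host-side call site (the wrapper name is
 * hypothetical): ask EL2 to configure a protected vcpu's trap
 * registers before it runs for the first time.
 */
static void pkvm_vcpu_setup_traps(struct kvm_vcpu *vcpu)
{
	if (!is_protected_kvm_enabled())
		return;

	/* kvm_call_hyp_nvhe() turns the enum entry into the SMCCC ID in x0 */
	kvm_call_hyp_nvhe(__pkvm_vcpu_init_traps, vcpu);
}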
arch/arm64/include/asm/kvm_host.h
Simple merge
arch/arm64/kvm/arm.c
Simple merge
arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@@ -183,10 -180,20 +191,11 @@@ static const hcall_t host_hcall[] = 
        HANDLE_FUNC(__kvm_tlb_flush_vmid),
        HANDLE_FUNC(__kvm_flush_cpu_context),
        HANDLE_FUNC(__kvm_timer_set_cntvoff),
 -      HANDLE_FUNC(__kvm_enable_ssbs),
 -      HANDLE_FUNC(__vgic_v3_get_gic_config),
        HANDLE_FUNC(__vgic_v3_read_vmcr),
        HANDLE_FUNC(__vgic_v3_write_vmcr),
 -      HANDLE_FUNC(__vgic_v3_init_lrs),
 -      HANDLE_FUNC(__kvm_get_mdcr_el2),
        HANDLE_FUNC(__vgic_v3_save_aprs),
        HANDLE_FUNC(__vgic_v3_restore_aprs),
 -      HANDLE_FUNC(__pkvm_init),
 -      HANDLE_FUNC(__pkvm_cpu_set_vector),
 -      HANDLE_FUNC(__pkvm_host_share_hyp),
 -      HANDLE_FUNC(__pkvm_create_private_mapping),
 -      HANDLE_FUNC(__pkvm_prot_finalize),
+       HANDLE_FUNC(__pkvm_vcpu_init_traps),
  };
  
  static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
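Keeping the pre-finalisation hypercalls at the low indices of host_hcall[] is what allows the dispatcher to refuse them with a single boundary check once pKVM has been finalised. The body of handle_host_hcall() is not shown in this hunk; the following is a simplified sketch of the dispatch pattern only (the function name is a stand-in and the finalisation check is omitted):

/*
 * Simplified sketch of the dispatch pattern, not the verbatim body of
 * handle_host_hcall(): x0 carries the SMCCC function ID, which is
 * turned back into an index into host_hcall[] and range-checked.
 */
static void handle_host_hcall_sketch(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(unsigned long, id, host_ctxt, 0);
	hcall_t hfn = NULL;

	id -= KVM_HOST_SMCCC_ID(0);
	if (id < ARRAY_SIZE(host_hcall))
		hfn = host_hcall[id];

	if (!hfn) {
		cpu_reg(host_ctxt, 0) = SMCCC_RET_NOT_SUPPORTED;
		return;
	}

	cpu_reg(host_ctxt, 0) = SMCCC_RET_SUCCESS;
	hfn(host_ctxt);
}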
arch/arm64/kvm/hyp/nvhe/mem_protect.c
Simple merge
arch/arm64/kvm/sys_regs.c
@@@ -1080,21 -1075,10 +1080,15 @@@ static u64 read_id_reg(const struct kvm
                val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
                val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3);
                val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
 +              if (irqchip_in_kernel(vcpu->kvm) &&
 +                  vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
 +                      val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_GIC);
 +                      val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_GIC), 1);
 +              }
                break;
        case SYS_ID_AA64PFR1_EL1:
-               val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE);
-               if (kvm_has_mte(vcpu->kvm)) {
-                       u64 pfr, mte;
-                       pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
-                       mte = cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR1_MTE_SHIFT);
-                       val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR1_MTE), mte);
-               }
+               if (!kvm_has_mte(vcpu->kvm))
+                       val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE);
                break;
        case SYS_ID_AA64ISAR1_EL1:
                if (!vcpu_has_ptrauth(vcpu))