KVM: arm64: Protect the .hyp sections from the host
author     Quentin Perret <qperret@google.com>
           Fri, 19 Mar 2021 10:01:46 +0000 (10:01 +0000)
committer  Marc Zyngier <maz@kernel.org>
           Fri, 19 Mar 2021 12:02:19 +0000 (12:02 +0000)
When KVM runs in nVHE protected mode, use the host stage 2 to unmap the
hypervisor sections by marking them as owned by the hypervisor itself.
The long-term goal is to ensure the EL2 code can remain robust
regardless of the host's state, so this starts by making sure the host
cannot, for example, write to the .hyp sections directly.
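
To illustrate the intended effect (the host-side access below is
hypothetical, and assumes finalize_hyp_mode() has completed):

        /* EL1: any host access to a .hyp page now takes a stage-2 fault */
        extern char __hyp_text_start[];

        memcpy(buf, __hyp_text_start, 16);
        /*
         * This no longer reads hypervisor text: the backing pages are
         * annotated in the host stage 2 as owned by EL2 (pkvm_hyp_id),
         * so the access traps and lands in handle_host_mem_abort().
         */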

Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20210319100146.1149909-39-qperret@google.com
arch/arm64/include/asm/kvm_asm.h
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
arch/arm64/kvm/hyp/nvhe/hyp-main.c
arch/arm64/kvm/hyp/nvhe/mem_protect.c

diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 4149283b4cd1e1b5f4aa5669e9cf905574ac756a..cf8df032b9c30e4285df23ec4c8078c8f465857e 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -62,6 +62,7 @@
 #define __KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping    17
 #define __KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector            18
 #define __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize             19
+#define __KVM_HOST_SMCCC_FUNC___pkvm_mark_hyp                  20
 
 #ifndef __ASSEMBLY__
 
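
Elsewhere in this header, KVM_HOST_SMCCC_FUNC() wraps such an index in
an ARM_SMCCC_CALL_VAL() vendor-hyp fast-call ID, so this new constant
is all the host needs to name the hypercall:

        /* Illustrative: the function ID the host places in x0 */
        u64 id = KVM_HOST_SMCCC_FUNC(__pkvm_mark_hyp);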
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index d237c378e6fbe93c3e3bb728d637134063bd3498..368159021dee4df7e8c031621a3efa0bd736376d 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1899,11 +1899,57 @@ void _kvm_host_prot_finalize(void *discard)
        WARN_ON(kvm_call_hyp_nvhe(__pkvm_prot_finalize));
 }
 
+static int pkvm_mark_hyp(phys_addr_t start, phys_addr_t end)
+{
+       return kvm_call_hyp_nvhe(__pkvm_mark_hyp, start, end);
+}
+
+#define pkvm_mark_hyp_section(__section)               \
+       pkvm_mark_hyp(__pa_symbol(__section##_start),   \
+                       __pa_symbol(__section##_end))
+
 static int finalize_hyp_mode(void)
 {
+       int cpu, ret;
+
        if (!is_protected_kvm_enabled())
                return 0;
 
+       ret = pkvm_mark_hyp_section(__hyp_idmap_text);
+       if (ret)
+               return ret;
+
+       ret = pkvm_mark_hyp_section(__hyp_text);
+       if (ret)
+               return ret;
+
+       ret = pkvm_mark_hyp_section(__hyp_rodata);
+       if (ret)
+               return ret;
+
+       ret = pkvm_mark_hyp_section(__hyp_bss);
+       if (ret)
+               return ret;
+
+       ret = pkvm_mark_hyp(hyp_mem_base, hyp_mem_base + hyp_mem_size);
+       if (ret)
+               return ret;
+
+       for_each_possible_cpu(cpu) {
+               phys_addr_t start = virt_to_phys((void *)kvm_arm_hyp_percpu_base[cpu]);
+               phys_addr_t end = start + (PAGE_SIZE << nvhe_percpu_order());
+
+               ret = pkvm_mark_hyp(start, end);
+               if (ret)
+                       return ret;
+
+               start = virt_to_phys((void *)per_cpu(kvm_arm_hyp_stack_page, cpu));
+               end = start + PAGE_SIZE;
+               ret = pkvm_mark_hyp(start, end);
+               if (ret)
+                       return ret;
+       }
+
        /*
         * Flip the static key upfront as that may no longer be possible
         * once the host stage 2 is installed.
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
index d293cb328cc4892758c06e538d9d5409e9385b4a..42d81ec739facdb9ef683779a74c1a1f2732d365 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
@@ -21,6 +21,8 @@ struct host_kvm {
 extern struct host_kvm host_kvm;
 
 int __pkvm_prot_finalize(void);
+int __pkvm_mark_hyp(phys_addr_t start, phys_addr_t end);
+
 int kvm_host_prepare_stage2(void *mem_pgt_pool, void *dev_pgt_pool);
 void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
 
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 69163f2cbb6347245db517d12139b98415d5c513..b4eaa7ef13e04d87ad073e0d7fcca0f9cac92cd6 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -156,6 +156,14 @@ static void handle___pkvm_prot_finalize(struct kvm_cpu_context *host_ctxt)
 {
        cpu_reg(host_ctxt, 1) = __pkvm_prot_finalize();
 }
+
+static void handle___pkvm_mark_hyp(struct kvm_cpu_context *host_ctxt)
+{
+       DECLARE_REG(phys_addr_t, start, host_ctxt, 1);
+       DECLARE_REG(phys_addr_t, end, host_ctxt, 2);
+
+       cpu_reg(host_ctxt, 1) = __pkvm_mark_hyp(start, end);
+}
 typedef void (*hcall_t)(struct kvm_cpu_context *);
 
 #define HANDLE_FUNC(x) [__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x
@@ -180,6 +188,7 @@ static const hcall_t host_hcall[] = {
        HANDLE_FUNC(__pkvm_create_mappings),
        HANDLE_FUNC(__pkvm_create_private_mapping),
        HANDLE_FUNC(__pkvm_prot_finalize),
+       HANDLE_FUNC(__pkvm_mark_hyp),
 };
 
 static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
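
The trailing context above cuts off at the top of handle_host_hcall().
As a sketch of how the new entry is reached (the real dispatcher also
rejects IDs that fall outside the table, as assumed here):

        static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
        {
                DECLARE_REG(unsigned long, id, host_ctxt, 0);

                id -= KVM_HOST_SMCCC_ID(0);     /* strip the SMCCC base */
                if (id < ARRAY_SIZE(host_hcall))
                        host_hcall[id](host_ctxt); /* handle___pkvm_mark_hyp() */
        }

handle___pkvm_mark_hyp() then pulls start/end out of x1/x2 via
DECLARE_REG() and writes the result back through cpu_reg(host_ctxt, 1),
which is where kvm_call_hyp_nvhe() reads its return value from.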
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index 77b48c47344d717fc91c485a54cd9300fa32f076..808e2471091bf6f40381868aa99714be336ee3e1 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -27,6 +27,8 @@ struct host_kvm host_kvm;
 struct hyp_pool host_s2_mem;
 struct hyp_pool host_s2_dev;
 
+static const u8 pkvm_hyp_id = 1;
+
 static void *host_s2_zalloc_pages_exact(size_t size)
 {
        return hyp_alloc_pages(&host_s2_mem, get_order(size));
@@ -182,6 +184,18 @@ static bool find_mem_range(phys_addr_t addr, struct kvm_mem_range *range)
        return false;
 }
 
+static bool range_is_memory(u64 start, u64 end)
+{
+       struct kvm_mem_range r1, r2;
+
+       if (!find_mem_range(start, &r1) || !find_mem_range(end, &r2))
+               return false;
+       if (r1.start != r2.start)
+               return false;
+
+       return true;
+}
+
 static inline int __host_stage2_idmap(u64 start, u64 end,
                                      enum kvm_pgtable_prot prot,
                                      struct hyp_pool *pool)
@@ -229,6 +243,25 @@ unlock:
        return ret;
 }
 
+int __pkvm_mark_hyp(phys_addr_t start, phys_addr_t end)
+{
+       int ret;
+
+       /*
+        * host_stage2_unmap_dev_all() currently relies on MMIO mappings being
+        * non-persistent, so don't allow changing page ownership in MMIO range.
+        */
+       if (!range_is_memory(start, end))
+               return -EINVAL;
+
+       hyp_spin_lock(&host_kvm.lock);
+       ret = kvm_pgtable_stage2_set_owner(&host_kvm.pgt, start, end - start,
+                                          &host_s2_mem, pkvm_hyp_id);
+       hyp_spin_unlock(&host_kvm.lock);
+
+       return ret != -EAGAIN ? ret : 0;
+}
+
 void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
 {
        struct kvm_vcpu_fault_info fault;
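
Two remarks on the mem_protect.c hunks. First, range_is_memory() leans
on find_mem_range() returning the (EL2 copy of the) memblock region
containing a given PA: if both endpoints resolve to a region with the
same base, the entire range lies within that one memory region. Second,
__pkvm_mark_hyp() squashes -EAGAIN to 0; this looks safe because the
stage-2 map walker returns -EAGAIN when a PTE already holds the
requested state, in which case there is nothing left to change.

A worked example against a hypothetical memory map with RAM at
[0x80000000, 0xc0000000) and [0x100000000, 0x140000000):

        range_is_memory(0x80001000, 0x80003000);  /* true: same region */
        range_is_memory(0xbffff000, 0x100001000); /* false: r1.start != r2.start */
        range_is_memory(0xf0000000, 0xf0001000);  /* false: hole (e.g. MMIO) */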