RISC-V: KVM: Add G-stage ioremap() and iounmap() functions
author Anup Patel <apatel@ventanamicro.com>
Fri, 29 Jul 2022 11:45:06 +0000 (17:15 +0530)
committer Anup Patel <anup@brainfault.org>
Fri, 29 Jul 2022 11:45:06 +0000 (17:15 +0530)
The in-kernel AIA IMSIC support requires on-demand mapping and unmapping
of the Guest IMSIC address to Host IMSIC guest files. To help achieve this,
we add kvm_riscv_gstage_ioremap() and kvm_riscv_gstage_iounmap() functions.
These new functions for updating G-stage page table mappings will be called
in atomic context, so we add a special "in_atomic" parameter for this purpose.
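
As a minimal usage sketch, a hypothetical caller in the planned in-kernel
IMSIC code might look like below (only the two function signatures come
from this patch; the caller name, parameters, and locking context are
illustrative):

  /*
   * Swap the mapping of a guest interrupt file from a context that
   * cannot sleep. The caller must not already hold kvm->mmu_lock,
   * since both helpers take it internally.
   */
  static int imsic_remap_guest_file(struct kvm *kvm, gpa_t gpa,
                                    phys_addr_t new_hpa,
                                    unsigned long size)
  {
          /* Tear down the old mapping; this path never sleeps. */
          kvm_riscv_gstage_iounmap(kvm, gpa, size);

          /*
           * Install the new mapping. writable=true because the guest
           * writes MSIs into this range; in_atomic=true makes the
           * page table cache topup use GFP_ATOMIC | __GFP_ACCOUNT.
           */
          return kvm_riscv_gstage_ioremap(kvm, gpa, new_hpa, size,
                                          true, true);
  }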

Signed-off-by: Anup Patel <apatel@ventanamicro.com>
Reviewed-by: Atish Patra <atishp@rivosinc.com>
Signed-off-by: Anup Patel <anup@brainfault.org>
arch/riscv/include/asm/kvm_host.h
arch/riscv/kvm/mmu.c

index 59a0cf2..60c517e 100644
@@ -284,6 +284,11 @@ void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
 void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
                               unsigned long hbase, unsigned long hmask);
 
+int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
+                            phys_addr_t hpa, unsigned long size,
+                            bool writable, bool in_atomic);
+void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa,
+                             unsigned long size);
 int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
                         struct kvm_memory_slot *memslot,
                         gpa_t gpa, unsigned long hva, bool is_write);
index b75d4e2..f7862ca 100644
@@ -343,8 +343,9 @@ static void gstage_wp_memory_region(struct kvm *kvm, int slot)
        kvm_flush_remote_tlbs(kvm);
 }
 
-static int gstage_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
-                         unsigned long size, bool writable)
+int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
+                            phys_addr_t hpa, unsigned long size,
+                            bool writable, bool in_atomic)
 {
        pte_t pte;
        int ret = 0;
@@ -353,6 +354,7 @@ static int gstage_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
        struct kvm_mmu_memory_cache pcache;
 
        memset(&pcache, 0, sizeof(pcache));
+       pcache.gfp_custom = (in_atomic) ? GFP_ATOMIC | __GFP_ACCOUNT : 0;
        pcache.gfp_zero = __GFP_ZERO;
 
        end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
@@ -382,6 +384,13 @@ out:
        return ret;
 }
 
+void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa, unsigned long size)
+{
+       spin_lock(&kvm->mmu_lock);
+       gstage_unmap_range(kvm, gpa, size, false);
+       spin_unlock(&kvm->mmu_lock);
+}
+
 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
                                             struct kvm_memory_slot *slot,
                                             gfn_t gfn_offset,
@@ -517,8 +526,9 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                goto out;
                        }
 
-                       ret = gstage_ioremap(kvm, gpa, pa,
-                                            vm_end - vm_start, writable);
+                       ret = kvm_riscv_gstage_ioremap(kvm, gpa, pa,
+                                                      vm_end - vm_start,
+                                                      writable, false);
                        if (ret)
                                break;
                }
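
For reference, the "in_atomic" handling relies on the generic KVM memory
cache honoring a caller-supplied GFP mask. A simplified, paraphrased
sketch of the relevant topup logic in virt/kvm/kvm_main.c (not part of
this diff; the helper name here is illustrative):

  /*
   * When the architecture sets gfp_custom (here: GFP_ATOMIC |
   * __GFP_ACCOUNT when in_atomic is true), it is used for page table
   * page allocations instead of the default GFP_KERNEL_ACCOUNT, so
   * cache topup stays safe in atomic context.
   */
  static void *mmu_cache_alloc_page(struct kvm_mmu_memory_cache *mc)
  {
          gfp_t gfp = mc->gfp_custom ? mc->gfp_custom
                                     : GFP_KERNEL_ACCOUNT;

          return (void *)__get_free_page(gfp | mc->gfp_zero);
  }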