/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Anup Patel <anup.patel@wdc.com>
 */

#ifndef __RISCV_KVM_HOST_H__
#define __RISCV_KVM_HOST_H__

#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/spinlock.h>
#include <asm/hwcap.h>
#include <asm/kvm_aia.h>
#include <asm/kvm_vcpu_fp.h>
#include <asm/kvm_vcpu_insn.h>
#include <asm/kvm_vcpu_sbi.h>
#include <asm/kvm_vcpu_timer.h>
#include <asm/kvm_vcpu_pmu.h>

#define KVM_MAX_VCPUS			1024

#define KVM_HALT_POLL_NS_DEFAULT	500000

#define KVM_VCPU_MAX_FEATURES		0

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_VCPU_RESET		KVM_ARCH_REQ(1)
#define KVM_REQ_UPDATE_HGATP		KVM_ARCH_REQ(2)
#define KVM_REQ_FENCE_I			\
	KVM_ARCH_REQ_FLAGS(3, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HFENCE_GVMA_VMID_ALL	KVM_REQ_TLB_FLUSH
#define KVM_REQ_HFENCE_VVMA_ALL		\
	KVM_ARCH_REQ_FLAGS(4, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HFENCE			\
	KVM_ARCH_REQ_FLAGS(5, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
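
/*
 * Usage sketch (illustrative, not defined in this header): requests are
 * posted and consumed through the generic KVM request API. For example,
 * another thread can ask a VCPU to reset itself, and the VCPU's run loop
 * picks the request up before guest entry:
 *
 *	kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
 *	...
 *	if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
 *		kvm_riscv_reset_vcpu(vcpu);
 *
 * kvm_riscv_reset_vcpu() is named here by convention; the actual consumer
 * lives in the arch VCPU code, not in this header.
 */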

enum kvm_riscv_hfence_type {
	KVM_RISCV_HFENCE_UNKNOWN = 0,
	KVM_RISCV_HFENCE_GVMA_VMID_GPA,
	KVM_RISCV_HFENCE_VVMA_ASID_GVA,
	KVM_RISCV_HFENCE_VVMA_ASID_ALL,
	KVM_RISCV_HFENCE_VVMA_GVA,
};

struct kvm_riscv_hfence {
	enum kvm_riscv_hfence_type type;
	unsigned long asid;
	unsigned long order;
	gpa_t addr;
	gpa_t size;
};

#define KVM_RISCV_VCPU_MAX_HFENCE	64

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 ecall_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 csr_exit_user;
	u64 csr_exit_kernel;
	u64 signal_exits;
	u64 exits;
};

struct kvm_arch_memory_slot {
};

struct kvm_vmid {
	/*
	 * Writes to vmid_version and vmid happen with vmid_lock held,
	 * whereas reads happen without any lock held.
	 */
	unsigned long vmid_version;
	unsigned long vmid;
};
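
/*
 * Illustrative sketch: vmid_lock lives with the VMID allocator, not in this
 * header. A reader samples vmid_version locklessly and only falls back to a
 * locked update when the version is stale; "current_version" below is a
 * hypothetical name for the allocator's global counter:
 *
 *	if (READ_ONCE(vmid->vmid_version) != READ_ONCE(current_version)) {
 *		spin_lock(&vmid_lock);
 *		...allocate a fresh VMID, then publish the new vmid_version...
 *		spin_unlock(&vmid_lock);
 *	}
 *
 * kvm_riscv_gstage_vmid_ver_changed() declared below wraps the staleness
 * check.
 */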

struct kvm_arch {
	/* G-stage vmid */
	struct kvm_vmid vmid;

	/* G-stage page table */
	pgd_t *pgd;
	phys_addr_t pgd_phys;

	/* Guest Timer */
	struct kvm_guest_timer timer;

	/* AIA Guest/VM context */
	struct kvm_aia aia;
};

struct kvm_cpu_trap {
	unsigned long sepc;
	unsigned long scause;
	unsigned long stval;
	unsigned long htval;
	unsigned long htinst;
};

struct kvm_cpu_context {
	unsigned long zero;
	unsigned long ra;
	unsigned long sp;
	unsigned long gp;
	unsigned long tp;
	unsigned long t0;
	unsigned long t1;
	unsigned long t2;
	unsigned long s0;
	unsigned long s1;
	unsigned long a0;
	unsigned long a1;
	unsigned long a2;
	unsigned long a3;
	unsigned long a4;
	unsigned long a5;
	unsigned long a6;
	unsigned long a7;
	unsigned long s2;
	unsigned long s3;
	unsigned long s4;
	unsigned long s5;
	unsigned long s6;
	unsigned long s7;
	unsigned long s8;
	unsigned long s9;
	unsigned long s10;
	unsigned long s11;
	unsigned long t3;
	unsigned long t4;
	unsigned long t5;
	unsigned long t6;
	unsigned long sepc;
	unsigned long sstatus;
	unsigned long hstatus;
	union __riscv_fp_state fp;
};

struct kvm_vcpu_csr {
	unsigned long vsstatus;
	unsigned long vsie;
	unsigned long vstvec;
	unsigned long vsscratch;
	unsigned long vsepc;
	unsigned long vscause;
	unsigned long vstval;
	unsigned long hvip;
	unsigned long vsatp;
	unsigned long scounteren;
};

struct kvm_vcpu_arch {
	/* VCPU ran at least once */
	bool ran_atleast_once;

	/* Last Host CPU on which Guest VCPU exited */
	int last_exit_cpu;

	/* ISA feature bits (similar to MISA) */
	DECLARE_BITMAP(isa, RISCV_ISA_EXT_MAX);

	/* Vendor, Arch, and Implementation details */
	unsigned long mvendorid;
	unsigned long marchid;
	unsigned long mimpid;

	/* SSCRATCH, STVEC, and SCOUNTEREN of Host */
	unsigned long host_sscratch;
	unsigned long host_stvec;
	unsigned long host_scounteren;

	/* CPU context of Host */
	struct kvm_cpu_context host_context;

	/* CPU context of Guest VCPU */
	struct kvm_cpu_context guest_context;

	/* CPU CSR context of Guest VCPU */
	struct kvm_vcpu_csr guest_csr;

	/* CPU context upon Guest VCPU reset */
	struct kvm_cpu_context guest_reset_context;

	/* CPU CSR context upon Guest VCPU reset */
	struct kvm_vcpu_csr guest_reset_csr;

	/*
	 * VCPU interrupts
	 *
	 * We have a lockless approach for tracking pending VCPU interrupts,
	 * implemented using atomic bitops. The irqs_pending bitmap represents
	 * pending interrupts, whereas irqs_pending_mask represents the bits
	 * that changed in irqs_pending. The approach is modeled on the
	 * multiple-producer, single-consumer problem, where the consumer is
	 * the VCPU itself (see the sketch below).
	 */
#define KVM_RISCV_VCPU_NR_IRQS	64
	DECLARE_BITMAP(irqs_pending, KVM_RISCV_VCPU_NR_IRQS);
	DECLARE_BITMAP(irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS);
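
	/*
	 * Producer/consumer sketch (illustrative; mirrors what the arch
	 * VCPU code is expected to do, not a definition in this header).
	 * A producer publishes the pending bit before marking it changed,
	 * so the VCPU, the only consumer, cannot miss an update:
	 *
	 *	set_bit(irq, vcpu->arch.irqs_pending);
	 *	smp_mb__before_atomic();
	 *	set_bit(irq, vcpu->arch.irqs_pending_mask);
	 *	kvm_vcpu_kick(vcpu);
	 *
	 * kvm_riscv_vcpu_flush_interrupts() declared below then folds the
	 * changed bits into the guest's hvip CSR before VCPU entry.
	 */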

	/* VCPU Timer */
	struct kvm_vcpu_timer timer;

	/* HFENCE request queue */
	spinlock_t hfence_lock;
	unsigned long hfence_head;
	unsigned long hfence_tail;
	struct kvm_riscv_hfence hfence_queue[KVM_RISCV_VCPU_MAX_HFENCE];
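
	/*
	 * Illustrative note: hfence_queue is a bounded ring. A producer
	 * holding hfence_lock stores at hfence_tail and advances it as
	 *
	 *	hfence_tail = (hfence_tail + 1) % KVM_RISCV_VCPU_MAX_HFENCE;
	 *
	 * then raises KVM_REQ_HFENCE; the VCPU drains entries from
	 * hfence_head in kvm_riscv_hfence_process() with the same
	 * wrap-around.
	 */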

	/* MMIO instruction details */
	struct kvm_mmio_decode mmio_decode;

	/* CSR instruction details */
	struct kvm_csr_decode csr_decode;

	/* SBI context */
	struct kvm_vcpu_sbi_context sbi_context;

	/* AIA VCPU context */
	struct kvm_vcpu_aia aia_context;

	/* Cache pages needed to program page tables with spinlock held */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* VCPU power-off state */
	bool power_off;

	/* Don't run the VCPU (blocked) */
	bool pause;

	/* Performance monitoring context */
	struct kvm_pmu pmu_context;
};

static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}

#define KVM_ARCH_WANT_MMU_NOTIFIER

#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER		12
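
/*
 * Illustrative note: the "order" parameters below give the log2 of the
 * invalidation granularity, with KVM_RISCV_GSTAGE_TLB_MIN_ORDER (12, a
 * 4 KiB page) as the smallest unit. For example, flushing a single 2 MiB
 * huge-page mapping could pass (assuming the usual PMD constants):
 *
 *	kvm_riscv_local_hfence_gvma_vmid_gpa(vmid, gpa, PMD_SIZE, PMD_SHIFT);
 */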

void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
					  gpa_t gpa, gpa_t gpsz,
					  unsigned long order);
void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid);
void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
				     unsigned long order);
void kvm_riscv_local_hfence_gvma_all(void);
void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
					  unsigned long asid,
					  unsigned long gva,
					  unsigned long gvsz,
					  unsigned long order);
void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
					  unsigned long asid);
void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
				     unsigned long gva, unsigned long gvsz,
				     unsigned long order);
void kvm_riscv_local_hfence_vvma_all(unsigned long vmid);

void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu);

void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu);

void kvm_riscv_fence_i(struct kvm *kvm,
		       unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    gpa_t gpa, gpa_t gpsz,
				    unsigned long order);
void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long gva, unsigned long gvsz,
				    unsigned long order, unsigned long asid);
void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long asid);
void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask,
			       unsigned long gva, unsigned long gvsz,
			       unsigned long order);
void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask);
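
/*
 * Illustrative note on hbase/hmask above: they select target VCPUs as a
 * hart-mask base plus bitmask, mirroring the SBI remote-fence convention
 * where a base of -1UL means "all harts" (the mask is then ignored), e.g.:
 *
 *	kvm_riscv_hfence_vvma_all(kvm, -1UL, 0);
 */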

int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
			     phys_addr_t hpa, unsigned long size,
			     bool writable, bool in_atomic);
void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa,
			      unsigned long size);
int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
			 struct kvm_memory_slot *memslot,
			 gpa_t gpa, unsigned long hva, bool is_write);
int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm);
void kvm_riscv_gstage_free_pgd(struct kvm *kvm);
void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu);
void __init kvm_riscv_gstage_mode_detect(void);
unsigned long __init kvm_riscv_gstage_mode(void);
int kvm_riscv_gstage_gpa_bits(void);
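
/*
 * Illustrative use (hypothetical gpa/hpa/size values): mapping host MMIO
 * into the guest physical address space, writable, from sleepable context:
 *
 *	ret = kvm_riscv_gstage_ioremap(kvm, gpa, hpa, size, true, false);
 *	...
 *	kvm_riscv_gstage_iounmap(kvm, gpa, size);
 */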

void __init kvm_riscv_gstage_vmid_detect(void);
unsigned long kvm_riscv_gstage_vmid_bits(void);
int kvm_riscv_gstage_vmid_init(struct kvm *kvm);
bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid);
void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu);

void __kvm_riscv_unpriv_trap(void);

unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
					 bool read_insn,
					 unsigned long guest_addr,
					 struct kvm_cpu_trap *trap);
void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
				  struct kvm_cpu_trap *trap);
int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
			struct kvm_cpu_trap *trap);

void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch);

int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu);
bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask);
void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);

#endif /* __RISCV_KVM_HOST_H__ */