const struct kvm_one_reg *reg, s64 v);
int (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
int (*vcpu_put)(struct kvm_vcpu *vcpu, int cpu);
- int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
- void (*vcpu_reenter)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+ int (*vcpu_run)(struct kvm_vcpu *vcpu);
+ void (*vcpu_reenter)(struct kvm_vcpu *vcpu);
};
extern struct kvm_mips_callbacks *kvm_mips_callbacks;
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
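With the run pointer dropped from vcpu_run() and vcpu_reenter(), a backend that still needs the shared kvm_run area derives it from the vCPU itself. A minimal sketch of a callback under the new signature (the names below are illustrative, not part of this patch):

	/*
	 * Sketch only: the kvm_run page is reachable as vcpu->run, so it
	 * no longer has to be threaded through as a parameter.
	 */
	static int example_vcpu_run(struct kvm_vcpu *vcpu)
	{
		struct kvm_run *run = vcpu->run;	/* derived, not passed */

		if (run->immediate_exit)
			return -EINTR;
		/* ... enter the guest, set run->exit_reason on the way out ... */
		return 0;
	}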
extern enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu,
bool write_fault);
extern enum emulation_result kvm_mips_emulate_inst(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu);
long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu);
extern enum emulation_result kvm_mips_emulate_syscall(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu);
extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu);
extern enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu);
extern enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu);
extern enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu);
extern enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu);
extern enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu);
extern enum emulation_result kvm_mips_handle_ri(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu);
extern enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu);
extern enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu);
extern enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu);
extern enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu);
extern enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu);
extern enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
- struct kvm_run *run);
+extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu);
u32 kvm_mips_read_count(struct kvm_vcpu *vcpu);
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count);
enum emulation_result kvm_mips_check_privilege(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
u32 *opc,
u32 cause,
- struct kvm_run *run,
struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
u32 *opc,
u32 cause,
- struct kvm_run *run,
struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
u32 cause,
- struct kvm_run *run,
struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
u32 cause,
- struct kvm_run *run,
struct kvm_vcpu *vcpu);
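All of the emulation helpers above lose their struct kvm_run * parameter for the same reason: kvm_run is per-vCPU state (the page userspace mmaps from the vcpu fd), so vcpu->run always reaches it. Call sites shrink mechanically, for example:

	/* before */
	er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
	/* after: the callee recovers run via vcpu->run if it needs it */
	er = kvm_mips_emulate_inst(cause, opc, vcpu);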
/* COP0 */
enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
u32 *opc, u32 cause,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
u32 cause,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
int r;
enum emulation_result er;
u32 rt;
+ struct kvm_run *run = vcpu->run;
void *data = run->mmio.data;
unsigned int imme;
unsigned long curr_pc;
}
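kvm_mips_emulate_store() is one of the few helpers that actually touches the run area: it stages the MMIO write in run->mmio for userspace, which is why it now opens with a local struct kvm_run *run = vcpu->run;. A hedged sketch of the staging pattern it follows (field usage simplified; gpa stands in for a value computed earlier in the function, rt is the source register decoded from the instruction):

	run->mmio.phys_addr = gpa;		/* assumed: translated fault address */
	run->mmio.len = sizeof(u32);
	run->mmio.is_write = 1;
	*(u32 *)run->mmio.data = vcpu->arch.gprs[rt];
	vcpu->mmio_needed = 1;			/* completed on the next KVM_RUN */
	vcpu->mmio_is_write = 1;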
enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
- u32 cause, struct kvm_run *run,
- struct kvm_vcpu *vcpu)
+ u32 cause, struct kvm_vcpu *vcpu)
{
+ struct kvm_run *run = vcpu->run;
int r;
enum emulation_result er;
unsigned long curr_pc;
static enum emulation_result kvm_mips_guest_cache_op(int (*fn)(unsigned long),
unsigned long curr_pc,
unsigned long addr,
- struct kvm_run *run,
struct kvm_vcpu *vcpu,
u32 cause)
{
/* no matching guest TLB */
vcpu->arch.host_cp0_badvaddr = addr;
vcpu->arch.pc = curr_pc;
- kvm_mips_emulate_tlbmiss_ld(cause, NULL, run, vcpu);
+ kvm_mips_emulate_tlbmiss_ld(cause, NULL, vcpu);
return EMULATE_EXCEPT;
case KVM_MIPS_TLBINV:
/* invalid matching guest TLB */
vcpu->arch.host_cp0_badvaddr = addr;
vcpu->arch.pc = curr_pc;
- kvm_mips_emulate_tlbinv_ld(cause, NULL, run, vcpu);
+ kvm_mips_emulate_tlbinv_ld(cause, NULL, vcpu);
return EMULATE_EXCEPT;
default:
break;
enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
u32 *opc, u32 cause,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DONE;
* guest's behalf.
*/
er = kvm_mips_guest_cache_op(protected_writeback_dcache_line,
- curr_pc, va, run, vcpu, cause);
+ curr_pc, va, vcpu, cause);
if (er != EMULATE_DONE)
goto done;
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
} else if (op_inst == Hit_Invalidate_I) {
/* Perform the icache synchronisation on the guest's behalf */
er = kvm_mips_guest_cache_op(protected_writeback_dcache_line,
- curr_pc, va, run, vcpu, cause);
+ curr_pc, va, vcpu, cause);
if (er != EMULATE_DONE)
goto done;
er = kvm_mips_guest_cache_op(protected_flush_icache_line,
- curr_pc, va, run, vcpu, cause);
+ curr_pc, va, vcpu, cause);
if (er != EMULATE_DONE)
goto done;
}
enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
union mips_instruction inst;
switch (inst.r_format.opcode) {
case cop0_op:
- er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
+ er = kvm_mips_emulate_CP0(inst, opc, cause, vcpu);
break;
#ifndef CONFIG_CPU_MIPSR6
case cache_op:
++vcpu->stat.cache_exits;
trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
- er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
+ er = kvm_mips_emulate_cache(inst, opc, cause, vcpu);
break;
#else
case spec3_op:
case cache6_op:
++vcpu->stat.cache_exits;
trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
- er = kvm_mips_emulate_cache(inst, opc, cause, run,
+ er = kvm_mips_emulate_cache(inst, opc, cause,
vcpu);
break;
default:
enum emulation_result kvm_mips_emulate_syscall(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
}
enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
* branch target), and pass the RI exception to the guest OS.
*/
vcpu->arch.pc = curr_pc;
- return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
+ return kvm_mips_emulate_ri_exc(cause, opc, vcpu);
}
-enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
- struct kvm_run *run)
+enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu)
{
+ struct kvm_run *run = vcpu->run;
unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
enum emulation_result er = EMULATE_DONE;
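kvm_mips_complete_mmio_load() runs on re-entry after userspace has serviced a read; everything it needs (the destination GPR and the data buffer) lives in the vCPU and its run page, so the second parameter was pure redundancy. A simplified sketch of the completion step, not the verbatim body:

	switch (run->mmio.len) {
	case 4:
		*gpr = *(s32 *)run->mmio.data;	/* sign-extend the loaded word */
		break;
	case 2:
		*gpr = *(s16 *)run->mmio.data;
		break;
	case 1:
		*gpr = *(s8 *)run->mmio.data;
		break;
	}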
static enum emulation_result kvm_mips_emulate_exc(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
enum emulation_result kvm_mips_check_privilege(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DONE;
}
if (er == EMULATE_PRIV_FAIL)
- kvm_mips_emulate_exc(cause, opc, run, vcpu);
+ kvm_mips_emulate_exc(cause, opc, vcpu);
return er;
}
*/
enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu,
bool write_fault)
{
KVM_ENTRYHI_ASID));
if (index < 0) {
if (exccode == EXCCODE_TLBL) {
- er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
+ er = kvm_mips_emulate_tlbmiss_ld(cause, opc, vcpu);
} else if (exccode == EXCCODE_TLBS) {
- er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
+ er = kvm_mips_emulate_tlbmiss_st(cause, opc, vcpu);
} else {
kvm_err("%s: invalid exc code: %d\n", __func__,
exccode);
*/
if (!TLB_IS_VALID(*tlb, va)) {
if (exccode == EXCCODE_TLBL) {
- er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
+ er = kvm_mips_emulate_tlbinv_ld(cause, opc,
vcpu);
} else if (exccode == EXCCODE_TLBS) {
- er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
+ er = kvm_mips_emulate_tlbinv_st(cause, opc,
vcpu);
} else {
kvm_err("%s: invalid exc code: %d\n", __func__,
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
- struct kvm_run *run = vcpu->run;
int r = -EINTR;
vcpu_load(vcpu);
if (vcpu->mmio_needed) {
if (!vcpu->mmio_is_write)
- kvm_mips_complete_mmio_load(vcpu, run);
+ kvm_mips_complete_mmio_load(vcpu);
vcpu->mmio_needed = 0;
}
- if (run->immediate_exit)
+ if (vcpu->run->immediate_exit)
goto out;
lose_fpu(1);
*/
smp_store_mb(vcpu->mode, IN_GUEST_MODE);
- r = kvm_mips_callbacks->vcpu_run(run, vcpu);
+ r = kvm_mips_callbacks->vcpu_run(vcpu);
trace_kvm_out(vcpu);
guest_exit_irqoff();
* end up causing an exception to be delivered to the Guest
* Kernel
*/
- er = kvm_mips_check_privilege(cause, opc, run, vcpu);
+ er = kvm_mips_check_privilege(cause, opc, vcpu);
if (er == EMULATE_PRIV_FAIL) {
goto skip_emul;
} else if (er == EMULATE_FAIL) {
*/
smp_store_mb(vcpu->mode, IN_GUEST_MODE);
- kvm_mips_callbacks->vcpu_reenter(run, vcpu);
+ kvm_mips_callbacks->vcpu_reenter(vcpu);
/*
* If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_run *run = vcpu->run;
u32 __user *opc = (u32 __user *) vcpu->arch.pc;
u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
* Unusable/no FPU in guest:
* deliver guest COP1 Unusable Exception
*/
- er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
+ er = kvm_mips_emulate_fpu_exc(cause, opc, vcpu);
} else {
/* Restore FPU state */
kvm_own_fpu(vcpu);
er = EMULATE_DONE;
}
} else {
- er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+ er = kvm_mips_emulate_inst(cause, opc, vcpu);
}
switch (er) {
break;
case EMULATE_FAIL:
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
break;
case EMULATE_WAIT:
- run->exit_reason = KVM_EXIT_INTR;
+ vcpu->run->exit_reason = KVM_EXIT_INTR;
ret = RESUME_HOST;
break;
return ret;
}
-static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_run *run,
- struct kvm_vcpu *vcpu)
+static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_vcpu *vcpu)
{
enum emulation_result er;
union mips_instruction inst;
/* A code fetch fault doesn't count as an MMIO */
if (kvm_is_ifetch_fault(&vcpu->arch)) {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return RESUME_HOST;
}
opc += 1;
err = kvm_get_badinstr(opc, vcpu, &inst.word);
if (err) {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return RESUME_HOST;
}
/* Emulate the load */
- er = kvm_mips_emulate_load(inst, cause, run, vcpu);
+ er = kvm_mips_emulate_load(inst, cause, vcpu);
if (er == EMULATE_FAIL) {
kvm_err("Emulate load from MMIO space failed\n");
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
} else {
- run->exit_reason = KVM_EXIT_MMIO;
+ vcpu->run->exit_reason = KVM_EXIT_MMIO;
}
return RESUME_HOST;
}
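The counterpart of this exit path lives in userspace: the VMM sees KVM_EXIT_MMIO, emulates the access against run->mmio, and calls KVM_RUN again, at which point kvm_arch_vcpu_ioctl_run() finishes the load via kvm_mips_complete_mmio_load(vcpu). A rough userspace sketch, assuming kvm_fd and vcpu_fd are open descriptors and emulate_mmio_read() is a hypothetical device-model helper:

	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <linux/kvm.h>

	size_t run_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	struct kvm_run *run = mmap(NULL, run_size, PROT_READ | PROT_WRITE,
				   MAP_SHARED, vcpu_fd, 0);

	for (;;) {
		ioctl(vcpu_fd, KVM_RUN, 0);
		if (run->exit_reason == KVM_EXIT_MMIO && !run->mmio.is_write)
			/* fill run->mmio.data; the kernel copies it into the
			 * guest GPR on the next KVM_RUN */
			emulate_mmio_read(run->mmio.phys_addr,
					  run->mmio.data, run->mmio.len);
	}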
-static int kvm_mips_bad_store(u32 cause, u32 *opc, struct kvm_run *run,
- struct kvm_vcpu *vcpu)
+static int kvm_mips_bad_store(u32 cause, u32 *opc, struct kvm_vcpu *vcpu)
{
enum emulation_result er;
union mips_instruction inst;
opc += 1;
err = kvm_get_badinstr(opc, vcpu, &inst.word);
if (err) {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return RESUME_HOST;
}
/* Emulate the store */
- er = kvm_mips_emulate_store(inst, cause, run, vcpu);
+ er = kvm_mips_emulate_store(inst, cause, vcpu);
if (er == EMULATE_FAIL) {
kvm_err("Emulate store to MMIO space failed\n");
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
} else {
- run->exit_reason = KVM_EXIT_MMIO;
+ vcpu->run->exit_reason = KVM_EXIT_MMIO;
}
return RESUME_HOST;
}
-static int kvm_mips_bad_access(u32 cause, u32 *opc, struct kvm_run *run,
+static int kvm_mips_bad_access(u32 cause, u32 *opc,
struct kvm_vcpu *vcpu, bool store)
{
if (store)
- return kvm_mips_bad_store(cause, opc, run, vcpu);
+ return kvm_mips_bad_store(cause, opc, vcpu);
else
- return kvm_mips_bad_load(cause, opc, run, vcpu);
+ return kvm_mips_bad_load(cause, opc, vcpu);
}
static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_run *run = vcpu->run;
u32 __user *opc = (u32 __user *) vcpu->arch.pc;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
u32 cause = vcpu->arch.host_cp0_cause;
* They would indicate stale host TLB entries.
*/
if (unlikely(index < 0)) {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return RESUME_HOST;
}
tlb = vcpu->arch.guest_tlb + index;
if (unlikely(!TLB_IS_VALID(*tlb, badvaddr))) {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return RESUME_HOST;
}
* exception. Relay that on to the guest so it can handle it.
*/
if (!TLB_IS_DIRTY(*tlb, badvaddr)) {
- kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
+ kvm_mips_emulate_tlbmod(cause, opc, vcpu);
return RESUME_GUEST;
}
if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, badvaddr,
true))
/* Not writable, needs handling as MMIO */
- return kvm_mips_bad_store(cause, opc, run, vcpu);
+ return kvm_mips_bad_store(cause, opc, vcpu);
return RESUME_GUEST;
} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, true) < 0)
/* Not writable, needs handling as MMIO */
- return kvm_mips_bad_store(cause, opc, run, vcpu);
+ return kvm_mips_bad_store(cause, opc, vcpu);
return RESUME_GUEST;
} else {
/* host kernel addresses are all handled as MMIO */
- return kvm_mips_bad_store(cause, opc, run, vcpu);
+ return kvm_mips_bad_store(cause, opc, vcpu);
}
}
* into the shadow host TLB
*/
- er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu, store);
+ er = kvm_mips_handle_tlbmiss(cause, opc, vcpu, store);
if (er == EMULATE_DONE)
ret = RESUME_GUEST;
else {
* not expect to ever get them
*/
if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, store) < 0)
- ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
+ ret = kvm_mips_bad_access(cause, opc, vcpu, store);
} else if (KVM_GUEST_KERNEL_MODE(vcpu)
&& (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
/*
* With EVA we may get a TLB exception instead of an address
* error when the guest performs MMIO to KSeg1 addresses.
*/
- ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
+ ret = kvm_mips_bad_access(cause, opc, vcpu, store);
} else {
kvm_err("Illegal TLB %s fault address , cause %#x, PC: %p, BadVaddr: %#lx\n",
store ? "ST" : "LD", cause, opc, badvaddr);
static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
{
- struct kvm_run *run = vcpu->run;
u32 __user *opc = (u32 __user *) vcpu->arch.pc;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
u32 cause = vcpu->arch.host_cp0_cause;
if (KVM_GUEST_KERNEL_MODE(vcpu)
&& (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
- ret = kvm_mips_bad_store(cause, opc, run, vcpu);
+ ret = kvm_mips_bad_store(cause, opc, vcpu);
} else {
kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n",
cause, opc, badvaddr);
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
{
- struct kvm_run *run = vcpu->run;
u32 __user *opc = (u32 __user *) vcpu->arch.pc;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
u32 cause = vcpu->arch.host_cp0_cause;
int ret = RESUME_GUEST;
if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
- ret = kvm_mips_bad_load(cause, opc, run, vcpu);
+ ret = kvm_mips_bad_load(cause, opc, vcpu);
} else {
kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n",
cause, opc, badvaddr);
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
{
- struct kvm_run *run = vcpu->run;
u32 __user *opc = (u32 __user *) vcpu->arch.pc;
u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
- er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
+ er = kvm_mips_emulate_syscall(cause, opc, vcpu);
if (er == EMULATE_DONE)
ret = RESUME_GUEST;
else {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
{
- struct kvm_run *run = vcpu->run;
u32 __user *opc = (u32 __user *) vcpu->arch.pc;
u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
- er = kvm_mips_handle_ri(cause, opc, run, vcpu);
+ er = kvm_mips_handle_ri(cause, opc, vcpu);
if (er == EMULATE_DONE)
ret = RESUME_GUEST;
else {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
{
- struct kvm_run *run = vcpu->run;
u32 __user *opc = (u32 __user *) vcpu->arch.pc;
u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
- er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
+ er = kvm_mips_emulate_bp_exc(cause, opc, vcpu);
if (er == EMULATE_DONE)
ret = RESUME_GUEST;
else {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
{
- struct kvm_run *run = vcpu->run;
u32 __user *opc = (u32 __user *)vcpu->arch.pc;
u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
- er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
+ er = kvm_mips_emulate_trap_exc(cause, opc, vcpu);
if (er == EMULATE_DONE) {
ret = RESUME_GUEST;
} else {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
{
- struct kvm_run *run = vcpu->run;
u32 __user *opc = (u32 __user *)vcpu->arch.pc;
u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
- er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
+ er = kvm_mips_emulate_msafpe_exc(cause, opc, vcpu);
if (er == EMULATE_DONE) {
ret = RESUME_GUEST;
} else {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
{
- struct kvm_run *run = vcpu->run;
u32 __user *opc = (u32 __user *)vcpu->arch.pc;
u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
- er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
+ er = kvm_mips_emulate_fpe_exc(cause, opc, vcpu);
if (er == EMULATE_DONE) {
ret = RESUME_GUEST;
} else {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_run *run = vcpu->run;
u32 __user *opc = (u32 __user *) vcpu->arch.pc;
u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
* No MSA in guest, or FPU enabled and not in FR=1 mode,
* guest reserved instruction exception
*/
- er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
+ er = kvm_mips_emulate_ri_exc(cause, opc, vcpu);
} else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
/* MSA disabled by guest, guest MSA disabled exception */
- er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
+ er = kvm_mips_emulate_msadis_exc(cause, opc, vcpu);
} else {
/* Restore MSA/FPU state */
kvm_own_msa(vcpu);
break;
case EMULATE_FAIL:
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
break;
local_irq_enable();
}
-static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
- struct kvm_vcpu *vcpu)
+static void kvm_trap_emul_vcpu_reenter(struct kvm_vcpu *vcpu)
{
struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
check_mmu_context(mm);
}
-static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static int kvm_trap_emul_vcpu_run(struct kvm_vcpu *vcpu)
{
int cpu = smp_processor_id();
int r;
kvm_mips_deliver_interrupts(vcpu,
kvm_read_c0_guest_cause(vcpu->arch.cop0));
- kvm_trap_emul_vcpu_reenter(run, vcpu);
+ kvm_trap_emul_vcpu_reenter(vcpu);
/*
* We use user accessors to access guest memory, but we don't want to
*/
kvm_mips_suspend_mm(cpu);
- r = vcpu->arch.vcpu_run(run, vcpu);
+ r = vcpu->arch.vcpu_run(vcpu->run, vcpu);
/* We may have migrated while handling guest exits */
cpu = smp_processor_id();
static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
u32 *opc, u32 cause,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst,
u32 *opc, u32 cause,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DONE;
{
enum emulation_result er = EMULATE_DONE;
struct kvm_vcpu_arch *arch = &vcpu->arch;
- struct kvm_run *run = vcpu->run;
union mips_instruction inst;
int rd, rt, sel;
int err;
switch (inst.r_format.opcode) {
case cop0_op:
- er = kvm_vz_gpsi_cop0(inst, opc, cause, run, vcpu);
+ er = kvm_vz_gpsi_cop0(inst, opc, cause, vcpu);
break;
#ifndef CONFIG_CPU_MIPSR6
case cache_op:
trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
- er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
+ er = kvm_vz_gpsi_cache(inst, opc, cause, vcpu);
break;
#endif
#ifdef CONFIG_CPU_LOONGSON64
#ifdef CONFIG_CPU_MIPSR6
case cache6_op:
trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
- er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
+ er = kvm_vz_gpsi_cache(inst, opc, cause, vcpu);
break;
#endif
case rdhwr_op:
*/
static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
- struct kvm_run *run = vcpu->run;
u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_FAIL;
int ret = RESUME_GUEST;
break;
case EMULATE_FAIL:
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
break;
*/
static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
- struct kvm_run *run = vcpu->run;
-
/*
* If MSA not present or not exposed to guest or FR=0, the MSA operation
* should have been treated as a reserved instruction!
(read_gc0_status() & (ST0_CU1 | ST0_FR)) == ST0_CU1 ||
!(read_gc0_config5() & MIPS_CONF5_MSAEN) ||
vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return RESUME_HOST;
}
}
/* Treat as MMIO */
- er = kvm_mips_emulate_load(inst, cause, run, vcpu);
+ er = kvm_mips_emulate_load(inst, cause, vcpu);
if (er == EMULATE_FAIL) {
kvm_err("Guest Emulate Load from MMIO space failed: PC: %p, BadVaddr: %#lx\n",
opc, badvaddr);
}
/* Treat as MMIO */
- er = kvm_mips_emulate_store(inst, cause, run, vcpu);
+ er = kvm_mips_emulate_store(inst, cause, vcpu);
if (er == EMULATE_FAIL) {
kvm_err("Guest Emulate Store to MMIO space failed: PC: %p, BadVaddr: %#lx\n",
opc, badvaddr);
kvm_vz_flush_shadow_all(kvm);
}
-static void kvm_vz_vcpu_reenter(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static void kvm_vz_vcpu_reenter(struct kvm_vcpu *vcpu)
{
int cpu = smp_processor_id();
int preserve_guest_tlb;
kvm_vz_vcpu_load_wired(vcpu);
}
-static int kvm_vz_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static int kvm_vz_vcpu_run(struct kvm_vcpu *vcpu)
{
int cpu = smp_processor_id();
int r;
kvm_vz_vcpu_load_tlb(vcpu, cpu);
kvm_vz_vcpu_load_wired(vcpu);
- r = vcpu->arch.vcpu_run(run, vcpu);
+ r = vcpu->arch.vcpu_run(vcpu->run, vcpu);
kvm_vz_vcpu_save_wired(vcpu);
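One asymmetry worth noting: both kvm_trap_emul_vcpu_run() and kvm_vz_vcpu_run() still pass the run pointer explicitly, as vcpu->arch.vcpu_run(vcpu->run, vcpu). That inner hook points at the generated low-level guest-entry code, whose two-argument ABI is not touched by this patch; only the C-level callers stop threading run around. The field presumably keeps its old shape, i.e. something like:

	/* in struct kvm_vcpu_arch: low-level entry point, ABI unchanged here */
	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);

so the run pointer is now sourced at the call site from vcpu->run rather than handed down from the ioctl layer.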