	/* Mark this VCPU ran at least once */
	vcpu->arch.ran_atleast_once = true;
-	vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	/* Process MMIO value returned from user-space */
	if (run->exit_reason == KVM_EXIT_MMIO) {
		ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run);
		if (ret) {
-			srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
+			srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
			return ret;
		}
	}
	if (run->exit_reason == KVM_EXIT_RISCV_SBI) {
		ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run);
		if (ret) {
-			srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
+			srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
			return ret;
		}
	}
	if (run->immediate_exit) {
-		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
+		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		return -EINTR;
	}
		 */
		vcpu->mode = IN_GUEST_MODE;
-		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
+		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		smp_mb__after_srcu_read_unlock();
		/*
			vcpu->mode = OUTSIDE_GUEST_MODE;
			local_irq_enable();
			preempt_enable();
-			vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+			vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
			continue;
		}
		preempt_enable();
-		vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvm_riscv_vcpu_exit(vcpu, run, &trap);
	}
	vcpu_put(vcpu);
-	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
+	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return ret;
}
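The conversion above works because the generic struct kvm_vcpu in include/linux/kvm_host.h already carries an srcu_idx member, so the RISC-V field in struct kvm_vcpu_arch only duplicates that state. Once every user in vcpu.c reads and writes vcpu->srcu_idx, the arch copy has no users left. A minimal sketch of the matching header cleanup, assuming the field is still declared in arch/riscv/include/asm/kvm_host.h; surrounding context lines are omitted rather than guessed:

--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ struct kvm_vcpu_arch @@
-	int srcu_idx;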
void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu)
{
	if (!kvm_arch_vcpu_runnable(vcpu)) {
-		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
+		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		kvm_vcpu_halt(vcpu);
-		vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
	}
}
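For readers less familiar with SRCU: the reason kvm_riscv_vcpu_wfi() drops the read lock around kvm_vcpu_halt() is that SRCU readers are allowed to sleep, but a reader that sleeps for an unbounded time delays every synchronize_srcu() on that domain; for kvm->srcu that would mean, for example, memslot updates waiting on an idle vCPU. A stand-alone sketch of the drop-and-reacquire pattern, using a hypothetical demo_srcu domain and demo_block_outside_srcu() helper rather than kvm->srcu:

#include <linux/srcu.h>
#include <linux/sched.h>

/* Hypothetical demo SRCU domain; the code above uses vcpu->kvm->srcu. */
DEFINE_STATIC_SRCU(demo_srcu);

/* Sketch of the pattern kvm_riscv_vcpu_wfi() follows around a long sleep. */
static void __maybe_unused demo_block_outside_srcu(void)
{
	int idx;

	/* Read-side critical section: SRCU-protected data is safe to use. */
	idx = srcu_read_lock(&demo_srcu);

	/*
	 * About to sleep for an unbounded time: leave the read-side section
	 * first, otherwise writers waiting in synchronize_srcu(&demo_srcu)
	 * stall until this task wakes up.
	 */
	srcu_read_unlock(&demo_srcu, idx);

	schedule();	/* stand-in for kvm_vcpu_halt() */

	/* Back from the sleep: re-enter the read side before touching data. */
	idx = srcu_read_lock(&demo_srcu);
	srcu_read_unlock(&demo_srcu, idx);
}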