static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
{
if (vcpu->arch.sysregs_loaded_on_cpu)
return read_sysreg_el1(SYS_SPSR);
else
- return vcpu->arch.ctxt.spsr_el1;
+ return __vcpu_sys_reg(vcpu, SPSR_EL1);
}
static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
{
if (vcpu->arch.sysregs_loaded_on_cpu)
write_sysreg_el1(v, SYS_SPSR);
else
- vcpu->arch.ctxt.spsr_el1 = v;
+ __vcpu_sys_reg(vcpu, SPSR_EL1) = v;
}
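Note: __vcpu_sys_reg() resolves to a slot in the per-vCPU sys_regs array. As a point of reference, the accessors introduced earlier in this series look roughly like this in arch/arm64/include/asm/kvm_host.h (a sketch, not quoted verbatim):

#define __ctxt_sys_reg(c,r)	(&(c)->sys_regs[(r)])
#define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))
#define __vcpu_sys_reg(v,r)	(ctxt_sys_reg(&(v)->arch.ctxt, (r)))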
ELR_EL1,
SP_EL1,
+ SPSR_EL1,
/* 32bit specific registers. Keep them at the end of the range */
DACR32_EL2, /* Domain Access Control Register */
struct kvm_cpu_context {
struct user_pt_regs regs; /* sp = sp_el0 */
- u64 spsr_el1; /* aka spsr_svc */
u64 spsr_abt;
u64 spsr_und;
u64 spsr_irq;
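With the named field gone, the value lives in ctxt.sys_regs[SPSR_EL1], indexed by the enum entry added above; the AArch32 banked SPSRs (abt/und/irq/fiq) keep their dedicated fields here. A minimal sketch of the equivalent access before and after this change:

	/* Before this patch: a direct field in kvm_cpu_context. */
	u64 old_way = vcpu->arch.ctxt.spsr_el1;
	/* After it: the same value through the sys_regs array. */
	u64 new_way = __vcpu_sys_reg(vcpu, SPSR_EL1);	/* ctxt.sys_regs[SPSR_EL1] */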
return __ctxt_sys_reg(&vcpu->arch.ctxt, ELR_EL1);
case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_EL1]):
- return &vcpu->arch.ctxt.spsr_el1;
+ return __ctxt_sys_reg(&vcpu->arch.ctxt, SPSR_EL1);
case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_ABT]):
return &vcpu->arch.ctxt.spsr_abt;
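Both arms of this case still return a u64 * into the vCPU context, so the KVM_GET_ONE_REG/KVM_SET_ONE_REG ABI is unchanged: userspace keeps naming the register by its offset into struct kvm_regs. An arm64-only userspace sketch, for illustration (vcpu_fd is a hypothetical, already-created vCPU descriptor):

#include <linux/kvm.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Mirrors the kernel's KVM_REG_ARM_CORE_REG(): the offset into
 * struct kvm_regs, counted in 32-bit words, plus core-reg flags. */
#define ARM64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
			   KVM_REG_ARM_CORE | \
			   (offsetof(struct kvm_regs, x) / sizeof(__u32)))

/* Read the guest's SPSR_EL1; returns 0 on success, -1 with errno set. */
static int read_guest_spsr_el1(int vcpu_fd, uint64_t *val)
{
	struct kvm_one_reg reg = {
		.id   = ARM64_CORE_REG(spsr[KVM_SPSR_EL1]),
		.addr = (uintptr_t)val,
	};
	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}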
ctxt_sys_reg(ctxt, SP_EL1) = read_sysreg(sp_el1);
ctxt_sys_reg(ctxt, ELR_EL1) = read_sysreg_el1(SYS_ELR);
- ctxt->spsr_el1 = read_sysreg_el1(SYS_SPSR);
+ ctxt_sys_reg(ctxt, SPSR_EL1) = read_sysreg_el1(SYS_SPSR);
}
static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
write_sysreg(ctxt_sys_reg(ctxt, SP_EL1), sp_el1);
write_sysreg_el1(ctxt_sys_reg(ctxt, ELR_EL1), SYS_ELR);
- write_sysreg_el1(ctxt->spsr_el1, SYS_SPSR);
+ write_sysreg_el1(ctxt_sys_reg(ctxt, SPSR_EL1), SYS_SPSR);
}
static inline void __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
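The two hunks above touch the EL1 context save/restore helpers, and they are what gives the sysregs_loaded_on_cpu checks earlier their meaning: on VHE the guest context can stay resident in hardware between exits, where read/write_sysreg_el1(SYS_SPSR) reach it through the SPSR_EL12 encoding, and the helpers only run when the context moves between CPU and memory. A hypothetical sketch of the intended pairing (the real call sites are the VHE vCPU load/put paths):

/* Hypothetical illustration only; not the actual kernel call sites. */
static void demo_load_guest_sysregs(struct kvm_vcpu *vcpu)
{
	__sysreg_restore_el1_state(&vcpu->arch.ctxt);	/* memory -> CPU */
	vcpu->arch.sysregs_loaded_on_cpu = true;
}

static void demo_put_guest_sysregs(struct kvm_vcpu *vcpu)
{
	__sysreg_save_el1_state(&vcpu->arch.ctxt);	/* CPU -> memory */
	vcpu->arch.sysregs_loaded_on_cpu = false;
}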
unsigned long vcpu_read_spsr32(const struct kvm_vcpu *vcpu)
{
int spsr_idx = vcpu_spsr32_mode(vcpu);
if (!vcpu->arch.sysregs_loaded_on_cpu) {
switch (spsr_idx) {
case KVM_SPSR_SVC:
- return vcpu->arch.ctxt.spsr_el1;
+ return __vcpu_sys_reg(vcpu, SPSR_EL1);
case KVM_SPSR_ABT:
return vcpu->arch.ctxt.spsr_abt;
case KVM_SPSR_UND:
void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v)
{
int spsr_idx = vcpu_spsr32_mode(vcpu);
if (!vcpu->arch.sysregs_loaded_on_cpu) {
switch (spsr_idx) {
case KVM_SPSR_SVC:
- vcpu->arch.ctxt.spsr_el1 = v;
+ __vcpu_sys_reg(vcpu, SPSR_EL1) = v;
break;
case KVM_SPSR_ABT:
vcpu->arch.ctxt.spsr_abt = v;
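For completeness: spsr_idx in both helpers comes from vcpu_spsr32_mode(), which maps the guest's AArch32 PSTATE mode bits to a KVM_SPSR_* index; KVM_SPSR_SVC is the alias that now lands in SPSR_EL1. A sketch of its shape, assuming the regmap.c version this patch applies to:

static int vcpu_spsr32_mode(const struct kvm_vcpu *vcpu)
{
	unsigned long mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;

	switch (mode) {
	case PSR_AA32_MODE_SVC:	return KVM_SPSR_SVC;	/* backed by SPSR_EL1 */
	case PSR_AA32_MODE_ABT:	return KVM_SPSR_ABT;
	case PSR_AA32_MODE_UND:	return KVM_SPSR_UND;
	case PSR_AA32_MODE_IRQ:	return KVM_SPSR_IRQ;
	case PSR_AA32_MODE_FIQ:	return KVM_SPSR_FIQ;
	default: BUG();
	}
}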