mtmsrd r0,1
.endm
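+/*
+ * Load the guest's non-volatile GPRs (r14-r31, callee-saved in the
+ * 64-bit ELF ABI) from its vcpu struct. Pulled out into a macro so
+ * both the initial entry and the heavyweight re-entry loop below can
+ * share it; lightweight exits leave these registers untouched.
+ */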
+#define VCPU_LOAD_NVGPRS(vcpu) \
+ ld r14, VCPU_GPR(r14)(vcpu); \
+ ld r15, VCPU_GPR(r15)(vcpu); \
+ ld r16, VCPU_GPR(r16)(vcpu); \
+ ld r17, VCPU_GPR(r17)(vcpu); \
+ ld r18, VCPU_GPR(r18)(vcpu); \
+ ld r19, VCPU_GPR(r19)(vcpu); \
+ ld r20, VCPU_GPR(r20)(vcpu); \
+ ld r21, VCPU_GPR(r21)(vcpu); \
+ ld r22, VCPU_GPR(r22)(vcpu); \
+ ld r23, VCPU_GPR(r23)(vcpu); \
+ ld r24, VCPU_GPR(r24)(vcpu); \
+ ld r25, VCPU_GPR(r25)(vcpu); \
+ ld r26, VCPU_GPR(r26)(vcpu); \
+ ld r27, VCPU_GPR(r27)(vcpu); \
+ ld r28, VCPU_GPR(r28)(vcpu); \
+ ld r29, VCPU_GPR(r29)(vcpu); \
+ ld r30, VCPU_GPR(r30)(vcpu); \
+ ld r31, VCPU_GPR(r31)(vcpu)
+
/*****************************************************************************
* *
* Guest entry / exit code that is in kernel module memory (highmem) *
SAVE_NVGPRS(r1)
/* Save LR */
- mflr r14
- std r14, _LINK(r1)
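+ /* NB: assumes the prologue above has already moved LR into r0 */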
+ std r0, _LINK(r1)
+
+ /* Load non-volatile guest state from the vcpu */
+ VCPU_LOAD_NVGPRS(r4)
-/* XXX optimize non-volatile loading away */
kvm_start_lightweight:
+ ld r9, VCPU_PC(r4) /* r9 = vcpu->arch.pc */
+ ld r10, VCPU_SHADOW_MSR(r4) /* r10 = vcpu->arch.shadow_msr */
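+ /* r9/r10 stay live from here into the entry trampoline, which
+ * presumably uses them to set up the guest PC and MSR */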
+
DISABLE_INTERRUPTS
/* Save R1/R2 in the PACA */
ld r3, VCPU_HIGHMEM_HANDLER(r4)
std r3, PACASAVEDMSR(r13)
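+ /* stash the highmem exit handler in the PACA, seemingly so the
+ * real-mode exit path can find its way back to it */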
- /* Load non-volatile guest state from the vcpu */
- ld r14, VCPU_GPR(r14)(r4)
- ld r15, VCPU_GPR(r15)(r4)
- ld r16, VCPU_GPR(r16)(r4)
- ld r17, VCPU_GPR(r17)(r4)
- ld r18, VCPU_GPR(r18)(r4)
- ld r19, VCPU_GPR(r19)(r4)
- ld r20, VCPU_GPR(r20)(r4)
- ld r21, VCPU_GPR(r21)(r4)
- ld r22, VCPU_GPR(r22)(r4)
- ld r23, VCPU_GPR(r23)(r4)
- ld r24, VCPU_GPR(r24)(r4)
- ld r25, VCPU_GPR(r25)(r4)
- ld r26, VCPU_GPR(r26)(r4)
- ld r27, VCPU_GPR(r27)(r4)
- ld r28, VCPU_GPR(r28)(r4)
- ld r29, VCPU_GPR(r29)(r4)
- ld r30, VCPU_GPR(r30)(r4)
- ld r31, VCPU_GPR(r31)(r4)
-
- ld r9, VCPU_PC(r4) /* r9 = vcpu->arch.pc */
- ld r10, VCPU_SHADOW_MSR(r4) /* r10 = vcpu->arch.shadow_msr */
-
ld r3, VCPU_TRAMPOLINE_ENTER(r4)
mtsrr0 r3
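+ /* SRR0 now holds the entry trampoline address; the rfi that
+ * follows will branch there */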
no_dcbz32_off:
- /* XXX maybe skip on lightweight? */
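+ /* Save guest non-volatile GPRs back into the vcpu (r12 = vcpu
+ * on this exit path) */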
std r14, VCPU_GPR(r14)(r12)
std r15, VCPU_GPR(r15)(r12)
std r16, VCPU_GPR(r16)(r12)
std r30, VCPU_GPR(r30)(r12)
std r31, VCPU_GPR(r31)(r12)
- /* Restore non-volatile host registers (r14 - r31) */
- REST_NVGPRS(r1)
-
/* Save guest PC (R10) */
std r10, VCPU_PC(r12)
/* Jump back to lightweight entry if we're supposed to go back into the guest */
+
+ /* Pass the exit number as 3rd argument to kvmppc_handle_exit */
mr r5, r3
+
/* Restore r3 (kvm_run) and r4 (vcpu) */
REST_2GPRS(3, r1)
bl KVMPPC_HANDLE_EXIT
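+ /* r3 now holds the RESUME_* code returned by kvmppc_handle_exit */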
-#if 0 /* XXX get lightweight exits back */
+ /* If RESUME_GUEST, get back into the lightweight loop */
cmpwi r3, RESUME_GUEST
- bne kvm_exit_heavyweight
+ beq kvm_loop_lightweight
- /* put VCPU and KVM_RUN back into place and roll again! */
- REST_2GPRS(3, r1)
- b kvm_start_lightweight
+ cmpwi r3, RESUME_GUEST_NV
+ beq kvm_loop_heavyweight
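+ /* anything else falls through to kvm_exit_loop and returns to the host */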
-kvm_exit_heavyweight:
- /* Restore non-volatile host registers */
- ld r14, _LINK(r1)
- mtlr r14
- REST_NVGPRS(r1)
+kvm_exit_loop:
- addi r1, r1, SWITCH_FRAME_SIZE
-#else
ld r4, _LINK(r1)
mtlr r4
- cmpwi r3, RESUME_GUEST
- bne kvm_exit_heavyweight
+ /* Restore non-volatile host registers (r14 - r31) */
+ REST_NVGPRS(r1)
+
+ addi r1, r1, SWITCH_FRAME_SIZE
+ blr
+
+kvm_loop_heavyweight:
+ ld r4, _LINK(r1)
+ std r4, (16 + SWITCH_FRAME_SIZE)(r1)
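+ /* save LR in the LR save slot of the caller's frame, 16 bytes
+ * past our SWITCH_FRAME per the 64-bit ELF ABI stack layout */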
+
+ /* Load r3 (kvm_run) and r4 (vcpu) */
REST_2GPRS(3, r1)
- addi r1, r1, SWITCH_FRAME_SIZE
+ /* Load non-volatile guest state from the vcpu */
+ VCPU_LOAD_NVGPRS(r4)
- b kvm_start_entry
+ /* Jump back to the lightweight entry point */
+ b kvm_start_lightweight
-kvm_exit_heavyweight:
+kvm_loop_lightweight:
- addi r1, r1, SWITCH_FRAME_SIZE
-#endif
+ /* We'll need the vcpu pointer */
+ REST_GPR(4, r1)
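+ /* guest NVGPRs are still loaded from the last entry, so the
+ * VCPU_LOAD_NVGPRS step can be skipped here */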
+
+ /* Jump back to the lightweight entry point */
+ b kvm_start_lightweight
- blr