Merge tag 'powerpc-6.4-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
[platform/kernel/linux-rpi.git] / arch / powerpc / kvm / book3s_hv.c
index 3b70b5f..36b295e 100644 (file)
@@ -477,7 +477,7 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
        for (r = 0; r < vcpu->arch.slb_max; ++r)
                pr_err("  ESID = %.16llx VSID = %.16llx\n",
                       vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
-       pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
+       pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.16lx\n",
               vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
               vcpu->arch.last_inst);
 }
@@ -1415,7 +1415,7 @@ static int kvmppc_hcall_impl_hv(unsigned long cmd)
 
 static int kvmppc_emulate_debug_inst(struct kvm_vcpu *vcpu)
 {
-       u32 last_inst;
+       ppc_inst_t last_inst;
 
        if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) !=
                                        EMULATE_DONE) {
@@ -1426,12 +1426,13 @@ static int kvmppc_emulate_debug_inst(struct kvm_vcpu *vcpu)
                return RESUME_GUEST;
        }
 
-       if (last_inst == KVMPPC_INST_SW_BREAKPOINT) {
+       if (ppc_inst_val(last_inst) == KVMPPC_INST_SW_BREAKPOINT) {
                vcpu->run->exit_reason = KVM_EXIT_DEBUG;
                vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu);
                return RESUME_HOST;
        } else {
-               kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+               kvmppc_core_queue_program(vcpu, SRR1_PROGILL |
+                               (kvmppc_get_msr(vcpu) & SRR1_PREFIXED));
                return RESUME_GUEST;
        }
 }
@@ -1479,9 +1480,11 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
        unsigned long arg;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_vcpu *tvcpu;
+       ppc_inst_t pinst;
 
-       if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE)
+       if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst) != EMULATE_DONE)
                return RESUME_GUEST;
+       inst = ppc_inst_val(pinst);
        if (get_op(inst) != 31)
                return EMULATE_FAIL;
        rb = get_rb(inst);
@@ -1633,7 +1636,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
                 * so that it knows that the machine check occurred.
                 */
                if (!vcpu->kvm->arch.fwnmi_enabled) {
-                       ulong flags = vcpu->arch.shregs.msr & 0x083c0000;
+                       ulong flags = (vcpu->arch.shregs.msr & 0x083c0000) |
+                                       (kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
                        kvmppc_core_queue_machine_check(vcpu, flags);
                        r = RESUME_GUEST;
                        break;
@@ -1662,7 +1666,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
                 * as a result of a hypervisor emulation interrupt
                 * (e40) getting turned into a 700 by BML RTAS.
                 */
-               flags = vcpu->arch.shregs.msr & 0x1f0000ull;
+               flags = (vcpu->arch.shregs.msr & 0x1f0000ull) |
+                       (kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
                kvmppc_core_queue_program(vcpu, flags);
                r = RESUME_GUEST;
                break;
@@ -1743,6 +1748,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 
                if (!(vcpu->arch.fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT))) {
                        kvmppc_core_queue_data_storage(vcpu,
+                               kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
                                vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
                        r = RESUME_GUEST;
                        break;
@@ -1761,6 +1767,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
                        r = RESUME_PAGE_FAULT;
                } else {
                        kvmppc_core_queue_data_storage(vcpu,
+                               kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
                                vcpu->arch.fault_dar, err);
                        r = RESUME_GUEST;
                }
@@ -1788,7 +1795,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 
                if (!(vcpu->arch.fault_dsisr & SRR1_ISI_NOPT)) {
                        kvmppc_core_queue_inst_storage(vcpu,
-                               vcpu->arch.fault_dsisr);
+                               vcpu->arch.fault_dsisr |
+                               (kvmppc_get_msr(vcpu) & SRR1_PREFIXED));
                        r = RESUME_GUEST;
                        break;
                }
@@ -1805,7 +1813,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
                } else if (err == -1) {
                        r = RESUME_PAGE_FAULT;
                } else {
-                       kvmppc_core_queue_inst_storage(vcpu, err);
+                       kvmppc_core_queue_inst_storage(vcpu,
+                               err | (kvmppc_get_msr(vcpu) & SRR1_PREFIXED));
                        r = RESUME_GUEST;
                }
                break;
@@ -1826,7 +1835,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
                if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
                        r = kvmppc_emulate_debug_inst(vcpu);
                } else {
-                       kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+                       kvmppc_core_queue_program(vcpu, SRR1_PROGILL |
+                               (kvmppc_get_msr(vcpu) & SRR1_PREFIXED));
                        r = RESUME_GUEST;
                }
                break;
@@ -1867,7 +1877,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
                                r = kvmppc_tm_unavailable(vcpu);
                }
                if (r == EMULATE_FAIL) {
-                       kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+                       kvmppc_core_queue_program(vcpu, SRR1_PROGILL |
+                               (kvmppc_get_msr(vcpu) & SRR1_PREFIXED));
                        r = RESUME_GUEST;
                }
                break;
@@ -1997,14 +2008,15 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
                 */
                if (!(vcpu->arch.hfscr_permitted & (1UL << cause)) ||
                                (vcpu->arch.nested_hfscr & (1UL << cause))) {
+                       ppc_inst_t pinst;
                        vcpu->arch.trap = BOOK3S_INTERRUPT_H_EMUL_ASSIST;
 
                        /*
                         * If the fetch failed, return to guest and
                         * try executing it again.
                         */
-                       r = kvmppc_get_last_inst(vcpu, INST_GENERIC,
-                                                &vcpu->arch.emul_inst);
+                       r = kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst);
+                       vcpu->arch.emul_inst = ppc_inst_val(pinst);
                        if (r != EMULATE_DONE)
                                r = RESUME_GUEST;
                        else
@@ -2921,13 +2933,18 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
 
        /*
         * Set the default HFSCR for the guest from the host value.
-        * This value is only used on POWER9.
-        * On POWER9, we want to virtualize the doorbell facility, so we
+        * This value is only used on POWER9 and later.
+        * On >= POWER9, we want to virtualize the doorbell facility, so we
         * don't set the HFSCR_MSGP bit, and that causes those instructions
         * to trap and then we emulate them.
         */
        vcpu->arch.hfscr = HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB |
                HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP;
+
+       /* On POWER10 and later, allow prefixed instructions */
+       if (cpu_has_feature(CPU_FTR_ARCH_31))
+               vcpu->arch.hfscr |= HFSCR_PREFIX;
+
        if (cpu_has_feature(CPU_FTR_HVMODE)) {
                vcpu->arch.hfscr &= mfspr(SPRN_HFSCR);
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM