// SPDX-License-Identifier: GPL-2.0
/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <linux/mm_types.h>
#include <linux/pgtable.h>

#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/page-states.h>
#include <asm/gmap.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/sclp.h>
#include <asm/ap.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

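/*
 * Runtime instrumentation is enabled lazily here: the first RI instruction
 * issued by the guest traps, ECB3_RI is set in the SIE control block, and
 * the instruction is retried, so later RI instructions are handled by SIE
 * without further exits.
 */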
static int handle_ri(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ri++;

	if (test_kvm_facility(vcpu->kvm, 64)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)");
		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
		kvm_s390_retry_instr(vcpu);
		return 0;
	} else {
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
	}
}

int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->ipa & 0xf) <= 4)
		return handle_ri(vcpu);
	else
		return -EOPNOTSUPP;
}

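/*
 * Guarded storage is likewise enabled lazily on first use: the host gs_cb
 * is pointed at the run-struct copy, GS is flagged in the SIE control
 * block, and the intercepted instruction is retried.
 */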
static int handle_gs(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_gs++;

	if (test_kvm_facility(vcpu->kvm, 133)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (lazy)");
		preempt_disable();
		__ctl_set_bit(2, 4);
		current->thread.gs_cb = (struct gs_cb *)&vcpu->run->s.regs.gscb;
		restore_gs_cb(current->thread.gs_cb);
		preempt_enable();
		vcpu->arch.sie_block->ecb |= ECB_GS;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
		vcpu->arch.gs_enabled = 1;
		kvm_s390_retry_instr(vcpu);
		return 0;
	} else
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

int kvm_s390_handle_e3(struct kvm_vcpu *vcpu)
{
	int code = vcpu->arch.sie_block->ipb & 0xff;

	if (code == 0x49 || code == 0x4d)
		return handle_gs(vcpu);
	else
		return -EOPNOTSUPP;
}
/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };
	int rc;
	u8 ar;
	u64 op2;

	vcpu->stat.instruction_sck++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (op2 & 7)	/* Operand must be on a doubleword boundary */
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, op2, ar, &gtod.tod, sizeof(gtod.tod));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod);
	/*
	 * To set the TOD clock the kvm lock must be taken, but the vcpu lock
	 * is already held in handle_set_clock. The usual lock order is the
	 * opposite. As SCK is deprecated and should not be used in several
	 * cases, for example when the multiple epoch facility or TOD clock
	 * steering facility is installed (see Principles of Operation), a
	 * slow path can be used. If the lock can not be taken via try_lock,
	 * the instruction will be retried via -EAGAIN at a later point in
	 * time.
	 */
	if (!kvm_s390_try_set_tod_clock(vcpu->kvm, &gtod)) {
		kvm_s390_retry_instr(vcpu);
		return -EAGAIN;
	}

	kvm_s390_set_psw_cc(vcpu, 0);
	return 0;
}

static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	u8 ar;

	vcpu->stat.instruction_spx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	address &= 0x7fffe000u;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	if (kvm_is_error_gpa(vcpu->kvm, address))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}

static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stpx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = kvm_s390_get_prefix(vcpu);

	/* get the value */
	rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}

static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u16 vcpu_id = vcpu->vcpu_id;
	u64 ga;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stap++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (ga & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga);
	trace_kvm_s390_handle_stap(vcpu, ga);
	return 0;
}

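/*
 * First use of a storage-key instruction: convert the guest's memory to a
 * key-capable backing via s390_enable_skey(), then either let SIE interpret
 * the key instructions (storage-key facility, use_skf) or keep intercepting
 * ISKE/SSKE/RRBE here.
 */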
int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu)
{
	int rc;

	trace_kvm_s390_skey_related_inst(vcpu);
	/* Already enabled? */
	if (vcpu->arch.skey_enabled)
		return 0;

	rc = s390_enable_skey();
	VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc);
	if (rc)
		return rc;

	if (kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS))
		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_KSS);
	if (!vcpu->kvm->arch.use_skf)
		vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
	else
		vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
	vcpu->arch.skey_enabled = true;
	return 0;
}

static int try_handle_skey(struct kvm_vcpu *vcpu)
{
	int rc;

	rc = kvm_s390_skey_check_enable(vcpu);
	if (rc)
		return rc;
	if (vcpu->kvm->arch.use_skf) {
		/* with storage-key facility, SIE interprets it for us */
		kvm_s390_retry_instr(vcpu);
		VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
		return -EAGAIN;
	}
	return 0;
}

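/*
 * ISKE and RRBE below share a pattern: translate the guest absolute address
 * to a host address once, then retry the key operation after
 * fixup_user_fault() whenever the backing page is not mapped yet.
 */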
static int handle_iske(struct kvm_vcpu *vcpu)
{
	unsigned long gaddr, vmaddr;
	unsigned char key;
	int reg1, reg2;
	bool unlocked;
	int rc;

	vcpu->stat.instruction_iske++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	gaddr = kvm_s390_logical_to_effective(vcpu, gaddr);
	gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
	vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr));
	if (kvm_is_error_hva(vmaddr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
retry:
	unlocked = false;
	mmap_read_lock(current->mm);
	rc = get_guest_storage_key(current->mm, vmaddr, &key);

	if (rc) {
		rc = fixup_user_fault(current->mm, vmaddr,
				      FAULT_FLAG_WRITE, &unlocked);
		if (!rc) {
			mmap_read_unlock(current->mm);
			goto retry;
		}
	}
	mmap_read_unlock(current->mm);
	if (rc == -EFAULT)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	if (rc < 0)
		return rc;
	vcpu->run->s.regs.gprs[reg1] &= ~0xff;
	vcpu->run->s.regs.gprs[reg1] |= key;
	return 0;
}

static int handle_rrbe(struct kvm_vcpu *vcpu)
{
	unsigned long vmaddr, gaddr;
	int reg1, reg2;
	bool unlocked;
	int rc;

	vcpu->stat.instruction_rrbe++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	gaddr = kvm_s390_logical_to_effective(vcpu, gaddr);
	gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
	vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr));
	if (kvm_is_error_hva(vmaddr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
retry:
	unlocked = false;
	mmap_read_lock(current->mm);
	rc = reset_guest_reference_bit(current->mm, vmaddr);
	if (rc < 0) {
		rc = fixup_user_fault(current->mm, vmaddr,
				      FAULT_FLAG_WRITE, &unlocked);
		if (!rc) {
			mmap_read_unlock(current->mm);
			goto retry;
		}
	}
	mmap_read_unlock(current->mm);
	if (rc == -EFAULT)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	if (rc < 0)
		return rc;
	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}

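/*
 * SSKE sets the key of a single page or, with the multiple-block control
 * (SSKE_MB), of all pages up to the next segment boundary. The MR/MC/NQ
 * controls are masked out unless the matching facilities are enabled for
 * the guest.
 */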
#define SSKE_NQ 0x8
#define SSKE_MR 0x4
#define SSKE_MC 0x2
#define SSKE_MB 0x1
static int handle_sske(struct kvm_vcpu *vcpu)
{
	unsigned char m3 = vcpu->arch.sie_block->ipb >> 28;
	unsigned long start, end;
	unsigned char key, oldkey;
	int reg1, reg2;
	bool unlocked;
	int rc;

	vcpu->stat.instruction_sske++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	if (!test_kvm_facility(vcpu->kvm, 8))
		m3 &= ~SSKE_MB;
	if (!test_kvm_facility(vcpu->kvm, 10))
		m3 &= ~(SSKE_MC | SSKE_MR);
	if (!test_kvm_facility(vcpu->kvm, 14))
		m3 &= ~SSKE_NQ;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	key = vcpu->run->s.regs.gprs[reg1] & 0xfe;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);
	if (m3 & SSKE_MB) {
		/* start already designates an absolute address */
		end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1);
	} else {
		start = kvm_s390_real_to_abs(vcpu, start);
		end = start + PAGE_SIZE;
	}

	while (start != end) {
		unsigned long vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
		unlocked = false;

		if (kvm_is_error_hva(vmaddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		mmap_read_lock(current->mm);
		rc = cond_set_guest_storage_key(current->mm, vmaddr, key, &oldkey,
						m3 & SSKE_NQ, m3 & SSKE_MR,
						m3 & SSKE_MC);

		if (rc < 0) {
			rc = fixup_user_fault(current->mm, vmaddr,
					      FAULT_FLAG_WRITE, &unlocked);
			rc = !rc ? -EAGAIN : rc;
		}
		mmap_read_unlock(current->mm);
		if (rc == -EFAULT)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		if (rc == -EAGAIN)
			continue;
		if (rc < 0)
			return rc;
		start += PAGE_SIZE;
	}

	if (m3 & (SSKE_MC | SSKE_MR)) {
		if (m3 & SSKE_MB) {
			/* skey in reg1 is unpredictable */
			kvm_s390_set_psw_cc(vcpu, 3);
		} else {
			kvm_s390_set_psw_cc(vcpu, rc);
			vcpu->run->s.regs.gprs[reg1] &= ~0xff00UL;
			vcpu->run->s.regs.gprs[reg1] |= (u64) oldkey << 8;
		}
	}
	if (m3 & SSKE_MB) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT)
			vcpu->run->s.regs.gprs[reg2] &= ~PAGE_MASK;
		else
			vcpu->run->s.regs.gprs[reg2] &= ~0xfffff000UL;
		end = kvm_s390_logical_to_effective(vcpu, end);
		vcpu->run->s.regs.gprs[reg2] |= end;
	}
	return 0;
}

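/*
 * Instructions subject to the IPTE interlock are simply retried once no one
 * holds the IPTE lock anymore; the ipte_wq wait queue is woken by the
 * unlock path.
 */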
static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ipte_interlock++;
	if (psw_bits(vcpu->arch.sie_block->gpsw).pstate)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
	kvm_s390_retry_instr(vcpu);
	VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
	return 0;
}

static int handle_test_block(struct kvm_vcpu *vcpu)
{
	gpa_t addr;
	int reg2;

	vcpu->stat.instruction_tb++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	addr = kvm_s390_real_to_abs(vcpu, addr);

	if (kvm_is_error_gpa(vcpu->kvm, addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/*
	 * We don't expect errors on modern systems, and do not care
	 * about storage keys (yet), so let's just clear the page.
	 */
	if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
		return -EFAULT;
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
}

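/*
 * TPI: dequeue one pending I/O interrupt matching CR6 and store its
 * interruption code either at the supplied address (two words) or in the
 * lowcore (three words). If storing fails, the interrupt is reinjected.
 */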
static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	unsigned long len;
	u32 tpi_data[3];
	int rc;
	u64 addr;
	u8 ar;

	vcpu->stat.instruction_tpi++;

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
	if (!inti) {
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
	tpi_data[1] = inti->io.io_int_parm;
	tpi_data[2] = inti->io.io_int_word;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		len = sizeof(tpi_data) - 4;
		rc = write_guest(vcpu, addr, ar, &tpi_data, len);
		if (rc) {
			rc = kvm_s390_inject_prog_cond(vcpu, rc);
			goto reinject_interrupt;
		}
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		len = sizeof(tpi_data);
		if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
			/* failed writes to the low core are not recoverable */
			rc = -EFAULT;
			goto reinject_interrupt;
		}
	}

	/* irq was successfully handed to the guest */
	kfree(inti);
	kvm_s390_set_psw_cc(vcpu, 1);
	return 0;
reinject_interrupt:
	/*
	 * If we encounter a problem storing the interruption code, the
	 * instruction is suppressed from the guest's view: reinject the
	 * interrupt.
	 */
	if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
		kfree(inti);
		rc = -EFAULT;
	}
	/* don't set the cc, a pgm irq was injected or we drop to user space */
	return rc ? -EFAULT : 0;
}

static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	const u64 isc_mask = 0xffUL << 24; /* all iscs set */

	vcpu->stat.instruction_tsch++;

	/* a valid schid has at least one bit set */
	if (vcpu->run->s.regs.gprs[1])
		inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
					   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
		kfree(inti);
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	return -EREMOTE;
}

static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		vcpu->stat.instruction_io_other++;
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
}

/*
 * handle_pqap: Handling pqap interception
 * @vcpu: the vcpu having issued the pqap instruction
 *
 * We now support PQAP/AQIC instructions and we need to correctly
 * answer the guest even if no dedicated driver's hook is available.
 *
 * The intercepting code calls a dedicated callback for this instruction
 * if a driver did register one in the CRYPTO satellite of the
 * SIE block.
 *
 * If no callback is available, the queues are not available: return this
 * response code to the caller and set CC to 3.
 * Else return the response code returned by the callback.
 */
static int handle_pqap(struct kvm_vcpu *vcpu)
{
	struct ap_queue_status status = {};
	crypto_hook pqap_hook;
	unsigned long reg0;
	int ret;
	uint8_t fc;

	/* Verify that the AP instructions are available */
	if (!ap_instructions_available())
		return -EOPNOTSUPP;
	/* Verify that the guest is allowed to use AP instructions */
	if (!(vcpu->arch.sie_block->eca & ECA_APIE))
		return -EOPNOTSUPP;
	/*
	 * The only possibly intercepted functions when AP instructions are
	 * available for the guest are AQIC and TAPQ with the t bit set.
	 * Since we do not set IC.3 (FIII), we currently will only intercept
	 * the AQIC function code.
	 * Note: running nested under z/VM can result in intercepts for other
	 * function codes, e.g. PQAP(QCI). We do not support this and bail out.
	 */
	reg0 = vcpu->run->s.regs.gprs[0];
	fc = (reg0 >> 24) & 0xff;
	if (fc != 0x03)
		return -EOPNOTSUPP;

	/* PQAP instruction is allowed for guest kernel only */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/* Common PQAP instruction specification exceptions */
	/* bits 41-47 must all be zeros */
	if (reg0 & 0x007f0000UL)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	/* APFT not installed and T bit set */
	if (!test_kvm_facility(vcpu->kvm, 15) && (reg0 & 0x00800000UL))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	/* APXA not installed and APID greater than 64 or APQI greater than 16 */
	if (!(vcpu->kvm->arch.crypto.crycbd & 0x02) && (reg0 & 0x0000c0f0UL))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* AQIC function code specific exception */
	/* facility 65 not present for AQIC function code */
	if (!test_kvm_facility(vcpu->kvm, 65))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/*
	 * If the hook callback is registered, there will be a pointer to the
	 * hook function pointer in the kvm_s390_crypto structure. Lock the
	 * owner, retrieve the hook function pointer and call the hook.
	 */
	down_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);
	if (vcpu->kvm->arch.crypto.pqap_hook) {
		pqap_hook = *vcpu->kvm->arch.crypto.pqap_hook;
		ret = pqap_hook(vcpu);
		if (!ret && vcpu->run->s.regs.gprs[1] & 0x00ff0000)
			kvm_s390_set_psw_cc(vcpu, 3);
		up_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);
		return ret;
	}
	up_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);
	/*
	 * A vfio_driver must register a hook.
	 * No hook means no driver to enable the SIE CRYCB and no queues.
	 * We send this response to the guest.
	 */
	status.response_code = 0x01;
	memcpy(&vcpu->run->s.regs.gprs[1], &status, sizeof(status));
	kvm_s390_set_psw_cc(vcpu, 3);
	return 0;
}

static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;
	unsigned int fac;

	vcpu->stat.instruction_stfl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/*
	 * We need to shift the lower 32 facility bits (bit 0-31) from a u64
	 * into a u32 memory representation. They will remain bits 0-31.
	 */
	fac = *vcpu->kvm->arch.model.fac_list >> 32;
	rc = write_guest_lc(vcpu, offsetof(struct lowcore, stfl_fac_list),
			    &fac, sizeof(fac));
	if (rc)
		return rc;
	VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac);
	trace_kvm_s390_handle_stfl(vcpu, fac);
	return 0;
}

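/*
 * is_valid_psw() rejects PSWs with unassigned mask bits set or with an
 * instruction address that does not fit the selected addressing mode.
 */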
#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	if (psw->addr & 1)
		return 0;
	return 1;
}

int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;
	int rc;
	u8 ar;

	vcpu->stat.instruction_lpsw++;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;
	int rc;
	u8 ar;

	vcpu->stat.instruction_lpswe++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 stidp_data = vcpu->kvm->arch.model.cpuid;
	u64 operand2;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stidp++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data);
	return 0;
}

static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	int cpus = 0;
	int n;

	cpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0 ; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, u8 ar,
				 u8 fc, u8 sel1, u16 sel2)
{
	vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
	vcpu->run->s390_stsi.addr = addr;
	vcpu->run->s390_stsi.ar = ar;
	vcpu->run->s390_stsi.fc = fc;
	vcpu->run->s390_stsi.sel1 = sel1;
	vcpu->run->s390_stsi.sel2 = sel2;
}

static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;
	u8 ar;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (fc > 3) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}

	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (!kvm_s390_pv_cpu_is_protected(vcpu) && (operand2 & 0xfff))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL_ACCOUNT);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL_ACCOUNT);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}
	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		memcpy((void *)sida_origin(vcpu->arch.sie_block), (void *)mem,
		       PAGE_SIZE);
		rc = 0;
	} else {
		rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
	}
	if (rc) {
		rc = kvm_s390_inject_prog_cond(vcpu, rc);
		goto out;
	}
	if (vcpu->kvm->arch.user_stsi) {
		insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
		rc = -EREMOTE;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return rc;
out_no_data:
	kvm_s390_set_psw_cc(vcpu, 3);
out:
	free_page(mem);
	return rc;
}

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
	case 0x02:
		return handle_stidp(vcpu);
	case 0x04:
		return handle_set_clock(vcpu);
	case 0x10:
		return handle_set_prefix(vcpu);
	case 0x11:
		return handle_store_prefix(vcpu);
	case 0x12:
		return handle_store_cpu_address(vcpu);
	case 0x14:
		return kvm_s390_handle_vsie(vcpu);
	case 0x21:
	case 0x50:
		return handle_ipte_interlock(vcpu);
	case 0x29:
		return handle_iske(vcpu);
	case 0x2a:
		return handle_rrbe(vcpu);
	case 0x2b:
		return handle_sske(vcpu);
	case 0x2c:
		return handle_test_block(vcpu);
	case 0x30:
	case 0x31:
	case 0x32:
	case 0x33:
	case 0x34:
	case 0x35:
	case 0x36:
	case 0x37:
	case 0x38:
	case 0x39:
	case 0x3a:
	case 0x3b:
	case 0x3c:
	case 0x5f:
	case 0x74:
	case 0x76:
		return handle_io_inst(vcpu);
	case 0x56:
		return handle_sthyi(vcpu);
	case 0x7d:
		return handle_stsi(vcpu);
	case 0xaf:
		return handle_pqap(vcpu);
	case 0xb1:
		return handle_stfl(vcpu);
	case 0xb2:
		return handle_lpswe(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}

static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	vcpu->stat.instruction_epsw++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
	}
	return 0;
}

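/*
 * PFMF (perform frame management function) operand bits in r1. The handler
 * below clears frames and/or sets storage keys for 4K, 1M or 2G frames,
 * working through the range one 4K page at a time.
 */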
#define PFMF_RESERVED   0xfffc0101UL
#define PFMF_SK         0x00020000UL
#define PFMF_CF         0x00010000UL
#define PFMF_UI         0x00008000UL
#define PFMF_FSC        0x00007000UL
#define PFMF_NQ         0x00000800UL
#define PFMF_MR         0x00000400UL
#define PFMF_MC         0x00000200UL
#define PFMF_KEY        0x000000feUL

static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	bool mr = false, mc = false, nq;
	int reg1, reg2;
	unsigned long start, end;
	unsigned char key;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!test_kvm_facility(vcpu->kvm, 8))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
	    !test_kvm_facility(vcpu->kvm, 14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide conditional-SSKE support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK &&
	    test_kvm_facility(vcpu->kvm, 10)) {
		mr = vcpu->run->s.regs.gprs[reg1] & PFMF_MR;
		mc = vcpu->run->s.regs.gprs[reg1] & PFMF_MC;
	}

	nq = vcpu->run->s.regs.gprs[reg1] & PFMF_NQ;
	key = vcpu->run->s.regs.gprs[reg1] & PFMF_KEY;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
		if (kvm_s390_check_low_addr_prot_real(vcpu, start))
			return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	}

	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		/* only 4k frames specify a real address */
		start = kvm_s390_real_to_abs(vcpu, start);
		end = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
		break;
	case 0x00001000:
		end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1);
		break;
	case 0x00002000:
		/* only support 2G frame size if EDAT2 is available and we are
		   not in 24-bit addressing mode */
		if (!test_kvm_facility(vcpu->kvm, 78) ||
		    psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_24BIT)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		end = (start + _REGION3_SIZE) & ~(_REGION3_SIZE - 1);
		break;
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}

	while (start != end) {
		unsigned long vmaddr;
		bool unlocked = false;

		/* Translate guest address to host address */
		vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
		if (kvm_is_error_hva(vmaddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (kvm_clear_guest(vcpu->kvm, start, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			int rc = kvm_s390_skey_check_enable(vcpu);

			if (rc)
				return rc;
			mmap_read_lock(current->mm);
			rc = cond_set_guest_storage_key(current->mm, vmaddr,
							key, NULL, nq, mr, mc);
			if (rc < 0) {
				rc = fixup_user_fault(current->mm, vmaddr,
						      FAULT_FLAG_WRITE, &unlocked);
				rc = !rc ? -EAGAIN : rc;
			}
			mmap_read_unlock(current->mm);
			if (rc == -EFAULT)
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
			if (rc == -EAGAIN)
				continue;
			if (rc < 0)
				return rc;
		}
		start += PAGE_SIZE;
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) {
			vcpu->run->s.regs.gprs[reg2] = end;
		} else {
			vcpu->run->s.regs.gprs[reg2] &= ~0xffffffffUL;
			end = kvm_s390_logical_to_effective(vcpu, end);
			vcpu->run->s.regs.gprs[reg2] |= end;
		}
	}
	return 0;
}

/*
 * Must be called with relevant read locks held (kvm->mm->mmap_lock, kvm->srcu)
 */
static inline int __do_essa(struct kvm_vcpu *vcpu, const int orc)
{
	int r1, r2, nappended, entries;
	unsigned long gfn, hva, res, pgstev, ptev;
	unsigned long *cbrlo;

	/*
	 * We don't need to set SD.FPF.SK to 1 here, because if we have a
	 * machine check here we either handle it or crash
	 */

	kvm_s390_get_regs_rre(vcpu, &r1, &r2);
	gfn = vcpu->run->s.regs.gprs[r2] >> PAGE_SHIFT;
	hva = gfn_to_hva(vcpu->kvm, gfn);
	entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;

	if (kvm_is_error_hva(hva))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	nappended = pgste_perform_essa(vcpu->kvm->mm, hva, orc, &ptev, &pgstev);
	if (nappended < 0) {
		res = orc ? 0x10 : 0;
		vcpu->run->s.regs.gprs[r1] = res; /* Exception Indication */
		return 0;
	}
	res = (pgstev & _PGSTE_GPS_USAGE_MASK) >> 22;
	/*
	 * Set the block-content state part of the result. 0 means resident, so
	 * nothing to do if the page is valid. 2 is for preserved pages
	 * (non-present and non-zero), and 3 for zero pages (non-present and
	 * zero).
	 */
	if (ptev & _PAGE_INVALID) {
		res |= 2;
		if (pgstev & _PGSTE_GPS_ZERO)
			res |= 1;
	}
	if (pgstev & _PGSTE_GPS_NODAT)
		res |= 0x20;
	vcpu->run->s.regs.gprs[r1] = res;
	/*
	 * It is possible that all the normal 511 slots were full, in which case
	 * we will now write in the 512th slot, which is reserved for host use.
	 * In both cases we let the normal essa handling code process all the
	 * slots, including the reserved one, if needed.
	 */
	if (nappended > 0) {
		cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo & PAGE_MASK);
		cbrlo[entries] = gfn << PAGE_SHIFT;
	}

	if (orc) {
		struct kvm_memory_slot *ms = gfn_to_memslot(vcpu->kvm, gfn);

		/* Increment only if we are really flipping the bit */
		if (ms && !test_and_set_bit(gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
			atomic64_inc(&vcpu->kvm->arch.cmma_dirty_pages);
	}

	return nappended;
}

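/*
 * ESSA (CMMA): outside migration mode the instruction is made interpretable
 * by SIE (lazily enabling CMMA) and retried; in migration mode each request
 * is emulated in __do_essa() so that the CMMA dirty tracking stays
 * consistent.
 */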
static int handle_essa(struct kvm_vcpu *vcpu)
{
	/* entries expected to be 1FF */
	int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
	unsigned long *cbrlo;
	struct gmap *gmap;
	int i, orc;

	VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
	gmap = vcpu->arch.gmap;
	vcpu->stat.instruction_essa++;
	if (!vcpu->kvm->arch.use_cmma)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	/* Check for invalid operation request code */
	orc = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
	/* ORCs 0-6 are always valid */
	if (orc > (test_kvm_facility(vcpu->kvm, 147) ? ESSA_SET_STABLE_NODAT
						     : ESSA_SET_STABLE_IF_RESIDENT))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (!vcpu->kvm->arch.migration_mode) {
		/*
		 * CMMA is enabled in the KVM settings, but is disabled in
		 * the SIE block and in the mm_context, and we are not doing
		 * a migration. Enable CMMA in the mm_context.
		 * Since we need to take a write lock to write to the context
		 * to avoid races with storage keys handling, we check if the
		 * value really needs to be written to; if the value is
		 * already correct, we do nothing and avoid the lock.
		 */
		if (vcpu->kvm->mm->context.uses_cmm == 0) {
			mmap_write_lock(vcpu->kvm->mm);
			vcpu->kvm->mm->context.uses_cmm = 1;
			mmap_write_unlock(vcpu->kvm->mm);
		}
		/*
		 * If we are here, we are supposed to have CMMA enabled in
		 * the SIE block. Enabling CMMA works on a per-CPU basis,
		 * while the context use_cmma flag is per process.
		 * It's possible that the context flag is enabled and the
		 * SIE flag is not, so we set the flag always; if it was
		 * already set, nothing changes, otherwise we enable it
		 * on this CPU too.
		 */
		vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
		/* Retry the ESSA instruction */
		kvm_s390_retry_instr(vcpu);
	} else {
		int srcu_idx;

		mmap_read_lock(vcpu->kvm->mm);
		srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
		i = __do_essa(vcpu, orc);
		srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
		mmap_read_unlock(vcpu->kvm->mm);
		if (i < 0)
			return i;
		/* Account for the possible extra cbrl entry */
		entries += i;
	}
	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
	mmap_read_lock(gmap->mm);
	for (i = 0; i < entries; ++i)
		__gmap_zap(gmap, cbrlo[i]);
	mmap_read_unlock(gmap->mm);
	return 0;
}

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
	case 0x8a:
	case 0x8e:
	case 0x8f:
		return handle_ipte_interlock(vcpu);
	case 0x8d:
		return handle_epsw(vcpu);
	case 0xab:
		return handle_essa(vcpu);
	case 0xaf:
		return handle_pfmf(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}

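/*
 * LCTL/LCTLG load a consecutive (wrapping) range of control registers.
 * Since control registers govern address translation, a TLB flush is
 * requested after loading.
 */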
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_lctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_stctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

static int handle_stctg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_stctg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipb & 0x000000ff) {
	case 0x25:
		return handle_stctg(vcpu);
	case 0x2f:
		return handle_lctlg(vcpu);
	case 0x60:
	case 0x61:
	case 0x62:
		return handle_ri(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}

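/*
 * TPROT is only emulated for the Linux memory-detection case (access key
 * 0); everything else goes to userspace. The condition code reflects
 * whether the tested address is writable, read-only, or unavailable.
 */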
static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	unsigned long hva, gpa;
	int ret = 0, cc = 0;
	bool writable;
	u8 ar;

	vcpu->stat.instruction_tprot++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_lock(vcpu);
	ret = guest_translate_address(vcpu, address1, ar, &gpa, GACC_STORE);
	if (ret == PGM_PROTECTION) {
		/* Write protected? Try again with read-only... */
		cc = 1;
		ret = guest_translate_address(vcpu, address1, ar, &gpa,
					      GACC_FETCH);
	}
	if (ret) {
		if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
			ret = kvm_s390_inject_program_int(vcpu, ret);
		} else if (ret > 0) {
			/* Translation not available */
			kvm_s390_set_psw_cc(vcpu, 3);
			ret = 0;
		}
		goto out_unlock;
	}

	hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
	if (kvm_is_error_hva(hva)) {
		ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	} else {
		if (!writable)
			cc = 1;		/* Write not permitted ==> read-only */
		kvm_s390_set_psw_cc(vcpu, cc);
		/* Note: CC2 only occurs for storage keys (not supported yet) */
	}
out_unlock:
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_unlock(vcpu);
	return ret;
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
	case 0x01:
		return handle_tprot(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}

static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	vcpu->stat.instruction_sckpf++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static int handle_ptff(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ptff++;

	/* we don't emulate any control instructions yet */
	kvm_s390_set_psw_cc(vcpu, 3);
	return 0;
}

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
	case 0x04:
		return handle_ptff(vcpu);
	case 0x07:
		return handle_sckpf(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}