/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/switch_to.h>

#define CREATE_TRACE_POINTS
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

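/*
 * Each entry below names a counter in struct kvm_vcpu's stat field;
 * VCPU_STAT() records the member offset plus the KVM_STAT_VCPU type so that
 * the generic KVM code can expose these counters through debugfs (typically
 * under /sys/kernel/debug/kvm/), summed over all VCPUs of all VMs.
 */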
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },

static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
	/* every s390 is virtualization enabled ;-) */

void kvm_arch_hardware_disable(void *garbage)

int kvm_arch_hardware_setup(void)

void kvm_arch_hardware_unsetup(void)

void kvm_arch_check_processor_compat(void *rtn)

int kvm_arch_init(void *opaque)

void kvm_arch_exit(void)

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();

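/*
 * Illustrative userspace usage (a sketch, not taken from this file):
 * KVM_S390_ENABLE_SIE is issued on the /dev/kvm file descriptor, e.g.
 *
 *	int dev = open("/dev/kvm", O_RDWR);
 *	ioctl(dev, KVM_S390_ENABLE_SIE, 0);
 *
 * s390_enable_sie() then converts the calling process' address space so it
 * can be used as a SIE guest origin; kvm_arch_init_vm() below calls it as
 * well, so a VM also works if userspace never issues this ioctl explicitly.
 */
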
int kvm_dev_ioctl_check_extension(long ext)
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_S390_COW:
		r = sclp_get_fac85() & 0x2;

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;

	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
		r = kvm_s390_inject_vm(kvm, &s390int);

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))

	rc = s390_enable_sie();

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);
	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);

	kvm->arch.css_support = 0;

	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)(kvm->arch.sca));

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);

static void kvm_free_vcpus(struct kvm *kvm)
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);

void kvm_arch_sync_events(struct kvm *kvm)

void kvm_arch_destroy_vm(struct kvm *kvm)
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

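/*
 * As the comment at the top of kvm_s390_vcpu_initial_reset() says, the values
 * above reproduce the architected initial CPU reset: PSW, prefix, CPU timer,
 * clock comparator and TOD programmable register are cleared, the guest FPC
 * is reset (and loaded into the hardware FPC via lfpc), and CR0/CR14 get
 * their architected reset values. CPUSTAT_STOPPED leaves the VCPU in the
 * stopped state until it is explicitly started.
 */
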
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
	vcpu->arch.sie_block->ecb = 6;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	vcpu->arch.sie_block->fac = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
	struct kvm_vcpu *vcpu;

	if (id >= KVM_MAX_VCPUS)

	vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
			       get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	free_page((unsigned long)(vcpu->arch.sie_block));

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
	/* kvm common code refers to this, but never calls it */

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
	/* kvm common code refers to this, but never calls it */

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
	kvm_s390_vcpu_initial_reset(vcpu);

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))

	vcpu->run->psw_mask = psw.mask;
	vcpu->run->psw_addr = psw.addr;

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
	return -EINVAL; /* not implemented yet */

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
	return -EINVAL; /* not implemented yet */

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
	return -EINVAL; /* not implemented yet */

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
	return -EINVAL; /* not implemented yet */

static int __vcpu_run(struct kvm_vcpu *vcpu)
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (test_thread_flag(TIF_MCCK_PENDING))

	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	trace_kvm_s390_sie_enter(vcpu,
				 atomic_read(&vcpu->arch.sie_block->cpuflags));
	rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
	if (rc) {
		if (kvm_is_ucontrol(vcpu->kvm)) {
			rc = SIE_INTERCEPT_UCONTROL;
		} else {
			VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
			trace_kvm_s390_sie_fault(vcpu);
			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

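/*
 * Note on the two 16-byte copies above: they move guest registers 14 and 15
 * (two 64-bit GPRs) into the gg14/gg15 fields of the SIE control block before
 * entering SIE and back into the register file after SIE returns.
 */
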
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
	case KVM_EXIT_S390_TSCH:

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);

	rc = __vcpu_run(vcpu);

	if (kvm_is_ucontrol(vcpu->kvm))

	rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;

#ifdef CONFIG_KVM_S390_UCONTROL
	if (rc == SIE_INTERCEPT_UCONTROL) {
		kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
		kvm_run->s390_ucontrol.trans_exc_code =
			current->thread.gmap_addr;
		kvm_run->s390_ucontrol.pgm_code = 0x10;

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;

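/*
 * Illustrative userspace run loop (a sketch, not taken from this file): the
 * VCPU file descriptor and the mmap()ed kvm_run structure come from the
 * generic KVM API (the mapping size is obtained with KVM_GET_VCPU_MMAP_SIZE);
 * handle_sie_intercept() here is a hypothetical userspace helper.
 *
 *	struct kvm_run *run = mmap(NULL, run_size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *	for (;;) {
 *		ioctl(vcpu_fd, KVM_RUN, 0);
 *		if (run->exit_reason == KVM_EXIT_S390_SIEIC)
 *			handle_sie_intercept(run);
 *	}
 */
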
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
		       unsigned long n, int prefix)
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
	unsigned char archmode = 1;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
		addr = SAVE_AREA_BASE;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
		addr = SAVE_AREA_BASE;

	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * them into the save area.
	 */
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->run->s.regs.acrs);

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->run->s.regs.gprs, 128, prefix))

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->run->s.regs.acrs, 64, prefix))

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))

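/*
 * Note: the sequence above writes the architected store-status save area,
 * field by field, at the offsets given by struct save_area: floating-point
 * registers, general registers, PSW, prefix, FP control, TOD programmable
 * register, CPU timer, clock comparator, access registers and control
 * registers. The prefix flag selects between the prefixed and absolute
 * guest copy routines in __guestcopy() above.
 */
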
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			trace_kvm_s390_enable_css(vcpu->kvm);

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
	case KVM_S390_STORE_STATUS:
		r = kvm_s390_vcpu_store_status(vcpu, arg);
	case KVM_S390_SET_INITIAL_PSW: {
		if (copy_from_user(&psw, argp, sizeof(psw)))
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		if (copy_from_user(&reg, argp, sizeof(reg)))
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);

#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {

		if (!kvm_is_ucontrol(vcpu->kvm)) {

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {

		if (!kvm_is_ucontrol(vcpu->kvm)) {

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))

		struct kvm_enable_cap cap;

		if (copy_from_user(&cap, argp, sizeof(cap)))
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
	    && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
	return VM_FAULT_SIGBUS;

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
	/* A few sanity checks. We can have exactly one memory slot: it has
	   to start at guest address zero, has to be located at a page
	   boundary in userland and has to end at a page boundary. The
	   memory in userland may be fragmented into various different vmas.
	   It is okay to mmap() and munmap() parts of this slot at any time
	   after this call. */

	if (mem->guest_phys_addr)

	if (mem->userspace_addr & 0xffffful)

	if (mem->memory_size & 0xffffful)

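	/*
	 * Note: 0xfffff is 2^20 - 1, so the two mask checks above require the
	 * userspace address and the slot size to be aligned to 1 MB,
	 * presumably matching the segment granularity that gmap_map_segment()
	 * works with.
	 */
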
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");

void kvm_arch_flush_shadow_all(struct kvm *kvm)

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)

static int __init kvm_s390_init(void)
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);

	/*
	 * guests can ask for up to 255+1 double words (2 kB), so we need a
	 * full page to hold the maximum amount of facilities. On the other
	 * hand, we only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);

	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
	facilities[0] &= 0xff00fff3f47c0000ULL;
	facilities[1] &= 0x001c000000000000ULL;
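	/*
	 * facilities[0] and facilities[1] correspond to the first two
	 * doublewords of the STFLE facility list (facility bits 0-63 and
	 * 64-127); the AND masks above whitelist only the bits this version
	 * of KVM knows how to handle, so everything else stays hidden from
	 * guests.
	 */
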
static void __exit kvm_s390_exit(void)
	free_page((unsigned long) facilities);

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);