/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>		/* s390_handle_mcck() */
#include <asm/switch_to.h>
#include <asm/sclp.h>		/* sclp_get_fac85() */
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
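
/*
 * Each entry below maps a debugfs file name to the offset of a counter
 * in struct kvm_vcpu; the generic KVM code walks this table to create
 * the per-stat files (typically under /sys/kernel/debug/kvm/).
 */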
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};
static unsigned long long *facilities;
/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}
/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}
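
/*
 * Illustrative use (not part of this file): userspace issues this ioctl
 * on the /dev/kvm file descriptor before creating a VM, roughly:
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	ioctl(kvm_fd, KVM_S390_ENABLE_SIE, 0);
 */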
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_S390_COW:
		r = sclp_get_fac85() & 0x2;
		break;
	default:
		r = 0;
	}
	return r;
}
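
/*
 * Note on KVM_CAP_S390_COW above: the capability is only advertised when
 * the SCLP facility byte returned by sclp_get_fac85() has the 0x2 bit
 * set, i.e. when the hardware facility that guest copy-on-write depends
 * on is present.
 */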
/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
	}
	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}
void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
}
/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS;
	return 0;
}
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}
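
/*
 * Note the symmetry above: kvm_arch_vcpu_load() saves the host's
 * floating point and access registers and installs the guest's copies;
 * kvm_arch_vcpu_put() reverses the swap when the VCPU is scheduled out.
 */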
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
}
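
/*
 * The gcr[0] and gcr[14] values written above are the architected
 * initial control register contents defined by the z/Architecture
 * Principles of Operation; all other control registers start out zero.
 */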
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);
	vcpu->arch.sie_block->ecb = 6;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	vcpu->arch.sie_block->fac = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}
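
/*
 * Setting cpu_id.version to 0xff above presumably follows the
 * convention that a version code of 0xff in the CPU ID identifies the
 * CPU to the guest as a virtual one.
 */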
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca)
			goto out_free_cpu;
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out:
	return ERR_PTR(rc);
}
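
/*
 * The SCA (system control area) set up above is shared by all VCPUs of
 * a VM and is used by the SIE hardware for inter-CPU signaling; bit
 * (63 - id) in the mcn bitmask marks CPU "id" as present, matching the
 * clear_bit() in kvm_arch_vcpu_destroy().
 */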
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
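
/*
 * Illustrative use of the ONE_REG interface (not part of this file):
 * reading the TOD programmable register of a VCPU, roughly:
 *
 *	__u32 todpr;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_TODPR,
 *		.addr = (__u64)&todpr,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */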
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc;

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	trace_kvm_s390_sie_enter(vcpu,
				 atomic_read(&vcpu->arch.sie_block->cpuflags));
	rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
	if (rc) {
		if (kvm_is_ucontrol(vcpu->kvm)) {
			rc = SIE_INTERCEPT_UCONTROL;
		} else {
			VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
			trace_kvm_s390_sie_fault(vcpu);
			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
			rc = 0;
		}
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
	return rc;
}
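
/*
 * The two 16-byte memcpys above shuttle guest registers 14 and 15,
 * which live in the gg14/gg15 fields of the SIE block while the guest
 * runs, into and out of the regular GPR array around each SIE entry.
 */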
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

	might_fault();

	do {
		rc = __vcpu_run(vcpu);
		if (rc)
			break;
		if (kvm_is_ucontrol(vcpu->kvm))
			rc = -EOPNOTSUPP;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

#ifdef CONFIG_KVM_S390_UCONTROL
	if (rc == SIE_INTERCEPT_UCONTROL) {
		kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
		kvm_run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		/* 0x10 is the program interruption code for a
		 * segment-translation exception */
		kvm_run->s390_ucontrol.pgm_code = 0x10;
		rc = 0;
	}
#endif

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed;
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->run->s.regs.gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->run->s.regs.acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}
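
/*
 * Layout note: store status fills in the architected save area (struct
 * save_area, based at absolute address 0x1200 in the NOADDR case, per
 * the comment above), and byte 163 receives the architectural-mode
 * byte (archmode == 1 meaning z/Architecture mode).
 */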
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
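
/*
 * KVM_S390_VCPU_FAULT above lets userspace pre-fault a guest address:
 * gmap_fault() resolves "arg" in the guest address space, and any
 * non-error result is reported to userspace as success.
 */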
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}
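
/*
 * For user-controlled VMs this lets userspace mmap() the VCPU's SIE
 * control block at page offset KVM_S390_SIE_PAGE_OFFSET of the vcpu
 * file descriptor; every other offset raises SIGBUS.
 */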
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	return 0;
}
/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	/* A few sanity checks. We can have exactly one memory slot which has
	   to start at guest virtual zero and which has to be located at a
	   page boundary in userland and which has to end at a page boundary.
	   The memory in userland is ok to be fragmented into various different
	   vmas. It is okay to mmap() and munmap() stuff in this slot after
	   doing this call at any time */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	return 0;
}
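
/*
 * The 0xfffff masks above enforce 1 MB alignment: guest memory is
 * mapped into the gmap in 1 MB segments, so both the userspace address
 * and the size of the slot must be segment aligned.
 */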
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc)
{
	int rc;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}
static int __init kvm_s390_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
	facilities[0] &= 0xff00fff3f47c0000ULL;
	facilities[1] &= 0x201c000000000000ULL;
	return 0;
}
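
/*
 * Each 1-bit in the masks above corresponds to a facility bit (as
 * reported by STFLE) that KVM is willing to expose; facilities the host
 * has but KVM has not vetted are hidden from guests.
 */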
static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);