2 * s390host.c -- hosting zSeries kernel virtual machines
4 * Copyright IBM Corp. 2008,2009
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
13 * Christian Ehrhardt <ehrhardt@de.ibm.com>
16 #include <linux/compiler.h>
17 #include <linux/err.h>
19 #include <linux/hrtimer.h>
20 #include <linux/init.h>
21 #include <linux/kvm.h>
22 #include <linux/kvm_host.h>
23 #include <linux/module.h>
24 #include <linux/slab.h>
25 #include <linux/timer.h>
26 #include <asm/asm-offsets.h>
27 #include <asm/lowcore.h>
28 #include <asm/pgtable.h>
30 #include <asm/system.h>
/* Expands to the byte offset of a vcpu stat counter plus its debugfs type. */
34 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
/*
 * Per-vcpu statistics exported via debugfs: each entry maps a file name
 * to a counter inside struct kvm_vcpu::stat.
 */
36 struct kvm_stats_debugfs_item debugfs_entries[] = {
37 { "userspace_handled", VCPU_STAT(exit_userspace) },
38 { "exit_null", VCPU_STAT(exit_null) },
39 { "exit_validity", VCPU_STAT(exit_validity) },
40 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
41 { "exit_external_request", VCPU_STAT(exit_external_request) },
42 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
43 { "exit_instruction", VCPU_STAT(exit_instruction) },
44 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
45 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
46 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
47 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
48 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
49 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
50 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
51 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
52 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
53 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
54 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
55 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
56 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
57 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
58 { "instruction_spx", VCPU_STAT(instruction_spx) },
59 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
60 { "instruction_stap", VCPU_STAT(instruction_stap) },
61 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
62 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
63 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
64 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
65 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
66 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
67 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
68 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
69 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
70 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
71 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
72 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
73 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
74 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
75 { "diagnose_10", VCPU_STAT(diagnose_10) },
76 { "diagnose_44", VCPU_STAT(diagnose_44) },
/*
 * KVM-sanitized copy of the host facility list; its address is later
 * installed in each vcpu's SIE control block (see vcpu_setup below).
 */
80 static unsigned long long *facilities;
82 /* Section: not file related */
/* No hardware enable step is needed on s390 (see the comment below). */
83 int kvm_arch_hardware_enable(void *garbage)
85 /* every s390 is virtualization enabled ;-) */
89 void kvm_arch_hardware_disable(void *garbage)
/*
 * NOTE(review): the bodies of the following common-KVM arch hooks are
 * elided in this excerpt; on s390 they are trivial stubs per the section
 * comment above.
 */
93 int kvm_arch_hardware_setup(void)
98 void kvm_arch_hardware_unsetup(void)
102 void kvm_arch_check_processor_compat(void *rtn)
106 int kvm_arch_init(void *opaque)
111 void kvm_arch_exit(void)
115 /* Section: device related */
/* /dev/kvm ioctl: only KVM_S390_ENABLE_SIE is handled in visible code. */
116 long kvm_arch_dev_ioctl(struct file *filp,
117 unsigned int ioctl, unsigned long arg)
119 if (ioctl == KVM_S390_ENABLE_SIE)
120 return s390_enable_sie();
/*
 * Report which KVM capabilities this architecture supports; the visible
 * cases presumably fall through to a common "supported" result (the
 * switch body is elided in this excerpt).
 */
124 int kvm_dev_ioctl_check_extension(long ext)
129 case KVM_CAP_S390_PSW:
130 case KVM_CAP_S390_GMAP:
131 case KVM_CAP_SYNC_MMU:
/* user-controlled VMs are only advertised when compiled in */
132 #ifdef CONFIG_KVM_S390_UCONTROL
133 case KVM_CAP_S390_UCONTROL:
143 /* Section: vm related */
145 * Get (and clear) the dirty memory log for a memory slot.
/* Body not visible in this excerpt. */
147 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
148 struct kvm_dirty_log *log)
/*
 * VM-scope ioctls: the visible case copies an interrupt description
 * from userspace and injects it as a floating (VM-wide) interrupt.
 */
153 long kvm_arch_vm_ioctl(struct file *filp,
154 unsigned int ioctl, unsigned long arg)
156 struct kvm *kvm = filp->private_data;
157 void __user *argp = (void __user *)arg;
161 case KVM_S390_INTERRUPT: {
162 struct kvm_s390_interrupt s390int;
/* copy_from_user failure path is elided here — presumably -EFAULT */
165 if (copy_from_user(&s390int, argp, sizeof(s390int)))
167 r = kvm_s390_inject_vm(kvm, &s390int);
/*
 * Arch-specific VM creation: allocates the SCA page, registers the
 * per-VM s390 debug feature, initializes the floating-interrupt list
 * and (for non-ucontrol guests) allocates a guest address space (gmap).
 */
177 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
183 #ifdef CONFIG_KVM_S390_UCONTROL
/* reject unknown type flags; ucontrol VMs require CAP_SYS_ADMIN */
184 if (type & ~KVM_VM_S390_UCONTROL)
186 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
193 rc = s390_enable_sie();
/* system control area: one zeroed page shared by all vcpus of this VM */
199 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
/* debug feature name is derived from the creating task's pid */
203 sprintf(debug_name, "kvm-%u", current->pid);
205 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
209 spin_lock_init(&kvm->arch.float_int.lock);
210 INIT_LIST_HEAD(&kvm->arch.float_int.list);
212 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
213 VM_EVENT(kvm, 3, "%s", "vm created");
/* ucontrol guests manage their own address space: no VM-wide gmap */
215 if (type & KVM_VM_S390_UCONTROL) {
216 kvm->arch.gmap = NULL;
218 kvm->arch.gmap = gmap_alloc(current->mm);
/* error unwind: release debug feature and SCA page (labels elided) */
224 debug_unregister(kvm->arch.dbf);
226 free_page((unsigned long)(kvm->arch.sca));
/*
 * Tear down one vcpu: detach it from the shared SCA (non-ucontrol),
 * free its private gmap (ucontrol) and release the SIE control block.
 */
231 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
233 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
234 if (!kvm_is_ucontrol(vcpu->kvm)) {
/* mcn is a cpu bitmask counted from the MSB, hence "63 - id" */
235 clear_bit(63 - vcpu->vcpu_id,
236 (unsigned long *) &vcpu->kvm->arch.sca->mcn);
/* only clear the sda slot if it still points at our SIE block */
237 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
238 (__u64) vcpu->arch.sie_block)
239 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
243 if (kvm_is_ucontrol(vcpu->kvm))
244 gmap_free(vcpu->arch.gmap);
246 free_page((unsigned long)(vcpu->arch.sie_block));
247 kvm_vcpu_uninit(vcpu);
/*
 * Destroy every vcpu of the VM, then clear the vcpu array and the
 * online counter under kvm->lock so common code sees no stale vcpus.
 */
251 static void kvm_free_vcpus(struct kvm *kvm)
254 struct kvm_vcpu *vcpu;
256 kvm_for_each_vcpu(i, vcpu, kvm)
257 kvm_arch_vcpu_destroy(vcpu);
259 mutex_lock(&kvm->lock);
260 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
261 kvm->vcpus[i] = NULL;
263 atomic_set(&kvm->online_vcpus, 0);
264 mutex_unlock(&kvm->lock);
/* Nothing to synchronize on s390 (body elided in this excerpt). */
267 void kvm_arch_sync_events(struct kvm *kvm)
/* Free VM-wide resources; a gmap only exists for non-ucontrol guests. */
271 void kvm_arch_destroy_vm(struct kvm *kvm)
274 free_page((unsigned long)(kvm->arch.sca));
275 debug_unregister(kvm->arch.dbf);
276 if (!kvm_is_ucontrol(kvm))
277 gmap_free(kvm->arch.gmap);
280 /* Section: vcpu related */
/*
 * ucontrol vcpus each get a private gmap; regular vcpus share the
 * VM-wide gmap allocated in kvm_arch_init_vm().
 */
281 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
283 if (kvm_is_ucontrol(vcpu->kvm)) {
284 vcpu->arch.gmap = gmap_alloc(current->mm);
/* allocation failure path elided — presumably returns -ENOMEM */
285 if (!vcpu->arch.gmap)
290 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
294 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
/*
 * Scheduled-in hook: save host FP and access registers, install the
 * guest's, enable this vcpu's gmap and mark the vcpu as running.
 */
299 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
301 save_fp_regs(&vcpu->arch.host_fpregs);
302 save_access_regs(vcpu->arch.host_acrs);
/* strip invalid FPC bits before loading the guest FP state */
303 vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
304 restore_fp_regs(&vcpu->arch.guest_fpregs);
305 restore_access_regs(vcpu->arch.guest_acrs);
306 gmap_enable(vcpu->arch.gmap);
307 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
/*
 * Scheduled-out hook: exact mirror of kvm_arch_vcpu_load — clear the
 * running flag, disable the gmap, stash guest FP/access registers and
 * restore the host's.
 */
310 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
312 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
313 gmap_disable(vcpu->arch.gmap);
314 save_fp_regs(&vcpu->arch.guest_fpregs);
315 save_access_regs(vcpu->arch.guest_acrs);
316 restore_fp_regs(&vcpu->arch.host_fpregs);
317 restore_access_regs(vcpu->arch.host_acrs);
/*
 * Perform the architected initial CPU reset on the SIE control block,
 * except the vcpu stays in z/Architecture mode (see comment below).
 */
320 static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
322 /* this equals initial cpu reset in pop, but we don't switch to ESA */
323 vcpu->arch.sie_block->gpsw.mask = 0UL;
324 vcpu->arch.sie_block->gpsw.addr = 0UL;
325 vcpu->arch.sie_block->prefix = 0UL;
326 vcpu->arch.sie_block->ihcpu = 0xffff;
327 vcpu->arch.sie_block->cputm = 0UL;
328 vcpu->arch.sie_block->ckc = 0UL;
329 vcpu->arch.sie_block->todpr = 0;
330 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
/* non-zero reset values for control registers 0 and 14 */
331 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
332 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
333 vcpu->arch.guest_fpregs.fpc = 0;
/* load the cleared FP control word into the hardware fpc register */
334 asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
335 vcpu->arch.sie_block->gbea = 1;
/* One-time setup of the SIE control block for a freshly created vcpu. */
338 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
/* initial cpuflags continue on an elided line (CPUSTAT_ZARCH | ...) */
340 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
343 vcpu->arch.sie_block->ecb = 6;
344 vcpu->arch.sie_block->eca = 0xC1002001U;
/* point the control block at the KVM-sanitized facility list */
345 vcpu->arch.sie_block->fac = (int) (long) facilities;
/* clock-comparator wakeups: hrtimer fires, tasklet does the work */
346 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
347 tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
348 (unsigned long) vcpu);
349 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
350 get_cpu_id(&vcpu->arch.cpu_id);
/* tag the guest-visible CPU id version field */
351 vcpu->arch.cpu_id.version = 0xff;
/*
 * Allocate and wire up a new vcpu: kvm_vcpu struct, a zeroed page for
 * the SIE control block, SCA linkage (non-ucontrol only) and the
 * local-interrupt bookkeeping shared with the floating-interrupt code.
 */
355 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
358 struct kvm_vcpu *vcpu;
361 if (id >= KVM_MAX_VCPUS)
366 vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
/* the SIE control block must live in its own zeroed page */
370 vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
371 get_zeroed_page(GFP_KERNEL);
373 if (!vcpu->arch.sie_block)
376 vcpu->arch.sie_block->icpua = id;
377 if (!kvm_is_ucontrol(kvm)) {
378 if (!kvm->arch.sca) {
/* claim the per-cpu sda slot in the SCA if still free */
382 if (!kvm->arch.sca->cpu[id].sda)
383 kvm->arch.sca->cpu[id].sda =
384 (__u64) vcpu->arch.sie_block;
/* SCA origin is split into high/low halves in the control block */
385 vcpu->arch.sie_block->scaoh =
386 (__u32)(((__u64)kvm->arch.sca) >> 32);
387 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
388 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
391 spin_lock_init(&vcpu->arch.local_int.lock);
392 INIT_LIST_HEAD(&vcpu->arch.local_int.list);
393 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
/* publish this vcpu's local-int struct under the float_int lock */
394 spin_lock(&kvm->arch.float_int.lock);
395 kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
396 init_waitqueue_head(&vcpu->arch.local_int.wq);
397 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
398 spin_unlock(&kvm->arch.float_int.lock);
400 rc = kvm_vcpu_init(vcpu, kvm, id);
402 goto out_free_sie_block;
403 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
404 vcpu->arch.sie_block);
/* error unwind: release the SIE control block page */
408 free_page((unsigned long)(vcpu->arch.sie_block));
415 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
417 /* kvm common code refers to this, but never calls it */
/* Userspace-requested initial reset; delegates to the helper above. */
422 static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
424 kvm_s390_vcpu_initial_reset(vcpu);
428 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
430 memcpy(&vcpu->arch.guest_gprs, ®s->gprs, sizeof(regs->gprs));
434 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
436 memcpy(®s->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
/* KVM_SET_SREGS: install access and control registers from userspace. */
440 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
441 struct kvm_sregs *sregs)
443 memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
444 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
/* make the new access registers take effect immediately */
445 restore_access_regs(vcpu->arch.guest_acrs);
/* KVM_GET_SREGS: read access and control registers back to userspace. */
449 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
450 struct kvm_sregs *sregs)
452 memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
453 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
/* KVM_SET_FPU: install FP registers and FP control word from userspace. */
457 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
459 memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
460 vcpu->arch.guest_fpregs.fpc = fpu->fpc;
/* load the new FP state into the hardware right away */
461 restore_fp_regs(&vcpu->arch.guest_fpregs);
/* KVM_GET_FPU: read FP registers and FP control word back to userspace. */
465 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
467 memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
468 fpu->fpc = vcpu->arch.guest_fpregs.fpc;
/*
 * Set the initial PSW in the shared kvm_run area; only permitted while
 * the vcpu is in the stopped state (rejection path elided here).
 */
472 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
476 if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
479 vcpu->run->psw_mask = psw.mask;
480 vcpu->run->psw_addr = psw.addr;
/* The following four common-KVM vcpu ioctls are unsupported on s390. */
485 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
486 struct kvm_translation *tr)
488 return -EINVAL; /* not implemented yet */
491 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
492 struct kvm_guest_debug *dbg)
494 return -EINVAL; /* not implemented yet */
497 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
498 struct kvm_mp_state *mp_state)
500 return -EINVAL; /* not implemented yet */
503 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
504 struct kvm_mp_state *mp_state)
506 return -EINVAL; /* not implemented yet */
/*
 * Enter the guest once via sie64a() and classify the result.  Guest
 * gprs 14 and 15 (16 bytes) are shuttled through the control block's
 * gg14 save area around the SIE entry.
 */
509 static int __vcpu_run(struct kvm_vcpu *vcpu)
513 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);
/* machine check handling takes priority over entering the guest */
518 if (test_thread_flag(TIF_MCCK_PENDING))
/* ucontrol guests get their interrupts injected by userspace instead */
521 if (!kvm_is_ucontrol(vcpu->kvm))
522 kvm_s390_deliver_pending_interrupts(vcpu);
524 vcpu->arch.sie_block->icptcode = 0;
528 VCPU_EVENT(vcpu, 6, "entering sie flags %x",
529 atomic_read(&vcpu->arch.sie_block->cpuflags));
530 rc = sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
/*
 * On a fault in SIE: a ucontrol guest hands the fault to userspace,
 * otherwise an addressing exception is injected into the guest.
 */
532 if (kvm_is_ucontrol(vcpu->kvm)) {
533 rc = SIE_INTERCEPT_UCONTROL;
535 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
536 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
540 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
541 vcpu->arch.sie_block->icptcode);
/* copy gprs 14/15 back out of the control block save area */
546 memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
/*
 * KVM_RUN: enter the guest repeatedly (via __vcpu_run) until a signal
 * is pending or an intercept must be handed to userspace, then fill in
 * kvm_run with the exit reason and state.
 */
550 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
/* temporarily switch to the guest's signal mask while running */
556 if (vcpu->sigset_active)
557 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
559 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
561 BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
/* re-entry bookkeeping depends on how the previous KVM_RUN exited */
563 switch (kvm_run->exit_reason) {
564 case KVM_EXIT_S390_SIEIC:
565 case KVM_EXIT_UNKNOWN:
567 case KVM_EXIT_S390_RESET:
568 case KVM_EXIT_S390_UCONTROL:
/* userspace may have modified the PSW in kvm_run; take it over */
574 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
575 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
580 rc = __vcpu_run(vcpu);
/* ucontrol: every intercept goes straight back to userspace */
583 if (kvm_is_ucontrol(vcpu->kvm))
586 rc = kvm_handle_sie_intercept(vcpu);
587 } while (!signal_pending(current) && !rc);
/* RERUNVCPU means: restart the loop without reporting to userspace */
589 if (rc == SIE_INTERCEPT_RERUNVCPU)
592 if (signal_pending(current) && !rc) {
593 kvm_run->exit_reason = KVM_EXIT_INTR;
597 #ifdef CONFIG_KVM_S390_UCONTROL
/* translate a ucontrol fault into a KVM_EXIT_S390_UCONTROL exit */
598 if (rc == SIE_INTERCEPT_UCONTROL) {
599 kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
600 kvm_run->s390_ucontrol.trans_exc_code =
601 current->thread.gmap_addr;
602 kvm_run->s390_ucontrol.pgm_code = 0x10;
607 if (rc == -EOPNOTSUPP) {
608 /* intercept cannot be handled in-kernel, prepare kvm-run */
609 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
610 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
611 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
612 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
616 if (rc == -EREMOTE) {
617 /* intercept was handled, but userspace support is needed
618 * kvm_run has been prepared by the handler */
/* export the final PSW so userspace sees up-to-date state */
622 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
623 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
625 if (vcpu->sigset_active)
626 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
628 vcpu->stat.exit_userspace++;
/*
 * Copy n bytes into the guest at guestdest, either through the
 * prefix-relocated mapping or as an absolute address.
 * NOTE(review): the conditional selecting between the two returns is
 * elided in this excerpt — presumably "if (prefix)" chooses the first
 * (prefixed) copy, with the absolute copy as the other branch; confirm
 * against the full source.
 */
632 static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
633 unsigned long n, int prefix)
636 return copy_to_guest(vcpu, guestdest, from, n);
638 return copy_to_guest_absolute(vcpu, guestdest, from, n);
642 * store status at address
643 * we have two special cases:
644 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
645 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
/*
 * Store the vcpu's architected status (registers, PSW, timers, control
 * registers) into the guest save area at addr, resolving the two
 * special addr values documented above.  Each failing __guestcopy
 * presumably bails out with -EFAULT (error branches elided here).
 */
647 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
/* byte 163 of the save area records "z/Architecture mode" (1) */
649 unsigned char archmode = 1;
652 if (addr == KVM_S390_STORE_STATUS_NOADDR) {
653 if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
655 addr = SAVE_AREA_BASE;
657 } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
658 if (copy_to_guest(vcpu, 163ul, &archmode, 1))
660 addr = SAVE_AREA_BASE;
/* 16 FP registers, 8 bytes each */
665 if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
666 vcpu->arch.guest_fpregs.fprs, 128, prefix))
/* 16 general purpose registers, 8 bytes each */
669 if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
670 vcpu->arch.guest_gprs, 128, prefix))
673 if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
674 &vcpu->arch.sie_block->gpsw, 16, prefix))
677 if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
678 &vcpu->arch.sie_block->prefix, 4, prefix))
681 if (__guestcopy(vcpu,
682 addr + offsetof(struct save_area, fp_ctrl_reg),
683 &vcpu->arch.guest_fpregs.fpc, 4, prefix))
686 if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
687 &vcpu->arch.sie_block->todpr, 4, prefix))
690 if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
691 &vcpu->arch.sie_block->cputm, 8, prefix))
694 if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
695 &vcpu->arch.sie_block->ckc, 8, prefix))
/* 16 access registers, 4 bytes each */
698 if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
699 &vcpu->arch.guest_acrs, 64, prefix))
/* 16 control registers, 8 bytes each */
702 if (__guestcopy(vcpu,
703 addr + offsetof(struct save_area, ctrl_regs),
704 &vcpu->arch.sie_block->gcr, 128, prefix))
/*
 * Per-vcpu ioctl dispatcher: interrupt injection, status store,
 * initial PSW/reset, and (ucontrol only) UCAS map/unmap and fault
 * resolution.  Error branches and break statements are elided in this
 * excerpt.
 */
709 long kvm_arch_vcpu_ioctl(struct file *filp,
710 unsigned int ioctl, unsigned long arg)
712 struct kvm_vcpu *vcpu = filp->private_data;
713 void __user *argp = (void __user *)arg;
/* inject an interrupt targeted at this specific vcpu */
717 case KVM_S390_INTERRUPT: {
718 struct kvm_s390_interrupt s390int;
721 if (copy_from_user(&s390int, argp, sizeof(s390int)))
723 r = kvm_s390_inject_vcpu(vcpu, &s390int);
726 case KVM_S390_STORE_STATUS:
727 r = kvm_s390_vcpu_store_status(vcpu, arg);
729 case KVM_S390_SET_INITIAL_PSW: {
733 if (copy_from_user(&psw, argp, sizeof(psw)))
735 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
738 case KVM_S390_INITIAL_RESET:
739 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
741 #ifdef CONFIG_KVM_S390_UCONTROL
/* map a chunk of userspace memory into the ucontrol guest's gmap */
742 case KVM_S390_UCAS_MAP: {
743 struct kvm_s390_ucas_mapping ucasmap;
745 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
/* UCAS operations are only valid for ucontrol guests */
750 if (!kvm_is_ucontrol(vcpu->kvm)) {
755 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
756 ucasmap.vcpu_addr, ucasmap.length);
759 case KVM_S390_UCAS_UNMAP: {
760 struct kvm_s390_ucas_mapping ucasmap;
762 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
767 if (!kvm_is_ucontrol(vcpu->kvm)) {
772 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
/* resolve a guest address fault on behalf of userspace */
777 case KVM_S390_VCPU_FAULT: {
778 r = gmap_fault(arg, vcpu->arch.gmap);
779 if (!IS_ERR_VALUE(r))
/*
 * mmap fault handler for the vcpu fd: ucontrol userspace may map the
 * SIE control block page at KVM_S390_SIE_PAGE_OFFSET; everything else
 * gets SIGBUS.
 */
789 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
791 #ifdef CONFIG_KVM_S390_UCONTROL
792 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
793 && (kvm_is_ucontrol(vcpu->kvm))) {
794 vmf->page = virt_to_page(vcpu->arch.sie_block);
799 return VM_FAULT_SIGBUS;
802 /* Section: memory related */
/*
 * Validate a memory slot before it is committed; see the sanity-check
 * comment inside for the s390-specific constraints.
 */
803 int kvm_arch_prepare_memory_region(struct kvm *kvm,
804 struct kvm_memory_slot *memslot,
805 struct kvm_memory_slot old,
806 struct kvm_userspace_memory_region *mem,
809 /* A few sanity checks. We can have exactly one memory slot which has
810 to start at guest virtual zero and which has to be located at a
811 page boundary in userland and which has to end at a page boundary.
812 The memory in userland is ok to be fragmented into various different
813 vmas. It is okay to mmap() and munmap() stuff in this slot after
814 doing this call at any time */
/* slot must start at guest address 0 */
819 if (mem->guest_phys_addr)
/* 0xfffff: userspace address and size must be 1 MB segment aligned */
822 if (mem->userspace_addr & 0xffffful)
825 if (mem->memory_size & 0xffffful)
/*
 * Commit a validated memory slot by mapping the userspace range into
 * the VM's gmap; failure can only be warned about since this hook
 * returns void.
 */
834 void kvm_arch_commit_memory_region(struct kvm *kvm,
835 struct kvm_userspace_memory_region *mem,
836 struct kvm_memory_slot old,
842 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
843 mem->guest_phys_addr, mem->memory_size);
845 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
/* No shadow page tables to flush on s390 (body elided here). */
849 void kvm_arch_flush_shadow(struct kvm *kvm)
/*
 * Module init: register with common KVM, then build the sanitized
 * facility list advertised to guests.
 */
853 static int __init kvm_s390_init(void)
856 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
861 * guests can ask for up to 255+1 double words, we need a full page
862 * to hold the maximum amount of facilities. On the other hand, we
863 * only set facilities that are known to work in KVM.
/* GFP_DMA: presumably required because the SIE fac pointer is 31-bit */
865 facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
/* copy the host's facility bits, then mask to KVM-supported ones */
870 memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
871 facilities[0] &= 0xff00fff3f47c0000ULL;
872 facilities[1] &= 0x201c000000000000ULL;
/* Module exit: release the facility page (kvm_exit call elided). */
876 static void __exit kvm_s390_exit(void)
878 free_page((unsigned long) facilities);
/* Standard kernel module entry/exit registration. */
882 module_init(kvm_s390_init);
883 module_exit(kvm_s390_exit);