/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 *            Heiko Carstens <heiko.carstens@de.ibm.com>
 *            Christian Ehrhardt <ehrhardt@de.ibm.com>
 */
16 #include <linux/compiler.h>
17 #include <linux/err.h>
19 #include <linux/hrtimer.h>
20 #include <linux/init.h>
21 #include <linux/kvm.h>
22 #include <linux/kvm_host.h>
23 #include <linux/module.h>
24 #include <linux/slab.h>
25 #include <linux/timer.h>
26 #include <asm/asm-offsets.h>
27 #include <asm/lowcore.h>
28 #include <asm/pgtable.h>
30 #include <asm/switch_to.h>
35 #define CREATE_TRACE_POINTS
37 #include "trace-s390.h"
39 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
41 struct kvm_stats_debugfs_item debugfs_entries[] = {
42 { "userspace_handled", VCPU_STAT(exit_userspace) },
43 { "exit_null", VCPU_STAT(exit_null) },
44 { "exit_validity", VCPU_STAT(exit_validity) },
45 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
46 { "exit_external_request", VCPU_STAT(exit_external_request) },
47 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
48 { "exit_instruction", VCPU_STAT(exit_instruction) },
49 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
50 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
51 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
52 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
53 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
54 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
55 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
56 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
57 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
58 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
59 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
60 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
61 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
62 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
63 { "instruction_spx", VCPU_STAT(instruction_spx) },
64 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
65 { "instruction_stap", VCPU_STAT(instruction_stap) },
66 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
67 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
68 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
69 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
70 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
71 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
72 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
73 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
74 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
75 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
76 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
77 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
78 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
79 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
80 { "diagnose_10", VCPU_STAT(diagnose_10) },
81 { "diagnose_44", VCPU_STAT(diagnose_44) },
82 { "diagnose_9c", VCPU_STAT(diagnose_9c) },
86 static unsigned long long *facilities;
88 /* Section: not file related */
89 int kvm_arch_hardware_enable(void *garbage)
91 /* every s390 is virtualization enabled ;-) */
95 void kvm_arch_hardware_disable(void *garbage)
99 int kvm_arch_hardware_setup(void)
104 void kvm_arch_hardware_unsetup(void)
108 void kvm_arch_check_processor_compat(void *rtn)
112 int kvm_arch_init(void *opaque)
117 void kvm_arch_exit(void)
121 /* Section: device related */
122 long kvm_arch_dev_ioctl(struct file *filp,
123 unsigned int ioctl, unsigned long arg)
125 if (ioctl == KVM_S390_ENABLE_SIE)
126 return s390_enable_sie();
130 int kvm_dev_ioctl_check_extension(long ext)
135 case KVM_CAP_S390_PSW:
136 case KVM_CAP_S390_GMAP:
137 case KVM_CAP_SYNC_MMU:
138 #ifdef CONFIG_KVM_S390_UCONTROL
139 case KVM_CAP_S390_UCONTROL:
141 case KVM_CAP_SYNC_REGS:
142 case KVM_CAP_ONE_REG:
145 case KVM_CAP_NR_VCPUS:
146 case KVM_CAP_MAX_VCPUS:
149 case KVM_CAP_S390_COW:
150 r = sclp_get_fac85() & 0x2;
158 /* Section: vm related */
160 * Get (and clear) the dirty memory log for a memory slot.
162 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
163 struct kvm_dirty_log *log)
168 long kvm_arch_vm_ioctl(struct file *filp,
169 unsigned int ioctl, unsigned long arg)
171 struct kvm *kvm = filp->private_data;
172 void __user *argp = (void __user *)arg;
176 case KVM_S390_INTERRUPT: {
177 struct kvm_s390_interrupt s390int;
180 if (copy_from_user(&s390int, argp, sizeof(s390int)))
182 r = kvm_s390_inject_vm(kvm, &s390int);
192 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
198 #ifdef CONFIG_KVM_S390_UCONTROL
199 if (type & ~KVM_VM_S390_UCONTROL)
201 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
208 rc = s390_enable_sie();
214 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
218 sprintf(debug_name, "kvm-%u", current->pid);
220 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
224 spin_lock_init(&kvm->arch.float_int.lock);
225 INIT_LIST_HEAD(&kvm->arch.float_int.list);
227 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
228 VM_EVENT(kvm, 3, "%s", "vm created");
230 if (type & KVM_VM_S390_UCONTROL) {
231 kvm->arch.gmap = NULL;
233 kvm->arch.gmap = gmap_alloc(current->mm);
239 debug_unregister(kvm->arch.dbf);
241 free_page((unsigned long)(kvm->arch.sca));
246 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
248 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
249 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
250 if (!kvm_is_ucontrol(vcpu->kvm)) {
251 clear_bit(63 - vcpu->vcpu_id,
252 (unsigned long *) &vcpu->kvm->arch.sca->mcn);
253 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
254 (__u64) vcpu->arch.sie_block)
255 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
259 if (kvm_is_ucontrol(vcpu->kvm))
260 gmap_free(vcpu->arch.gmap);
262 free_page((unsigned long)(vcpu->arch.sie_block));
263 kvm_vcpu_uninit(vcpu);
267 static void kvm_free_vcpus(struct kvm *kvm)
270 struct kvm_vcpu *vcpu;
272 kvm_for_each_vcpu(i, vcpu, kvm)
273 kvm_arch_vcpu_destroy(vcpu);
275 mutex_lock(&kvm->lock);
276 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
277 kvm->vcpus[i] = NULL;
279 atomic_set(&kvm->online_vcpus, 0);
280 mutex_unlock(&kvm->lock);
283 void kvm_arch_sync_events(struct kvm *kvm)
287 void kvm_arch_destroy_vm(struct kvm *kvm)
290 free_page((unsigned long)(kvm->arch.sca));
291 debug_unregister(kvm->arch.dbf);
292 if (!kvm_is_ucontrol(kvm))
293 gmap_free(kvm->arch.gmap);
296 /* Section: vcpu related */
297 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
299 if (kvm_is_ucontrol(vcpu->kvm)) {
300 vcpu->arch.gmap = gmap_alloc(current->mm);
301 if (!vcpu->arch.gmap)
306 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
307 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
314 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
319 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
321 save_fp_regs(&vcpu->arch.host_fpregs);
322 save_access_regs(vcpu->arch.host_acrs);
323 vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
324 restore_fp_regs(&vcpu->arch.guest_fpregs);
325 restore_access_regs(vcpu->run->s.regs.acrs);
326 gmap_enable(vcpu->arch.gmap);
327 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
330 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
332 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
333 gmap_disable(vcpu->arch.gmap);
334 save_fp_regs(&vcpu->arch.guest_fpregs);
335 save_access_regs(vcpu->run->s.regs.acrs);
336 restore_fp_regs(&vcpu->arch.host_fpregs);
337 restore_access_regs(vcpu->arch.host_acrs);
340 static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
342 /* this equals initial cpu reset in pop, but we don't switch to ESA */
343 vcpu->arch.sie_block->gpsw.mask = 0UL;
344 vcpu->arch.sie_block->gpsw.addr = 0UL;
345 kvm_s390_set_prefix(vcpu, 0);
346 vcpu->arch.sie_block->cputm = 0UL;
347 vcpu->arch.sie_block->ckc = 0UL;
348 vcpu->arch.sie_block->todpr = 0;
349 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
350 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
351 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
352 vcpu->arch.guest_fpregs.fpc = 0;
353 asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
354 vcpu->arch.sie_block->gbea = 1;
355 atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
358 int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
363 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
365 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
368 vcpu->arch.sie_block->ecb = 6;
369 vcpu->arch.sie_block->eca = 0xC1002001U;
370 vcpu->arch.sie_block->fac = (int) (long) facilities;
371 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
372 tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
373 (unsigned long) vcpu);
374 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
375 get_cpu_id(&vcpu->arch.cpu_id);
376 vcpu->arch.cpu_id.version = 0xff;
380 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
383 struct kvm_vcpu *vcpu;
386 if (id >= KVM_MAX_VCPUS)
391 vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
395 vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
396 get_zeroed_page(GFP_KERNEL);
398 if (!vcpu->arch.sie_block)
401 vcpu->arch.sie_block->icpua = id;
402 if (!kvm_is_ucontrol(kvm)) {
403 if (!kvm->arch.sca) {
407 if (!kvm->arch.sca->cpu[id].sda)
408 kvm->arch.sca->cpu[id].sda =
409 (__u64) vcpu->arch.sie_block;
410 vcpu->arch.sie_block->scaoh =
411 (__u32)(((__u64)kvm->arch.sca) >> 32);
412 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
413 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
416 spin_lock_init(&vcpu->arch.local_int.lock);
417 INIT_LIST_HEAD(&vcpu->arch.local_int.list);
418 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
419 spin_lock(&kvm->arch.float_int.lock);
420 kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
421 init_waitqueue_head(&vcpu->arch.local_int.wq);
422 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
423 spin_unlock(&kvm->arch.float_int.lock);
425 rc = kvm_vcpu_init(vcpu, kvm, id);
427 goto out_free_sie_block;
428 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
429 vcpu->arch.sie_block);
430 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
434 free_page((unsigned long)(vcpu->arch.sie_block));
441 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
443 /* kvm common code refers to this, but never calls it */
448 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
450 /* kvm common code refers to this, but never calls it */
455 static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
456 struct kvm_one_reg *reg)
461 case KVM_REG_S390_TODPR:
462 r = put_user(vcpu->arch.sie_block->todpr,
463 (u32 __user *)reg->addr);
465 case KVM_REG_S390_EPOCHDIFF:
466 r = put_user(vcpu->arch.sie_block->epoch,
467 (u64 __user *)reg->addr);
469 case KVM_REG_S390_CPU_TIMER:
470 r = put_user(vcpu->arch.sie_block->cputm,
471 (u64 __user *)reg->addr);
473 case KVM_REG_S390_CLOCK_COMP:
474 r = put_user(vcpu->arch.sie_block->ckc,
475 (u64 __user *)reg->addr);
484 static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
485 struct kvm_one_reg *reg)
490 case KVM_REG_S390_TODPR:
491 r = get_user(vcpu->arch.sie_block->todpr,
492 (u32 __user *)reg->addr);
494 case KVM_REG_S390_EPOCHDIFF:
495 r = get_user(vcpu->arch.sie_block->epoch,
496 (u64 __user *)reg->addr);
498 case KVM_REG_S390_CPU_TIMER:
499 r = get_user(vcpu->arch.sie_block->cputm,
500 (u64 __user *)reg->addr);
502 case KVM_REG_S390_CLOCK_COMP:
503 r = get_user(vcpu->arch.sie_block->ckc,
504 (u64 __user *)reg->addr);
513 static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
515 kvm_s390_vcpu_initial_reset(vcpu);
519 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
521 memcpy(&vcpu->run->s.regs.gprs, ®s->gprs, sizeof(regs->gprs));
525 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
527 memcpy(®s->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
531 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
532 struct kvm_sregs *sregs)
534 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
535 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
536 restore_access_regs(vcpu->run->s.regs.acrs);
540 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
541 struct kvm_sregs *sregs)
543 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
544 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
548 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
550 memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
551 vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
552 restore_fp_regs(&vcpu->arch.guest_fpregs);
556 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
558 memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
559 fpu->fpc = vcpu->arch.guest_fpregs.fpc;
563 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
567 if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
570 vcpu->run->psw_mask = psw.mask;
571 vcpu->run->psw_addr = psw.addr;
576 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
577 struct kvm_translation *tr)
579 return -EINVAL; /* not implemented yet */
582 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
583 struct kvm_guest_debug *dbg)
585 return -EINVAL; /* not implemented yet */
588 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
589 struct kvm_mp_state *mp_state)
591 return -EINVAL; /* not implemented yet */
594 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
595 struct kvm_mp_state *mp_state)
597 return -EINVAL; /* not implemented yet */
600 static int __vcpu_run(struct kvm_vcpu *vcpu)
604 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
609 if (test_thread_flag(TIF_MCCK_PENDING))
612 if (!kvm_is_ucontrol(vcpu->kvm))
613 kvm_s390_deliver_pending_interrupts(vcpu);
615 vcpu->arch.sie_block->icptcode = 0;
617 VCPU_EVENT(vcpu, 6, "entering sie flags %x",
618 atomic_read(&vcpu->arch.sie_block->cpuflags));
619 trace_kvm_s390_sie_enter(vcpu,
620 atomic_read(&vcpu->arch.sie_block->cpuflags));
621 rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
623 if (kvm_is_ucontrol(vcpu->kvm)) {
624 rc = SIE_INTERCEPT_UCONTROL;
626 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
627 trace_kvm_s390_sie_fault(vcpu);
628 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
632 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
633 vcpu->arch.sie_block->icptcode);
634 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
637 memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
641 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
647 if (vcpu->sigset_active)
648 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
650 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
652 BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
654 switch (kvm_run->exit_reason) {
655 case KVM_EXIT_S390_SIEIC:
656 case KVM_EXIT_UNKNOWN:
658 case KVM_EXIT_S390_RESET:
659 case KVM_EXIT_S390_UCONTROL:
665 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
666 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
667 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
668 kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
669 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
671 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
672 kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
673 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
674 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
680 rc = __vcpu_run(vcpu);
683 if (kvm_is_ucontrol(vcpu->kvm))
686 rc = kvm_handle_sie_intercept(vcpu);
687 } while (!signal_pending(current) && !rc);
689 if (rc == SIE_INTERCEPT_RERUNVCPU)
692 if (signal_pending(current) && !rc) {
693 kvm_run->exit_reason = KVM_EXIT_INTR;
697 #ifdef CONFIG_KVM_S390_UCONTROL
698 if (rc == SIE_INTERCEPT_UCONTROL) {
699 kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
700 kvm_run->s390_ucontrol.trans_exc_code =
701 current->thread.gmap_addr;
702 kvm_run->s390_ucontrol.pgm_code = 0x10;
707 if (rc == -EOPNOTSUPP) {
708 /* intercept cannot be handled in-kernel, prepare kvm-run */
709 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
710 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
711 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
712 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
716 if (rc == -EREMOTE) {
717 /* intercept was handled, but userspace support is needed
718 * kvm_run has been prepared by the handler */
722 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
723 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
724 kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
725 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
727 if (vcpu->sigset_active)
728 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
730 vcpu->stat.exit_userspace++;
734 static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
735 unsigned long n, int prefix)
738 return copy_to_guest(vcpu, guestdest, from, n);
740 return copy_to_guest_absolute(vcpu, guestdest, from, n);
744 * store status at address
745 * we use have two special cases:
746 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
747 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
749 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
751 unsigned char archmode = 1;
754 if (addr == KVM_S390_STORE_STATUS_NOADDR) {
755 if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
757 addr = SAVE_AREA_BASE;
759 } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
760 if (copy_to_guest(vcpu, 163ul, &archmode, 1))
762 addr = SAVE_AREA_BASE;
767 if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
768 vcpu->arch.guest_fpregs.fprs, 128, prefix))
771 if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
772 vcpu->run->s.regs.gprs, 128, prefix))
775 if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
776 &vcpu->arch.sie_block->gpsw, 16, prefix))
779 if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
780 &vcpu->arch.sie_block->prefix, 4, prefix))
783 if (__guestcopy(vcpu,
784 addr + offsetof(struct save_area, fp_ctrl_reg),
785 &vcpu->arch.guest_fpregs.fpc, 4, prefix))
788 if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
789 &vcpu->arch.sie_block->todpr, 4, prefix))
792 if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
793 &vcpu->arch.sie_block->cputm, 8, prefix))
796 if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
797 &vcpu->arch.sie_block->ckc, 8, prefix))
800 if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
801 &vcpu->run->s.regs.acrs, 64, prefix))
804 if (__guestcopy(vcpu,
805 addr + offsetof(struct save_area, ctrl_regs),
806 &vcpu->arch.sie_block->gcr, 128, prefix))
811 long kvm_arch_vcpu_ioctl(struct file *filp,
812 unsigned int ioctl, unsigned long arg)
814 struct kvm_vcpu *vcpu = filp->private_data;
815 void __user *argp = (void __user *)arg;
819 case KVM_S390_INTERRUPT: {
820 struct kvm_s390_interrupt s390int;
823 if (copy_from_user(&s390int, argp, sizeof(s390int)))
825 r = kvm_s390_inject_vcpu(vcpu, &s390int);
828 case KVM_S390_STORE_STATUS:
829 r = kvm_s390_vcpu_store_status(vcpu, arg);
831 case KVM_S390_SET_INITIAL_PSW: {
835 if (copy_from_user(&psw, argp, sizeof(psw)))
837 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
840 case KVM_S390_INITIAL_RESET:
841 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
843 case KVM_SET_ONE_REG:
844 case KVM_GET_ONE_REG: {
845 struct kvm_one_reg reg;
847 if (copy_from_user(®, argp, sizeof(reg)))
849 if (ioctl == KVM_SET_ONE_REG)
850 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, ®);
852 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, ®);
855 #ifdef CONFIG_KVM_S390_UCONTROL
856 case KVM_S390_UCAS_MAP: {
857 struct kvm_s390_ucas_mapping ucasmap;
859 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
864 if (!kvm_is_ucontrol(vcpu->kvm)) {
869 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
870 ucasmap.vcpu_addr, ucasmap.length);
873 case KVM_S390_UCAS_UNMAP: {
874 struct kvm_s390_ucas_mapping ucasmap;
876 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
881 if (!kvm_is_ucontrol(vcpu->kvm)) {
886 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
891 case KVM_S390_VCPU_FAULT: {
892 r = gmap_fault(arg, vcpu->arch.gmap);
893 if (!IS_ERR_VALUE(r))
903 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
905 #ifdef CONFIG_KVM_S390_UCONTROL
906 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
907 && (kvm_is_ucontrol(vcpu->kvm))) {
908 vmf->page = virt_to_page(vcpu->arch.sie_block);
913 return VM_FAULT_SIGBUS;
916 void kvm_arch_free_memslot(struct kvm_memory_slot *free,
917 struct kvm_memory_slot *dont)
921 int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
926 /* Section: memory related */
927 int kvm_arch_prepare_memory_region(struct kvm *kvm,
928 struct kvm_memory_slot *memslot,
929 struct kvm_memory_slot old,
930 struct kvm_userspace_memory_region *mem,
933 /* A few sanity checks. We can have exactly one memory slot which has
934 to start at guest virtual zero and which has to be located at a
935 page boundary in userland and which has to end at a page boundary.
936 The memory in userland is ok to be fragmented into various different
937 vmas. It is okay to mmap() and munmap() stuff in this slot after
938 doing this call at any time */
943 if (mem->guest_phys_addr)
946 if (mem->userspace_addr & 0xffffful)
949 if (mem->memory_size & 0xffffful)
958 void kvm_arch_commit_memory_region(struct kvm *kvm,
959 struct kvm_userspace_memory_region *mem,
960 struct kvm_memory_slot old,
966 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
967 mem->guest_phys_addr, mem->memory_size);
969 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
973 void kvm_arch_flush_shadow_all(struct kvm *kvm)
977 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
978 struct kvm_memory_slot *slot)
982 static int __init kvm_s390_init(void)
985 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
990 * guests can ask for up to 255+1 double words, we need a full page
991 * to hold the maximum amount of facilities. On the other hand, we
992 * only set facilities that are known to work in KVM.
994 facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
999 memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
1000 facilities[0] &= 0xff00fff3f47c0000ULL;
1001 facilities[1] &= 0x001c000000000000ULL;
1005 static void __exit kvm_s390_exit(void)
1007 free_page((unsigned long) facilities);
1011 module_init(kvm_s390_init);
1012 module_exit(kvm_s390_exit);