/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include "kvm-s390.h"
#include "gaccess.h"
#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_pei", VCPU_STAT(exit_pei) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sie", VCPU_STAT(instruction_sie) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ "diagnose_258", VCPU_STAT(diagnose_258) },
	{ "diagnose_308", VCPU_STAT(diagnose_308) },
	{ "diagnose_500", VCPU_STAT(diagnose_500) },
	{ NULL }
};
/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[16] = { FACILITIES_KVM };
unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}
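
/*
 * Note: kvm_s390_fac_list_mask is ANDed with the host's STFLE result when a
 * VM is created (see kvm_arch_init_vm() below); the per-VM facility list
 * that is actually presented to guests is then derived from that masked
 * value.
 */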
/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;
/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);
/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm->arch.epoch -= *delta;
		kvm_for_each_vcpu(i, vcpu, kvm) {
			vcpu->arch.sie_block->epoch -= *delta;
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				vcpu->arch.vsie_block->epoch -= *delta;
		}
	}
	return NOTIFY_OK;
}
static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};
int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}
void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}
static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}
static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc;

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}
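
/*
 * Note: setting bit 0x100 in r0 selects the "test bit" form of PERFORM
 * LOCKED OPERATION, which only reports (via the condition code) whether the
 * given function code is installed; kvm_s390_cpu_feat_init() below uses this
 * to probe all 256 PLO function codes.
 */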
static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PPNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);
	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}
int kvm_arch_init(void *opaque)
{
	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		debug_unregister(kvm_s390_dbf);
		return -ENOMEM;
	}

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}
void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}
/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	default:
		r = 0;
	}
	return r;
}
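
/*
 * Userspace sketch (hypothetical VMM code, not part of this file): the
 * values above are what the KVM_CHECK_EXTENSION ioctl reports, e.g.
 *	int max_vcpus = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);
 */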
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (test_and_clear_guest_dirty(gmap->mm, address))
			mark_page_dirty(kvm, cur_gfn);
		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}
431 static void sca_del_vcpu(struct kvm_vcpu *vcpu);
434 * Get (and clear) the dirty memory log for a memory slot.
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
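
/*
 * Userspace sketch (hypothetical, not part of this file): capabilities are
 * enabled on the VM file descriptor, e.g.
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_USER_SIGP };
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */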
static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}
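
/*
 * Note: exit_sie() in the loop above kicks each vcpu out of SIE, so the
 * re-computed crycb settings are picked up on the next SIE entry.
 */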
static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}
static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
	return 0;
}
static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}
static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}
static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc && proc->ibc) {
			/* clamp the requested ibc into the supported range */
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
			 kvm->arch.model.ibc,
			 kvm->arch.model.cpuid);
		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
			 kvm->arch.model.fac_list[0],
			 kvm->arch.model.fac_list[1],
			 kvm->arch.model.fac_list[2]);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}
static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;
	int ret = -EBUSY;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (!atomic_read(&kvm->online_vcpus)) {
		bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
			    KVM_S390_VM_CPU_FEAT_NR_BITS);
		ret = 0;
	}
	mutex_unlock(&kvm->lock);
	return ret;
}
static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once supported by kernel + hw, we have to store the subfunctions
	 * in kvm->arch and remember that user space configured them.
	 */
	return -ENXIO;
}
static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}
static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 kvm->arch.model.fac_list[0],
		 kvm->arch.model.fac_list[1],
		 kvm->arch.model.fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}
static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       sizeof(S390_lowcore.stfle_fac_list));
	VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_mask[0],
		 mach->fac_mask[1],
		 mach->fac_mask[2]);
	VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_list[0],
		 mach->fac_list[1],
		 mach->fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}
static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}
static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat,
		    kvm_s390_available_cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}
static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once we can actually configure subfunctions (kernel + hw support),
	 * we have to check if they were already set by user space, if so copy
	 * them from kvm->arch.
	 */
	return -ENXIO;
}
static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;
	return 0;
}
static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_get_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		ret = kvm_s390_get_machine_subfunc(kvm, attr);
		break;
	}
	return ret;
}
static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = sclp.has_cmma ? 0 : -ENXIO;
			break;
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
			ret = 0;
			break;
		/* configuring subfunctions is not supported yet */
		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_use_skey(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	down_read(&current->mm->mmap_sem);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		r = get_guest_storage_key(current->mm, hva, &keys[i]);
		if (r)
			break;
	}
	up_read(&current->mm->mmap_sem);

	if (!r) {
		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
				 sizeof(uint8_t) * args->count);
		if (r)
			r = -EFAULT;
	}

	kvfree(keys);
	return r;
}
static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	down_read(&current->mm->mmap_sem);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			break;
		}

		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
		if (r)
			break;
	}
	up_read(&current->mm->mmap_sem);
out:
	kvfree(keys);
	return r;
}
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}
static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}
static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}
static u64 kvm_s390_get_initial_cpuid(void)
{
	struct cpuid cpuid;

	get_cpu_id(&cpuid);
	cpuid.version = 0xff;
	return *((u64 *) &cpuid);
}
static void kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return;

	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
}
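
/*
 * Note: the wrapping key masks are freshly randomized for every VM, so a
 * protected key wrapped inside one guest is not usable in any other guest.
 */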
static void sca_dispose(struct kvm *kvm)
{
	if (kvm->arch.use_esca)
		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
	else
		free_page((unsigned long)(kvm->arch.sca));
	kvm->arch.sca = NULL;
}
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	gfp_t alloc_flags = GFP_KERNEL;
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	ratelimit_state_init(&kvm->arch.sthyi_limit, 5 * HZ, 500);

	kvm->arch.use_esca = 0; /* start with basic SCA */
	if (!sclp.has_64bscao)
		alloc_flags |= GFP_DMA;
	rwlock_init(&kvm->arch.sca_lock);
	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset += 16;
	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
		sca_offset = 0;
	kvm->arch.sca = (struct bsca_block *)
			((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);
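	/*
	 * Editor's note (hypothesis from the code, not an authors' comment):
	 * the 16-byte staggering above presumably spreads the SCAs of
	 * different VMs over different cache lines within the page.
	 */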

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	kvm->arch.sie_page2 =
	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.sie_page2)
		goto out_err;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
	       sizeof(S390_lowcore.stfle_fac_list));
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac_mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
	memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	set_kvm_facility(kvm->arch.model.fac_mask, 74);
	set_kvm_facility(kvm->arch.model.fac_list, 74);

	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	kvm_s390_crypto_init(kvm);
	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
	} else {
		if (sclp.hamax == U64_MAX)
			kvm->arch.mem_limit = TASK_MAX_SIZE;
		else
			kvm->arch.mem_limit = min_t(unsigned long, TASK_MAX_SIZE,
						    sclp.hamax + 1);
		kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);
	kvm_s390_vsie_init(kvm);
	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);

	return 0;
out_err:
	free_page((unsigned long)kvm->arch.sie_page2);
	debug_unregister(kvm->arch.dbf);
	sca_dispose(kvm);
	KVM_EVENT(3, "creation of vm failed: %d", rc);
	return rc;
}
bool kvm_arch_has_vcpu_debugfs(void)
{
	return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm))
		sca_del_vcpu(vcpu);

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_remove(vcpu->arch.gmap);

	if (vcpu->kvm->arch.use_cmma)
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	sca_dispose(kvm);
	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)kvm->arch.sie_page2);
	if (!kvm_is_ucontrol(kvm))
		gmap_remove(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
	kvm_s390_vsie_destroy(kvm);
	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}
/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_create(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}
static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
	if (!kvm_s390_use_sca_entries())
		return;
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}
static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
	if (!kvm_s390_use_sca_entries()) {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		/* we still need the basic sca for the ipte control */
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		return;
	}
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
		vcpu->arch.sie_block->ecb2 |= 0x04U;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}
/* Basic SCA to Extended SCA data copy routines */
static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
{
	d->sda = s->sda;
	d->sigp_ctrl.c = s->sigp_ctrl.c;
	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
}

static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
{
	int i;

	d->ipte_control = s->ipte_control;
	d->mcn[0] = s->mcn;
	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
}
static int sca_switch_to_extended(struct kvm *kvm)
{
	struct bsca_block *old_sca = kvm->arch.sca;
	struct esca_block *new_sca;
	struct kvm_vcpu *vcpu;
	unsigned int vcpu_idx;
	u32 scaol, scaoh;

	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
	if (!new_sca)
		return -ENOMEM;

	scaoh = (u32)((u64)(new_sca) >> 32);
	scaol = (u32)(u64)(new_sca) & ~0x3fU;

	kvm_s390_vcpu_block_all(kvm);
	write_lock(&kvm->arch.sca_lock);

	sca_copy_b_to_e(new_sca, old_sca);

	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
		vcpu->arch.sie_block->scaoh = scaoh;
		vcpu->arch.sie_block->scaol = scaol;
		vcpu->arch.sie_block->ecb2 |= 0x04U;
	}
	kvm->arch.sca = new_sca;
	kvm->arch.use_esca = 1;

	write_unlock(&kvm->arch.sca_lock);
	kvm_s390_vcpu_unblock_all(kvm);

	free_page((unsigned long)old_sca);

	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
		 old_sca, kvm->arch.sca);
	return 0;
}
static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
{
	int rc;

	if (!kvm_s390_use_sca_entries()) {
		if (id < KVM_MAX_VCPUS)
			return true;
		return false;
	}
	if (id < KVM_S390_BSCA_CPU_SLOTS)
		return true;
	if (!sclp.has_esca || !sclp.has_64bscao)
		return false;

	mutex_lock(&kvm->lock);
	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
	mutex_unlock(&kvm->lock);

	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
}
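
/*
 * Note: the BSCA->ESCA switch above is deferred until the first vcpu id
 * beyond the basic SCA's slots is requested, so small guests never pay the
 * cost of allocating and migrating to an extended SCA.
 */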
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	kvm_s390_set_prefix(vcpu, 0);
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
	/* fprs can be synchronized via vrs, even if the guest has no vx. With
	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
	 */
	if (MACHINE_HAS_VX)
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
	else
		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}
/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.cputm_start = get_tod_clock_fast();
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}
/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	vcpu->arch.cputm_start = 0;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}
/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
	vcpu->arch.cputm_enabled = true;
	__start_cpu_timer_accounting(vcpu);
}
/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
	__stop_cpu_timer_accounting(vcpu);
	vcpu->arch.cputm_enabled = false;
}
static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__enable_cpu_timer_accounting(vcpu);
	preempt_enable();
}
static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__disable_cpu_timer_accounting(vcpu);
	preempt_enable();
}
/* set the cpu timer - may only be called from the VCPU thread itself */
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	if (vcpu->arch.cputm_enabled)
		vcpu->arch.cputm_start = get_tod_clock_fast();
	vcpu->arch.sie_block->cputm = cputm;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
	preempt_enable();
}
/* update and get the cpu timer - can also be called from other VCPU threads */
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
{
	unsigned int seq;
	__u64 value;

	if (unlikely(!vcpu->arch.cputm_enabled))
		return vcpu->arch.sie_block->cputm;

	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	do {
		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
		/*
		 * If the writer would ever execute a read in the critical
		 * section, e.g. in irq context, we have a deadlock.
		 */
		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
		value = vcpu->arch.sie_block->cputm;
		/* if cputm_start is 0, accounting is being started/stopped */
		if (likely(vcpu->arch.cputm_start))
			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
	preempt_enable();
	return value;
}
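
/*
 * Note: raw_read_seqcount() does not spin on an odd (write in progress)
 * sequence count; clearing the low bit in the retry check above instead
 * forces one more loop iteration whenever a write was in flight.
 */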
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	gmap_enable(vcpu->arch.enabled_gmap);
	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__start_cpu_timer_accounting(vcpu);
	vcpu->cpu = cpu;
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->cpu = -1;
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__stop_cpu_timer_accounting(vcpu);
	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	vcpu->arch.enabled_gmap = gmap_get_enabled();
	gmap_disable(vcpu->arch.enabled_gmap);
}
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	kvm_s390_set_cpu_timer(vcpu, 0);
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	/* make sure the new fpc will be lazily loaded */
	save_fpu_regs();
	current->thread.fpu.fpc = 0;
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	preempt_disable();
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	preempt_enable();
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
		sca_add_vcpu(vcpu);
	}
	if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
	/* make vcpu_load load the right gmap on the first trigger */
	vcpu->arch.enabled_gmap = vcpu->arch.gmap;
}
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}
static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

	vcpu->arch.sie_block->ibc = model->ibc;
	if (test_kvm_facility(vcpu->kvm, 7))
		vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
}
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);

	if (test_kvm_facility(vcpu->kvm, 78))
		atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
	else if (test_kvm_facility(vcpu->kvm, 8))
		atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);

	kvm_s390_vcpu_setup_model(vcpu);

	/* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
	if (MACHINE_HAS_ESOP)
		vcpu->arch.sie_block->ecb |= 0x02;
	if (test_kvm_facility(vcpu->kvm, 9))
		vcpu->arch.sie_block->ecb |= 0x04;
	if (test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= 0x10;

	if (test_kvm_facility(vcpu->kvm, 8) && sclp.has_pfmfi)
		vcpu->arch.sie_block->ecb2 |= 0x08;
	if (test_kvm_facility(vcpu->kvm, 130))
		vcpu->arch.sie_block->ecb2 |= 0x20;
	vcpu->arch.sie_block->eca = 0x1002000U;
	if (sclp.has_cei)
		vcpu->arch.sie_block->eca |= 0x80000000U;
	if (sclp.has_ib)
		vcpu->arch.sie_block->eca |= 0x40000000U;
	if (sclp.has_siif)
		vcpu->arch.sie_block->eca |= 1;
	if (sclp.has_sigpif)
		vcpu->arch.sie_block->eca |= 0x10000000U;
	if (test_kvm_facility(vcpu->kvm, 129)) {
		vcpu->arch.sie_block->eca |= 0x00020000;
		vcpu->arch.sie_block->ecd |= 0x20000000;
	}
	vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	if (vcpu->kvm->arch.use_cmma) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	/* the real guest size will always be smaller than msl */
	vcpu->arch.sie_block->mso = 0;
	vcpu->arch.sie_block->msl = sclp.hamax;

	vcpu->arch.sie_block->icpua = id;
	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	seqcount_init(&vcpu->arch.cputm_seqcount);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}
void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}
/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}
/* Kick a guest cpu out of SIE to process a request synchronously */
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
{
	kvm_make_request(req, vcpu);
	kvm_s390_vcpu_request(vcpu);
}
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end)
{
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;
	unsigned long prefix;
	int i;

	if (gmap_is_shadow(gmap))
		return;
	if (start >= 1UL << 31)
		/* We are only interested in prefix pages */
		return;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		prefix = kvm_s390_get_prefix(vcpu);
		if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
				   start, end);
			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
		}
	}
}
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(kvm_s390_get_cpu_timer(vcpu),
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;
	__u64 val;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(val, (u64 __user *)reg->addr);
		if (!r)
			kvm_s390_set_cpu_timer(vcpu, val);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	vcpu->run->s.regs.fpc = fpu->fpc;
	if (MACHINE_HAS_VX)
		convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
				 (freg_t *) fpu->fprs);
	else
		memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	/* make sure we have the latest values */
	save_fpu_regs();
	if (MACHINE_HAS_VX)
		convert_vx_to_fp((freg_t *) fpu->fprs,
				 (__vector128 *) vcpu->run->s.regs.vrs);
	else
		memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->run->s.regs.fpc;
	return 0;
}
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}
#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;
	if (!sclp.has_gpere)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}
static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}
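/*
 * IBS is a SIE optimization that may only be active while a single VCPU
 * is runnable; kvm_s390_vcpu_start()/stop() further down queue
 * KVM_REQ_ENABLE_IBS/KVM_REQ_DISABLE_IBS, and the request handler below
 * turns them into CPUSTAT_IBS flips right before (re)entering SIE.
 */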
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	kvm_s390_vcpu_request_handled(vcpu);
	if (!vcpu->requests)
		return 0;
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Lets just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_mprotect_notify(vcpu->arch.gmap,
					  kvm_s390_get_prefix(vcpu),
					  PAGE_SIZE * 2, PROT_WRITE);
		if (rc) {
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			return rc;
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_or(CPUSTAT_IBS,
				  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_andnot(CPUSTAT_IBS,
				      &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}
void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
{
	struct kvm_vcpu *vcpu;
	int i;

	mutex_lock(&kvm->lock);
	preempt_disable();
	kvm->arch.epoch = tod - get_tod_clock();
	kvm_s390_vcpu_block_all(kvm);
	kvm_for_each_vcpu(i, vcpu, kvm)
		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
	kvm_s390_vcpu_unblock_all(kvm);
	preempt_enable();
	mutex_unlock(&kvm->lock);
}
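/*
 * Note: the SIE hardware adds the epoch field to the host TOD clock, so
 * storing "tod - get_tod_clock()" above makes every VCPU observe exactly
 * the requested guest TOD value.
 */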
/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}
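/*
 * The pfault helpers below implement cooperative async page faults: a
 * PFAULT_INIT external interrupt tells the guest that a host page is
 * missing (so it can schedule another task instead of blocking), and a
 * PFAULT_DONE interrupt carrying the same token signals completion.
 */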
static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the house keeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}
static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_pgm_info pgm_info = {
		.code = PGM_ADDRESSING,
	};
	u8 opcode, ilen;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
	trace_kvm_s390_sie_fault(vcpu);

	/*
	 * We want to inject an addressing exception, which is defined as a
	 * suppressing or terminating exception. However, since we came here
	 * by a DAT access exception, the PSW still points to the faulting
	 * instruction since DAT exceptions are nullifying. So we've got
	 * to look up the current opcode to get the length of the instruction
	 * to be able to forward the PSW.
	 */
	rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
	ilen = insn_length(opcode);
	if (rc < 0) {
		return rc;
	} else if (rc) {
		/* Instruction-Fetching Exceptions - we can't detect the ilen.
		 * Forward by arbitrary ilc, injection will take care of
		 * nullification if necessary.
		 */
		pgm_info = vcpu->arch.pgm;
		ilen = 4;
	}
	pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
	kvm_s390_forward_psw(vcpu, ilen);
	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;

	if (vcpu->arch.sie_block->icptcode > 0) {
		int rc = kvm_handle_sie_intercept(vcpu);

		if (rc != -EOPNOTSUPP)
			return rc;
		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		return -EREMOTE;
	} else if (exit_reason != -EFAULT) {
		vcpu->stat.exit_null++;
		return 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		return -EREMOTE;
	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu))
			return 0;
		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
	}
	return vcpu_post_run_fault_in_sie(vcpu);
}
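/*
 * Return value convention: 0 lets the loop in __vcpu_run() reenter SIE,
 * -EREMOTE requests an exit to userspace with an already prepared
 * kvm_run, and any other negative value is an error.
 */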
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		local_irq_disable();
		guest_enter_irqoff();
		__disable_cpu_timer_accounting(vcpu);
		local_irq_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		local_irq_disable();
		__enable_cpu_timer_accounting(vcpu);
		guest_exit_irqoff();
		local_irq_enable();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}
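/*
 * sync_regs()/store_regs() shuttle register state between the synced
 * register area of kvm_run (shared with userspace) and the SIE control
 * block plus the lazily switched FPU/vector and access registers.
 */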
static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	/*
	 * If userspace sets the riccb (e.g. after migration) to a valid state,
	 * we should enable RI here instead of doing the lazy enablement.
	 */
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
	    test_kvm_facility(vcpu->kvm, 64)) {
		struct runtime_instr_cb *riccb =
			(struct runtime_instr_cb *) &kvm_run->s.regs.riccb;

		if (riccb->valid)
			vcpu->arch.sie_block->ecb3 |= 0x01;
	}
	save_access_regs(vcpu->arch.host_acrs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	/* save host (userspace) fprs/vrs */
	save_fpu_regs();
	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
	if (MACHINE_HAS_VX)
		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
	else
		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
	if (test_fp_ctl(current->thread.fpu.fpc))
		/* User space provided an invalid FPC, let's clear it */
		current->thread.fpu.fpc = 0;

	kvm_run->kvm_dirty_regs = 0;
}
static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_access_regs(vcpu->arch.host_acrs);
	/* Save guest register state */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	/* Restore will be done lazily at return */
	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		return 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		return -EINVAL;
	}

	sync_regs(vcpu, kvm_run);
	enable_cpu_timer_accounting(vcpu);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* userspace support is needed, kvm_run has been prepared */
		rc = 0;
	}

	disable_cpu_timer_accounting(vcpu);
	store_regs(vcpu, kvm_run);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
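/*
 * From userspace, the run loop above is typically driven like this
 * (hypothetical sketch; mmap_size comes from KVM_GET_VCPU_MMAP_SIZE):
 *
 *	struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *	while (ioctl(vcpu_fd, KVM_RUN, 0) == 0) {
 *		if (run->exit_reason == KVM_EXIT_S390_SIEIC)
 *			break;	// hand the intercept to the VMM
 *	}
 */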
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	freg_t fprs[NUM_FPRS];
	unsigned int px;
	u64 clkcomp, cputm;
	int rc;

	px = kvm_s390_get_prefix(vcpu);
	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = 0;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = px;
	} else
		gpa -= __LC_FPREGS_SAVE_AREA;

	/* manually convert vector registers if necessary */
	if (MACHINE_HAS_VX) {
		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     fprs, 128);
	} else {
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     vcpu->run->s.regs.fprs, 128);
	}
	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
			      &vcpu->arch.sie_block->gpsw, 16);
	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
			      &px, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
			      &vcpu->run->s.regs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
			      &vcpu->arch.sie_block->todpr, 4);
	cputm = kvm_s390_get_cpu_timer(vcpu);
	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
			      &cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * switch in the run ioctl. Let's update our copies before we save
	 * it into the save area
	 */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}
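/*
 * start/stop below keep track of how many VCPUs are runnable so that
 * IBS is active exactly while a single VCPU runs: starting a second
 * VCPU disables it everywhere, and stopping the second-to-last one
 * re-enables it on the survivor.
 */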
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *tmpbuf = NULL;
	int r, srcu_idx;
	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
				    | KVM_S390_MEMOP_F_CHECK_ONLY;

	if (mop->flags & ~supported_flags)
		return -EINVAL;

	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;

	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_FETCH);
			break;
		}
		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		if (r == 0) {
			if (copy_to_user(uaddr, tmpbuf, mop->size))
				r = -EFAULT;
		}
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_STORE);
			break;
		}
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			break;
		}
		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);

	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	vfree(tmpbuf);
	return r;
}
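/*
 * A (hypothetical) userspace sketch driving the handler above through
 * the KVM_S390_MEM_OP vcpu ioctl to read guest memory:
 *
 *	char buf[256];
 *	struct kvm_s390_mem_op op = {
 *		.gaddr = guest_addr,
 *		.size  = sizeof(buf),
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf   = (__u64)buf,
 *		.ar    = 0,
 *	};
 *	ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
 */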
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}
/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. We can have memory slots which have to be
	   located/ended at a segment boundary (1MB). The memory in userland is
	   ok to be fragmented into various different vmas. It is okay to mmap()
	   and munmap() stuff in this slot after doing this call at any time */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
		return -EINVAL;

	return 0;
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		pr_warn("failed to commit memory region\n");
	return;
}
static inline unsigned long nonhyp_mask(int i)
{
	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;

	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
}
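/*
 * Worked example: the two hmfai bits selected for facility word i decide
 * how many high facility bits get masked off. If they decode to 1, the
 * result is 0x0000ffffffffffffUL >> 16 == 0x00000000ffffffffUL, so only
 * the low 32 bits of that facility word can pass the mask.
 */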
void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
{
	vcpu->valid_wakeup = false;
}
static int __init kvm_s390_init(void)
{
	int i;

	if (!sclp.has_sief2) {
		pr_info("SIE not available\n");
		return -ENODEV;
	}

	for (i = 0; i < 16; i++)
		kvm_s390_fac_list_mask[i] |=
			S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);

	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}
static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}
module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");