s390/kvm: Fix store status for ACRS/FPRS
arch/s390/kvm/kvm-s390.c
/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_external_call", VCPU_STAT(deliver_external_call) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_tprot", VCPU_STAT(instruction_tprot) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
        { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_10", VCPU_STAT(diagnose_10) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { "diagnose_9c", VCPU_STAT(diagnose_9c) },
        { NULL }
};

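/*
 * Facility list reported to guests; each vcpu's SIE control block points
 * at this page. It is filled and masked in kvm_s390_init() below.
 */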
static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

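/*
 * Report which optional KVM capabilities this architecture supports.
 * Userspace probes these via the KVM_CHECK_EXTENSION ioctl.
 */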
int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
        case KVM_CAP_S390_GMAP:
        case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_CAP_S390_UCONTROL:
#endif
        case KVM_CAP_SYNC_REGS:
        case KVM_CAP_ONE_REG:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_S390_CSS_SUPPORT:
                r = 1;
                break;
        case KVM_CAP_NR_VCPUS:
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
        case KVM_CAP_S390_COW:
                r = sclp_get_fac85() & 0x2;
                break;
        default:
                r = 0;
        }
        return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}

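/*
 * Set up the arch specific parts of a VM: the system control area (SCA),
 * the s390 debug feature, the floating interrupt list and, unless this is
 * a user controlled VM, the guest address space (gmap).
 */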
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        int rc;
        char debug_name[16];

        rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
        if (type & ~KVM_VM_S390_UCONTROL)
                goto out_err;
        if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
                goto out_err;
#else
        if (type)
                goto out_err;
#endif

        rc = s390_enable_sie();
        if (rc)
                goto out_err;

        rc = -ENOMEM;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_err;

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        if (type & KVM_VM_S390_UCONTROL) {
                kvm->arch.gmap = NULL;
        } else {
                kvm->arch.gmap = gmap_alloc(current->mm);
                if (!kvm->arch.gmap)
                        goto out_nogmap;
        }

        kvm->arch.css_support = 0;

        return 0;
out_nogmap:
        debug_unregister(kvm->arch.dbf);
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_err:
        return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
        if (!kvm_is_ucontrol(vcpu->kvm)) {
                clear_bit(63 - vcpu->vcpu_id,
                          (unsigned long *) &vcpu->kvm->arch.sca->mcn);
                if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
                    (__u64) vcpu->arch.sie_block)
                        vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        }
        smp_mb();

        if (kvm_is_ucontrol(vcpu->kvm))
                gmap_free(vcpu->arch.gmap);

        free_page((unsigned long)(vcpu->arch.sie_block));
        kvm_vcpu_uninit(vcpu);
        kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        if (!kvm_is_ucontrol(kvm))
                gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        if (kvm_is_ucontrol(vcpu->kvm)) {
                vcpu->arch.gmap = gmap_alloc(current->mm);
                if (!vcpu->arch.gmap)
                        return -ENOMEM;
                return 0;
        }

        vcpu->arch.gmap = vcpu->kvm->arch.gmap;
        vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
                                    KVM_SYNC_GPRS |
                                    KVM_SYNC_ACRS |
                                    KVM_SYNC_CRS;
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* Nothing to do */
}

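/*
 * Registers are switched lazily: on load we save the host FPRS/ACRS and
 * install the guest copies; vcpu_put does the reverse. Anything that needs
 * the current guest values while the vcpu is loaded (e.g. store status)
 * must save them back first.
 */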
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_regs(&vcpu->arch.host_fpregs);
        save_access_regs(vcpu->arch.host_acrs);
        vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        restore_access_regs(vcpu->run->s.regs.acrs);
        gmap_enable(vcpu->arch.gmap);
        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->run->s.regs.acrs);
        restore_fp_regs(&vcpu->arch.host_fpregs);
        restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in POP, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        kvm_s390_set_prefix(vcpu, 0);
        vcpu->arch.sie_block->cputm     = 0UL;
        vcpu->arch.sie_block->ckc       = 0UL;
        vcpu->arch.sie_block->todpr     = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
        atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        return 0;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
                                                    CPUSTAT_SM |
                                                    CPUSTAT_STOPPED);
        vcpu->arch.sie_block->ecb   = 6;
        vcpu->arch.sie_block->eca   = 0xC1002001U;
        vcpu->arch.sie_block->fac   = (int) (long) facilities;
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
                     (unsigned long) vcpu);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
        return 0;
}

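/*
 * Allocate a vcpu and its SIE control block and wire it into the SCA and
 * the floating interrupt machinery of the VM.
 */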
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu;
        int rc = -EINVAL;

        if (id >= KVM_MAX_VCPUS)
                goto out;

        rc = -ENOMEM;

        vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
        if (!vcpu)
                goto out;

        vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
                                        get_zeroed_page(GFP_KERNEL);

        if (!vcpu->arch.sie_block)
                goto out_free_cpu;

        vcpu->arch.sie_block->icpua = id;
        if (!kvm_is_ucontrol(kvm)) {
                if (!kvm->arch.sca) {
                        WARN_ON_ONCE(1);
                        goto out_free_cpu;
                }
                if (!kvm->arch.sca->cpu[id].sda)
                        kvm->arch.sca->cpu[id].sda =
                                (__u64) vcpu->arch.sie_block;
                vcpu->arch.sie_block->scaoh =
                        (__u32)(((__u64)kvm->arch.sca) >> 32);
                vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
                set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
        }

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        spin_lock(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        init_waitqueue_head(&vcpu->arch.local_int.wq);
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
        spin_unlock(&kvm->arch.float_int.lock);

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);
        trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

        return vcpu;
out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kfree(vcpu);
out:
        return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

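/*
 * KVM_GET_ONE_REG/KVM_SET_ONE_REG accessors for registers that live in
 * the SIE control block: TOD programmable register, epoch difference,
 * CPU timer and clock comparator.
 */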
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
                                           struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_S390_TODPR:
                r = put_user(vcpu->arch.sie_block->todpr,
                             (u32 __user *)reg->addr);
                break;
        case KVM_REG_S390_EPOCHDIFF:
                r = put_user(vcpu->arch.sie_block->epoch,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
                r = put_user(vcpu->arch.sie_block->cputm,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CLOCK_COMP:
                r = put_user(vcpu->arch.sie_block->ckc,
                             (u64 __user *)reg->addr);
                break;
        default:
                break;
        }

        return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
                                           struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_S390_TODPR:
                r = get_user(vcpu->arch.sie_block->todpr,
                             (u32 __user *)reg->addr);
                break;
        case KVM_REG_S390_EPOCHDIFF:
                r = get_user(vcpu->arch.sie_block->epoch,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
                r = get_user(vcpu->arch.sie_block->cputm,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CLOCK_COMP:
                r = get_user(vcpu->arch.sie_block->ckc,
                             (u64 __user *)reg->addr);
                break;
        default:
                break;
        }

        return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        kvm_s390_vcpu_initial_reset(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        restore_access_regs(vcpu->run->s.regs.acrs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
                rc = -EBUSY;
        else {
                vcpu->run->psw_mask = psw.mask;
                vcpu->run->psw_addr = psw.addr;
        }
        return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

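/*
 * One round trip into SIE: deliver pending interrupts, run the guest until
 * it intercepts, and mirror gprs 14/15 between kvm_run and the gg14/gg15
 * fields of the SIE control block around the trip.
 */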
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
        int rc;

        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_thread_flag(TIF_MCCK_PENDING))
                s390_handle_mcck();

        if (!kvm_is_ucontrol(vcpu->kvm))
                kvm_s390_deliver_pending_interrupts(vcpu);

        vcpu->arch.sie_block->icptcode = 0;
        kvm_guest_enter();
        VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                   atomic_read(&vcpu->arch.sie_block->cpuflags));
        trace_kvm_s390_sie_enter(vcpu,
                                 atomic_read(&vcpu->arch.sie_block->cpuflags));
        rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
        if (rc) {
                if (kvm_is_ucontrol(vcpu->kvm)) {
                        rc = SIE_INTERCEPT_UCONTROL;
                } else {
                        VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                        trace_kvm_s390_sie_fault(vcpu);
                        kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                        rc = 0;
                }
        }
        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
        kvm_guest_exit();

        memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
        return rc;
}

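/*
 * The main run loop: sync the register state handed in by userspace into
 * the SIE control block, run and handle intercepts until a signal arrives
 * or userspace intervention is required, then sync the state back and
 * fill in the exit reason.
 */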
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

rerun_vcpu:
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_INTR:
        case KVM_EXIT_S390_RESET:
        case KVM_EXIT_S390_UCONTROL:
        case KVM_EXIT_S390_TSCH:
                break;
        default:
                BUG();
        }

        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
                kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
        }
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
                kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
                memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
        }

        might_fault();

        do {
                rc = __vcpu_run(vcpu);
                if (rc)
                        break;
                if (kvm_is_ucontrol(vcpu->kvm))
                        rc = -EOPNOTSUPP;
                else
                        rc = kvm_handle_sie_intercept(vcpu);
        } while (!signal_pending(current) && !rc);

        if (rc == SIE_INTERCEPT_RERUNVCPU)
                goto rerun_vcpu;

        if (signal_pending(current) && !rc) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                rc = -EINTR;
        }

#ifdef CONFIG_KVM_S390_UCONTROL
        if (rc == SIE_INTERCEPT_UCONTROL) {
                kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
                kvm_run->s390_ucontrol.trans_exc_code =
                        current->thread.gmap_addr;
                kvm_run->s390_ucontrol.pgm_code = 0x10;
                rc = 0;
        }
#endif

        if (rc == -EOPNOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /* intercept was handled, but userspace support is needed;
                 * kvm_run has been prepared by the handler */
                rc = 0;
        }

        kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr;
        kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
        memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu->stat.exit_userspace++;
        return rc;
}

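/*
 * Copy to guest memory either through the prefix-relative (real) address
 * space or the absolute address space, depending on how the save area
 * address was specified.
 */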
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
                       unsigned long n, int prefix)
{
        if (prefix)
                return copy_to_guest(vcpu, guestdest, from, n);
        else
                return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        unsigned char archmode = 1;
        int prefix;

        if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 0;
        } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 1;
        } else
                prefix = 0;

        /*
         * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
         * copying in vcpu load/put. Let's update our copies before we save
         * them into the save area.
         */
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->run->s.regs.acrs);

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
                        vcpu->run->s.regs.gprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
                        &vcpu->arch.sie_block->ckc, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
                        &vcpu->run->s.regs.acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
}

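/*
 * Enable an optional capability for this VM. Only KVM_CAP_S390_CSS_SUPPORT
 * (channel subsystem instructions handled in userspace) is supported so far.
 */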
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_S390_CSS_SUPPORT:
                if (!vcpu->kvm->arch.css_support) {
                        vcpu->kvm->arch.css_support = 1;
                        trace_kvm_s390_enable_css(vcpu->kvm);
                }
                r = 0;
                break;
        default:
                r = -EINVAL;
                break;
        }
        return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vcpu(vcpu, &s390int);
                break;
        }
        case KVM_S390_STORE_STATUS:
                r = kvm_s390_vcpu_store_status(vcpu, arg);
                break;
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                r = -EFAULT;
                if (copy_from_user(&psw, argp, sizeof(psw)))
                        break;
                r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
                break;
        }
        case KVM_S390_INITIAL_RESET:
                r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
                break;
        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG: {
                struct kvm_one_reg reg;
                r = -EFAULT;
                if (copy_from_user(&reg, argp, sizeof(reg)))
                        break;
                if (ioctl == KVM_SET_ONE_REG)
                        r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
                else
                        r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
                break;
        }
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_S390_UCAS_MAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
                                     ucasmap.vcpu_addr, ucasmap.length);
                break;
        }
        case KVM_S390_UCAS_UNMAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
                        ucasmap.length);
                break;
        }
#endif
        case KVM_S390_VCPU_FAULT: {
                r = gmap_fault(arg, vcpu->arch.gmap);
                if (!IS_ERR_VALUE(r))
                        r = 0;
                break;
        }
        case KVM_ENABLE_CAP:
        {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        break;
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                break;
        }
        default:
                r = -ENOTTY;
        }
        return r;
}
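
/*
 * A minimal sketch of how userspace might trigger a store status (the
 * vcpu_fd below is hypothetical, obtained via KVM_CREATE_VCPU):
 *
 *      ioctl(vcpu_fd, KVM_S390_STORE_STATUS, KVM_S390_STORE_STATUS_NOADDR);
 *
 * This writes the architected save area, including the freshly saved
 * FPRS/ACRS, at absolute address 0x1200 (SAVE_AREA_BASE).
 */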

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
        if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
                 && (kvm_is_ucontrol(vcpu->kvm))) {
                vmf->page = virt_to_page(vcpu->arch.sie_block);
                get_page(vmf->page);
                return 0;
        }
#endif
        return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
        return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   bool user_alloc)
{
        /* A few sanity checks. We can have exactly one memory slot which has
           to start at guest virtual zero and which has to be located at a
           page boundary in userland and which has to end at a page boundary.
           The memory in userland is ok to be fragmented into various different
           vmas. It is okay to mmap() and munmap() stuff in this slot after
           doing this call at any time */

        if (mem->slot)
                return -EINVAL;

        if (mem->guest_phys_addr)
                return -EINVAL;

        if (mem->userspace_addr & 0xffffful)
                return -EINVAL;

        if (mem->memory_size & 0xffffful)
                return -EINVAL;

        if (!user_alloc)
                return -EINVAL;

        return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                struct kvm_memory_slot old,
                                bool user_alloc)
{
        int rc;

        rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
                mem->guest_phys_addr, mem->memory_size);
        if (rc)
                printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
        return;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
{
}

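/*
 * Module init: register with the KVM core and build the facility list that
 * is reported to guests, masked down to what KVM actually supports.
 */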
static int __init kvm_s390_init(void)
{
        int ret;
        ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        if (ret)
                return ret;

        /*
         * guests can ask for up to 255+1 double words, we need a full page
         * to hold the maximum amount of facilities. On the other hand, we
         * only set facilities that are known to work in KVM.
         */
        facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
        if (!facilities) {
                kvm_exit();
                return -ENOMEM;
        }
        memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
        facilities[0] &= 0xff00fff3f47c0000ULL;
        facilities[1] &= 0x001c000000000000ULL;
        return 0;
}

static void __exit kvm_s390_exit(void)
{
        free_page((unsigned long) facilities);
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);