KVM: s390: Base infrastructure for enabling capabilities.
arch/s390/kvm/kvm-s390.c
/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

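/*
 * Facility list (STFLE) bits that KVM reports to its guests; the page is
 * allocated and masked in kvm_s390_init() below.
 */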
static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

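/*
 * Report which optional capabilities this kernel supports. KVM_CAP_ENABLE_CAP
 * is advertised here even though kvm_vcpu_ioctl_enable_cap() accepts no
 * capability yet; this patch only adds the base infrastructure.
 */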
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_S390_COW:
		r = sclp_get_fac85() & 0x2;
		break;
	default:
		r = 0;
	}
	return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

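/*
 * Create the per-VM state: validate the requested VM type, enable SIE for
 * the calling process, and set up the SCA page, the s390 debug feature
 * buffer and, for non-ucontrol guests, the gmap guest address space.
 */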
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
	}
	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

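/*
 * Swap host and guest register context when a vcpu is scheduled in: save
 * the host floating point and access registers, load the guest's, and
 * enable its gmap on this cpu. kvm_arch_vcpu_put() reverses these steps.
 */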
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm     = 0UL;
	vcpu->arch.sie_block->ckc       = 0UL;
	vcpu->arch.sie_block->todpr     = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

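/*
 * One-time setup of the SIE control block: intercept controls (ecb/eca),
 * the facility list visible to the guest, and the clock comparator timer
 * that wakes the vcpu from enabled wait.
 */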
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);
	vcpu->arch.sie_block->ecb   = 6;
	vcpu->arch.sie_block->eca   = 0xC1002001U;
	vcpu->arch.sie_block->fac   = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}

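/*
 * Allocate a vcpu together with its SIE control block. For non-ucontrol
 * guests the block is also entered into the system control area (SCA),
 * and the vcpu's bit is set in the SCA's mcn mask.
 */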
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

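/*
 * KVM_GET_ONE_REG / KVM_SET_ONE_REG accessors for s390 registers that live
 * in the SIE control block rather than in the synced register set.
 */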
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

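/*
 * One pass through SIE: deliver pending interrupts, enter the guest via
 * sie64a(), and turn a host-side fault on the SIE instruction into an
 * addressing exception for the guest (or a ucontrol exit).
 */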
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc;

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	kvm_guest_enter();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	trace_kvm_s390_sie_enter(vcpu,
				 atomic_read(&vcpu->arch.sie_block->cpuflags));
	rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
	if (rc) {
		if (kvm_is_ucontrol(vcpu->kvm)) {
			rc = SIE_INTERCEPT_UCONTROL;
		} else {
			VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
			trace_kvm_s390_sie_fault(vcpu);
			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
			rc = 0;
		}
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
	kvm_guest_exit();

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
	return rc;
}

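/*
 * The KVM_RUN backend: push the register state flagged in kvm_dirty_regs
 * into the SIE block, loop through __vcpu_run() until an exit needs
 * userspace or a signal arrives, then fill in the kvm_run exit data.
 */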
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

	might_fault();

	do {
		rc = __vcpu_run(vcpu);
		if (rc)
			break;
		if (kvm_is_ucontrol(vcpu->kvm))
			rc = -EOPNOTSUPP;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

#ifdef CONFIG_KVM_S390_UCONTROL
	if (rc == SIE_INTERCEPT_UCONTROL) {
		kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
		kvm_run->s390_ucontrol.trans_exc_code =
			current->thread.gmap_addr;
		kvm_run->s390_ucontrol.pgm_code = 0x10;
		rc = 0;
	}
#endif

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->run->s.regs.gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->run->s.regs.acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

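/*
 * Handle KVM_ENABLE_CAP on a vcpu. Only the plumbing exists so far; no
 * capability is accepted yet, so every request returns -EINVAL.
 */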
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
			ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

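/*
 * mmap handler for the vcpu fd: user controlled VMs may map the vcpu's
 * SIE control block at KVM_S390_SIE_PAGE_OFFSET; anything else faults
 * with SIGBUS.
 */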
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   bool user_alloc)
{
	/* A few sanity checks. We can have exactly one memory slot which has
	   to start at guest virtual zero and which has to be located at a
	   page boundary in userland and which has to end at a page boundary.
	   The memory in userland is ok to be fragmented into various different
	   vmas. It is okay to mmap() and munmap() stuff in this slot after
	   doing this call at any time */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				bool user_alloc)
{
	int rc;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
		mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}

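/*
 * Module init: register with the common KVM code, then build the facility
 * list handed to guests from the host's STFLE data, masked down to the
 * facilities known to work under KVM.
 */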
static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
	facilities[0] &= 0xff00fff3f47c0000ULL;
	facilities[1] &= 0x001c000000000000ULL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);