KVM: x86: add kvm_arch_vcpu_postcreate callback, move TSC initialization
arch/s390/kvm/kvm-s390.c
/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

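/*
 * One entry per vcpu statistics counter; common KVM code walks this
 * NULL-terminated table and exposes each counter as a file under the
 * kvm debugfs directory. VCPU_STAT() records the counter's offset
 * within struct kvm_vcpu.
 */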
struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_external_call", VCPU_STAT(deliver_external_call) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_tprot", VCPU_STAT(instruction_tprot) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
        { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_10", VCPU_STAT(diagnose_10) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { "diagnose_9c", VCPU_STAT(diagnose_9c) },
        { NULL }
};

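/*
 * Page holding the facility list presented to guests: a masked copy of
 * the host's STFLE data, installed into each vcpu's SIE block by
 * kvm_arch_vcpu_setup(). Allocated with GFP_DMA, presumably because
 * the SIE block's fac field can only hold a 31-bit address.
 */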
static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

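/*
 * Report which optional capabilities this architecture supports;
 * userspace queries these through the KVM_CHECK_EXTENSION ioctl.
 */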
int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
        case KVM_CAP_S390_GMAP:
        case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_CAP_S390_UCONTROL:
#endif
        case KVM_CAP_SYNC_REGS:
        case KVM_CAP_ONE_REG:
                r = 1;
                break;
        case KVM_CAP_NR_VCPUS:
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
        case KVM_CAP_S390_COW:
                r = sclp_get_fac85() & 0x2;
                break;
        default:
                r = 0;
        }
        return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}

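/*
 * VM setup: allocate the system control area (SCA) shared by all
 * vcpus, register an s390dbf debug area named "kvm-<pid>", initialize
 * the floating interrupt list and, unless this is a ucontrol VM,
 * allocate the guest address space (gmap).
 */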
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        int rc;
        char debug_name[16];

        rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
        if (type & ~KVM_VM_S390_UCONTROL)
                goto out_err;
        if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
                goto out_err;
#else
        if (type)
                goto out_err;
#endif

        rc = s390_enable_sie();
        if (rc)
                goto out_err;

        rc = -ENOMEM;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_err;

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        if (type & KVM_VM_S390_UCONTROL) {
                kvm->arch.gmap = NULL;
        } else {
                kvm->arch.gmap = gmap_alloc(current->mm);
                if (!kvm->arch.gmap)
                        goto out_nogmap;
        }
        return 0;
out_nogmap:
        debug_unregister(kvm->arch.dbf);
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_err:
        return rc;
}

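/*
 * Tear down a vcpu: detach its SIE block from the SCA, free the
 * per-vcpu gmap of ucontrol guests and release the SIE block page.
 */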
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
        if (!kvm_is_ucontrol(vcpu->kvm)) {
                clear_bit(63 - vcpu->vcpu_id,
                          (unsigned long *) &vcpu->kvm->arch.sca->mcn);
                if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
                    (__u64) vcpu->arch.sie_block)
                        vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        }
        smp_mb();

        if (kvm_is_ucontrol(vcpu->kvm))
                gmap_free(vcpu->arch.gmap);

        free_page((unsigned long)(vcpu->arch.sie_block));
        kvm_vcpu_uninit(vcpu);
        kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        if (!kvm_is_ucontrol(kvm))
                gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        if (kvm_is_ucontrol(vcpu->kvm)) {
                vcpu->arch.gmap = gmap_alloc(current->mm);
                if (!vcpu->arch.gmap)
                        return -ENOMEM;
                return 0;
        }

        vcpu->arch.gmap = vcpu->kvm->arch.gmap;
        vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
                                    KVM_SYNC_GPRS |
                                    KVM_SYNC_ACRS |
                                    KVM_SYNC_CRS;
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* Nothing to do */
}

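/*
 * Context switch helpers: on load, save the host's floating point and
 * access registers, install the guest's and enable the guest address
 * space; on put, reverse the procedure.
 */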
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_regs(&vcpu->arch.host_fpregs);
        save_access_regs(vcpu->arch.host_acrs);
        vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        restore_access_regs(vcpu->run->s.regs.acrs);
        gmap_enable(vcpu->arch.gmap);
        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->run->s.regs.acrs);
        restore_fp_regs(&vcpu->arch.host_fpregs);
        restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in POP, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        kvm_s390_set_prefix(vcpu, 0);
        vcpu->arch.sie_block->cputm     = 0UL;
        vcpu->arch.sie_block->ckc       = 0UL;
        vcpu->arch.sie_block->todpr     = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
        atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        return 0;
}

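/*
 * First-time setup of the SIE control block: initial cpuflags, the
 * interpretation controls in ecb/eca, the (masked) facility list and
 * the clock comparator wakeup machinery (hrtimer plus tasklet).
 */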
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
                                                    CPUSTAT_SM |
                                                    CPUSTAT_STOPPED);
        vcpu->arch.sie_block->ecb   = 6;
        vcpu->arch.sie_block->eca   = 0xC1002001U;
        vcpu->arch.sie_block->fac   = (int) (long) facilities;
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
                     (unsigned long) vcpu);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
        return 0;
}

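/*
 * Allocate a vcpu and its SIE control block, hook the block into the
 * SCA (its origin split into the scaoh/scaol halves) and register the
 * vcpu's local interrupt state with the VM's floating interrupt
 * context.
 */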
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu;
        int rc = -EINVAL;

        if (id >= KVM_MAX_VCPUS)
                goto out;

        rc = -ENOMEM;

        vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
        if (!vcpu)
                goto out;

        vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
                                        get_zeroed_page(GFP_KERNEL);

        if (!vcpu->arch.sie_block)
                goto out_free_cpu;

        vcpu->arch.sie_block->icpua = id;
        if (!kvm_is_ucontrol(kvm)) {
                if (!kvm->arch.sca) {
                        WARN_ON_ONCE(1);
                        goto out_free_cpu;
                }
                if (!kvm->arch.sca->cpu[id].sda)
                        kvm->arch.sca->cpu[id].sda =
                                (__u64) vcpu->arch.sie_block;
                vcpu->arch.sie_block->scaoh =
                        (__u32)(((__u64)kvm->arch.sca) >> 32);
                vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
                set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
        }

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        spin_lock(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        init_waitqueue_head(&vcpu->arch.local_int.wq);
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
        spin_unlock(&kvm->arch.float_int.lock);

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);
        trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

        return vcpu;
out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kfree(vcpu);
out:
        return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

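/*
 * ONE_REG interface: transfer a single guest register, identified by
 * reg->id, between userspace and the SIE control block.
 */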
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
                                           struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_S390_TODPR:
                r = put_user(vcpu->arch.sie_block->todpr,
                             (u32 __user *)reg->addr);
                break;
        case KVM_REG_S390_EPOCHDIFF:
                r = put_user(vcpu->arch.sie_block->epoch,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
                r = put_user(vcpu->arch.sie_block->cputm,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CLOCK_COMP:
                r = put_user(vcpu->arch.sie_block->ckc,
                             (u64 __user *)reg->addr);
                break;
        default:
                break;
        }

        return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
                                           struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_S390_TODPR:
                r = get_user(vcpu->arch.sie_block->todpr,
                             (u32 __user *)reg->addr);
                break;
        case KVM_REG_S390_EPOCHDIFF:
                r = get_user(vcpu->arch.sie_block->epoch,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
                r = get_user(vcpu->arch.sie_block->cputm,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CLOCK_COMP:
                r = get_user(vcpu->arch.sie_block->ckc,
                             (u64 __user *)reg->addr);
                break;
        default:
                break;
        }

        return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        kvm_s390_vcpu_initial_reset(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        restore_access_regs(vcpu->run->s.regs.acrs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
                rc = -EBUSY;
        else {
                vcpu->run->psw_mask = psw.mask;
                vcpu->run->psw_addr = psw.addr;
        }
        return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

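/*
 * One round trip through SIE: deliver pending interrupts, enter the
 * guest via sie64a() with its general purpose registers, and turn a
 * SIE fault into either a ucontrol exit or an addressing exception
 * injected into the guest.
 */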
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
        int rc;

        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_thread_flag(TIF_MCCK_PENDING))
                s390_handle_mcck();

        if (!kvm_is_ucontrol(vcpu->kvm))
                kvm_s390_deliver_pending_interrupts(vcpu);

        vcpu->arch.sie_block->icptcode = 0;
        local_irq_disable();
        kvm_guest_enter();
        local_irq_enable();
        VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                   atomic_read(&vcpu->arch.sie_block->cpuflags));
        trace_kvm_s390_sie_enter(vcpu,
                                 atomic_read(&vcpu->arch.sie_block->cpuflags));
        rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
        if (rc) {
                if (kvm_is_ucontrol(vcpu->kvm)) {
                        rc = SIE_INTERCEPT_UCONTROL;
                } else {
                        VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                        trace_kvm_s390_sie_fault(vcpu);
                        kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                        rc = 0;
                }
        }
        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
        local_irq_disable();
        kvm_guest_exit();
        local_irq_enable();

        memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
        return rc;
}

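/*
 * KVM_RUN: push dirty sync regs from kvm_run into the SIE block, loop
 * over __vcpu_run() and in-kernel intercept handling until userspace
 * attention is required, then mirror the guest state back to kvm_run.
 */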
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

rerun_vcpu:
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_INTR:
        case KVM_EXIT_S390_RESET:
        case KVM_EXIT_S390_UCONTROL:
                break;
        default:
                BUG();
        }

        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
                kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
        }
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
                kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
                memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
        }

        might_fault();

        do {
                rc = __vcpu_run(vcpu);
                if (rc)
                        break;
                if (kvm_is_ucontrol(vcpu->kvm))
                        rc = -EOPNOTSUPP;
                else
                        rc = kvm_handle_sie_intercept(vcpu);
        } while (!signal_pending(current) && !rc);

        if (rc == SIE_INTERCEPT_RERUNVCPU)
                goto rerun_vcpu;

        if (signal_pending(current) && !rc) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                rc = -EINTR;
        }

#ifdef CONFIG_KVM_S390_UCONTROL
        if (rc == SIE_INTERCEPT_UCONTROL) {
                kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
                kvm_run->s390_ucontrol.trans_exc_code =
                        current->thread.gmap_addr;
                kvm_run->s390_ucontrol.pgm_code = 0x10;
                rc = 0;
        }
#endif

        if (rc == -EOPNOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /* intercept was handled, but userspace support is needed;
                 * kvm_run has been prepared by the handler */
                rc = 0;
        }

        kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr;
        kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
        memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu->stat.exit_userspace++;
        return rc;
}

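/*
 * Copy into guest storage, either through the prefixed (real) address
 * space or with absolute addressing, as requested by the caller.
 */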
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
                       unsigned long n, int prefix)
{
        if (prefix)
                return copy_to_guest(vcpu, guestdest, from, n);
        else
                return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        unsigned char archmode = 1;
        int prefix;

        if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 0;
        } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 1;
        } else
                prefix = 0;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
                        vcpu->run->s.regs.gprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
                        &vcpu->arch.sie_block->ckc, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
                        &vcpu->run->s.regs.acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
}

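/*
 * vcpu-level ioctls: interrupt injection, store status, initial
 * PSW/reset, the ONE_REG interface and, for ucontrol guests, address
 * space (un)mapping and fault resolution.
 */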
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vcpu(vcpu, &s390int);
                break;
        }
        case KVM_S390_STORE_STATUS:
                r = kvm_s390_vcpu_store_status(vcpu, arg);
                break;
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                r = -EFAULT;
                if (copy_from_user(&psw, argp, sizeof(psw)))
                        break;
                r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
                break;
        }
        case KVM_S390_INITIAL_RESET:
                r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
                break;
        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG: {
                struct kvm_one_reg reg;
                r = -EFAULT;
                if (copy_from_user(&reg, argp, sizeof(reg)))
                        break;
                if (ioctl == KVM_SET_ONE_REG)
                        r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
                else
                        r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
                break;
        }
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_S390_UCAS_MAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
                                     ucasmap.vcpu_addr, ucasmap.length);
                break;
        }
        case KVM_S390_UCAS_UNMAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
                        ucasmap.length);
                break;
        }
#endif
        case KVM_S390_VCPU_FAULT: {
                r = gmap_fault(arg, vcpu->arch.gmap);
                if (!IS_ERR_VALUE(r))
                        r = 0;
                break;
        }
        default:
                r = -ENOTTY;
        }
        return r;
}

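/*
 * For ucontrol VMs, userspace may mmap() the vcpu's SIE control block
 * at KVM_S390_SIE_PAGE_OFFSET; all other accesses fault with SIGBUS.
 */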
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
        if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
                 && (kvm_is_ucontrol(vcpu->kvm))) {
                vmf->page = virt_to_page(vcpu->arch.sie_block);
                get_page(vmf->page);
                return 0;
        }
#endif
        return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
        return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        /* A few sanity checks. We can have exactly one memory slot which has
           to start at guest virtual zero and which has to be located at a
           page boundary in userland and which has to end at a page boundary.
           The memory in userland is ok to be fragmented into various different
           vmas. It is okay to mmap() and munmap() stuff in this slot after
           doing this call at any time */

        if (mem->slot)
                return -EINVAL;

        if (mem->guest_phys_addr)
                return -EINVAL;

        if (mem->userspace_addr & 0xffffful)
                return -EINVAL;

        if (mem->memory_size & 0xffffful)
                return -EINVAL;

        if (!user_alloc)
                return -EINVAL;

        return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                struct kvm_memory_slot old,
                                int user_alloc)
{
        int rc;

        rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
                mem->guest_phys_addr, mem->memory_size);
        if (rc)
                printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
        return;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
{
}

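/*
 * Module init: register with common KVM code, then build the masked
 * facility list page that is presented to guests.
 */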
static int __init kvm_s390_init(void)
{
        int ret;
        ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        if (ret)
                return ret;

        /*
         * guests can ask for up to 255+1 double words, we need a full page
         * to hold the maximum amount of facilities. On the other hand, we
         * only set facilities that are known to work in KVM.
         */
        facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
        if (!facilities) {
                kvm_exit();
                return -ENOMEM;
        }
        memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
        facilities[0] &= 0xff00fff3f47c0000ULL;
        facilities[1] &= 0x001c000000000000ULL;
        return 0;
}

static void __exit kvm_s390_exit(void)
{
        free_page((unsigned long) facilities);
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);