[kernel/linux-3.0.git] arch/s390/kvm/kvm-s390.c
/*
 * kvm-s390.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

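/*
 * Counters exported via debugfs: each entry pairs a file name with the
 * offset of the corresponding counter in struct kvm_vcpu (via VCPU_STAT);
 * the common KVM code creates one debugfs file per entry.
 */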
struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { NULL }
};

static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

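/*
 * Report which optional capabilities this implementation provides;
 * only KVM_CAP_S390_PSW is advertised here.
 */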
int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
                r = 1;
                break;
        default:
                r = 0;
        }
        return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot; dirty logging
 * is not implemented on s390, so this reports success without data.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        return 0;
}

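/*
 * VM-scoped ioctls: currently only KVM_S390_INTERRUPT, which injects a
 * floating (machine-wide) interrupt into the guest.
 */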
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}

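/*
 * VM setup: enable SIE for this mm, allocate the system control area
 * (SCA) that SIE uses to track the virtual CPUs, and register the s390
 * debug feature buffer used by the VM_EVENT/VCPU_EVENT traces.
 */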
int kvm_arch_init_vm(struct kvm *kvm)
{
        int rc;
        char debug_name[16];

        rc = s390_enable_sie();
        if (rc)
                goto out_err;

        rc = -ENOMEM;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_err;

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        return 0;
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_err:
        return rc;
}

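/*
 * Tear down one vcpu: drop its bit in the SCA cpu mask, clear its SCA
 * descriptor address if it still points at this SIE block, then free
 * the SIE block page and the vcpu itself.
 */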
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
        if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
                (__u64) vcpu->arch.sie_block)
                vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        smp_mb();
        free_page((unsigned long)(vcpu->arch.sie_block));
        kvm_vcpu_uninit(vcpu);
        kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* Nothing to do */
}

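/*
 * Lazy register switching: the host FP/access registers are saved and
 * the guest's are loaded when the vcpu is scheduled in, and the reverse
 * happens when it is scheduled out. While the vcpu is loaded the guest
 * values live in the real registers, which is why store-status below
 * has to re-save them first.
 */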
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_regs(&vcpu->arch.host_fpregs);
        save_access_regs(vcpu->arch.host_acrs);
        vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        restore_access_regs(vcpu->arch.guest_acrs);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->arch.guest_acrs);
        restore_fp_regs(&vcpu->arch.host_fpregs);
        restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in pop, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        vcpu->arch.sie_block->prefix    = 0UL;
        vcpu->arch.sie_block->ihcpu     = 0xffff;
        vcpu->arch.sie_block->cputm     = 0UL;
        vcpu->arch.sie_block->ckc       = 0UL;
        vcpu->arch.sie_block->todpr     = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
}

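/*
 * Per-vcpu SIE setup: start in z/Architecture mode, force an initial
 * memory (re)load via KVM_REQ_MMU_RELOAD, point the SIE block at the
 * whitelisted facility list, and wire up the clock-comparator wakeup
 * timer. The ecb/eca values enable a fixed set of interpretation
 * facilities.
 */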
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
        set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
        vcpu->arch.sie_block->ecb   = 6;
        vcpu->arch.sie_block->eca   = 0xC1002001U;
        vcpu->arch.sie_block->fac   = (int) (long) facilities;
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
                     (unsigned long) vcpu);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
        return 0;
}

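/*
 * Allocate and register a new vcpu: one zeroed page for the SIE control
 * block, an entry in the SCA, and the local interrupt bookkeeping that
 * links the vcpu to the VM's floating interrupt state.
 */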
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu;
        int rc = -EINVAL;

        if (id >= KVM_MAX_VCPUS)
                goto out;

        rc = -ENOMEM;

        vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
        if (!vcpu)
                goto out;

        vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
                                        get_zeroed_page(GFP_KERNEL);

        if (!vcpu->arch.sie_block)
                goto out_free_cpu;

        vcpu->arch.sie_block->icpua = id;
        BUG_ON(!kvm->arch.sca);
        if (!kvm->arch.sca->cpu[id].sda)
                kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
        vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
        vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
        set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        spin_lock(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        init_waitqueue_head(&vcpu->arch.local_int.wq);
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
        spin_unlock(&kvm->arch.float_int.lock);

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);

        return vcpu;
out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kfree(vcpu);
out:
        return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        kvm_s390_vcpu_initial_reset(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc;
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        return 0;
}

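/*
 * Userspace may only set the initial PSW while the vcpu is stopped;
 * a running vcpu yields -EBUSY. The PSW is staged in kvm_run and
 * loaded into the SIE block on the next KVM_RUN.
 */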
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
                rc = -EBUSY;
        else {
                vcpu->run->psw_mask = psw.mask;
                vcpu->run->psw_addr = psw.addr;
        }
        return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

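/*
 * One pass through SIE: sync guest r14/r15 into the SIE block, handle
 * any pending machine checks and interrupts, then enter interpretive
 * execution. A nonzero return from sie64a means the SIE instruction
 * itself faulted, which is reflected to the guest as an addressing
 * exception.
 */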
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
        int rc;

        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_thread_flag(TIF_MCCK_PENDING))
                s390_handle_mcck();

        kvm_s390_deliver_pending_interrupts(vcpu);

        VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                   atomic_read(&vcpu->arch.sie_block->cpuflags));

        vcpu->arch.sie_block->icptcode = 0;
        local_irq_disable();
        kvm_guest_enter();
        local_irq_enable();
        rc = sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
        local_irq_disable();
        kvm_guest_exit();
        local_irq_enable();

        if (rc) {
                VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        }
        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);

        memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

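/*
 * Main dispatch loop for KVM_RUN: load the PSW from kvm_run, run SIE
 * and handle intercepts in-kernel until one of them needs userspace
 * (-EOPNOTSUPP/-EREMOTE), a signal arrives, or the vcpu must be rerun
 * with updated memory (SIE_INTERCEPT_RERUNVCPU).
 */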
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

rerun_vcpu:
        if (vcpu->requests)
                if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
                        kvm_s390_vcpu_set_mem(vcpu);

        /* verify that memory has been registered */
        if (!vcpu->arch.sie_block->gmslm) {
                vcpu_put(vcpu);
                VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu");
                return -EINVAL;
        }

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_INTR:
        case KVM_EXIT_S390_RESET:
                break;
        default:
                BUG();
        }

        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

        might_fault();

        do {
                __vcpu_run(vcpu);
                rc = kvm_handle_sie_intercept(vcpu);
        } while (!signal_pending(current) && !rc);

        if (rc == SIE_INTERCEPT_RERUNVCPU)
                goto rerun_vcpu;

        if (signal_pending(current) && !rc) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                rc = -EINTR;
        }

        if (rc == -EOPNOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /* intercept was handled, but userspace support is needed;
                 * kvm_run has been prepared by the handler */
                rc = 0;
        }

        kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu->stat.exit_userspace++;
        return rc;
}

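/*
 * Copy into the guest either through the prefixed (logical) address
 * space or through absolute addressing, depending on how the store
 * address was specified.
 */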
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
                       unsigned long n, int prefix)
{
        if (prefix)
                return copy_to_guest(vcpu, guestdest, from, n);
        else
                return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        const unsigned char archmode = 1;
        int prefix;

        if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 0;
        } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 1;
        } else
                prefix = 0;

        /*
         * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
         * copying in vcpu load/put. Let's update our copies before we save
         * them into the save area.
         */
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->arch.guest_acrs);

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
                        vcpu->arch.guest_gprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
                        &vcpu->arch.sie_block->ckc, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
                        &vcpu->arch.guest_acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
}

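/*
 * Per-vcpu ioctls: interrupt injection, store status, initial PSW and
 * initial CPU reset.
 */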
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vcpu(vcpu, &s390int);
                break;
        }
        case KVM_S390_STORE_STATUS:
                r = kvm_s390_vcpu_store_status(vcpu, arg);
                break;
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                r = -EFAULT;
                if (copy_from_user(&psw, argp, sizeof(psw)))
                        break;
                r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
                break;
        }
        case KVM_S390_INITIAL_RESET:
                r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
                break;
        default:
                r = -EINVAL;
        }
        return r;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        /* A few sanity checks. We can have exactly one memory slot, which
           has to start at guest physical zero, has to be located at a page
           boundary in userland, and has to end at a page boundary. The
           memory in userland may be fragmented into various different vmas.
           It is okay to mmap() and munmap() within this slot at any time
           after this call. */

        if (mem->slot)
                return -EINVAL;

        if (mem->guest_phys_addr)
                return -EINVAL;

        if (mem->userspace_addr & (PAGE_SIZE - 1))
                return -EINVAL;

        if (mem->memory_size & (PAGE_SIZE - 1))
                return -EINVAL;

        if (!user_alloc)
                return -EINVAL;

        return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                struct kvm_memory_slot old,
                                int user_alloc)
{
        int i;
        struct kvm_vcpu *vcpu;

        /* request update of sie control block for all available vcpus */
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
                        continue;
                kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
        }
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

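/*
 * Module init: register with the common KVM code, then build the guest
 * facility list. The masks applied below whitelist only the facility
 * bits that KVM knows how to handle; everything else reported by the
 * machine's stfle list stays hidden from guests.
 */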
static int __init kvm_s390_init(void)
{
        int ret;
        ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        if (ret)
                return ret;

        /*
         * guests can ask for up to 255+1 double words, so we need a full
         * page to hold the maximum amount of facilities. On the other hand,
         * we only set facilities that are known to work in KVM.
         */
        facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
        if (!facilities) {
                kvm_exit();
                return -ENOMEM;
        }
        memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
        facilities[0] &= 0xff00fff3f47c0000ULL;
        facilities[1] &= 0x201c000000000000ULL;
        return 0;
}

static void __exit kvm_s390_exit(void)
{
        free_page((unsigned long) facilities);
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);