arch/s390/kvm/kvm-s390.c
/*
 * kvm-s390.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { NULL }
};
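
/*
 * Generic KVM code turns each entry above into a counter file under
 * debugfs, conventionally mounted at /sys/kernel/debug/kvm. A minimal
 * userspace sketch of reading one counter (path and mount point are
 * assumptions, not part of this file):
 *
 *	FILE *f = fopen("/sys/kernel/debug/kvm/exit_null", "r");
 *	long long count;
 *	if (f && fscanf(f, "%lld", &count) == 1)
 *		printf("exit_null = %lld\n", count);
 */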

static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}
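
/*
 * A minimal sketch of the matching userspace call, assuming the usual
 * /dev/kvm device node and <linux/kvm.h>; not part of this file:
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	if (kvm_fd >= 0 && ioctl(kvm_fd, KVM_S390_ENABLE_SIE, 0) < 0)
 *		perror("KVM_S390_ENABLE_SIE");
 */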

int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
                r = 1;
                break;
        default:
                r = 0;
        }
        return r;
}
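
/*
 * Userspace probes capabilities through KVM_CHECK_EXTENSION on the
 * /dev/kvm fd; a sketch (kvm_fd as opened above):
 *
 *	int has_psw = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_PSW);
 *	// 1 means the PSW is exchanged through the kvm_run area
 */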

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}
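
/*
 * A sketch of injecting a floating interrupt from userspace through
 * the VM fd; the interrupt type and parameters here are illustrative
 * only:
 *
 *	struct kvm_s390_interrupt s390int = {
 *		.type = KVM_S390_INT_VIRTIO,
 *		.parm = 0,
 *		.parm64 = 0x1234,	// illustrative payload
 *	};
 *	if (ioctl(vm_fd, KVM_S390_INTERRUPT, &s390int) < 0)
 *		perror("KVM_S390_INTERRUPT");
 */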

int kvm_arch_init_vm(struct kvm *kvm)
{
        int rc;
        char debug_name[16];

        rc = s390_enable_sie();
        if (rc)
                goto out_err;

        rc = -ENOMEM;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_err;

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        return 0;
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_err:
        return rc;
}
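
/*
 * This path runs when userspace issues KVM_CREATE_VM on /dev/kvm; a
 * minimal sketch (kvm_fd assumed open as above):
 *
 *	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 *	if (vm_fd < 0)
 *		perror("KVM_CREATE_VM");
 */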

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
        if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
                (__u64) vcpu->arch.sie_block)
                vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        smp_mb();
        free_page((unsigned long)(vcpu->arch.sie_block));
        kvm_vcpu_uninit(vcpu);
        kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* Nothing to do */
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_regs(&vcpu->arch.host_fpregs);
        save_access_regs(vcpu->arch.host_acrs);
        vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        restore_access_regs(vcpu->arch.guest_acrs);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->arch.guest_acrs);
        restore_fp_regs(&vcpu->arch.host_fpregs);
        restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals the initial cpu reset in POP, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        vcpu->arch.sie_block->prefix    = 0UL;
        vcpu->arch.sie_block->ihcpu     = 0xffff;
        vcpu->arch.sie_block->cputm     = 0UL;
        vcpu->arch.sie_block->ckc       = 0UL;
        vcpu->arch.sie_block->todpr     = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
        set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
        vcpu->arch.sie_block->ecb   = 6;
        vcpu->arch.sie_block->eca   = 0xC1002001U;
        vcpu->arch.sie_block->fac   = (int) (long) facilities;
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
                     (unsigned long) vcpu);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
        return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu;
        int rc = -EINVAL;

        if (id >= KVM_MAX_VCPUS)
                goto out;

        rc = -ENOMEM;

        vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
        if (!vcpu)
                goto out;

        vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
                                        get_zeroed_page(GFP_KERNEL);

        if (!vcpu->arch.sie_block)
                goto out_free_cpu;

        vcpu->arch.sie_block->icpua = id;
        BUG_ON(!kvm->arch.sca);
        if (!kvm->arch.sca->cpu[id].sda)
                kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
        vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
        vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
        set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        spin_lock(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        init_waitqueue_head(&vcpu->arch.local_int.wq);
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
        spin_unlock(&kvm->arch.float_int.lock);

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);

        return vcpu;
out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kfree(vcpu);
out:
        return ERR_PTR(rc);
}
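
/*
 * Userspace reaches this through KVM_CREATE_VCPU on the VM fd; the
 * returned fd drives the per-vcpu ioctls below. A minimal sketch:
 *
 *	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);	// vcpu id 0
 *	if (vcpu_fd < 0)
 *		perror("KVM_CREATE_VCPU");
 */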

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        kvm_s390_vcpu_initial_reset(vcpu);
        return 0;
}
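
/*
 * The corresponding userspace call takes no argument; a sketch:
 *
 *	if (ioctl(vcpu_fd, KVM_S390_INITIAL_RESET, 0) < 0)
 *		perror("KVM_S390_INITIAL_RESET");
 */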

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
        return 0;
}
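
/*
 * A sketch of the matching KVM_GET_REGS/KVM_SET_REGS round trip from
 * userspace (the register value is illustrative):
 *
 *	struct kvm_regs regs;
 *	if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) == 0) {
 *		regs.gprs[2] = 0x2000;	// illustrative
 *		ioctl(vcpu_fd, KVM_SET_REGS, &regs);
 *	}
 */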

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc;
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
                rc = -EBUSY;
        else {
                vcpu->run->psw_mask = psw.mask;
                vcpu->run->psw_addr = psw.addr;
        }
        return rc;
}
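
/*
 * A sketch of setting the initial PSW from userspace; mask and address
 * are illustrative values only:
 *
 *	struct kvm_s390_psw psw = {
 *		.mask = 0x0000000180000000ULL,	// illustrative
 *		.addr = 0x10000,
 *	};
 *	ioctl(vcpu_fd, KVM_S390_SET_INITIAL_PSW, &psw);
 */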

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

static void __vcpu_run(struct kvm_vcpu *vcpu)
{
        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_thread_flag(TIF_MCCK_PENDING))
                s390_handle_mcck();

        kvm_s390_deliver_pending_interrupts(vcpu);

        vcpu->arch.sie_block->icptcode = 0;
        local_irq_disable();
        kvm_guest_enter();
        local_irq_enable();
        VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                   atomic_read(&vcpu->arch.sie_block->cpuflags));
        if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
                VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        }
        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        local_irq_disable();
        kvm_guest_exit();
        local_irq_enable();

        memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

rerun_vcpu:
        if (vcpu->requests)
                if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
                        kvm_s390_vcpu_set_mem(vcpu);

        /* verify that memory has been registered */
        if (!vcpu->arch.sie_block->gmslm) {
                vcpu_put(vcpu);
                VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu");
                return -EINVAL;
        }

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_INTR:
        case KVM_EXIT_S390_RESET:
                break;
        default:
                BUG();
        }

        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

        might_fault();

        do {
                __vcpu_run(vcpu);
                rc = kvm_handle_sie_intercept(vcpu);
        } while (!signal_pending(current) && !rc);

        if (rc == SIE_INTERCEPT_RERUNVCPU)
                goto rerun_vcpu;

        if (signal_pending(current) && !rc) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                rc = -EINTR;
        }

        if (rc == -EOPNOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /* intercept was handled, but userspace support is needed;
                 * kvm_run has been prepared by the handler */
                rc = 0;
        }

        kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu->stat.exit_userspace++;
        return rc;
}
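
/*
 * A condensed sketch of the userspace run loop that lands here; the
 * kvm_run area is the one mmap'ed from the vcpu fd, sized by
 * KVM_GET_VCPU_MMAP_SIZE on /dev/kvm:
 *
 *	struct kvm_run *run = mmap(NULL, mmap_size,
 *				   PROT_READ | PROT_WRITE, MAP_SHARED,
 *				   vcpu_fd, 0);
 *	while (ioctl(vcpu_fd, KVM_RUN, 0) == 0) {
 *		if (run->exit_reason == KVM_EXIT_S390_SIEIC)
 *			break;	// hand the intercept to the emulator
 *	}
 */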

static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
                       unsigned long n, int prefix)
{
        if (prefix)
                return copy_to_guest(vcpu, guestdest, from, n);
        else
                return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        const unsigned char archmode = 1;
        int prefix;

        if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 0;
        } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 1;
        } else
                prefix = 0;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
                        vcpu->arch.guest_gprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
                        &vcpu->arch.sie_block->ckc, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
                        &vcpu->arch.guest_acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
}
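
/*
 * Userspace triggers this with KVM_S390_STORE_STATUS on the vcpu fd;
 * the argument is a guest address or one of the two specials above:
 *
 *	ioctl(vcpu_fd, KVM_S390_STORE_STATUS, KVM_S390_STORE_STATUS_NOADDR);
 */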

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vcpu(vcpu, &s390int);
                break;
        }
        case KVM_S390_STORE_STATUS:
                r = kvm_s390_vcpu_store_status(vcpu, arg);
                break;
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                r = -EFAULT;
                if (copy_from_user(&psw, argp, sizeof(psw)))
                        break;
                r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
                break;
        }
        case KVM_S390_INITIAL_RESET:
                r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
                break;
        default:
                r = -EINVAL;
        }
        return r;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        /* A few sanity checks. We can have exactly one memory slot, which
           has to start at guest physical address zero and has to begin and
           end on a page boundary in userland. The memory in userland may be
           fragmented into various different vmas; it is fine to mmap() and
           munmap() within this slot at any time after this call. */

        if (mem->slot)
                return -EINVAL;

        if (mem->guest_phys_addr)
                return -EINVAL;

        if (mem->userspace_addr & (PAGE_SIZE - 1))
                return -EINVAL;

        if (mem->memory_size & (PAGE_SIZE - 1))
                return -EINVAL;

        if (!user_alloc)
                return -EINVAL;

        return 0;
}
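
/*
 * A sketch of a slot registration that satisfies the checks above:
 * exactly slot 0, guest physical address 0, and a page-aligned size
 * and host address (the 256 MB figure is illustrative):
 *
 *	size_t size = 256 << 20;
 *	struct kvm_userspace_memory_region mem = {
 *		.slot = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size = size,
 *		.userspace_addr = (unsigned long) mmap(NULL, size,
 *				PROT_READ | PROT_WRITE,
 *				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0),
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 */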

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                struct kvm_memory_slot old,
                                int user_alloc)
{
        int i;
        struct kvm_vcpu *vcpu;

        /* request update of sie control block for all available vcpus */
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
                        continue;
                kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
        }
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

static int __init kvm_s390_init(void)
{
        int ret;

        ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        if (ret)
                return ret;

        /*
         * guests can ask for up to 255+1 double words, we need a full page
         * to hold the maximum amount of facilities. On the other hand, we
         * only set facilities that are known to work in KVM.
         */
        facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
        if (!facilities) {
                kvm_exit();
                return -ENOMEM;
        }
        memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
        facilities[0] &= 0xff00fff3f47c0000ULL;
        facilities[1] &= 0x201c000000000000ULL;
        return 0;
}

static void __exit kvm_s390_exit(void)
{
        free_page((unsigned long) facilities);
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);