[S390] kvm: handle tprot intercepts
arch/s390/kvm/kvm-s390.c
/*
 * kvm-s390.c --  hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

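/*
 * Exported to the generic KVM debugfs code: VCPU_STAT() expands to the
 * (offset, KVM_STAT_VCPU) pair that virt/kvm/kvm_main.c uses to locate
 * each counter inside struct kvm_vcpu.
 */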
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};

static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
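/*
 * The only device ioctl implemented here is KVM_S390_ENABLE_SIE, which
 * prepares the calling process' address space for running guests under
 * the SIE instruction.  A userspace sketch (error handling omitted):
 *
 *	int kvm = open("/dev/kvm", O_RDWR);
 *	ioctl(kvm, KVM_S390_ENABLE_SIE, 0);
 */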
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
		r = 1;
		break;
	default:
		r = 0;
	}
	return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

int kvm_arch_init_vm(struct kvm *kvm)
{
	int rc;
	char debug_name[16];

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	/* make sure the error paths below return an error, not 0 */
	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

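	/*
	 * s390 debug feature: debug_register(name, pages_per_area,
	 * nr_areas, buf_size) -- here 8 pages in each of 2 areas, with
	 * 8 * sizeof(long) bytes of sprintf data per debug entry.
	 */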
	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	return 0;
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
	if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		(__u64) vcpu->arch.sie_block)
		vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	smp_mb();
	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}

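/*
 * The values below mirror the architected initial CPU reset state as
 * described in the z/Architecture Principles of Operation: PSW, prefix,
 * CPU timer and clock comparator are cleared, and the control registers
 * are reset with CR0/CR14 taking their architected non-zero values.
 */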
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix    = 0UL;
	vcpu->arch.sie_block->ihcpu     = 0xffff;
	vcpu->arch.sie_block->cputm     = 0UL;
	vcpu->arch.sie_block->ckc       = 0UL;
	vcpu->arch.sie_block->todpr     = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
	set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
	vcpu->arch.sie_block->ecb   = 6;
	vcpu->arch.sie_block->eca   = 0xC1002001U;
	vcpu->arch.sie_block->fac   = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	int rc = -ENOMEM;

	if (!vcpu)
		goto out_nomem;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	BUG_ON(!kvm->arch.sca);
	if (!kvm->arch.sca->cpu[id].sda)
		kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
	set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out_nomem:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}
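
/*
 * Userspace sketch for KVM_S390_SET_INITIAL_PSW (dispatched from
 * kvm_arch_vcpu_ioctl below); mask and addr land in kvm_run and are
 * loaded into the guest PSW on the next KVM_RUN.  Example, assuming a
 * vcpu fd and a 64-bit entry point (EA/BA bits set in the mask):
 *
 *	struct kvm_s390_psw psw = {
 *		.mask = 0x0000000180000000UL,
 *		.addr = entry_point,
 *	};
 *	ioctl(vcpu_fd, KVM_S390_SET_INITIAL_PSW, &psw);
 */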

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

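/*
 * Enter SIE once.  Guest r14/r15 are shadowed in the SIE control block
 * (gg14/gg15), so they are copied in before and back out after each
 * entry; a fault on the SIE instruction itself is reflected to the
 * guest as an addressing exception.
 */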
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

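/*
 * Main run loop: run the guest and handle intercepts in-kernel until a
 * signal arrives or a handler asks for userspace.  Return conventions
 * from kvm_handle_sie_intercept(): 0 keeps looping, -EOPNOTSUPP hands
 * the raw intercept to userspace as KVM_EXIT_S390_SIEIC, -EREMOTE means
 * the handler already prepared kvm_run, and SIE_INTERCEPT_RERUNVCPU
 * restarts the loop after a memory slot update.
 */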
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->requests)
		if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			kvm_s390_vcpu_set_mem(vcpu);

	/* verify that memory has been registered */
	if (!vcpu->arch.sie_block->gmslm) {
		vcpu_put(vcpu);
		VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu");
		return -EINVAL;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

	might_fault();

	do {
		__vcpu_run(vcpu);
		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/*
		 * Intercept was handled, but userspace support is needed;
		 * kvm_run has been prepared by the handler.
		 */
		rc = 0;
	}

	kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

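/*
 * Copy into guest memory, honouring the addressing mode chosen by the
 * caller: with prefix != 0 the destination is a guest real address
 * (prefixing applies, via copy_to_guest), otherwise it is an absolute
 * address (copy_to_guest_absolute).
 */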
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	const unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	default:
		r = -EINVAL;
	}
	return r;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	/*
	 * A few sanity checks: we allow exactly one memory slot, which has
	 * to start at guest address zero and must begin and end on a page
	 * boundary in userland.  The userland memory may be fragmented
	 * across several vmas, and it is fine to mmap() and munmap() within
	 * this slot at any time after this call.
	 */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & (PAGE_SIZE - 1))
		return -EINVAL;

	if (mem->memory_size & (PAGE_SIZE - 1))
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	return 0;
}
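
/*
 * Userspace sketch of a region that passes the checks above: one slot,
 * starting at guest physical 0, page-aligned in userland (buffer name
 * and size are illustrative):
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot = 0,
 *		.guest_phys_addr = 0,
 *		.userspace_addr = (__u64) ram,	(page-aligned allocation)
 *		.memory_size = ram_size,	(multiple of PAGE_SIZE)
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 */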
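/*
 * After a slot update, every vcpu must reload its SIE control block:
 * KVM_REQ_MMU_RELOAD is set and each vcpu is kicked out of SIE with a
 * stop request, so that kvm_arch_vcpu_ioctl_run() re-runs
 * kvm_s390_vcpu_set_mem() before the next guest entry.
 */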
void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc)
{
	int i;
	struct kvm_vcpu *vcpu;

	/* request update of sie control block for all available vcpus */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			continue;
		kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
	}
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

static int __init kvm_s390_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
	facilities[0] &= 0xff00fff3f47c0000ULL;
	facilities[1] &= 0x201c000000000000ULL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);