platform/adaptation/renesas_rcar/renesas_kernel.git: arch/s390/kvm/priv.c
/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

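/*
 * SET CLOCK is emulated by adjusting the epoch field of every vcpu's SIE
 * block, i.e. the offset the hardware adds to the host TOD clock, rather
 * than by changing the host clock itself.
 */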
/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu *cpup;
        s64 hostclk, val;
        u64 op2;
        int i;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        op2 = kvm_s390_get_base_disp_s(vcpu);
        if (op2 & 7)    /* Operand must be on a doubleword boundary */
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        if (get_guest(vcpu, val, (u64 __user *) op2))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

        if (store_tod_clock(&hostclk)) {
                kvm_s390_set_psw_cc(vcpu, 3);
                return 0;
        }
        val = (val - hostclk) & ~0x3fUL;

        mutex_lock(&vcpu->kvm->lock);
        kvm_for_each_vcpu(i, cpup, vcpu->kvm)
                cpup->arch.sie_block->epoch = val;
        mutex_unlock(&vcpu->kvm->lock);

        kvm_s390_set_psw_cc(vcpu, 0);
        return 0;
}

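/*
 * Handle SET PREFIX (SPX): fetch the new prefix from the second operand,
 * check that the designated prefix area is backed by accessible guest
 * memory and switch the vcpu to the new prefix.
 */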
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
        u64 operand2;
        u32 address = 0;
        u8 tmp;

        vcpu->stat.instruction_spx++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        operand2 = kvm_s390_get_base_disp_s(vcpu);

        /* must be word boundary */
        if (operand2 & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        /* get the value */
        if (get_guest(vcpu, address, (u32 __user *) operand2))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

        address = address & 0x7fffe000u;

        /* make sure that the new value is valid memory */
        if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
           (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

        kvm_s390_set_prefix(vcpu, address);

        VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
        trace_kvm_s390_handle_prefix(vcpu, 1, address);
        return 0;
}

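/* Handle STORE PREFIX (STPX): store the current prefix at the second operand. */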
static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
        u64 operand2;
        u32 address;

        vcpu->stat.instruction_stpx++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        operand2 = kvm_s390_get_base_disp_s(vcpu);

        /* must be word boundary */
        if (operand2 & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        address = vcpu->arch.sie_block->prefix;
        address = address & 0x7fffe000u;

        /* get the value */
        if (put_guest(vcpu, address, (u32 __user *)operand2))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

        VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
        trace_kvm_s390_handle_prefix(vcpu, 0, address);
        return 0;
}

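/* Handle STORE CPU ADDRESS (STAP): store the 16-bit address of this vcpu. */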
static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
        u64 useraddr;

        vcpu->stat.instruction_stap++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        useraddr = kvm_s390_get_base_disp_s(vcpu);

        if (useraddr & 1)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        if (put_guest(vcpu, vcpu->vcpu_id, (u16 __user *)useraddr))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

        VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
        trace_kvm_s390_handle_stap(vcpu, useraddr);
        return 0;
}

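/*
 * ISKE, RRBE and SSKE all end up here: rewind the PSW by the instruction
 * length (4 bytes) so that the guest retries the storage key operation.
 */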
static int handle_skey(struct kvm_vcpu *vcpu)
{
        vcpu->stat.instruction_storage_key++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        vcpu->arch.sie_block->gpsw.addr =
                __rewind_psw(vcpu->arch.sie_block->gpsw, 4);
        VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
        return 0;
}

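/*
 * Handle TEST BLOCK: clear the 4K block designated by the second-operand
 * register and report condition code 0.
 */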
static int handle_test_block(struct kvm_vcpu *vcpu)
{
        unsigned long hva;
        gpa_t addr;
        int reg2;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
        addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
        addr = kvm_s390_real_to_abs(vcpu, addr);

        hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
        if (kvm_is_error_hva(hva))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        /*
         * We don't expect errors on modern systems, and do not care
         * about storage keys (yet), so let's just clear the page.
         */
        if (clear_user((void __user *)hva, PAGE_SIZE) != 0)
                return -EFAULT;
        kvm_s390_set_psw_cc(vcpu, 0);
        vcpu->run->s.regs.gprs[0] = 0;
        return 0;
}

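/*
 * Handle TEST PENDING INTERRUPTION (TPI): dequeue an I/O interrupt matching
 * CR6 and store the interruption code either at the supplied address or, if
 * that address is zero, in the lowcore.
 */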
static int handle_tpi(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_interrupt_info *inti;
        u64 addr;
        int cc;

        addr = kvm_s390_get_base_disp_s(vcpu);
        if (addr & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        cc = 0;
        inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
        if (!inti)
                goto no_interrupt;
        cc = 1;
        if (addr) {
                /*
                 * Store the two-word I/O interruption code into the
                 * provided area.
                 */
                if (put_guest(vcpu, inti->io.subchannel_id, (u16 __user *)addr)
                    || put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *)(addr + 2))
                    || put_guest(vcpu, inti->io.io_int_parm, (u32 __user *)(addr + 4)))
                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        } else {
                /*
                 * Store the three-word I/O interruption code into
                 * the appropriate lowcore area.
                 */
                put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) __LC_SUBCHANNEL_ID);
                put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) __LC_SUBCHANNEL_NR);
                put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) __LC_IO_INT_PARM);
                put_guest(vcpu, inti->io.io_int_word, (u32 __user *) __LC_IO_INT_WORD);
        }
        kfree(inti);
no_interrupt:
        /* Set condition code and we're done. */
        kvm_s390_set_psw_cc(vcpu, cc);
        return 0;
}

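/*
 * Handle the interrupt-dequeuing part of TSCH in the kernel and let
 * userspace emulate the rest of the instruction.
 */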
static int handle_tsch(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_interrupt_info *inti;

        inti = kvm_s390_get_io_int(vcpu->kvm, 0,
                                   vcpu->run->s.regs.gprs[1]);

        /*
         * Prepare exit to userspace.
         * We indicate whether we dequeued a pending I/O interrupt
         * so that userspace can re-inject it if the instruction gets
         * a program check. While this may re-order the pending I/O
         * interrupts, this is no problem since the priority is kept
         * intact.
         */
        vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
        vcpu->run->s390_tsch.dequeued = !!inti;
        if (inti) {
                vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
                vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
                vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
                vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
        }
        vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
        kfree(inti);
        return -EREMOTE;
}

static int handle_io_inst(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (vcpu->kvm->arch.css_support) {
                /*
                 * Most I/O instructions will be handled by userspace.
                 * Exceptions are tpi and the interrupt portion of tsch.
                 */
                if (vcpu->arch.sie_block->ipa == 0xb236)
                        return handle_tpi(vcpu);
                if (vcpu->arch.sie_block->ipa == 0xb235)
                        return handle_tsch(vcpu);
                /* Handle in userspace. */
                return -EOPNOTSUPP;
        } else {
                /*
                 * Set condition code 3 to stop the guest from issuing channel
                 * I/O instructions.
                 */
                kvm_s390_set_psw_cc(vcpu, 3);
                return 0;
        }
}

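/*
 * Handle STORE FACILITY LIST (STFL): copy the first four bytes of the host
 * facility list into the guest lowcore.
 */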
static int handle_stfl(struct kvm_vcpu *vcpu)
{
        int rc;

        vcpu->stat.instruction_stfl++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
                           vfacilities, 4);
        if (rc)
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        VCPU_EVENT(vcpu, 5, "store facility list value %x",
                   *(unsigned int *) vfacilities);
        trace_kvm_s390_handle_stfl(vcpu, *(unsigned int *) vfacilities);
        return 0;
}

static void handle_new_psw(struct kvm_vcpu *vcpu)
{
        /* Check whether the new psw is enabled for machine checks. */
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK)
                kvm_s390_deliver_pending_machine_checks(vcpu);
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

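/*
 * A PSW is rejected if any unassigned mask bits are set, if the instruction
 * address does not fit the selected addressing mode (24- or 31-bit), or if
 * the reserved EA-without-BA mode combination is used.
 */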
static int is_valid_psw(psw_t *psw)
{
        if (psw->mask & PSW_MASK_UNASSIGNED)
                return 0;
        if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
                if (psw->addr & ~PSW_ADDR_31)
                        return 0;
        }
        if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
                return 0;
        if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
                return 0;
        return 1;
}

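/*
 * Handle LPSW: load an 8-byte ESA/390-format PSW from the second operand
 * and expand it into the 16-byte extended format used by the SIE block.
 */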
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
        psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
        psw_compat_t new_psw;
        u64 addr;

        if (gpsw->mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        addr = kvm_s390_get_base_disp_s(vcpu);
        if (addr & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        if (!(new_psw.mask & PSW32_MASK_BASE))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
        gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
        gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
        if (!is_valid_psw(gpsw))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        handle_new_psw(vcpu);
        return 0;
}

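/* Handle LPSWE: load a full 16-byte PSW from the second operand. */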
static int handle_lpswe(struct kvm_vcpu *vcpu)
{
        psw_t new_psw;
        u64 addr;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        addr = kvm_s390_get_base_disp_s(vcpu);
        if (addr & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        vcpu->arch.sie_block->gpsw = new_psw;
        if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        handle_new_psw(vcpu);
        return 0;
}

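/* Handle STORE CPU ID (STIDP): store the 8-byte CPU identification. */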
static int handle_stidp(struct kvm_vcpu *vcpu)
{
        u64 operand2;

        vcpu->stat.instruction_stidp++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        operand2 = kvm_s390_get_base_disp_s(vcpu);

        if (operand2 & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        if (put_guest(vcpu, vcpu->arch.stidp_data, (u64 __user *)operand2))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

        VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
        return 0;
}

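/*
 * Fill in the SYSIB 3.2.2 block for STSI: count the vcpus of this guest,
 * shift any entries reported by an underlying hypervisor down by one and
 * describe the KVM guest itself in entry 0.
 */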
static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        int cpus = 0;
        int n;

        spin_lock(&fi->lock);
        for (n = 0; n < KVM_MAX_VCPUS; n++)
                if (fi->local_int[n])
                        cpus++;
        spin_unlock(&fi->lock);

        /* deal with other level 3 hypervisors */
        if (stsi(mem, 3, 2, 2))
                mem->count = 0;
        if (mem->count < 8)
                mem->count++;
        for (n = mem->count - 1; n > 0; n--)
                memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

        mem->vm[0].cpus_total = cpus;
        mem->vm[0].cpus_configured = cpus;
        mem->vm[0].cpus_standby = 0;
        mem->vm[0].cpus_reserved = 0;
        mem->vm[0].caf = 1000;
        memcpy(mem->vm[0].name, "KVMguest", 8);
        ASCEBC(mem->vm[0].name, 8);
        memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
        ASCEBC(mem->vm[0].cpi, 16);
}

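/*
 * Handle STORE SYSTEM INFORMATION (STSI): emulate function codes 0-3 and
 * copy the requested SYSIB block into guest memory.
 */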
static int handle_stsi(struct kvm_vcpu *vcpu)
{
        int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
        int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
        int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
        unsigned long mem = 0;
        u64 operand2;
        int rc = 0;

        vcpu->stat.instruction_stsi++;
        VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (fc > 3) {
                kvm_s390_set_psw_cc(vcpu, 3);
                return 0;
        }

        if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
            || vcpu->run->s.regs.gprs[1] & 0xffff0000)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        if (fc == 0) {
                vcpu->run->s.regs.gprs[0] = 3 << 28;
                kvm_s390_set_psw_cc(vcpu, 0);
                return 0;
        }

        operand2 = kvm_s390_get_base_disp_s(vcpu);

        if (operand2 & 0xfff)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        switch (fc) {
        case 1: /* same handling for 1 and 2 */
        case 2:
                mem = get_zeroed_page(GFP_KERNEL);
                if (!mem)
                        goto out_no_data;
                if (stsi((void *) mem, fc, sel1, sel2))
                        goto out_no_data;
                break;
        case 3:
                if (sel1 != 2 || sel2 != 2)
                        goto out_no_data;
                mem = get_zeroed_page(GFP_KERNEL);
                if (!mem)
                        goto out_no_data;
                handle_stsi_3_2_2(vcpu, (void *) mem);
                break;
        }

        if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
                rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out_exception;
        }
        trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
        free_page(mem);
        kvm_s390_set_psw_cc(vcpu, 0);
        vcpu->run->s.regs.gprs[0] = 0;
        return 0;
out_no_data:
        kvm_s390_set_psw_cc(vcpu, 3);
out_exception:
        free_page(mem);
        return rc;
}

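/*
 * Dispatch table for the 0xb2xx instructions handled in the kernel,
 * indexed by the low byte of the instruction opcode.
 */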
static const intercept_handler_t b2_handlers[256] = {
        [0x02] = handle_stidp,
        [0x04] = handle_set_clock,
        [0x10] = handle_set_prefix,
        [0x11] = handle_store_prefix,
        [0x12] = handle_store_cpu_address,
        [0x29] = handle_skey,
        [0x2a] = handle_skey,
        [0x2b] = handle_skey,
        [0x2c] = handle_test_block,
        [0x30] = handle_io_inst,
        [0x31] = handle_io_inst,
        [0x32] = handle_io_inst,
        [0x33] = handle_io_inst,
        [0x34] = handle_io_inst,
        [0x35] = handle_io_inst,
        [0x36] = handle_io_inst,
        [0x37] = handle_io_inst,
        [0x38] = handle_io_inst,
        [0x39] = handle_io_inst,
        [0x3a] = handle_io_inst,
        [0x3b] = handle_io_inst,
        [0x3c] = handle_io_inst,
        [0x5f] = handle_io_inst,
        [0x74] = handle_io_inst,
        [0x76] = handle_io_inst,
        [0x7d] = handle_stsi,
        [0xb1] = handle_stfl,
        [0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        /*
         * A lot of B2 instructions are privileged. Here we check for
         * the privileged ones that we can handle in the kernel;
         * anything else goes to userspace.
         */
        handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler)
                return handler(vcpu);

        return -EOPNOTSUPP;
}

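/* EPSW is not a privileged instruction, so no problem-state check is needed. */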
static int handle_epsw(struct kvm_vcpu *vcpu)
{
        int reg1, reg2;

        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

        /* This basically extracts the mask half of the psw. */
        vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
        vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
        if (reg2) {
                vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
                vcpu->run->s.regs.gprs[reg2] |=
                        vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
        }
        return 0;
}

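/* Bit definitions for the R1 field of PFMF (PERFORM FRAME MANAGEMENT FUNCTION). */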
#define PFMF_RESERVED   0xfffc0101UL
#define PFMF_SK         0x00020000UL
#define PFMF_CF         0x00010000UL
#define PFMF_UI         0x00008000UL
#define PFMF_FSC        0x00007000UL
#define PFMF_NQ         0x00000800UL
#define PFMF_MR         0x00000400UL
#define PFMF_MC         0x00000200UL
#define PFMF_KEY        0x000000feUL

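/*
 * Handle PFMF: optionally clear each frame in the operand range and/or set
 * its storage key; only 4K and 1M frame sizes are supported.
 */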
static int handle_pfmf(struct kvm_vcpu *vcpu)
{
        int reg1, reg2;
        unsigned long start, end;

        vcpu->stat.instruction_pfmf++;

        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

        if (!MACHINE_HAS_PFMF)
                return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        /* Only provide non-quiescing support if the host supports it */
        if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        /* No support for conditional-SSKE */
        if (vcpu->run->s.regs.gprs[reg1] & (PFMF_MR | PFMF_MC))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
        switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
        case 0x00000000:
                end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
                break;
        case 0x00001000:
                end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
                break;
        /* We don't support EDAT2
        case 0x00002000:
                end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
                break;*/
        default:
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        }
        while (start < end) {
                unsigned long useraddr;

                useraddr = gmap_translate(start, vcpu->arch.gmap);
                if (IS_ERR((void *)useraddr))
                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

                if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
                        if (clear_user((void __user *)useraddr, PAGE_SIZE))
                                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                }

                if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
                        if (set_guest_storage_key(current->mm, useraddr,
                                        vcpu->run->s.regs.gprs[reg1] & PFMF_KEY,
                                        vcpu->run->s.regs.gprs[reg1] & PFMF_NQ))
                                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                }

                start += PAGE_SIZE;
        }
        if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC)
                vcpu->run->s.regs.gprs[reg2] = end;
        return 0;
}

static const intercept_handler_t b9_handlers[256] = {
        [0x8d] = handle_epsw,
        [0xaf] = handle_pfmf,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        /* This is handled just as for the B2 instructions. */
        handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler)
                return handler(vcpu);

        return -EOPNOTSUPP;
}

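/*
 * Handle LCTL: load the low 32 bits of control registers r1 through r3
 * from consecutive words at the operand address.
 */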
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
        u64 useraddr;
        u32 val = 0;
        int reg, rc;

        vcpu->stat.instruction_lctl++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        useraddr = kvm_s390_get_base_disp_rs(vcpu);

        if (useraddr & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3,
                   useraddr);
        trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, useraddr);

        reg = reg1;
        do {
                rc = get_guest(vcpu, val, (u32 __user *) useraddr);
                if (rc)
                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
                vcpu->arch.sie_block->gcr[reg] |= val;
                useraddr += 4;
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);

        return 0;
}

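/* Handle LCTLG: load full 64-bit control registers r1 through r3. */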
static int handle_lctlg(struct kvm_vcpu *vcpu)
{
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
        u64 useraddr;
        int reg, rc;

        vcpu->stat.instruction_lctlg++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        useraddr = kvm_s390_get_base_disp_rsy(vcpu);

        if (useraddr & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        reg = reg1;

        VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3,
                   useraddr);
        trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, useraddr);

        do {
                rc = get_guest(vcpu, vcpu->arch.sie_block->gcr[reg],
                               (u64 __user *) useraddr);
                if (rc)
                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                useraddr += 8;
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);

        return 0;
}

static const intercept_handler_t eb_handlers[256] = {
        [0x2f] = handle_lctlg,
};

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
        if (handler)
                return handler(vcpu);
        return -EOPNOTSUPP;
}

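/*
 * Handle TEST PROTECTION (TPROT): report the protection state of the
 * addressed page in the condition code.
 */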
static int handle_tprot(struct kvm_vcpu *vcpu)
{
        u64 address1, address2;
        struct vm_area_struct *vma;
        unsigned long user_address;

        vcpu->stat.instruction_tprot++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);

        /* we only handle the Linux memory detection case:
         * access key == 0
         * guest DAT == off
         * everything else goes to userspace. */
        if (address2 & 0xf0)
                return -EOPNOTSUPP;
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
                return -EOPNOTSUPP;

        down_read(&current->mm->mmap_sem);
        user_address = __gmap_translate(address1, vcpu->arch.gmap);
        if (IS_ERR_VALUE(user_address))
                goto out_inject;
        vma = find_vma(current->mm, user_address);
        if (!vma)
                goto out_inject;
        vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
        if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
                vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
        if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ))
                vcpu->arch.sie_block->gpsw.mask |= (2ul << 44);

        up_read(&current->mm->mmap_sem);
        return 0;

out_inject:
        up_read(&current->mm->mmap_sem);
        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
        /* For e5xx... instructions we only handle TPROT */
        if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
                return handle_tprot(vcpu);
        return -EOPNOTSUPP;
}

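/*
 * Handle SET CLOCK PROGRAMMABLE FIELD (SCKPF): copy the low 16 bits of
 * general register 0 into the TOD programmable register.
 */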
static int handle_sckpf(struct kvm_vcpu *vcpu)
{
        u32 value;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
                return kvm_s390_inject_program_int(vcpu,
                                                   PGM_SPECIFICATION);

        value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
        vcpu->arch.sie_block->todpr = value;

        return 0;
}

static const intercept_handler_t x01_handlers[256] = {
        [0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler)
                return handler(vcpu);
        return -EOPNOTSUPP;
}