arch/mips/kvm/kvm_trap_emul.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#include <linux/kvm_host.h>

#include "kvm_mips_opcode.h"
#include "kvm_mips_int.h"
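
/*
 * Translate a guest virtual address (GVA) to a guest physical address (GPA).
 * Only unmapped guest segments (CKSEG0/CKSEG1) can be translated directly;
 * for anything else the host TLBs are dumped and KVM_INVALID_ADDR is
 * returned.
 */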
static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
{
        gpa_t gpa;
        uint32_t kseg = KSEGX(gva);

        if ((kseg == CKSEG0) || (kseg == CKSEG1)) {
                gpa = CPHYSADDR(gva);
        } else {
                printk("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
                kvm_mips_dump_host_tlbs();
                gpa = KVM_INVALID_ADDR;
        }

#ifdef DEBUG
        kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);
#endif

        return gpa;
}
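
/*
 * Handle a Coprocessor Unusable exception taken by the guest. A CP1 (FPU)
 * reference is delivered to the guest via kvm_mips_emulate_fpu_exc(); any
 * other coprocessor-unusable fault is handled by emulating the offending
 * instruction with kvm_mips_emulate_inst().
 */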
static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
        unsigned long cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1)
                er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
        else
                er = kvm_mips_emulate_inst(cause, opc, run, vcpu);

        switch (er) {
        case EMULATE_DONE:
                ret = RESUME_GUEST;
                break;

        case EMULATE_FAIL:
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
                break;

        case EMULATE_WAIT:
                run->exit_reason = KVM_EXIT_INTR;
                ret = RESUME_HOST;
                break;

        default:
                BUG();
        }
        return ret;
}
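
/*
 * Handle a TLB Modified exception. Faults on user or KSEG23 guest addresses
 * are passed to kvm_mips_handle_tlbmod(); a TLB MOD fault on KSEG0 is not
 * expected (no HIGHMEM handling yet, per the XXXKYMA note) and is reported
 * as an internal error, as is any other address.
 */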
static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        unsigned long cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
            || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
#ifdef DEBUG
                kvm_debug
                    ("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
                     cause, opc, badvaddr);
#endif
                er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);

                if (er == EMULATE_DONE)
                        ret = RESUME_GUEST;
                else {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
                /* XXXKYMA: The guest kernel does not expect to get this fault
                 * when we are not using HIGHMEM. Need to address this in a
                 * HIGHMEM kernel.
                 */
                printk
                    ("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
                     cause, opc, badvaddr);
                kvm_mips_dump_host_tlbs();
                kvm_arch_vcpu_dump_regs(vcpu);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        } else {
                printk
                    ("Illegal TLB Mod fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
                     cause, opc, badvaddr);
                kvm_mips_dump_host_tlbs();
                kvm_arch_vcpu_dump_regs(vcpu);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}
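
/*
 * Handle a TLB miss taken on a store (TLBS). COMMPAGE accesses from guest
 * kernel mode get a commpage mapping; user/KSEG23 misses go through
 * kvm_mips_handle_tlbmiss(); KSEG0 misses are resolved by
 * kvm_mips_handle_kseg0_tlb_fault().
 */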
static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        unsigned long cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
            && KVM_GUEST_KERNEL_MODE(vcpu)) {
                if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
                   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
#ifdef DEBUG
                kvm_debug
                    ("USER ADDR TLB ST fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
                     cause, opc, badvaddr);
#endif
                er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
                if (er == EMULATE_DONE)
                        ret = RESUME_GUEST;
                else {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
                /* All KSEG0 faults are handled by KVM, as the guest kernel
                 * does not expect to ever get them.
                 */
                if (kvm_mips_handle_kseg0_tlb_fault
                    (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else {
                kvm_err
                    ("Illegal TLB ST fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
                     cause, opc, badvaddr);
                kvm_mips_dump_host_tlbs();
                kvm_arch_vcpu_dump_regs(vcpu);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}
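
/*
 * Handle a TLB miss taken on a load or instruction fetch (TLBL). The case
 * breakdown mirrors the store handler above: COMMPAGE accesses from guest
 * kernel mode, user/KSEG23 misses, KSEG0 misses, and anything else is an
 * error.
 */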
static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        unsigned long cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
            && KVM_GUEST_KERNEL_MODE(vcpu)) {
                if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
                   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
#ifdef DEBUG
                kvm_debug("USER ADDR TLB LD fault: PC: %#lx, BadVaddr: %#lx\n",
                          vcpu->arch.pc, badvaddr);
#endif

                /* User Address (UA) fault. This can happen if:
                 * (1) the TLB entry is not present/valid in both the guest
                 *     and the shadow host TLB; in this case the fault is
                 *     passed on to the guest kernel and handled there, or
                 * (2) the TLB entry is present in the guest TLB but not in
                 *     the shadow host TLB; in this case the entry is
                 *     injected from the guest TLB into the shadow host TLB.
                 */

                er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
                if (er == EMULATE_DONE)
                        ret = RESUME_GUEST;
                else {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
                if (kvm_mips_handle_kseg0_tlb_fault
                    (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else {
                printk
                    ("Illegal TLB LD fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
                     cause, opc, badvaddr);
                kvm_mips_dump_host_tlbs();
                kvm_arch_vcpu_dump_regs(vcpu);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}
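
/*
 * Handle an Address Error on a store (AdES). Stores to CKSEG0/CKSEG1 from
 * guest kernel mode are treated as MMIO and emulated; the MMIO access is
 * then completed in userland via KVM_EXIT_MMIO.
 */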
static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        unsigned long cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (KVM_GUEST_KERNEL_MODE(vcpu)
            && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
#ifdef DEBUG
                kvm_debug("Emulate Store to MMIO space\n");
#endif
                er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
                if (er == EMULATE_FAIL) {
                        printk("Emulate Store to MMIO space failed\n");
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                } else {
                        run->exit_reason = KVM_EXIT_MMIO;
                        ret = RESUME_HOST;
                }
        } else {
                printk
                    ("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
                     cause, opc, badvaddr);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}
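
/*
 * Handle an Address Error on a load or instruction fetch (AdEL). Loads from
 * CKSEG0/CKSEG1 are treated as MMIO and emulated, completing the access in
 * userland via KVM_EXIT_MMIO; anything else is an internal error.
 */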
static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        unsigned long cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
#ifdef DEBUG
                kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
#endif
                er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
                if (er == EMULATE_FAIL) {
                        printk("Emulate Load from MMIO space failed\n");
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                } else {
                        run->exit_reason = KVM_EXIT_MMIO;
                        ret = RESUME_HOST;
                }
        } else {
                printk
                    ("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
                     cause, opc, badvaddr);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
                er = EMULATE_FAIL;
        }
        return ret;
}
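
/*
 * The SYSCALL, Reserved Instruction and BREAK handlers below all follow the
 * same pattern: the exception is re-delivered to (or emulated on behalf of)
 * the guest, and any emulation failure is reported as an internal error.
 */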
static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
        unsigned long cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
        if (er == EMULATE_DONE)
                ret = RESUME_GUEST;
        else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
        unsigned long cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_handle_ri(cause, opc, run, vcpu);
        if (er == EMULATE_DONE)
                ret = RESUME_GUEST;
        else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
        unsigned long cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
        if (er == EMULATE_DONE)
                ret = RESUME_GUEST;
        else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}
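
/*
 * Copy the guest CP0 state between the vcpu and the kvm_regs structure used
 * by the get/set register ioctls. Only select 0 of each CP0 register is
 * exposed, plus Config selects 0-3 and 7 on the read side.
 */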
static int
kvm_trap_emul_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;

        kvm_write_c0_guest_index(cop0, regs->cp0reg[MIPS_CP0_TLB_INDEX][0]);
        kvm_write_c0_guest_context(cop0, regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0]);
        kvm_write_c0_guest_badvaddr(cop0, regs->cp0reg[MIPS_CP0_BAD_VADDR][0]);
        kvm_write_c0_guest_entryhi(cop0, regs->cp0reg[MIPS_CP0_TLB_HI][0]);
        kvm_write_c0_guest_epc(cop0, regs->cp0reg[MIPS_CP0_EXC_PC][0]);

        kvm_write_c0_guest_status(cop0, regs->cp0reg[MIPS_CP0_STATUS][0]);
        kvm_write_c0_guest_cause(cop0, regs->cp0reg[MIPS_CP0_CAUSE][0]);
        kvm_write_c0_guest_pagemask(cop0,
                                    regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0]);
        kvm_write_c0_guest_wired(cop0, regs->cp0reg[MIPS_CP0_TLB_WIRED][0]);
        kvm_write_c0_guest_errorepc(cop0, regs->cp0reg[MIPS_CP0_ERROR_PC][0]);

        return 0;
}

static int
kvm_trap_emul_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;

        regs->cp0reg[MIPS_CP0_TLB_INDEX][0] = kvm_read_c0_guest_index(cop0);
        regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0] = kvm_read_c0_guest_context(cop0);
        regs->cp0reg[MIPS_CP0_BAD_VADDR][0] = kvm_read_c0_guest_badvaddr(cop0);
        regs->cp0reg[MIPS_CP0_TLB_HI][0] = kvm_read_c0_guest_entryhi(cop0);
        regs->cp0reg[MIPS_CP0_EXC_PC][0] = kvm_read_c0_guest_epc(cop0);

        regs->cp0reg[MIPS_CP0_STATUS][0] = kvm_read_c0_guest_status(cop0);
        regs->cp0reg[MIPS_CP0_CAUSE][0] = kvm_read_c0_guest_cause(cop0);
        regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0] =
            kvm_read_c0_guest_pagemask(cop0);
        regs->cp0reg[MIPS_CP0_TLB_WIRED][0] = kvm_read_c0_guest_wired(cop0);
        regs->cp0reg[MIPS_CP0_ERROR_PC][0] = kvm_read_c0_guest_errorepc(cop0);

        regs->cp0reg[MIPS_CP0_CONFIG][0] = kvm_read_c0_guest_config(cop0);
        regs->cp0reg[MIPS_CP0_CONFIG][1] = kvm_read_c0_guest_config1(cop0);
        regs->cp0reg[MIPS_CP0_CONFIG][2] = kvm_read_c0_guest_config2(cop0);
        regs->cp0reg[MIPS_CP0_CONFIG][3] = kvm_read_c0_guest_config3(cop0);
        regs->cp0reg[MIPS_CP0_CONFIG][7] = kvm_read_c0_guest_config7(cop0);

        return 0;
}
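
/*
 * vm_init and vcpu_init have nothing trap & emulate specific to set up;
 * the real per-vcpu initialisation happens in kvm_trap_emul_vcpu_setup()
 * below.
 */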
static int kvm_trap_emul_vm_init(struct kvm *kvm)
{
        return 0;
}

static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
        return 0;
}

static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        uint32_t config1;
        int vcpu_id = vcpu->vcpu_id;

        /* Arch specific stuff: set up the Config registers so that the
         * guest comes up as expected. For now we simulate a MIPS 24Kc.
         */
        kvm_write_c0_guest_prid(cop0, 0x00019300);
        kvm_write_c0_guest_config(cop0,
                                  MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
                                  (MMU_TYPE_R4000 << CP0C0_MT));

        /* Read the cache characteristics from the host Config1 register */
        config1 = (read_c0_config1() & ~0x7f);

        /* Set up the guest MMU size */
        config1 &= ~(0x3f << 25);
        config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);

        /* Clear the bits for features we are not emulating */
        config1 &=
            ~((1 << CP0C1_C2) | (1 << CP0C1_MD) | (1 << CP0C1_PC) |
              (1 << CP0C1_WR) | (1 << CP0C1_CA));
        kvm_write_c0_guest_config1(cop0, config1);

        kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2);
        /* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */
        kvm_write_c0_guest_config3(cop0, MIPS_CONFIG3 | (0 << CP0C3_VInt) |
                                         (1 << CP0C3_ULRI));

        /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
        kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));

        /* Set up IntCtl defaults, compatibility mode for timer interrupts (HW5) */
        kvm_write_c0_guest_intctl(cop0, 0xFC000000);

        /* Put the vcpu_id into the CPUNum field of EBase to handle SMP guests */
        kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | (vcpu_id & 0xFF));

        return 0;
}
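
/*
 * Callback table wiring the trap & emulate handlers into the generic MIPS
 * KVM framework.
 */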
static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
        /* exit handlers */
        .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
        .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
        .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
        .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
        .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
        .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
        .handle_syscall = kvm_trap_emul_handle_syscall,
        .handle_res_inst = kvm_trap_emul_handle_res_inst,
        .handle_break = kvm_trap_emul_handle_break,

        .vm_init = kvm_trap_emul_vm_init,
        .vcpu_init = kvm_trap_emul_vcpu_init,
        .vcpu_setup = kvm_trap_emul_vcpu_setup,
        .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
        .queue_timer_int = kvm_mips_queue_timer_int_cb,
        .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
        .queue_io_int = kvm_mips_queue_io_int_cb,
        .dequeue_io_int = kvm_mips_dequeue_io_int_cb,
        .irq_deliver = kvm_mips_irq_deliver_cb,
        .irq_clear = kvm_mips_irq_clear_cb,
        .vcpu_ioctl_get_regs = kvm_trap_emul_ioctl_get_regs,
        .vcpu_ioctl_set_regs = kvm_trap_emul_ioctl_set_regs,
};
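
/* Hand the trap & emulate callback table to the generic MIPS KVM code. */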
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
        *install_callbacks = &kvm_trap_emul_callbacks;
        return 0;
}