KVM: PPC: booke: Support perfmon interrupts
arch/powerpc/kvm/booke.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *          Scott Wood <scottwood@freescale.com>
 *          Varun Sethi <varun.sethi@freescale.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/hw_irq.h>
#include <asm/irq.h>

#include "timing.h"
#include "booke.h"

unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "mmio",       VCPU_STAT(mmio_exits) },
        { "dcr",        VCPU_STAT(dcr_exits) },
        { "sig",        VCPU_STAT(signal_exits) },
        { "itlb_r",     VCPU_STAT(itlb_real_miss_exits) },
        { "itlb_v",     VCPU_STAT(itlb_virt_miss_exits) },
        { "dtlb_r",     VCPU_STAT(dtlb_real_miss_exits) },
        { "dtlb_v",     VCPU_STAT(dtlb_virt_miss_exits) },
        { "sysc",       VCPU_STAT(syscall_exits) },
        { "isi",        VCPU_STAT(isi_exits) },
        { "dsi",        VCPU_STAT(dsi_exits) },
        { "inst_emu",   VCPU_STAT(emulated_inst_exits) },
        { "dec",        VCPU_STAT(dec_exits) },
        { "ext_intr",   VCPU_STAT(ext_intr_exits) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { "doorbell", VCPU_STAT(dbell_exits) },
        { "guest doorbell", VCPU_STAT(gdbell_exits) },
        { NULL }
};

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
        int i;

        printk("pc:   %08lx msr:  %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
        printk("lr:   %08lx ctr:  %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
        printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
                                            vcpu->arch.shared->srr1);

        printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

        for (i = 0; i < 32; i += 4) {
                printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
                       kvmppc_get_gpr(vcpu, i),
                       kvmppc_get_gpr(vcpu, i+1),
                       kvmppc_get_gpr(vcpu, i+2),
                       kvmppc_get_gpr(vcpu, i+3));
        }
}

#ifdef CONFIG_SPE
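/*
 * SPE state is switched lazily: MSR_SPE in the shadow MSR tracks whether
 * the guest's SPE registers are currently live in hardware.  The helpers
 * below load or save the guest SPE state and flip that bit, with
 * preemption disabled so the two cannot go out of sync.
 */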
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
{
        preempt_disable();
        enable_kernel_spe();
        kvmppc_save_guest_spe(vcpu);
        vcpu->arch.shadow_msr &= ~MSR_SPE;
        preempt_enable();
}

static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
{
        preempt_disable();
        enable_kernel_spe();
        kvmppc_load_guest_spe(vcpu);
        vcpu->arch.shadow_msr |= MSR_SPE;
        preempt_enable();
}

static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.shared->msr & MSR_SPE) {
                if (!(vcpu->arch.shadow_msr & MSR_SPE))
                        kvmppc_vcpu_enable_spe(vcpu);
        } else if (vcpu->arch.shadow_msr & MSR_SPE) {
                kvmppc_vcpu_disable_spe(vcpu);
        }
}
#else
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
}
#endif

/*
 * Helper function for "full" MSR writes.  No need to call this if only
 * EE/CE/ME/DE/RI are changing.
 */
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
        u32 old_msr = vcpu->arch.shared->msr;

#ifdef CONFIG_KVM_BOOKE_HV
        new_msr |= MSR_GS;
#endif

        vcpu->arch.shared->msr = new_msr;

        kvmppc_mmu_msr_notify(vcpu, old_msr);
        kvmppc_vcpu_sync_spe(vcpu);
}

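/*
 * pending_exceptions is a bitmap indexed by BOOKE_IRQPRIO_* values;
 * queueing an interrupt just sets the corresponding priority bit.
 * Actual delivery is deferred to kvmppc_core_check_exceptions(),
 * which runs before every guest entry.
 */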
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
                                       unsigned int priority)
{
        set_bit(priority, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
                                        ulong dear_flags, ulong esr_flags)
{
        vcpu->arch.queued_dear = dear_flags;
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
                                           ulong dear_flags, ulong esr_flags)
{
        vcpu->arch.queued_dear = dear_flags;
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
                                           ulong esr_flags)
{
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
        return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
        clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
        unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

        if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
                prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

        kvmppc_booke_queue_irqprio(vcpu, prio);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
                                  struct kvm_interrupt *irq)
{
        clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
        clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}

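/*
 * With CONFIG_KVM_BOOKE_HV the guest's save/restore and fault-syndrome
 * registers are real guest SPRs (GSRR0/1, GDEAR, GESR), which hardware
 * maps to the guest's own SRR0/1, DEAR and ESR accesses in guest state,
 * so they are read and written with mfspr/mtspr.  Without HV support
 * they are emulated in the magic shared page instead.
 */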
static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
#ifdef CONFIG_KVM_BOOKE_HV
        mtspr(SPRN_GSRR0, srr0);
        mtspr(SPRN_GSRR1, srr1);
#else
        vcpu->arch.shared->srr0 = srr0;
        vcpu->arch.shared->srr1 = srr1;
#endif
}

static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
        vcpu->arch.csrr0 = srr0;
        vcpu->arch.csrr1 = srr1;
}

static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
        if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
                vcpu->arch.dsrr0 = srr0;
                vcpu->arch.dsrr1 = srr1;
        } else {
                set_guest_csrr(vcpu, srr0, srr1);
        }
}

static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
        vcpu->arch.mcsrr0 = srr0;
        vcpu->arch.mcsrr1 = srr1;
}

static unsigned long get_guest_dear(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
        return mfspr(SPRN_GDEAR);
#else
        return vcpu->arch.shared->dar;
#endif
}

static void set_guest_dear(struct kvm_vcpu *vcpu, unsigned long dear)
{
#ifdef CONFIG_KVM_BOOKE_HV
        mtspr(SPRN_GDEAR, dear);
#else
        vcpu->arch.shared->dar = dear;
#endif
}

static unsigned long get_guest_esr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
        return mfspr(SPRN_GESR);
#else
        return vcpu->arch.shared->esr;
#endif
}

static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr)
{
#ifdef CONFIG_KVM_BOOKE_HV
        mtspr(SPRN_GESR, esr);
#else
        vcpu->arch.shared->esr = esr;
#endif
}

/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                                        unsigned int priority)
{
        int allowed = 0;
        ulong msr_mask = 0;
        bool update_esr = false, update_dear = false;
        ulong crit_raw = vcpu->arch.shared->critical;
        ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
        bool crit;
        bool keep_irq = false;
        enum int_class int_class;

        /* Truncate crit indicators in 32 bit mode */
        if (!(vcpu->arch.shared->msr & MSR_SF)) {
                crit_raw &= 0xffffffff;
                crit_r1 &= 0xffffffff;
        }

        /* Critical section when crit == r1 */
        crit = (crit_raw == crit_r1);
        /* ... and we're in supervisor mode */
        crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

        if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
                priority = BOOKE_IRQPRIO_EXTERNAL;
                keep_irq = true;
        }

        switch (priority) {
        case BOOKE_IRQPRIO_DTLB_MISS:
        case BOOKE_IRQPRIO_DATA_STORAGE:
                update_dear = true;
                /* fall through */
        case BOOKE_IRQPRIO_INST_STORAGE:
        case BOOKE_IRQPRIO_PROGRAM:
                update_esr = true;
                /* fall through */
        case BOOKE_IRQPRIO_ITLB_MISS:
        case BOOKE_IRQPRIO_SYSCALL:
        case BOOKE_IRQPRIO_FP_UNAVAIL:
        case BOOKE_IRQPRIO_SPE_UNAVAIL:
        case BOOKE_IRQPRIO_SPE_FP_DATA:
        case BOOKE_IRQPRIO_SPE_FP_ROUND:
        case BOOKE_IRQPRIO_AP_UNAVAIL:
        case BOOKE_IRQPRIO_ALIGNMENT:
                allowed = 1;
                msr_mask = MSR_CE | MSR_ME | MSR_DE;
                int_class = INT_CLASS_NONCRIT;
                break;
        case BOOKE_IRQPRIO_CRITICAL:
        case BOOKE_IRQPRIO_DBELL_CRIT:
                allowed = vcpu->arch.shared->msr & MSR_CE;
                allowed = allowed && !crit;
                msr_mask = MSR_ME;
                int_class = INT_CLASS_CRIT;
                break;
        case BOOKE_IRQPRIO_MACHINE_CHECK:
                allowed = vcpu->arch.shared->msr & MSR_ME;
                allowed = allowed && !crit;
                int_class = INT_CLASS_MC;
                break;
        case BOOKE_IRQPRIO_DECREMENTER:
        case BOOKE_IRQPRIO_FIT:
                keep_irq = true;
                /* fall through */
        case BOOKE_IRQPRIO_EXTERNAL:
        case BOOKE_IRQPRIO_DBELL:
                allowed = vcpu->arch.shared->msr & MSR_EE;
                allowed = allowed && !crit;
                msr_mask = MSR_CE | MSR_ME | MSR_DE;
                int_class = INT_CLASS_NONCRIT;
                break;
        case BOOKE_IRQPRIO_DEBUG:
                allowed = vcpu->arch.shared->msr & MSR_DE;
                allowed = allowed && !crit;
                msr_mask = MSR_ME;
                int_class = INT_CLASS_CRIT;
                break;
        }

        if (allowed) {
                switch (int_class) {
                case INT_CLASS_NONCRIT:
                        set_guest_srr(vcpu, vcpu->arch.pc,
                                      vcpu->arch.shared->msr);
                        break;
                case INT_CLASS_CRIT:
                        set_guest_csrr(vcpu, vcpu->arch.pc,
                                       vcpu->arch.shared->msr);
                        break;
                case INT_CLASS_DBG:
                        set_guest_dsrr(vcpu, vcpu->arch.pc,
                                       vcpu->arch.shared->msr);
                        break;
                case INT_CLASS_MC:
                        set_guest_mcsrr(vcpu, vcpu->arch.pc,
                                        vcpu->arch.shared->msr);
                        break;
                }

                vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
                if (update_esr)
                        set_guest_esr(vcpu, vcpu->arch.queued_esr);
                if (update_dear)
                        set_guest_dear(vcpu, vcpu->arch.queued_dear);
                kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);

                if (!keep_irq)
                        clear_bit(priority, &vcpu->arch.pending_exceptions);
        }

#ifdef CONFIG_KVM_BOOKE_HV
        /*
         * If an interrupt is pending but masked, raise a guest doorbell
         * so that we are notified when the guest enables the relevant
         * MSR bit.
         */
        if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
                kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
        if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
                kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
        if (vcpu->arch.pending_exceptions & (1UL << BOOKE_IRQPRIO_MACHINE_CHECK))
                kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif

        return allowed;
}

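/*
 * A decrementer interrupt is pending for the guest only while both
 * TCR[DIE] (interrupt enable) and TSR[DIS] (decrementer status) are
 * set; mirror that condition into the pending_exceptions bitmap.
 */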
static void update_timer_ints(struct kvm_vcpu *vcpu)
{
        if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
                kvmppc_core_queue_dec(vcpu);
        else
                kvmppc_core_dequeue_dec(vcpu);
}

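/*
 * Walk the pending bitmap from the lowest-numbered priority upwards and
 * deliver the first exception the guest's current MSR state allows.
 * Whatever remains pending afterwards is advertised to the guest
 * through shared->int_pending.
 */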
static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
        unsigned long *pending = &vcpu->arch.pending_exceptions;
        unsigned int priority;

        if (vcpu->requests) {
                if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu)) {
                        smp_mb();
                        update_timer_ints(vcpu);
                }
        }

        priority = __ffs(*pending);
        while (priority < BOOKE_IRQPRIO_MAX) {
                if (kvmppc_booke_irqprio_deliver(vcpu, priority))
                        break;

                priority = find_next_bit(pending,
                                         BITS_PER_BYTE * sizeof(*pending),
                                         priority + 1);
        }

        /* Tell the guest about our interrupt status */
        vcpu->arch.shared->int_pending = !!*pending;
}

/* Check pending exceptions and deliver one, if possible. */
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
        int r = 0;
        WARN_ON_ONCE(!irqs_disabled());

        kvmppc_core_check_exceptions(vcpu);

        if (vcpu->arch.shared->msr & MSR_WE) {
                local_irq_enable();
                kvm_vcpu_block(vcpu);
                local_irq_disable();

                kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
                r = 1;
        }

        return r;
}

/*
 * Common checks before entering the guest world.  Call with interrupts
 * disabled.
 *
 * returns !0 if a signal is pending and check_signal is true
 */
static int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu, bool check_signal)
{
        int r = 0;

        WARN_ON_ONCE(!irqs_disabled());
        while (true) {
                if (need_resched()) {
                        local_irq_enable();
                        cond_resched();
                        local_irq_disable();
                        continue;
                }

                if (check_signal && signal_pending(current)) {
                        r = 1;
                        break;
                }

                if (kvmppc_core_prepare_to_enter(vcpu)) {
                        /* interrupts got enabled in between, so we
                           are back at square 1 */
                        continue;
                }

                break;
        }

        return r;
}

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
        int ret;
#ifdef CONFIG_PPC_FPU
        unsigned int fpscr;
        int fpexc_mode;
        u64 fpr[32];
#endif

        if (!vcpu->arch.sane) {
                kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                return -EINVAL;
        }

        local_irq_disable();
        if (kvmppc_prepare_to_enter(vcpu, true)) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                ret = -EINTR;
                goto out;
        }

        kvm_guest_enter();

#ifdef CONFIG_PPC_FPU
        /* Save userspace FPU state in stack */
        enable_kernel_fp();
        memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
        fpscr = current->thread.fpscr.val;
        fpexc_mode = current->thread.fpexc_mode;

        /* Restore guest FPU state to thread */
        memcpy(current->thread.fpr, vcpu->arch.fpr, sizeof(vcpu->arch.fpr));
        current->thread.fpscr.val = vcpu->arch.fpscr;

        /*
         * Since we can't trap on MSR_FP in GS-mode, we consider the guest
         * as always using the FPU.  Kernel usage of FP (via
         * enable_kernel_fp()) in this thread must not occur while
         * vcpu->fpu_active is set.
         */
        vcpu->fpu_active = 1;

        kvmppc_load_guest_fp(vcpu);
#endif

        ret = __kvmppc_vcpu_run(kvm_run, vcpu);

#ifdef CONFIG_PPC_FPU
        kvmppc_save_guest_fp(vcpu);

        vcpu->fpu_active = 0;

        /* Save guest FPU state from thread */
        memcpy(vcpu->arch.fpr, current->thread.fpr, sizeof(vcpu->arch.fpr));
        vcpu->arch.fpscr = current->thread.fpscr.val;

        /* Restore userspace FPU state from stack */
        memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
        current->thread.fpscr.val = fpscr;
        current->thread.fpexc_mode = fpexc_mode;
#endif

        kvm_guest_exit();

out:
        local_irq_enable();
        return ret;
}

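/*
 * Common tail for exits that require instruction emulation.  On success
 * we resume with RESUME_GUEST_NV, since emulation may have modified
 * non-volatile registers that must be reloaded; DCR accesses are bounced
 * out to userspace; anything we cannot emulate is reported to userspace
 * and queued to the guest as an illegal-instruction program check.
 */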
static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        enum emulation_result er;

        er = kvmppc_emulate_instruction(run, vcpu);
        switch (er) {
        case EMULATE_DONE:
                /* don't overwrite subtypes, just account kvm_stats */
                kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
                /* Future optimization: only reload non-volatiles if
                 * they were actually modified by emulation. */
                return RESUME_GUEST_NV;

        case EMULATE_DO_DCR:
                run->exit_reason = KVM_EXIT_DCR;
                return RESUME_HOST;

        case EMULATE_FAIL:
                printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
                       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
                /* For debugging, encode the failing instruction and
                 * report it to userspace. */
                run->hw.hardware_exit_reason = ~0ULL << 32;
                run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
                kvmppc_core_queue_program(vcpu, ESR_PIL);
                return RESUME_HOST;

        default:
                BUG();
        }
}

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int exit_nr)
{
        int r = RESUME_HOST;

        /* update before a new last_exit_type is rewritten */
        kvmppc_update_timing_stats(vcpu);

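        /*
         * Exits caused by host interrupts (external, decrementer,
         * doorbell) are replayed here by invoking the corresponding
         * host handlers directly on current->thread.regs, while
         * interrupts are still disabled.
         */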
        switch (exit_nr) {
        case BOOKE_INTERRUPT_EXTERNAL:
                do_IRQ(current->thread.regs);
                break;

        case BOOKE_INTERRUPT_DECREMENTER:
                timer_interrupt(current->thread.regs);
                break;

#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3E_64)
        case BOOKE_INTERRUPT_DOORBELL:
                doorbell_exception(current->thread.regs);
                break;
#endif
        case BOOKE_INTERRUPT_MACHINE_CHECK:
                /* FIXME */
                break;
        }

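        /* Host interrupt replay is done; safe to run with interrupts on. */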
        local_irq_enable();

        run->exit_reason = KVM_EXIT_UNKNOWN;
        run->ready_for_interrupt_injection = 1;

        switch (exit_nr) {
        case BOOKE_INTERRUPT_MACHINE_CHECK:
                printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
                kvmppc_dump_vcpu(vcpu);
                /* For debugging, send invalid exit reason to user space */
                run->hw.hardware_exit_reason = ~1ULL << 32;
                run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
                r = RESUME_HOST;
                break;

        case BOOKE_INTERRUPT_EXTERNAL:
                kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_DECREMENTER:
                kvmppc_account_exit(vcpu, DEC_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_DOORBELL:
                kvmppc_account_exit(vcpu, DBELL_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
                kvmppc_account_exit(vcpu, GDBELL_EXITS);

                /*
                 * We are here because there is a pending guest interrupt
                 * which could not be delivered as MSR_CE or MSR_ME was not
                 * set.  Once we break from here we will retry delivery.
                 */
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_GUEST_DBELL:
                kvmppc_account_exit(vcpu, GDBELL_EXITS);

                /*
                 * We are here because there is a pending guest interrupt
                 * which could not be delivered as MSR_EE was not set.  Once
                 * we break from here we will retry delivery.
                 */
                r = RESUME_GUEST;
                break;

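        /*
         * A performance monitor interrupt exit requires no action on
         * the guest's behalf; just resume.
         */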
        case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_HV_PRIV:
                r = emulation_exit(run, vcpu);
                break;

        case BOOKE_INTERRUPT_PROGRAM:
                if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
                        /*
                         * Program traps generated by user-level software must
                         * be handled by the guest kernel.
                         *
                         * In GS mode, hypervisor privileged instructions trap
                         * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
                         * actual program interrupts, handled by the guest.
                         */
                        kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
                        r = RESUME_GUEST;
                        kvmppc_account_exit(vcpu, USR_PR_INST);
                        break;
                }

                r = emulation_exit(run, vcpu);
                break;

        case BOOKE_INTERRUPT_FP_UNAVAIL:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
                kvmppc_account_exit(vcpu, FP_UNAVAIL);
                r = RESUME_GUEST;
                break;

#ifdef CONFIG_SPE
        case BOOKE_INTERRUPT_SPE_UNAVAIL: {
                if (vcpu->arch.shared->msr & MSR_SPE)
                        kvmppc_vcpu_enable_spe(vcpu);
                else
                        kvmppc_booke_queue_irqprio(vcpu,
                                                   BOOKE_IRQPRIO_SPE_UNAVAIL);
                r = RESUME_GUEST;
                break;
        }

        case BOOKE_INTERRUPT_SPE_FP_DATA:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_SPE_FP_ROUND:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
                r = RESUME_GUEST;
                break;
#else
        case BOOKE_INTERRUPT_SPE_UNAVAIL:
                /*
                 * Guest wants SPE, but host kernel doesn't support it.  Send
                 * an "unimplemented operation" program check to the guest.
                 */
                kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
                r = RESUME_GUEST;
                break;

        /*
         * These really should never happen without CONFIG_SPE,
         * as we should never enable the real MSR[SPE] in the guest.
         */
        case BOOKE_INTERRUPT_SPE_FP_DATA:
        case BOOKE_INTERRUPT_SPE_FP_ROUND:
                printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
                       __func__, exit_nr, vcpu->arch.pc);
                run->hw.hardware_exit_reason = exit_nr;
                r = RESUME_HOST;
                break;
#endif

        case BOOKE_INTERRUPT_DATA_STORAGE:
                kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
                                               vcpu->arch.fault_esr);
                kvmppc_account_exit(vcpu, DSI_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_INST_STORAGE:
                kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
                kvmppc_account_exit(vcpu, ISI_EXITS);
                r = RESUME_GUEST;
                break;

#ifdef CONFIG_KVM_BOOKE_HV
        case BOOKE_INTERRUPT_HV_SYSCALL:
                if (!(vcpu->arch.shared->msr & MSR_PR)) {
                        kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
                } else {
                        /*
                         * hcall from guest userspace -- send privileged
                         * instruction program check.
                         */
                        kvmppc_core_queue_program(vcpu, ESR_PPR);
                }

                r = RESUME_GUEST;
                break;
#else
        case BOOKE_INTERRUPT_SYSCALL:
                if (!(vcpu->arch.shared->msr & MSR_PR) &&
                    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
                        /* KVM PV hypercalls */
                        kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
                        r = RESUME_GUEST;
                } else {
                        /* Guest syscalls */
                        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
                }
                kvmppc_account_exit(vcpu, SYSCALL_EXITS);
                r = RESUME_GUEST;
                break;
#endif

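        /*
         * TLB miss handling below is two-level: the guest's TLB is
         * consulted first, and a miss there is reflected straight back
         * to the guest.  If the guest TLB hits, the translation either
         * points at visible guest RAM (so we install a host shadow
         * mapping) or at something else (so we treat the access as
         * emulated MMIO, or as a machine check for instruction fetches).
         */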
        case BOOKE_INTERRUPT_DTLB_MISS: {
                unsigned long eaddr = vcpu->arch.fault_dear;
                int gtlb_index;
                gpa_t gpaddr;
                gfn_t gfn;

#ifdef CONFIG_KVM_E500V2
                if (!(vcpu->arch.shared->msr & MSR_PR) &&
                    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
                        kvmppc_map_magic(vcpu);
                        kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
                        r = RESUME_GUEST;

                        break;
                }
#endif

                /* Check the guest TLB. */
                gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
                if (gtlb_index < 0) {
                        /* The guest didn't have a mapping for it. */
                        kvmppc_core_queue_dtlb_miss(vcpu,
                                                    vcpu->arch.fault_dear,
                                                    vcpu->arch.fault_esr);
                        kvmppc_mmu_dtlb_miss(vcpu);
                        kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
                        r = RESUME_GUEST;
                        break;
                }

                gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
                gfn = gpaddr >> PAGE_SHIFT;

                if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
                        /* The guest TLB had a mapping, but the shadow TLB
                         * didn't, and it is RAM. This could be because:
                         * a) the entry is mapping the host kernel, or
                         * b) the guest used a large mapping which we're faking
                         * Either way, we need to satisfy the fault without
                         * invoking the guest. */
                        kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
                        kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
                        r = RESUME_GUEST;
                } else {
                        /* Guest has mapped and accessed a page which is not
                         * actually RAM. */
                        vcpu->arch.paddr_accessed = gpaddr;
                        r = kvmppc_emulate_mmio(run, vcpu);
                        kvmppc_account_exit(vcpu, MMIO_EXITS);
                }

                break;
        }

        case BOOKE_INTERRUPT_ITLB_MISS: {
                unsigned long eaddr = vcpu->arch.pc;
                gpa_t gpaddr;
                gfn_t gfn;
                int gtlb_index;

                r = RESUME_GUEST;

                /* Check the guest TLB. */
                gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
                if (gtlb_index < 0) {
                        /* The guest didn't have a mapping for it. */
                        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
                        kvmppc_mmu_itlb_miss(vcpu);
                        kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
                        break;
                }

                kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

                gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
                gfn = gpaddr >> PAGE_SHIFT;

                if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
                        /* The guest TLB had a mapping, but the shadow TLB
                         * didn't. This could be because:
                         * a) the entry is mapping the host kernel, or
                         * b) the guest used a large mapping which we're faking
                         * Either way, we need to satisfy the fault without
                         * invoking the guest. */
                        kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
                } else {
                        /* Guest mapped and leaped at non-RAM! */
                        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
                }

                break;
        }

        case BOOKE_INTERRUPT_DEBUG: {
                u32 dbsr;

                vcpu->arch.pc = mfspr(SPRN_CSRR0);

                /* clear IAC events in DBSR register */
                dbsr = mfspr(SPRN_DBSR);
                dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
                mtspr(SPRN_DBSR, dbsr);

                run->exit_reason = KVM_EXIT_DEBUG;
                kvmppc_account_exit(vcpu, DEBUG_EXITS);
                r = RESUME_HOST;
                break;
        }

        default:
                printk(KERN_EMERG "exit_nr %d\n", exit_nr);
                BUG();
        }

        /*
         * To avoid clobbering exit_reason, only check for signals if we
         * aren't already exiting to userspace for some other reason.
         */
        local_irq_disable();
        if (kvmppc_prepare_to_enter(vcpu, !(r & RESUME_HOST))) {
                run->exit_reason = KVM_EXIT_INTR;
                r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
                kvmppc_account_exit(vcpu, SIGNAL_EXITS);
        }

        return r;
}

/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        int i;
        int r;

        vcpu->arch.pc = 0;
        vcpu->arch.shared->pir = vcpu->vcpu_id;
        kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
        kvmppc_set_msr(vcpu, 0);

#ifndef CONFIG_KVM_BOOKE_HV
        vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
        vcpu->arch.shadow_pid = 1;
        vcpu->arch.shared->msr = 0;
#endif

        /* Eye-catching numbers so we know if the guest takes an interrupt
         * before it's programmed its own IVPR/IVORs. */
        vcpu->arch.ivpr = 0x55550000;
        for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
                vcpu->arch.ivor[i] = 0x7700 | i * 4;

        kvmppc_init_timing_stats(vcpu);

        r = kvmppc_core_vcpu_setup(vcpu);
        kvmppc_sanity_check(vcpu);
        return r;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        regs->pc = vcpu->arch.pc;
        regs->cr = kvmppc_get_cr(vcpu);
        regs->ctr = vcpu->arch.ctr;
        regs->lr = vcpu->arch.lr;
        regs->xer = kvmppc_get_xer(vcpu);
        regs->msr = vcpu->arch.shared->msr;
        regs->srr0 = vcpu->arch.shared->srr0;
        regs->srr1 = vcpu->arch.shared->srr1;
        regs->pid = vcpu->arch.pid;
        regs->sprg0 = vcpu->arch.shared->sprg0;
        regs->sprg1 = vcpu->arch.shared->sprg1;
        regs->sprg2 = vcpu->arch.shared->sprg2;
        regs->sprg3 = vcpu->arch.shared->sprg3;
        regs->sprg4 = vcpu->arch.shared->sprg4;
        regs->sprg5 = vcpu->arch.shared->sprg5;
        regs->sprg6 = vcpu->arch.shared->sprg6;
        regs->sprg7 = vcpu->arch.shared->sprg7;

        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        vcpu->arch.pc = regs->pc;
        kvmppc_set_cr(vcpu, regs->cr);
        vcpu->arch.ctr = regs->ctr;
        vcpu->arch.lr = regs->lr;
        kvmppc_set_xer(vcpu, regs->xer);
        kvmppc_set_msr(vcpu, regs->msr);
        vcpu->arch.shared->srr0 = regs->srr0;
        vcpu->arch.shared->srr1 = regs->srr1;
        kvmppc_set_pid(vcpu, regs->pid);
        vcpu->arch.shared->sprg0 = regs->sprg0;
        vcpu->arch.shared->sprg1 = regs->sprg1;
        vcpu->arch.shared->sprg2 = regs->sprg2;
        vcpu->arch.shared->sprg3 = regs->sprg3;
        vcpu->arch.shared->sprg4 = regs->sprg4;
        vcpu->arch.shared->sprg5 = regs->sprg5;
        vcpu->arch.shared->sprg6 = regs->sprg6;
        vcpu->arch.shared->sprg7 = regs->sprg7;

        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

        return 0;
}

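/*
 * The kvm_sregs interface is split into feature blocks (KVM_SREGS_E_*)
 * so userspace and the kernel can tell which register groups each side
 * understands; each getter below tags the block it fills in, and each
 * setter ignores blocks userspace did not supply.
 */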
static void get_sregs_base(struct kvm_vcpu *vcpu,
                           struct kvm_sregs *sregs)
{
        u64 tb = get_tb();

        sregs->u.e.features |= KVM_SREGS_E_BASE;

        sregs->u.e.csrr0 = vcpu->arch.csrr0;
        sregs->u.e.csrr1 = vcpu->arch.csrr1;
        sregs->u.e.mcsr = vcpu->arch.mcsr;
        sregs->u.e.esr = get_guest_esr(vcpu);
        sregs->u.e.dear = get_guest_dear(vcpu);
        sregs->u.e.tsr = vcpu->arch.tsr;
        sregs->u.e.tcr = vcpu->arch.tcr;
        sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
        sregs->u.e.tb = tb;
        sregs->u.e.vrsave = vcpu->arch.vrsave;
}

static int set_sregs_base(struct kvm_vcpu *vcpu,
                          struct kvm_sregs *sregs)
{
        if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
                return 0;

        vcpu->arch.csrr0 = sregs->u.e.csrr0;
        vcpu->arch.csrr1 = sregs->u.e.csrr1;
        vcpu->arch.mcsr = sregs->u.e.mcsr;
        set_guest_esr(vcpu, sregs->u.e.esr);
        set_guest_dear(vcpu, sregs->u.e.dear);
        vcpu->arch.vrsave = sregs->u.e.vrsave;
        kvmppc_set_tcr(vcpu, sregs->u.e.tcr);

        if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
                vcpu->arch.dec = sregs->u.e.dec;
                kvmppc_emulate_dec(vcpu);
        }

        if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) {
                vcpu->arch.tsr = sregs->u.e.tsr;
                update_timer_ints(vcpu);
        }

        return 0;
}

static void get_sregs_arch206(struct kvm_vcpu *vcpu,
                              struct kvm_sregs *sregs)
{
        sregs->u.e.features |= KVM_SREGS_E_ARCH206;

        sregs->u.e.pir = vcpu->vcpu_id;
        sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
        sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
        sregs->u.e.decar = vcpu->arch.decar;
        sregs->u.e.ivpr = vcpu->arch.ivpr;
}

static int set_sregs_arch206(struct kvm_vcpu *vcpu,
                             struct kvm_sregs *sregs)
{
        if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
                return 0;

        if (sregs->u.e.pir != vcpu->vcpu_id)
                return -EINVAL;

        vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
        vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
        vcpu->arch.decar = sregs->u.e.decar;
        vcpu->arch.ivpr = sregs->u.e.ivpr;

        return 0;
}

void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
        sregs->u.e.features |= KVM_SREGS_E_IVOR;

        sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
        sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
        sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
        sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
        sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
        sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
        sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
        sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
        sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
        sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
        sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
        sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
        sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
        sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
        sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
        sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
}

int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
        if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
                return 0;

        vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
        vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
        vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
        vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
        vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
        vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
        vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
        vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
        vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
        vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
        vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
        vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
        vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
        vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
        vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
        vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];

        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        sregs->pvr = vcpu->arch.pvr;

        get_sregs_base(vcpu, sregs);
        get_sregs_arch206(vcpu, sregs);
        kvmppc_core_get_sregs(vcpu, sregs);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        int ret;

        if (vcpu->arch.pvr != sregs->pvr)
                return -EINVAL;

        ret = set_sregs_base(vcpu, sregs);
        if (ret < 0)
                return ret;

        ret = set_sregs_arch206(vcpu, sregs);
        if (ret < 0)
                return ret;

        return kvmppc_core_set_sregs(vcpu, sregs);
}

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
        return -EINVAL;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        int r;

        r = kvmppc_core_vcpu_translate(vcpu, tr);
        return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
        return -ENOTSUPP;
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
                                      struct kvm_userspace_memory_region *mem)
{
        return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem)
{
}

void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
{
        vcpu->arch.tcr = new_tcr;
        update_timer_ints(vcpu);
}

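/*
 * TSR bits can be set from timer callback context (see
 * kvmppc_decrementer_func below), so use an atomic set, then make a
 * KVM_REQ_PENDING_TIMER request and kick the vcpu so that
 * update_timer_ints() runs on the vcpu thread before the next entry.
 */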
void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
        set_bits(tsr_bits, &vcpu->arch.tsr);
        smp_wmb();
        kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
        kvm_vcpu_kick(vcpu);
}

void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
        clear_bits(tsr_bits, &vcpu->arch.tsr);
        update_timer_ints(vcpu);
}

void kvmppc_decrementer_func(unsigned long data)
{
        struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

        kvmppc_set_tsr_bits(vcpu, TSR_DIS);
}

void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        current->thread.kvm_vcpu = vcpu;
}

void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
{
        current->thread.kvm_vcpu = NULL;
}

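/*
 * Host-side setup: the guest-mode exception handlers are copied into a
 * single 64KB-aligned block at the same offsets as the host's IVORs, so
 * entering and leaving a guest only has to retarget IVPR, not the
 * individual IVORs.  The icache flush makes the copied handler code
 * visible to instruction fetch.
 */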
int __init kvmppc_booke_init(void)
{
#ifndef CONFIG_KVM_BOOKE_HV
        unsigned long ivor[16];
        unsigned long max_ivor = 0;
        int i;

        /* We install our own exception handlers by hijacking IVPR.  The
         * bottom 16 bits of IVPR are ignored by hardware, so the handler
         * block must be 64KB aligned; hence a 64KB allocation. */
        kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                 VCPU_SIZE_ORDER);
        if (!kvmppc_booke_handlers)
                return -ENOMEM;

        /* XXX make sure our handlers are smaller than Linux's */

        /* Copy our interrupt handlers to match host IVORs. That way we don't
         * have to swap the IVORs on every guest/host transition. */
        ivor[0] = mfspr(SPRN_IVOR0);
        ivor[1] = mfspr(SPRN_IVOR1);
        ivor[2] = mfspr(SPRN_IVOR2);
        ivor[3] = mfspr(SPRN_IVOR3);
        ivor[4] = mfspr(SPRN_IVOR4);
        ivor[5] = mfspr(SPRN_IVOR5);
        ivor[6] = mfspr(SPRN_IVOR6);
        ivor[7] = mfspr(SPRN_IVOR7);
        ivor[8] = mfspr(SPRN_IVOR8);
        ivor[9] = mfspr(SPRN_IVOR9);
        ivor[10] = mfspr(SPRN_IVOR10);
        ivor[11] = mfspr(SPRN_IVOR11);
        ivor[12] = mfspr(SPRN_IVOR12);
        ivor[13] = mfspr(SPRN_IVOR13);
        ivor[14] = mfspr(SPRN_IVOR14);
        ivor[15] = mfspr(SPRN_IVOR15);

        for (i = 0; i < 16; i++) {
                if (ivor[i] > max_ivor)
                        max_ivor = ivor[i];

                memcpy((void *)kvmppc_booke_handlers + ivor[i],
                       kvmppc_handlers_start + i * kvmppc_handler_len,
                       kvmppc_handler_len);
        }
        flush_icache_range(kvmppc_booke_handlers,
                           kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);
#endif /* !BOOKE_HV */
        return 0;
}

void __exit kvmppc_booke_exit(void)
{
        free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
        kvm_exit();
}