/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#define KVM_MAGIC_PAGE		(-4096)
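/*
 * The "magic page" is a page of state shared between guest and host. The
 * guest maps it at the top of its effective address space (address -4096),
 * so every field below is reachable as a small negative offset from the
 * zero base register.
 */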
#ifdef CONFIG_64BIT
#define LL64(reg, offs, reg2)	ld	reg, (offs)(reg2)
#define STL64(reg, offs, reg2)	std	reg, (offs)(reg2)
#else
#define LL64(reg, offs, reg2)	lwz	reg, (offs + 4)(reg2)
#define STL64(reg, offs, reg2)	stw	reg, (offs + 4)(reg2)
#endif
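/*
 * The magic page fields are 64 bits wide even for 32-bit guests; with the
 * big-endian layout, a 32-bit kernel loads and stores the low word at
 * offs + 4.
 */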
#define SCRATCH_SAVE						\
	/* Enable critical section. We are critical if		\
	   shared->critical == r1 */				\
	STL64(r1, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);	\
								\
	/* Save state */					\
	PPC_STL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);	\
	PPC_STL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);	\
	mfcr	r31;						\
	stw	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);
#define SCRATCH_RESTORE						\
	/* Restore state */					\
	PPC_LL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);	\
	lwz	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);	\
	mtcr	r30;						\
	PPC_LL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);	\
								\
	/* Disable critical section. We are critical if		\
	   shared->critical == r1 and r2 is always != r1 */	\
	STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);
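/*
 * While shared->critical == r1 the host defers interrupt injection, so a
 * template cannot be re-entered while it is still using the scratch
 * fields. Storing r2 (which never equals the stack pointer in r1) ends
 * the critical section.
 */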
.global kvm_template_start
kvm_template_start:
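/*
 * Everything between kvm_template_start and kvm_template_end is a set of
 * position-independent emulation templates. The patching code in
 * arch/powerpc/kernel/kvm.c copies a template, fixes up the copy through
 * the _offs/_len tables exported below, and rewrites the trapping guest
 * instruction into a branch to the copy.
 */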
.global kvm_emulate_mtmsrd
kvm_emulate_mtmsrd:

	SCRATCH_SAVE

	/* Put MSR & ~(MSR_EE|MSR_RI) in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	lis	r30, (~(MSR_EE | MSR_RI))@h
	ori	r30, r30, (~(MSR_EE | MSR_RI))@l
	and	r31, r31, r30
	/* OR the register's (MSR_EE|MSR_RI) on MSR */
kvm_emulate_mtmsrd_reg:
	ori	r30, r0, 0	/* patched: copies the mtmsrd source register */
	andi.	r30, r30, (MSR_EE|MSR_RI)
	or	r31, r31, r30
	/* Put MSR back into magic page */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_check
	/* Check if we may trigger an interrupt */
	andi.	r30, r30, MSR_EE
	beq	no_check

	SCRATCH_RESTORE

	/* An interrupt is pending and EE was just enabled: run the
	   original mtmsrd to trap into the hypervisor, which can then
	   deliver the interrupt */
kvm_emulate_mtmsrd_orig_ins:
	nop	/* patched with the original mtmsrd */
	b	kvm_emulate_mtmsrd_branch

no_check:

	SCRATCH_RESTORE
	/* Go back to caller */
kvm_emulate_mtmsrd_branch:
	b	.	/* patched: branch back to the caller */
kvm_emulate_mtmsrd_end:
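/*
 * Each template exports the word offsets of its patchable slots plus its
 * length. Roughly how arch/powerpc/kernel/kvm.c consumes them (a
 * simplified sketch, not the verbatim code):
 *
 *	u32 *p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
 *	memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
 *	p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK;
 *	p[kvm_emulate_mtmsrd_reg_offs] |= rt;
 *	p[kvm_emulate_mtmsrd_orig_ins_offs] = *inst;
 *
 * All offsets are counted in 4-byte instructions, hence the "/ 4".
 */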
.global kvm_emulate_mtmsrd_branch_offs
kvm_emulate_mtmsrd_branch_offs:
	.long (kvm_emulate_mtmsrd_branch - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_reg_offs
kvm_emulate_mtmsrd_reg_offs:
	.long (kvm_emulate_mtmsrd_reg - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_orig_ins_offs
kvm_emulate_mtmsrd_orig_ins_offs:
	.long (kvm_emulate_mtmsrd_orig_ins - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_len
kvm_emulate_mtmsrd_len:
	.long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4
#define MSR_SAFE_BITS (MSR_EE | MSR_RI)
#define MSR_CRITICAL_BITS ~MSR_SAFE_BITS
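/*
 * Strategy for mtmsr: changes confined to MSR_SAFE_BITS (EE and RI) can
 * be emulated by updating the MSR copy in the magic page; any other
 * change needs the original, trapping mtmsr so the host sees it
 * immediately.
 */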
.global kvm_emulate_mtmsr
kvm_emulate_mtmsr:

	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	/* Find the changed bits between old and new MSR */
kvm_emulate_mtmsr_reg1:
	ori	r30, r0, 0	/* patched: copies the mtmsr source register */
	xor	r31, r30, r31
	/* Check if we need to really do mtmsr */
	LOAD_REG_IMMEDIATE(r30, MSR_CRITICAL_BITS)
	and.	r31, r31, r30

	/* No critical bits changed? Maybe we can stay in the guest. */
	beq	maybe_stay_in_guest
do_mtmsr:

	SCRATCH_RESTORE

	/* Just fire off the mtmsr if it's critical */
kvm_emulate_mtmsr_orig_ins:
	mtmsr	r0	/* patched with the original mtmsr */

	b	kvm_emulate_mtmsr_branch

maybe_stay_in_guest:
	/* Get the target register in r30 */
kvm_emulate_mtmsr_reg2:
	ori	r30, r0, 0	/* patched: copies the mtmsr source register */

	/* Put MSR into magic page because we don't call mtmsr */
	STL64(r30, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_mtmsr
	/* Check if we may trigger an interrupt */
	andi.	r31, r30, MSR_EE
	beq	no_mtmsr

	/* Pending interrupt and EE set: do the real mtmsr after all */
	b	do_mtmsr

no_mtmsr:

	SCRATCH_RESTORE
	/* Go back to caller */
kvm_emulate_mtmsr_branch:
	b	.	/* patched: branch back to the caller */
kvm_emulate_mtmsr_end:
.global kvm_emulate_mtmsr_branch_offs
kvm_emulate_mtmsr_branch_offs:
	.long (kvm_emulate_mtmsr_branch - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg1_offs
kvm_emulate_mtmsr_reg1_offs:
	.long (kvm_emulate_mtmsr_reg1 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg2_offs
kvm_emulate_mtmsr_reg2_offs:
	.long (kvm_emulate_mtmsr_reg2 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_orig_ins_offs
kvm_emulate_mtmsr_orig_ins_offs:
	.long (kvm_emulate_mtmsr_orig_ins - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_len
kvm_emulate_mtmsr_len:
	.long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4
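/*
 * The remaining templates handle the Book E interrupt-enable instructions
 * (wrtee/wrteei) and the 32-bit Book S segment-register update (mtsrin)
 * with the same pattern: keep the state in the magic page when possible,
 * fall back to the original trapping instruction when not.
 */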
/* also used for wrteei 1 */
.global kvm_emulate_wrtee
kvm_emulate_wrtee:

	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	/* Insert new MSR[EE] */
kvm_emulate_wrtee_reg:
	ori	r30, r0, 0	/* patched: copies the wrtee source register */
	rlwimi	r31, r30, 0, MSR_EE
	/*
	 * If MSR[EE] is now set, check for a pending interrupt.
	 * We could skip this if MSR[EE] was already on, but that
	 * should be rare, so don't bother.
	 */
	andi.	r30, r30, MSR_EE
	/* Put MSR into magic page because we don't call wrtee */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	beq	no_wrtee
	/* Check if we have to fetch an interrupt */
	lwz	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r30, 0
	bne	do_wrtee

no_wrtee:
	SCRATCH_RESTORE
	/* Go back to caller */
kvm_emulate_wrtee_branch:
	b	.	/* patched: branch back to the caller */

do_wrtee:
	SCRATCH_RESTORE
	/* Just fire off the wrtee if it's critical */
kvm_emulate_wrtee_orig_ins:
	wrtee	r0	/* patched with the original wrtee */

	b	kvm_emulate_wrtee_branch

kvm_emulate_wrtee_end:
.global kvm_emulate_wrtee_branch_offs
kvm_emulate_wrtee_branch_offs:
	.long (kvm_emulate_wrtee_branch - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_reg_offs
kvm_emulate_wrtee_reg_offs:
	.long (kvm_emulate_wrtee_reg - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_orig_ins_offs
kvm_emulate_wrtee_orig_ins_offs:
	.long (kvm_emulate_wrtee_orig_ins - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_len
kvm_emulate_wrtee_len:
	.long (kvm_emulate_wrtee_end - kvm_emulate_wrtee) / 4
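/*
 * "wrteei 1" reuses the kvm_emulate_wrtee template above: instead of
 * copying a source register, the patcher can materialize MSR_EE directly
 * in the kvm_emulate_wrtee_reg slot (e.g. as li r30, MSR_EE). "wrteei 0"
 * only ever clears EE and never needs to trap, so it gets the simpler
 * dedicated template below.
 */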
.global kvm_emulate_wrteei_0
kvm_emulate_wrteei_0:
	SCRATCH_SAVE
	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	/* Remove MSR_EE from old MSR */
	rlwinm	r31, r31, 0, ~MSR_EE

	/* Write new MSR value back */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	SCRATCH_RESTORE
	/* Go back to caller */
kvm_emulate_wrteei_0_branch:
	b	.	/* patched: branch back to the caller */
kvm_emulate_wrteei_0_end:
.global kvm_emulate_wrteei_0_branch_offs
kvm_emulate_wrteei_0_branch_offs:
	.long (kvm_emulate_wrteei_0_branch - kvm_emulate_wrteei_0) / 4

.global kvm_emulate_wrteei_0_len
kvm_emulate_wrteei_0_len:
	.long (kvm_emulate_wrteei_0_end - kvm_emulate_wrteei_0) / 4
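/*
 * mtsrin updates a segment register selected by the top 4 bits of rB.
 * With address translation off, the new value can simply be parked in the
 * magic page's shadow SR array; with MSR_DR/MSR_IR on it must take effect
 * at once, so the original trapping instruction is executed instead.
 */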
.global kvm_emulate_mtsrin
kvm_emulate_mtsrin:

	SCRATCH_SAVE

	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	andi.	r31, r31, MSR_DR | MSR_IR
	beq	kvm_emulate_mtsrin_reg1
	SCRATCH_RESTORE

kvm_emulate_mtsrin_orig_ins:
	nop	/* patched with the original mtsrin */
	b	kvm_emulate_mtsrin_branch
kvm_emulate_mtsrin_reg1:
	/* rX >> 26: SR number from the top bits, scaled to a word offset */
	rlwinm	r30,r0,6,26,29
kvm_emulate_mtsrin_reg2:
	stw	r0, (KVM_MAGIC_PAGE + KVM_MAGIC_SR)(r30)

	SCRATCH_RESTORE
	/* Go back to caller */
kvm_emulate_mtsrin_branch:
	b	.	/* patched: branch back to the caller */
kvm_emulate_mtsrin_end:
.global kvm_emulate_mtsrin_branch_offs
kvm_emulate_mtsrin_branch_offs:
	.long (kvm_emulate_mtsrin_branch - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg1_offs
kvm_emulate_mtsrin_reg1_offs:
	.long (kvm_emulate_mtsrin_reg1 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg2_offs
kvm_emulate_mtsrin_reg2_offs:
	.long (kvm_emulate_mtsrin_reg2 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_orig_ins_offs
kvm_emulate_mtsrin_orig_ins_offs:
	.long (kvm_emulate_mtsrin_orig_ins - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_len
kvm_emulate_mtsrin_len:
	.long (kvm_emulate_mtsrin_end - kvm_emulate_mtsrin) / 4
.global kvm_template_end
kvm_template_end: