2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15 * Copyright SUSE Linux Products GmbH 2009
17 * Authors: Alexander Graf <agraf@suse.de>
/*
 * Byte offsets of the ESID / VSID doublewords for bolted SLB entry
 * `num` inside the SLB shadow save area.  Each shadow entry is
 * 0x10 bytes: ESID at offset 0, VSID at offset 8.
 */
20 #define SHADOW_SLB_ESID(num) (SLBSHADOW_SAVEAREA + (num * 0x10))
21 #define SHADOW_SLB_VSID(num) (SLBSHADOW_SAVEAREA + (num * 0x10) + 0x8)
/*
 * UNBOLT_SLB_ENTRY(num): clear SLB_ESID_V in the saved ESID of shadow
 * entry `num` (shadow base in r12), presumably so the entry is no
 * longer treated as bolted while the guest's SLB is in use — confirm
 * against the surrounding "Remove LPAR shadow entries" context.
 * Entries that are already invalid are skipped.  Clobbers r0, r9, cr0.
 */
22 #define UNBOLT_SLB_ENTRY(num) \
23 ld r9, SHADOW_SLB_ESID(num)(r12); \
24 /* Invalid? Skip. */; \
25 rldicl. r0, r9, 37, 63; /* rotate the valid bit down to bit 0; cr0 EQ set when entry invalid */ \
26 beq slb_entry_skip_ ## num; \
27 xoris r9, r9, SLB_ESID_V@h; /* V is known set here, so the xor clears it */ \
28 std r9, SHADOW_SLB_ESID(num)(r12); \
29 slb_entry_skip_ ## num:
/*
 * REBOLT_SLB_ENTRY(num): restore bolted shadow entry `num` (shadow
 * base in r11) — set SLB_ESID_V again in the saved ESID and write it
 * back, skipping entries found empty/invalid.
 * NOTE(review): this excerpt is missing two interior lines (embedded
 * numbering jumps 32->34 and 36->38): the compare that feeds the beq
 * below, and the instruction that consumes r9 (the loaded VSID) —
 * presumably an slbmte reinstalling the entry.  Confirm against the
 * complete file before relying on this comment.
 */
31 #define REBOLT_SLB_ENTRY(num) \
32 ld r10, SHADOW_SLB_ESID(num)(r11); \
34 beq slb_exit_skip_ ## num; \
35 oris r10, r10, SLB_ESID_V@h; /* re-mark the saved ESID valid */ \
36 ld r9, SHADOW_SLB_VSID(num)(r11); \
38 std r10, SHADOW_SLB_ESID(num)(r11); \
39 slb_exit_skip_ ## num:
41 /******************************************************************************
45 *****************************************************************************/
/*
 * LOAD_GUEST_SEGMENTS: switch the SLB from host to guest entries.
 * NOTE(review): this excerpt is heavily elided (embedded line numbers
 * jump repeatedly, and the closing .endm is not visible).  The visible
 * fragments: fetch the paca's SLB shadow pointer and unbolt the host's
 * bolted entries (when SLB_NUM_BOLTED == 3), then walk the shadow
 * vcpu's SLB array starting at SVCPU_SLB, skipping entries whose
 * valid bit is clear.  Confirm every detail against the complete file.
 */
47 .macro LOAD_GUEST_SEGMENTS
56 * all other volatile GPRS = free except R4, R6
57 * SVCPU[CR] = guest CR
58 * SVCPU[XER] = guest XER
59 * SVCPU[CTR] = guest CTR
60 * SVCPU[LR] = guest LR
63 /* Remove LPAR shadow entries */
65 #if SLB_NUM_BOLTED == 3
67 ld r12, PACA_SLBSHADOWPTR(r13) /* r12 = SLB shadow buffer base */
69 /* Remove bolted entries */
75 #error unknown number of bolted entries
84 /* Fill SLB with our shadow */
86 lbz r12, SVCPU_SLB_MAX(r3) /* presumably the count of guest SLB entries — TODO confirm */
88 addi r12, r12, SVCPU_SLB
91 /* for (r11 = kvm_slb; r11 < kvm_slb + kvm_slb_size; r11+=slb_entry) */
99 rldicl. r0, r10, 37, 63 /* test the entry's valid bit (same rotate as UNBOLT above) */
100 beq slb_loop_enter_skip
114 /******************************************************************************
118 *****************************************************************************/
120 .macro LOAD_HOST_SEGMENTS
122 /* Register usage at this point:
126 * R12 = exit handler id
127 * R13 = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
129 * SVCPU[CR] = guest CR
130 * SVCPU[XER] = guest XER
131 * SVCPU[CTR] = guest CTR
132 * SVCPU[LR] = guest LR
136 /* Restore bolted entries from the shadow and fix it along the way */
138 /* We don't store anything in entry 0, so we don't need to take care of it */
142 #if SLB_NUM_BOLTED == 3
144 ld r11, PACA_SLBSHADOWPTR(r13)
151 #error unknown number of bolted entries