1 //===-------------------- UnwindRegistersRestore.S ------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is dual licensed under the MIT and the University of Illinois Open
6 // Source Licenses. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
15 DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_x866jumptoEv)
#
# Restore the saved i386 register state and jump to the saved EIP.
# This routine never returns to its caller: it switches to the stack pointer
# saved in the thread_state, then executes `ret` against a return address
# that was pushed onto that new stack.
#
# NOTE(review): several original instructions are elided from this excerpt
# (the load of the thread_state pointer into %eax, the pushes of the new
# eip/eax onto the target stack, and the loads of the remaining
# general-purpose registers) — verify against the complete file.
#
17 # void libunwind::Registers_x86::jumpto()
21 # +-----------------------+
22 # + thread_state pointer +
23 # +-----------------------+
25 # +-----------------------+ <-- SP
28 # set up eax and ret on new stack location
# NOTE(review): %eax presumably holds the Registers_x86 pointer here (set by
# elided code), with offset 28 being the saved ESP field — TODO confirm
# against Registers_x86's layout in Registers.hpp.
29 movl 28(%eax), %edx # edx holds new stack pointer
36 # we now have ret and eax pushed onto where new stack will be
37 # restore all registers
# The final pop/ret consume the two values placed on the target stack by the
# (elided) setup code above: first the saved eax, then the saved eip.
47 pop %eax # eax was already pushed on new stack
48 ret # eip was already pushed on new stack
57 DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind16Registers_x86_646jumptoEv)
#
# Restore the saved x86-64 register state and jump to the saved RIP.
# Never returns: rsp is cut back to the saved stack pointer, then the final
# pop/ret consume the saved rdi and rip that were staged onto that stack.
#
# NOTE(review): this excerpt elides the instructions between these lines —
# in particular the stores of %rbx onto the new stack (which is why the
# comments on the two loads below speak of "store ... on new stack") and the
# loads of the remaining general-purpose registers — verify against the
# complete file.
#
59 # void libunwind::Registers_x86_64::jumpto()
61 # On entry, thread_state pointer is in rdi
# Offset 56 presumably addresses the saved RSP field of Registers_x86_64,
# offsets 32/128 the saved rdi/rip — TODO confirm against Registers.hpp.
63 movq 56(%rdi), %rax # rax holds new stack pointer
66 movq 32(%rdi), %rbx # store new rdi on new stack
68 movq 128(%rdi), %rbx # store new rip on new stack
70 # restore all registers
# rdi (the thread_state pointer) is restored last, via the value staged on
# the target stack, because every load above indexes off it.
91 movq 56(%rdi), %rsp # cut back rsp to new location
92 pop %rdi # rdi was saved here earlier
93 ret # rip was saved here
98 DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_ppc6jumptoEv)
;
; Restore the saved 32-bit PowerPC register state from the Registers_ppc
; structure addressed by r3, then branch to the saved resume address.
; Never returns to its caller.
;
; NOTE(review): this excerpt elides many original lines — the bulk of the
; GPR and FPR restores, the stw instructions inside the vector-restore
; macros, the Ldone## labels each macro branches to, and the final
; mtctr/bctr sequence — verify against the complete file.
;
100 ; void libunwind::Registers_ppc::jumpto()
103 ; thread_state pointer is in r3
106 ; restore integral registers
140 ; restore float registers
;
; Vector restore: r5 holds VRsave, a 32-bit mask with one bit per vector
; register (bit 0 = v0 ... bit 31 = v31); only registers whose bit is set
; are restored. r4 is a 16-byte-aligned scratch pointer in the red zone,
; used because the saved _vectorRegisters area may be unaligned for lvx.
174 ; restore vector registers if any are in use
175 lwz r5,156(r3) ; test VRsave
180 rlwinm r4,r4,0,0,27 ; mask low 4-bits
181 ; r4 is now a 16-byte aligned pointer into the red zone
182 ; the _vectorRegisters may not be 16-byte aligned so copy via red zone temp buffer
;
; LOAD_VECTOR_UNALIGNEDl handles v0-v15 (tested with andis. against the high
; half of VRsave); LOAD_VECTOR_UNALIGNEDh handles v16-v31 (andi., low half).
; Each copies the four words of the saved vector (at offset 424+_index*16 in
; the Registers_ppc structure) through the aligned red-zone buffer at r4,
; then lvx loads the assembled 16 bytes into v_index.
; NOTE(review): the stores into the r4 buffer between the word loads are
; elided here; as shown the loads into r0 would be dead — confirm against
; the complete file before editing these macros.
185 #define LOAD_VECTOR_UNALIGNEDl(_index) \
186 andis. r0,r5,(1<<(15-_index)) @\
187 beq Ldone ## _index @\
188 lwz r0, 424+_index*16(r3) @\
190 lwz r0, 424+_index*16+4(r3) @\
192 lwz r0, 424+_index*16+8(r3) @\
194 lwz r0, 424+_index*16+12(r3)@\
196 lvx v ## _index,0,r4 @\
199 #define LOAD_VECTOR_UNALIGNEDh(_index) \
200 andi. r0,r5,(1<<(31-_index)) @\
201 beq Ldone ## _index @\
202 lwz r0, 424+_index*16(r3) @\
204 lwz r0, 424+_index*16+4(r3) @\
206 lwz r0, 424+_index*16+8(r3) @\
208 lwz r0, 424+_index*16+12(r3)@\
210 lvx v ## _index,0,r4 @\
214 LOAD_VECTOR_UNALIGNEDl(0)
215 LOAD_VECTOR_UNALIGNEDl(1)
216 LOAD_VECTOR_UNALIGNEDl(2)
217 LOAD_VECTOR_UNALIGNEDl(3)
218 LOAD_VECTOR_UNALIGNEDl(4)
219 LOAD_VECTOR_UNALIGNEDl(5)
220 LOAD_VECTOR_UNALIGNEDl(6)
221 LOAD_VECTOR_UNALIGNEDl(7)
222 LOAD_VECTOR_UNALIGNEDl(8)
223 LOAD_VECTOR_UNALIGNEDl(9)
224 LOAD_VECTOR_UNALIGNEDl(10)
225 LOAD_VECTOR_UNALIGNEDl(11)
226 LOAD_VECTOR_UNALIGNEDl(12)
227 LOAD_VECTOR_UNALIGNEDl(13)
228 LOAD_VECTOR_UNALIGNEDl(14)
229 LOAD_VECTOR_UNALIGNEDl(15)
230 LOAD_VECTOR_UNALIGNEDh(16)
231 LOAD_VECTOR_UNALIGNEDh(17)
232 LOAD_VECTOR_UNALIGNEDh(18)
233 LOAD_VECTOR_UNALIGNEDh(19)
234 LOAD_VECTOR_UNALIGNEDh(20)
235 LOAD_VECTOR_UNALIGNEDh(21)
236 LOAD_VECTOR_UNALIGNEDh(22)
237 LOAD_VECTOR_UNALIGNEDh(23)
238 LOAD_VECTOR_UNALIGNEDh(24)
239 LOAD_VECTOR_UNALIGNEDh(25)
240 LOAD_VECTOR_UNALIGNEDh(26)
241 LOAD_VECTOR_UNALIGNEDh(27)
242 LOAD_VECTOR_UNALIGNEDh(28)
243 LOAD_VECTOR_UNALIGNEDh(29)
244 LOAD_VECTOR_UNALIGNEDh(30)
245 LOAD_VECTOR_UNALIGNEDh(31)
;
; Restore special registers (cr, ctr, srr0) and the GPRs that served as
; scratch. r3 must be restored last — it is the base pointer for every load.
; NOTE(review): the mtcrf/mtctr/mtspr instructions that consume r0 after
; each of the first three loads, and the final bctr, are elided from this
; excerpt; "__ssr0" below presumably names the struct's srr0 field — TODO
; confirm against Registers.hpp.
248 lwz r0, 136(r3) ; __cr
250 lwz r0, 148(r3) ; __ctr
252 lwz r0, 0(r3) ; __ssr0
254 lwz r0, 8(r3) ; do r0 now
255 lwz r5,28(r3) ; do r5 now
256 lwz r4,24(r3) ; do r4 now
257 lwz r1,12(r3) ; do sp now
258 lwz r3,20(r3) ; do r3 last
264 ; void libunwind::Registers_arm64::jumpto()
267 ; thread_state pointer is in x0
;
; Restore the saved AArch64 register state from the Registers_arm64
; structure addressed by x0 and resume at the saved pc (loaded into lr).
; The GPR pairs live at 16-byte offsets from 0x000, the saved pc at 0x100,
; and the 64-bit halves of v0-v31 (d0-d31) from 0x110 upward.
;
; NOTE(review): this excerpt elides the load of the saved sp into x1 that
; must precede "mov sp,x1", as well as the final ret — verify against the
; complete file.
;
270 DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind15Registers_arm646jumptoEv)
271 ; skip restore of x0,x1 for now
272 ldp x2, x3, [x0, #0x010]
273 ldp x4, x5, [x0, #0x020]
274 ldp x6, x7, [x0, #0x030]
275 ldp x8, x9, [x0, #0x040]
276 ldp x10,x11, [x0, #0x050]
277 ldp x12,x13, [x0, #0x060]
278 ldp x14,x15, [x0, #0x070]
279 ldp x16,x17, [x0, #0x080]
280 ldp x18,x19, [x0, #0x090]
281 ldp x20,x21, [x0, #0x0A0]
282 ldp x22,x23, [x0, #0x0B0]
283 ldp x24,x25, [x0, #0x0C0]
284 ldp x26,x27, [x0, #0x0D0]
285 ldp x28,fp, [x0, #0x0E0]
286 ldr lr, [x0, #0x100] ; restore pc into lr
; NOTE(review): x1 presumably holds the saved sp here, loaded by an elided
; instruction between the lines above and this one — TODO confirm.
288 mov sp,x1 ; restore sp
290 ldp d0, d1, [x0, #0x110]
291 ldp d2, d3, [x0, #0x120]
292 ldp d4, d5, [x0, #0x130]
293 ldp d6, d7, [x0, #0x140]
294 ldp d8, d9, [x0, #0x150]
295 ldp d10,d11, [x0, #0x160]
296 ldp d12,d13, [x0, #0x170]
297 ldp d14,d15, [x0, #0x180]
298 ldp d16,d17, [x0, #0x190]
299 ldp d18,d19, [x0, #0x1A0]
300 ldp d20,d21, [x0, #0x1B0]
301 ldp d22,d23, [x0, #0x1C0]
302 ldp d24,d25, [x0, #0x1D0]
303 ldp d26,d27, [x0, #0x1E0]
304 ldp d28,d29, [x0, #0x1F0]
305 ldr d30, [x0, #0x200]
306 ldr d31, [x0, #0x208]
; x0 is restored last because it has been the base register for every load
; above; after this instruction the saved-state pointer is gone.
308 ldp x0, x1, [x0, #0x000] ; restore x0,x1
314 @ void libunwind::Registers_arm::jumpto()
317 @ thread_state pointer is in r0
320 DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm6jumptoEv)
321 @ Use lr as base so that r0 can be restored.
323 @ 32bit thumb-2 restrictions for ldm:
324 @ . the sp (r13) cannot be in the list
325 @ . the pc (r15) and lr (r14) cannot both be in the list in an LDM instruction
328 ldr lr, [lr, #60] @ restore pc into lr