1 /* -----------------------------------------------------------------------
2 sysv.S - Copyright (c) 2017 Anthony Green
3 - Copyright (c) 2013 The Written Word, Inc.
4 - Copyright (c) 1996,1998,2001-2003,2005,2008,2010 Red Hat, Inc.
6 X86 Foreign Function Interface
8 Permission is hereby granted, free of charge, to any person obtaining
9 a copy of this software and associated documentation files (the
10 ``Software''), to deal in the Software without restriction, including
11 without limitation the rights to use, copy, modify, merge, publish,
12 distribute, sublicense, and/or sell copies of the Software, and to
13 permit persons to whom the Software is furnished to do so, subject to
14 the following conditions:
16 The above copyright notice and this permission notice shall be included
17 in all copies or substantial portions of the Software.
19 THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
20 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
22 NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
23 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
24 WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26 DEALINGS IN THE SOFTWARE.
27 ----------------------------------------------------------------------- */
33 #include <fficonfig.h>
/* Symbol-name helpers.  C2/C1 perform token pasting; C(X) prepends the
   platform's user-label prefix (e.g. "_" on Darwin/win32) to a global
   symbol, and L(X) forms an assembler-local label using the platform's
   local-label convention ("L" vs ".L").  */
37 #define C2(X, Y) X ## Y
38 #define C1(X, Y) C2(X, Y)
39 #ifdef __USER_LABEL_PREFIX__
40 # define C(X) C1(__USER_LABEL_PREFIX__, X)
46 # define L(X) C1(L, X)
48 # define L(X) C1(.L, X)
/* ENDF emits ELF .type/.size annotations so tools see proper function
   symbols with correct extents.  */
52 # define ENDF(X) .type X,@function; .size X, . - X
57 /* Handle win32 fastcall name mangling. */
59 # define ffi_call_i386 "@ffi_call_i386@8"
60 # define ffi_closure_inner "@ffi_closure_inner@8"
62 # define ffi_call_i386 C(ffi_call_i386)
63 # define ffi_closure_inner C(ffi_closure_inner)
66 /* This macro allows the safe creation of jump tables without an
67 actual table. The entry points into the table are all 8 bytes.
68 The use of ORG asserts that we're at the correct location. */
69 /* ??? The clang assembler doesn't handle .org with symbolic expressions. */
70 #if defined(__clang__) || defined(__APPLE__) || (defined (__sun__) && defined(__svr4__))
71 # define E(BASE, X) .balign 8
73 # define E(BASE, X) .balign 8; .org BASE + X * 8
79 FFI_HIDDEN(ffi_call_i386)
81 /* This is declared as
83 void ffi_call_i386(struct call_frame *frame, char *argp)
84 __attribute__((fastcall));
86 Thus the arguments are present in
/* Swap the caller's return address into the local call frame so the
   frame can be used as the new stack base.  */
100 movl (%esp), %eax /* move the return address */
101 movl %ebp, (%ecx) /* store %ebp into local frame */
102 movl %eax, 4(%ecx) /* store retaddr into local frame */
104 /* New stack frame based off ebp. This is an itty bit of unwind
105 trickery in that the CFA *has* changed. There is no easy way
106 to describe it correctly on entry to the function. Fortunately,
107 it doesn't matter too much since at all points we can correctly
108 unwind back to ffi_call. Note that the location to which we
109 moved the return address is (the new) CFA-4, so from the
110 perspective of the unwind info, it hasn't moved. */
113 # cfi_def_cfa(%ebp, 8)
114 # cfi_rel_offset(%ebp, 0)
/* Load the register arguments (eax/edx/ecx) from the frame's
   register-save area (20(%ebp) + R_* * 4) before making the call.  */
116 movl %edx, %esp /* set outgoing argument stack */
117 movl 20+R_EAX*4(%ebp), %eax /* set register arguments */
118 movl 20+R_EDX*4(%ebp), %edx
119 movl 20+R_ECX*4(%ebp), %ecx
/* After the call: dispatch on the return-type code through the 8-byte
   per-entry store_table below.  The PIC variant computes the table
   address pc-relatively via the bx thunk.  */
123 movl 12(%ebp), %ecx /* load return type code */
124 movl %ebx, 8(%ebp) /* preserve %ebx */
126 # cfi_rel_offset(%ebx, 8)
128 andl $X86_RET_TYPE_MASK, %ecx
130 call C(__x86.get_pc_thunk.bx)
132 leal L(store_table)-L(pc1)(%ebx, %ecx, 8), %ebx
134 leal L(store_table)(,%ecx, 8), %ebx
136 movl 16(%ebp), %ecx /* load result address */
137 _CET_NOTRACK jmp *%ebx
/* store_table: one 8-byte slot per X86_RET_* code; each slot stores the
   return value to *%ecx and returns.  E() asserts slot placement.  */
141 E(L(store_table), X86_RET_FLOAT)
144 E(L(store_table), X86_RET_DOUBLE)
147 E(L(store_table), X86_RET_LDOUBLE)
150 E(L(store_table), X86_RET_SINT8)
154 E(L(store_table), X86_RET_SINT16)
158 E(L(store_table), X86_RET_UINT8)
162 E(L(store_table), X86_RET_UINT16)
166 E(L(store_table), X86_RET_INT64)
169 E(L(store_table), X86_RET_INT32)
172 E(L(store_table), X86_RET_VOID)
179 # cfi_def_cfa(%esp, 4)
186 E(L(store_table), X86_RET_STRUCTPOP)
188 E(L(store_table), X86_RET_STRUCTARG)
190 E(L(store_table), X86_RET_STRUCT_1B)
193 E(L(store_table), X86_RET_STRUCT_2B)
197 /* Fill out the table so that bad values are predictable. */
198 E(L(store_table), X86_RET_UNUSED14)
200 E(L(store_table), X86_RET_UNUSED15)
207 /* The inner helper is declared as
209 void ffi_closure_inner(struct closure_frame *frame, char *argp)
210 __attribute__((fastcall))
212 Thus the arguments are placed in
218 /* Macros to help setting up the closure_data structure. */
/* closure_FS is the full local frame size; closure_CF is the offset of
   the closure_data within it (larger frame / nonzero offset when extra
   alignment padding is required).  */
221 # define closure_FS (40 + 4)
222 # define closure_CF 0
224 # define closure_FS (8 + 40 + 12)
225 # define closure_CF 8
/* Spill the three register-argument registers into the frame's
   register-save area so ffi_closure_inner can read them.  */
228 #define FFI_CLOSURE_SAVE_REGS \
229 movl %eax, closure_CF+16+R_EAX*4(%esp); \
230 movl %edx, closure_CF+16+R_EDX*4(%esp); \
231 movl %ecx, closure_CF+16+R_ECX*4(%esp)
/* Copy cif/fun/user_data from the ffi_closure (addressed by %eax, just
   past the trampoline bytes) into the local closure_data.  */
233 #define FFI_CLOSURE_COPY_TRAMP_DATA \
234 movl FFI_TRAMPOLINE_SIZE(%eax), %edx; /* copy cif */ \
235 movl FFI_TRAMPOLINE_SIZE+4(%eax), %ecx; /* copy fun */ \
236 movl FFI_TRAMPOLINE_SIZE+8(%eax), %eax; /* copy user_data */ \
237 movl %edx, closure_CF+28(%esp); \
238 movl %ecx, closure_CF+32(%esp); \
239 movl %eax, closure_CF+36(%esp)
/* Set up the two fastcall arguments for ffi_closure_inner:
   %ecx = closure_data, %edx = incoming argument stack.  */
242 # define FFI_CLOSURE_PREP_CALL \
243 movl %esp, %ecx; /* load closure_data */ \
244 leal closure_FS+4(%esp), %edx; /* load incoming stack */
246 # define FFI_CLOSURE_PREP_CALL \
247 leal closure_CF(%esp), %ecx; /* load closure_data */ \
248 leal closure_FS+4(%esp), %edx; /* load incoming stack */ \
253 #define FFI_CLOSURE_CALL_INNER(UWN) \
254 call ffi_closure_inner
/* Mask the returned flags down to the return-type code and jump into
   load_table<N>; the return value word is loaded eagerly since most
   return types want it in %eax anyway.  */
256 #define FFI_CLOSURE_MASK_AND_JUMP(N, UW) \
257 andl $X86_RET_TYPE_MASK, %eax; \
258 leal L(C1(load_table,N))(, %eax, 8), %edx; \
259 movl closure_CF(%esp), %eax; /* optimistic load */ \
260 _CET_NOTRACK jmp *%edx
263 # if defined X86_DARWIN || defined HAVE_HIDDEN_VISIBILITY_ATTRIBUTE
264 # undef FFI_CLOSURE_MASK_AND_JUMP
/* PIC variant: compute the table address pc-relatively via the dx
   thunk instead of using an absolute address.  */
265 # define FFI_CLOSURE_MASK_AND_JUMP(N, UW) \
266 andl $X86_RET_TYPE_MASK, %eax; \
267 call C(__x86.get_pc_thunk.dx); \
269 leal L(C1(load_table,N))-L(C1(pc,N))(%edx, %eax, 8), %edx; \
270 movl closure_CF(%esp), %eax; /* optimistic load */ \
271 _CET_NOTRACK jmp *%edx
/* GOT/PLT variant: %ebx must hold the GOT pointer across the inner
   call, so save/restore it at frame offset 40.  */
273 # define FFI_CLOSURE_CALL_INNER_SAVE_EBX
274 # undef FFI_CLOSURE_CALL_INNER
275 # define FFI_CLOSURE_CALL_INNER(UWN) \
276 movl %ebx, 40(%esp); /* save ebx */ \
278 /* cfi_rel_offset(%ebx, 40); */ \
279 call C(__x86.get_pc_thunk.bx); /* load got register */ \
280 addl $C(_GLOBAL_OFFSET_TABLE_), %ebx; \
281 call ffi_closure_inner@PLT
282 # undef FFI_CLOSURE_MASK_AND_JUMP
283 # define FFI_CLOSURE_MASK_AND_JUMP(N, UWN) \
284 andl $X86_RET_TYPE_MASK, %eax; \
285 leal L(C1(load_table,N))@GOTOFF(%ebx, %eax, 8), %edx; \
286 movl 40(%esp), %ebx; /* restore ebx */ \
288 /* cfi_restore(%ebx); */ \
289 movl closure_CF(%esp), %eax; /* optimistic load */ \
290 _CET_NOTRACK jmp *%edx
291 # endif /* DARWIN || HIDDEN */
/* Go closure entry with the ffi_go_closure in %eax: unpack cif/fun from
   it (the closure itself doubles as user_data) and join the common
   i386 closure path.  */
295 .globl C(ffi_go_closure_EAX)
296 FFI_HIDDEN(C(ffi_go_closure_EAX))
297 C(ffi_go_closure_EAX):
301 subl $closure_FS, %esp
303 # cfi_def_cfa_offset(closure_FS + 4)
304 FFI_CLOSURE_SAVE_REGS
305 movl 4(%eax), %edx /* copy cif */
306 movl 8(%eax), %ecx /* copy fun */
307 movl %edx, closure_CF+28(%esp)
308 movl %ecx, closure_CF+32(%esp)
309 movl %eax, closure_CF+36(%esp) /* closure is user_data */
310 jmp L(do_closure_i386)
313 ENDF(C(ffi_go_closure_EAX))
/* Same as ffi_go_closure_EAX but with the ffi_go_closure arriving in
   %ecx (for ABIs where %eax is unavailable).  */
316 .globl C(ffi_go_closure_ECX)
317 FFI_HIDDEN(C(ffi_go_closure_ECX))
318 C(ffi_go_closure_ECX):
322 subl $closure_FS, %esp
324 # cfi_def_cfa_offset(closure_FS + 4)
325 FFI_CLOSURE_SAVE_REGS
326 movl 4(%ecx), %edx /* copy cif */
327 movl 8(%ecx), %eax /* copy fun */
328 movl %edx, closure_CF+28(%esp)
329 movl %eax, closure_CF+32(%esp)
330 movl %ecx, closure_CF+36(%esp) /* closure is user_data */
331 jmp L(do_closure_i386)
334 ENDF(C(ffi_go_closure_ECX))
336 /* The closure entry points are reached from the ffi_closure trampoline.
337 On entry, %eax contains the address of the ffi_closure. */
340 .globl C(ffi_closure_i386)
341 FFI_HIDDEN(C(ffi_closure_i386))
347 subl $closure_FS, %esp
349 # cfi_def_cfa_offset(closure_FS + 4)
351 FFI_CLOSURE_SAVE_REGS
352 FFI_CLOSURE_COPY_TRAMP_DATA
354 /* Entry point from preceding Go closures. */
357 FFI_CLOSURE_PREP_CALL
358 FFI_CLOSURE_CALL_INNER(14)
359 FFI_CLOSURE_MASK_AND_JUMP(2, 15)
/* load_table2: 8-byte slot per X86_RET_* code; loads the return value
   from closure_CF(%esp) into the ABI return registers, pops the frame
   and returns.  */
363 E(L(load_table2), X86_RET_FLOAT)
364 flds closure_CF(%esp)
366 E(L(load_table2), X86_RET_DOUBLE)
367 fldl closure_CF(%esp)
369 E(L(load_table2), X86_RET_LDOUBLE)
370 fldt closure_CF(%esp)
372 E(L(load_table2), X86_RET_SINT8)
375 E(L(load_table2), X86_RET_SINT16)
378 E(L(load_table2), X86_RET_UINT8)
381 E(L(load_table2), X86_RET_UINT16)
384 E(L(load_table2), X86_RET_INT64)
385 movl closure_CF+4(%esp), %edx
387 E(L(load_table2), X86_RET_INT32)
390 E(L(load_table2), X86_RET_VOID)
392 addl $closure_FS, %esp
394 # cfi_adjust_cfa_offset(-closure_FS)
397 # cfi_adjust_cfa_offset(closure_FS)
398 E(L(load_table2), X86_RET_STRUCTPOP)
399 addl $closure_FS, %esp
401 # cfi_adjust_cfa_offset(-closure_FS)
404 # cfi_adjust_cfa_offset(closure_FS)
405 E(L(load_table2), X86_RET_STRUCTARG)
407 E(L(load_table2), X86_RET_STRUCT_1B)
410 E(L(load_table2), X86_RET_STRUCT_2B)
414 /* Fill out the table so that bad values are predictable. */
415 E(L(load_table2), X86_RET_UNUSED14)
417 E(L(load_table2), X86_RET_UNUSED15)
422 ENDF(C(ffi_closure_i386))
/* Go closure entry for STDCALL: ffi_go_closure arrives in %ecx; unpack
   cif/fun and join the STDCALL closure path (callee pops args).  */
425 .globl C(ffi_go_closure_STDCALL)
426 FFI_HIDDEN(C(ffi_go_closure_STDCALL))
427 C(ffi_go_closure_STDCALL):
431 subl $closure_FS, %esp
433 # cfi_def_cfa_offset(closure_FS + 4)
434 FFI_CLOSURE_SAVE_REGS
435 movl 4(%ecx), %edx /* copy cif */
436 movl 8(%ecx), %eax /* copy fun */
437 movl %edx, closure_CF+28(%esp)
438 movl %eax, closure_CF+32(%esp)
439 movl %ecx, closure_CF+36(%esp) /* closure is user_data */
440 jmp L(do_closure_STDCALL)
443 ENDF(C(ffi_go_closure_STDCALL))
445 /* For REGISTER, we have no available parameter registers, and so we
446 enter here having pushed the closure onto the stack. */
449 .globl C(ffi_closure_REGISTER)
450 FFI_HIDDEN(C(ffi_closure_REGISTER))
451 C(ffi_closure_REGISTER):
454 # cfi_def_cfa(%esp, 8)
455 # cfi_offset(%eip, -8)
/* Frame is closure_FS-4 here because the closure already occupies one
   stack slot; swap it with the return address so the stack layout
   matches the common STDCALL path.  */
457 subl $closure_FS-4, %esp
459 # cfi_def_cfa_offset(closure_FS + 4)
460 FFI_CLOSURE_SAVE_REGS
461 movl closure_FS-4(%esp), %ecx /* load retaddr */
462 movl closure_FS(%esp), %eax /* load closure */
463 movl %ecx, closure_FS(%esp) /* move retaddr */
464 jmp L(do_closure_REGISTER)
467 ENDF(C(ffi_closure_REGISTER))
469 /* For STDCALL (and others), we need to pop N bytes of arguments off
470 the stack following the closure. The amount needing to be popped
471 is returned to us from ffi_closure_inner. */
474 .globl C(ffi_closure_STDCALL)
475 FFI_HIDDEN(C(ffi_closure_STDCALL))
476 C(ffi_closure_STDCALL):
480 subl $closure_FS, %esp
482 # cfi_def_cfa_offset(closure_FS + 4)
484 FFI_CLOSURE_SAVE_REGS
486 /* Entry point from ffi_closure_REGISTER. */
487 L(do_closure_REGISTER):
489 FFI_CLOSURE_COPY_TRAMP_DATA
491 /* Entry point from preceding Go closure. */
492 L(do_closure_STDCALL):
494 FFI_CLOSURE_PREP_CALL
495 FFI_CLOSURE_CALL_INNER(29)
/* The inner call returns the pop count encoded in the flags word;
   isolate it and precompute the popped stack pointer in %ecx.  */
498 shrl $X86_RET_POP_SHIFT, %ecx /* isolate pop count */
499 leal closure_FS(%esp, %ecx), %ecx /* compute popped esp */
500 movl closure_FS(%esp), %edx /* move return address */
503 /* From this point on, the value of %esp upon return is %ecx+4,
504 and we've copied the return address to %ecx to make return easy.
505 There's no point in representing this in the unwind info, as
506 there is always a window between the mov and the ret which
507 will be wrong from one point of view or another. */
509 FFI_CLOSURE_MASK_AND_JUMP(3, 30)
/* load_table3: return-type dispatch, as load_table2 but each slot also
   performs the callee-pops stack adjustment computed above.  */
513 E(L(load_table3), X86_RET_FLOAT)
514 flds closure_CF(%esp)
517 E(L(load_table3), X86_RET_DOUBLE)
518 fldl closure_CF(%esp)
521 E(L(load_table3), X86_RET_LDOUBLE)
522 fldt closure_CF(%esp)
525 E(L(load_table3), X86_RET_SINT8)
529 E(L(load_table3), X86_RET_SINT16)
533 E(L(load_table3), X86_RET_UINT8)
537 E(L(load_table3), X86_RET_UINT16)
541 E(L(load_table3), X86_RET_INT64)
542 movl closure_CF+4(%esp), %edx
545 E(L(load_table3), X86_RET_INT32)
548 E(L(load_table3), X86_RET_VOID)
551 E(L(load_table3), X86_RET_STRUCTPOP)
554 E(L(load_table3), X86_RET_STRUCTARG)
557 E(L(load_table3), X86_RET_STRUCT_1B)
561 E(L(load_table3), X86_RET_STRUCT_2B)
566 /* Fill out the table so that bad values are predictable. */
567 E(L(load_table3), X86_RET_UNUSED14)
569 E(L(load_table3), X86_RET_UNUSED15)
574 ENDF(C(ffi_closure_STDCALL))
576 #if defined(FFI_EXEC_STATIC_TRAMP)
/* Static-trampoline alternate entry: the trampoline left the closure
   pointer on the stack; recover it into %eax, drop the trampoline's
   two stack slots, and tail-jump to the normal entry point.  */
578 .globl C(ffi_closure_i386_alt)
579 FFI_HIDDEN(C(ffi_closure_i386_alt))
580 C(ffi_closure_i386_alt):
581 /* See the comments above trampoline_code_table. */
583 movl 4(%esp), %eax /* Load closure in eax */
584 add $8, %esp /* Restore the stack */
585 jmp C(ffi_closure_i386)
586 ENDF(C(ffi_closure_i386_alt))
/* Static-trampoline alternate entry for REGISTER: restore the saved
   %eax and deliberately leave the closure on the stack, matching what
   ffi_closure_REGISTER expects on entry.  */
589 .globl C(ffi_closure_REGISTER_alt)
590 FFI_HIDDEN(C(ffi_closure_REGISTER_alt))
591 C(ffi_closure_REGISTER_alt):
592 /* See the comments above trampoline_code_table. */
594 movl (%esp), %eax /* Restore eax */
595 add $4, %esp /* Leave closure on stack */
596 jmp C(ffi_closure_REGISTER)
597 ENDF(C(ffi_closure_REGISTER_alt))
/* Static-trampoline alternate entry for STDCALL: same recovery as the
   i386 variant, then tail-jump to the STDCALL entry point.  */
600 .globl C(ffi_closure_STDCALL_alt)
601 FFI_HIDDEN(C(ffi_closure_STDCALL_alt))
602 C(ffi_closure_STDCALL_alt):
603 /* See the comments above trampoline_code_table. */
605 movl 4(%esp), %eax /* Load closure in eax */
606 add $8, %esp /* Restore the stack */
607 jmp C(ffi_closure_STDCALL)
608 ENDF(C(ffi_closure_STDCALL_alt))
611 * Below is the definition of the trampoline code table. Each element in
612 * the code table is a trampoline.
614 * Because we jump to the trampoline, we place a _CET_ENDBR at the
615 * beginning of the trampoline to mark it as a valid branch target. This is
616 * part of the Intel CET (Control Flow Enforcement Technology).
619 * The trampoline uses register eax. It saves the original value of eax on
622 * The trampoline has two parameters - target code to jump to and data for
623 * the target code. The trampoline extracts the parameters from its parameter
624 * block (see tramp_table_map()). The trampoline saves the data address on
625 * the stack. Finally, it jumps to the target code.
627 * The target code can choose to:
629 * - restore the value of eax
630 * - load the data address in a register
631 * - restore the stack pointer to what it was when the trampoline was invoked.
/* Offsets from the post-call %eax (next PC) to this trampoline's
   data/code slots in the adjacent parameter page.  NOTE(review): the
   two offset pairs appear to correspond to different trampoline sizes
   selected by a build condition — confirm against X86_TRAMP_SIZE.  */
634 #define X86_DATA_OFFSET 4081
635 #define X86_CODE_OFFSET 4070
637 #define X86_DATA_OFFSET 4085
638 #define X86_CODE_OFFSET 4074
/* The table is aligned to the map size so each trampoline's parameter
   block can be found at a fixed offset from its own PC.  */
641 .align X86_TRAMP_MAP_SIZE
642 .globl C(trampoline_code_table)
643 FFI_HIDDEN(C(trampoline_code_table))
644 C(trampoline_code_table):
645 .rept X86_TRAMP_MAP_SIZE / X86_TRAMP_SIZE
648 movl %eax, (%esp) /* Save %eax on stack */
649 call 1f /* Get next PC into %eax */
650 movl X86_DATA_OFFSET(%eax), %eax /* Copy data into %eax */
651 movl %eax, 4(%esp) /* Save data on stack */
652 call 1f /* Get next PC into %eax */
653 movl X86_CODE_OFFSET(%eax), %eax /* Copy code into %eax */
654 jmp *%eax /* Jump to code */
660 ENDF(C(trampoline_code_table))
661 .align X86_TRAMP_MAP_SIZE
/* Raw SYSV closure: calls cl->fun directly with (cif, &res, raw_args,
   user_data) rather than going through ffi_closure_inner.  */
666 #define raw_closure_S_FS (16+16+12)
669 .globl C(ffi_closure_raw_SYSV)
670 FFI_HIDDEN(C(ffi_closure_raw_SYSV))
671 C(ffi_closure_raw_SYSV):
675 subl $raw_closure_S_FS, %esp
677 # cfi_def_cfa_offset(raw_closure_S_FS + 4)
678 movl %ebx, raw_closure_S_FS-4(%esp)
680 # cfi_rel_offset(%ebx, raw_closure_S_FS-4)
682 movl FFI_TRAMPOLINE_SIZE+8(%eax), %edx /* load cl->user_data */
684 leal raw_closure_S_FS+4(%esp), %edx /* load raw_args */
686 leal 16(%esp), %edx /* load &res */
688 movl FFI_TRAMPOLINE_SIZE(%eax), %ebx /* load cl->cif */
690 call *FFI_TRAMPOLINE_SIZE+4(%eax) /* call cl->fun */
/* Dispatch on cif->flags' return-type code through load_table4; the
   PIC variant computes the table address via the bx thunk.  */
692 movl 20(%ebx), %eax /* load cif->flags */
693 andl $X86_RET_TYPE_MASK, %eax
695 call C(__x86.get_pc_thunk.bx)
697 leal L(load_table4)-L(pc4)(%ebx, %eax, 8), %ecx
699 leal L(load_table4)(,%eax, 8), %ecx
701 movl raw_closure_S_FS-4(%esp), %ebx
704 movl 16(%esp), %eax /* Optimistic load */
709 E(L(load_table4), X86_RET_FLOAT)
712 E(L(load_table4), X86_RET_DOUBLE)
715 E(L(load_table4), X86_RET_LDOUBLE)
718 E(L(load_table4), X86_RET_SINT8)
721 E(L(load_table4), X86_RET_SINT16)
724 E(L(load_table4), X86_RET_UINT8)
727 E(L(load_table4), X86_RET_UINT16)
730 E(L(load_table4), X86_RET_INT64)
731 movl 16+4(%esp), %edx
733 E(L(load_table4), X86_RET_INT32)
736 E(L(load_table4), X86_RET_VOID)
738 addl $raw_closure_S_FS, %esp
740 # cfi_adjust_cfa_offset(-raw_closure_S_FS)
743 # cfi_adjust_cfa_offset(raw_closure_S_FS)
744 E(L(load_table4), X86_RET_STRUCTPOP)
745 addl $raw_closure_S_FS, %esp
747 # cfi_adjust_cfa_offset(-raw_closure_S_FS)
750 # cfi_adjust_cfa_offset(raw_closure_S_FS)
751 E(L(load_table4), X86_RET_STRUCTARG)
753 E(L(load_table4), X86_RET_STRUCT_1B)
756 E(L(load_table4), X86_RET_STRUCT_2B)
760 /* Fill out the table so that bad values are predictable. */
761 E(L(load_table4), X86_RET_UNUSED14)
763 E(L(load_table4), X86_RET_UNUSED15)
768 ENDF(C(ffi_closure_raw_SYSV))
/* Raw THISCALL closure: like raw_SYSV but the first argument arrives
   in %ecx, so the stack is first rearranged to push it as the first
   stack argument (moving the return address in the process).  */
770 #define raw_closure_T_FS (16+16+8)
773 .globl C(ffi_closure_raw_THISCALL)
774 FFI_HIDDEN(C(ffi_closure_raw_THISCALL))
775 C(ffi_closure_raw_THISCALL):
779 /* Rearrange the stack such that %ecx is the first argument.
780 This means moving the return address. */
783 # cfi_def_cfa_offset(0)
784 # cfi_register(%eip, %edx)
787 # cfi_adjust_cfa_offset(4)
790 # cfi_adjust_cfa_offset(4)
791 # cfi_rel_offset(%eip, 0)
792 subl $raw_closure_T_FS, %esp
794 # cfi_adjust_cfa_offset(raw_closure_T_FS)
795 movl %ebx, raw_closure_T_FS-4(%esp)
797 # cfi_rel_offset(%ebx, raw_closure_T_FS-4)
799 movl FFI_TRAMPOLINE_SIZE+8(%eax), %edx /* load cl->user_data */
801 leal raw_closure_T_FS+4(%esp), %edx /* load raw_args */
803 leal 16(%esp), %edx /* load &res */
805 movl FFI_TRAMPOLINE_SIZE(%eax), %ebx /* load cl->cif */
807 call *FFI_TRAMPOLINE_SIZE+4(%eax) /* call cl->fun */
/* Dispatch on cif->flags' return-type code through load_table5.  */
809 movl 20(%ebx), %eax /* load cif->flags */
810 andl $X86_RET_TYPE_MASK, %eax
812 call C(__x86.get_pc_thunk.bx)
814 leal L(load_table5)-L(pc5)(%ebx, %eax, 8), %ecx
816 leal L(load_table5)(,%eax, 8), %ecx
818 movl raw_closure_T_FS-4(%esp), %ebx
821 movl 16(%esp), %eax /* Optimistic load */
826 E(L(load_table5), X86_RET_FLOAT)
829 E(L(load_table5), X86_RET_DOUBLE)
832 E(L(load_table5), X86_RET_LDOUBLE)
835 E(L(load_table5), X86_RET_SINT8)
838 E(L(load_table5), X86_RET_SINT16)
841 E(L(load_table5), X86_RET_UINT8)
844 E(L(load_table5), X86_RET_UINT16)
847 E(L(load_table5), X86_RET_INT64)
848 movl 16+4(%esp), %edx
850 E(L(load_table5), X86_RET_INT32)
853 E(L(load_table5), X86_RET_VOID)
855 addl $raw_closure_T_FS, %esp
857 # cfi_adjust_cfa_offset(-raw_closure_T_FS)
858 /* Remove the extra %ecx argument we pushed. */
861 # cfi_adjust_cfa_offset(raw_closure_T_FS)
862 E(L(load_table5), X86_RET_STRUCTPOP)
863 addl $raw_closure_T_FS, %esp
865 # cfi_adjust_cfa_offset(-raw_closure_T_FS)
868 # cfi_adjust_cfa_offset(raw_closure_T_FS)
869 E(L(load_table5), X86_RET_STRUCTARG)
871 E(L(load_table5), X86_RET_STRUCT_1B)
874 E(L(load_table5), X86_RET_STRUCT_2B)
878 /* Fill out the table so that bad values are predictable. */
879 E(L(load_table5), X86_RET_UNUSED14)
881 E(L(load_table5), X86_RET_UNUSED15)
886 ENDF(C(ffi_closure_raw_THISCALL))
888 #endif /* !FFI_NO_RAW_API */
892 .section __TEXT,__text,coalesced,pure_instructions; \
893 .weak_definition X; \
895 #elif defined __ELF__ && !(defined(__sun__) && defined(__svr4__))
897 .section .text.X,"axG",@progbits,X,comdat; \
/* PC-load thunks, emitted as COMDAT so duplicates across objects are
   merged: each returns its caller's address in %ebx / %edx for
   PIC address computations.  */
905 COMDAT(C(__x86.get_pc_thunk.bx))
906 C(__x86.get_pc_thunk.bx):
909 ENDF(C(__x86.get_pc_thunk.bx))
910 # if defined X86_DARWIN || defined HAVE_HIDDEN_VISIBILITY_ATTRIBUTE
911 COMDAT(C(__x86.get_pc_thunk.dx))
912 C(__x86.get_pc_thunk.dx):
915 ENDF(C(__x86.get_pc_thunk.dx))
916 #endif /* DARWIN || HIDDEN */
919 /* Sadly, OSX cctools-as doesn't understand .cfi directives at all. */
922 .section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support
924 #elif defined(X86_WIN32)
925 .section .eh_frame,"r"
926 #elif defined(HAVE_AS_X86_64_UNWIND_SECTION_TYPE)
927 .section .eh_frame,EH_FRAME_FLAGS,@unwind
929 .section .eh_frame,EH_FRAME_FLAGS,@progbits
932 #ifdef HAVE_AS_X86_PCREL
933 # define PCREL(X) X - .
935 # define PCREL(X) X@rel
938 /* Simplify advancing between labels. Assume DW_CFA_advance_loc1 fits. */
939 #define ADV(N, P) .byte 2, L(N)-L(P)
/* Hand-assembled DWARF CIE shared by all FDEs below.  The data
   alignment factor 0x7c is SLEB128 for -4 (stack grows down, 4-byte
   slots); RA column 8 is %eip.  */
943 .set L(set0),L(ECIE)-L(SCIE)
944 .long L(set0) /* CIE Length */
946 .long 0 /* CIE Identifier Tag */
947 .byte 1 /* CIE Version */
948 .ascii "zR\0" /* CIE Augmentation */
949 .byte 1 /* CIE Code Alignment Factor */
950 .byte 0x7c /* CIE Data Alignment Factor */
951 .byte 0x8 /* CIE RA Column */
952 .byte 1 /* Augmentation size */
953 .byte 0x1b /* FDE Encoding (pcrel sdata4) */
954 .byte 0xc, 4, 4 /* DW_CFA_def_cfa, %esp offset 4 */
955 .byte 0x80+8, 1 /* DW_CFA_offset, %eip offset 1*-4 */
/* Hand-assembled FDEs, one per entry point above.  Code ranges are
   delimited by the L(UWnn) labels emitted alongside the code; the CFA
   changes mirror the # cfi_* comments in each function body.  */
/* FDE 1: ffi_call_i386 (UW0..UW5).  */
959 .set L(set1),L(EFDE1)-L(SFDE1)
960 .long L(set1) /* FDE Length */
962 .long L(SFDE1)-L(CIE) /* FDE CIE offset */
963 .long PCREL(L(UW0)) /* Initial location */
964 .long L(UW5)-L(UW0) /* Address range */
965 .byte 0 /* Augmentation size */
967 .byte 0xc, 5, 8 /* DW_CFA_def_cfa, %ebp 8 */
968 .byte 0x80+5, 2 /* DW_CFA_offset, %ebp 2*-4 */
970 .byte 0x80+3, 0 /* DW_CFA_offset, %ebx 0*-4 */
972 .byte 0xa /* DW_CFA_remember_state */
973 .byte 0xc, 4, 4 /* DW_CFA_def_cfa, %esp 4 */
974 .byte 0xc0+3 /* DW_CFA_restore, %ebx */
975 .byte 0xc0+5 /* DW_CFA_restore, %ebp */
977 .byte 0xb /* DW_CFA_restore_state */
/* FDE 2: ffi_go_closure_EAX (UW6..UW8).  */
981 .set L(set2),L(EFDE2)-L(SFDE2)
982 .long L(set2) /* FDE Length */
984 .long L(SFDE2)-L(CIE) /* FDE CIE offset */
985 .long PCREL(L(UW6)) /* Initial location */
986 .long L(UW8)-L(UW6) /* Address range */
987 .byte 0 /* Augmentation size */
989 .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */
/* FDE 3: ffi_go_closure_ECX (UW9..UW11).  */
993 .set L(set3),L(EFDE3)-L(SFDE3)
994 .long L(set3) /* FDE Length */
996 .long L(SFDE3)-L(CIE) /* FDE CIE offset */
997 .long PCREL(L(UW9)) /* Initial location */
998 .long L(UW11)-L(UW9) /* Address range */
999 .byte 0 /* Augmentation size */
1001 .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */
/* FDE 4: ffi_closure_i386 (UW12..UW20).  */
1005 .set L(set4),L(EFDE4)-L(SFDE4)
1006 .long L(set4) /* FDE Length */
1008 .long L(SFDE4)-L(CIE) /* FDE CIE offset */
1009 .long PCREL(L(UW12)) /* Initial location */
1010 .long L(UW20)-L(UW12) /* Address range */
1011 .byte 0 /* Augmentation size */
1013 .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */
1014 #ifdef FFI_CLOSURE_CALL_INNER_SAVE_EBX
1016 .byte 0x80+3, (40-(closure_FS+4))/-4 /* DW_CFA_offset %ebx */
1018 .byte 0xc0+3 /* DW_CFA_restore %ebx */
1023 .byte 0xe, 4 /* DW_CFA_def_cfa_offset */
1025 .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */
1027 .byte 0xe, 4 /* DW_CFA_def_cfa_offset */
1029 .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */
/* FDE 5: ffi_go_closure_STDCALL (UW21..UW23).  */
1033 .set L(set5),L(EFDE5)-L(SFDE5)
1034 .long L(set5) /* FDE Length */
1036 .long L(SFDE5)-L(CIE) /* FDE CIE offset */
1037 .long PCREL(L(UW21)) /* Initial location */
1038 .long L(UW23)-L(UW21) /* Address range */
1039 .byte 0 /* Augmentation size */
1041 .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */
/* FDE 6: ffi_closure_REGISTER (UW24..UW26); CFA starts at 8 because
   the closure was pushed before entry.  */
1045 .set L(set6),L(EFDE6)-L(SFDE6)
1046 .long L(set6) /* FDE Length */
1048 .long L(SFDE6)-L(CIE) /* FDE CIE offset */
1049 .long PCREL(L(UW24)) /* Initial location */
1050 .long L(UW26)-L(UW24) /* Address range */
1051 .byte 0 /* Augmentation size */
1052 .byte 0xe, 8 /* DW_CFA_def_cfa_offset */
1053 .byte 0x80+8, 2 /* DW_CFA_offset %eip, 2*-4 */
1055 .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */
/* FDE 7: ffi_closure_STDCALL (UW27..UW31).  */
1059 .set L(set7),L(EFDE7)-L(SFDE7)
1060 .long L(set7) /* FDE Length */
1062 .long L(SFDE7)-L(CIE) /* FDE CIE offset */
1063 .long PCREL(L(UW27)) /* Initial location */
1064 .long L(UW31)-L(UW27) /* Address range */
1065 .byte 0 /* Augmentation size */
1067 .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */
1068 #ifdef FFI_CLOSURE_CALL_INNER_SAVE_EBX
1070 .byte 0x80+3, (40-(closure_FS+4))/-4 /* DW_CFA_offset %ebx */
1072 .byte 0xc0+3 /* DW_CFA_restore %ebx */
/* FDE 8: ffi_closure_raw_SYSV (UW32..UW40).  */
1078 .set L(set8),L(EFDE8)-L(SFDE8)
1079 .long L(set8) /* FDE Length */
1081 .long L(SFDE8)-L(CIE) /* FDE CIE offset */
1082 .long PCREL(L(UW32)) /* Initial location */
1083 .long L(UW40)-L(UW32) /* Address range */
1084 .byte 0 /* Augmentation size */
1086 .byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */
1088 .byte 0x80+3, 2 /* DW_CFA_offset %ebx 2*-4 */
1090 .byte 0xc0+3 /* DW_CFA_restore %ebx */
1092 .byte 0xe, 4 /* DW_CFA_def_cfa_offset */
1094 .byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */
1096 .byte 0xe, 4 /* DW_CFA_def_cfa_offset */
1098 .byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */
/* FDE 9: ffi_closure_raw_THISCALL (UW41..UW52); tracks the return
   address temporarily living in %edx during the stack rearrangement.  */
1102 .set L(set9),L(EFDE9)-L(SFDE9)
1103 .long L(set9) /* FDE Length */
1105 .long L(SFDE9)-L(CIE) /* FDE CIE offset */
1106 .long PCREL(L(UW41)) /* Initial location */
1107 .long L(UW52)-L(UW41) /* Address range */
1108 .byte 0 /* Augmentation size */
1110 .byte 0xe, 0 /* DW_CFA_def_cfa_offset */
1111 .byte 0x9, 8, 2 /* DW_CFA_register %eip, %edx */
1113 .byte 0xe, 4 /* DW_CFA_def_cfa_offset */
1115 .byte 0xe, 8 /* DW_CFA_def_cfa_offset */
1116 .byte 0x80+8, 2 /* DW_CFA_offset %eip 2*-4 */
1118 .byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */
1120 .byte 0x80+3, 3 /* DW_CFA_offset %ebx 3*-4 */
1122 .byte 0xc0+3 /* DW_CFA_restore %ebx */
1124 .byte 0xe, 8 /* DW_CFA_def_cfa_offset */
1126 .byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */
1128 .byte 0xe, 8 /* DW_CFA_def_cfa_offset */
1130 .byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */
1133 #endif /* !FFI_NO_RAW_API */
/* Mach-O compact-unwind entries: one record per entry point, each with
   encoding 0x04000000 (UNWIND_X86_MODE_DWARF) to defer to the DWARF
   FDEs above.  */
1145 .subsections_via_symbols
1146 .section __LD,__compact_unwind,regular,debug
1148 /* compact unwind for ffi_call_i386 */
1149 .long C(ffi_call_i386)
1150 .set L1,L(UW5)-L(UW0)
1152 .long 0x04000000 /* use dwarf unwind info */
1156 /* compact unwind for ffi_go_closure_EAX */
1157 .long C(ffi_go_closure_EAX)
1158 .set L2,L(UW8)-L(UW6)
1160 .long 0x04000000 /* use dwarf unwind info */
1164 /* compact unwind for ffi_go_closure_ECX */
1165 .long C(ffi_go_closure_ECX)
1166 .set L3,L(UW11)-L(UW9)
1168 .long 0x04000000 /* use dwarf unwind info */
1172 /* compact unwind for ffi_closure_i386 */
1173 .long C(ffi_closure_i386)
1174 .set L4,L(UW20)-L(UW12)
1176 .long 0x04000000 /* use dwarf unwind info */
1180 /* compact unwind for ffi_go_closure_STDCALL */
1181 .long C(ffi_go_closure_STDCALL)
1182 .set L5,L(UW23)-L(UW21)
1184 .long 0x04000000 /* use dwarf unwind info */
1188 /* compact unwind for ffi_closure_REGISTER */
1189 .long C(ffi_closure_REGISTER)
1190 .set L6,L(UW26)-L(UW24)
1192 .long 0x04000000 /* use dwarf unwind info */
1196 /* compact unwind for ffi_closure_STDCALL */
1197 .long C(ffi_closure_STDCALL)
1198 .set L7,L(UW31)-L(UW27)
1200 .long 0x04000000 /* use dwarf unwind info */
1204 /* compact unwind for ffi_closure_raw_SYSV */
1205 .long C(ffi_closure_raw_SYSV)
1206 .set L8,L(UW40)-L(UW32)
1208 .long 0x04000000 /* use dwarf unwind info */
1212 /* compact unwind for ffi_closure_raw_THISCALL */
1213 .long C(ffi_closure_raw_THISCALL)
1214 .set L9,L(UW52)-L(UW41)
1216 .long 0x04000000 /* use dwarf unwind info */
1219 #endif /* __APPLE__ */
1221 #endif /* ifndef _MSC_VER */
1223 #endif /* ifdef __i386__ */
/* Mark the stack non-executable on GNU/Linux ELF targets.  */
1225 #if defined __ELF__ && defined __linux__
1226 .section .note.GNU-stack,"",@progbits