1 /* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd.
3 Permission is hereby granted, free of charge, to any person obtaining
4 a copy of this software and associated documentation files (the
5 ``Software''), to deal in the Software without restriction, including
6 without limitation the rights to use, copy, modify, merge, publish,
7 distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so, subject to
9 the following conditions:
11 The above copyright notice and this permission notice shall be
12 included in all copies or substantial portions of the Software.
14 THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
15 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
16 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
17 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
18 CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
19 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
20 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
22 #if defined(__aarch64__) || defined(__arm64__)|| defined (_M_ARM64)
26 #include <fficonfig.h>
28 #include <ffi_common.h>
31 #include <windows.h> /* FlushInstructionCache */
35 /* Force FFI_TYPE_LONGDOUBLE to be different than FFI_TYPE_DOUBLE;
36 all further uses in this file will refer to the 128-bit type. */
37 #if FFI_TYPE_DOUBLE != FFI_TYPE_LONGDOUBLE
38 # if FFI_TYPE_LONGDOUBLE != 4
39 # error FFI_TYPE_LONGDOUBLE out of date
/* NOTE(review): the matching #else/#endif lines of this conditional are not
   visible in this excerpt; the listing appears to have lines elided.  */
42 # undef FFI_TYPE_LONGDOUBLE
43 # define FFI_TYPE_LONGDOUBLE 4
/* Fragments of the register save area used to communicate with the assembly
   stub: d[] is a 16-byte-aligned pair of doubles (one vector register view),
   v[] holds N_V_ARG_REG vector argument registers and x[] holds N_X_ARG_REG
   64-bit general argument registers.  Presumably these map to v0..v7 and
   x0..x7 per AAPCS64 — confirm against the surrounding struct declarations
   and sysv.S, which are not visible here.  */
54 union _d d[2] __attribute__((aligned(16)));
59 struct _v v[N_V_ARG_REG];
60 UINT64 x[N_X_ARG_REG];
63 #if FFI_EXEC_TRAMPOLINE_TABLE
69 #include <mach/vm_param.h>
74 #if defined (__clang__) && defined (__APPLE__)
75 extern void sys_icache_invalidate (void *start, size_t len);
/* Flush the instruction cache over [start, end) so freshly written
   trampoline code is visible to instruction fetch.  Dispatches to the
   platform primitive; compilation fails if none is available.  */
79 ffi_clear_cache (void *start, void *end)
81 #if defined (__clang__) && defined (__APPLE__)
/* Darwin: libSystem entry point; takes a byte length, not an end pointer.  */
82 sys_icache_invalidate (start, (char *)end - (char *)start);
83 #elif defined (__GNUC__)
/* GCC/Clang builtin takes the [start, end) pointers directly.  */
84 __builtin___clear_cache (start, end);
85 #elif defined (_WIN32)
86 FlushInstructionCache(GetCurrentProcess(), start, (char*)end - (char*)start);
/* NOTE(review): this #error sits in the fall-through branch of the #if/#elif
   chain above; the intervening #else line is elided in this listing.  */
88 #error "Missing builtin to flush instruction cache"
94 /* A subroutine of is_vfp_type. Given a structure type, return the type code
95 of the first non-structure element. Recurse for structure elements.
96 Return -1 if the structure is in fact empty, i.e. no nested elements. */
99 is_hfa0 (const ffi_type *ty)
101 ffi_type **elements = ty->elements;
/* Walk the element list (NULL-terminated per libffi convention) looking for
   the first leaf (non-aggregate) element type.  */
104 if (elements != NULL)
105 for (i = 0; elements[i]; ++i)
107 ret = elements[i]->type;
/* Nested aggregates are searched recursively for their first leaf.  */
108 if (ret == FFI_TYPE_STRUCT || ret == FFI_TYPE_COMPLEX)
110 ret = is_hfa0 (elements[i]);
120 /* A subroutine of is_vfp_type. Given a structure type, return true if all
121 of the non-structure elements are the same as CANDIDATE. */
124 is_hfa1 (const ffi_type *ty, int candidate)
126 ffi_type **elements = ty->elements;
/* Verify every leaf element matches CANDIDATE; recurse through nested
   aggregates, failing as soon as a mismatch is seen.  */
129 if (elements != NULL)
130 for (i = 0; elements[i]; ++i)
132 int t = elements[i]->type;
133 if (t == FFI_TYPE_STRUCT || t == FFI_TYPE_COMPLEX)
135 if (!is_hfa1 (elements[i], candidate))
138 else if (t != candidate)
145 /* Determine if TY may be allocated to the FP registers. This is both an
146 fp scalar type as well as an homogenous floating point aggregate (HFA).
147 That is, a structure consisting of 1 to 4 members of all the same type,
148 where that type is an fp scalar.
150 Returns non-zero iff TY is an HFA. The result is the AARCH64_RET_*
151 constant for the type. */
154 is_vfp_type (const ffi_type *ty)
158 size_t size, ele_count;
160 /* Quickest tests first. */
161 candidate = ty->type;
/* Bare fp scalars are trivially VFP-eligible.  (The switch head and the
   FFI_TYPE_FLOAT case line are elided in this listing.)  */
167 case FFI_TYPE_DOUBLE:
168 case FFI_TYPE_LONGDOUBLE:
/* A complex type is treated as a 2-element HFA of its base fp type.  */
171 case FFI_TYPE_COMPLEX:
172 candidate = ty->elements[0]->type;
176 case FFI_TYPE_DOUBLE:
177 case FFI_TYPE_LONGDOUBLE:
182 case FFI_TYPE_STRUCT:
186 /* No HFA types are smaller than 4 bytes, or larger than 64 bytes. */
188 if (size < 4 || size > 64)
191 /* Find the type of the first non-structure member. */
192 elements = ty->elements;
193 candidate = elements[0]->type;
194 if (candidate == FFI_TYPE_STRUCT || candidate == FFI_TYPE_COMPLEX)
198 candidate = is_hfa0 (elements[i]);
204 /* If the first member is not a floating point type, it's not an HFA.
205 Also quickly re-check the size of the structure. */
/* The total size must be an exact multiple of the candidate element size;
   this also yields ele_count for the encoding below.  */
209 ele_count = size / sizeof(float);
210 if (size != ele_count * sizeof(float))
213 case FFI_TYPE_DOUBLE:
214 ele_count = size / sizeof(double);
215 if (size != ele_count * sizeof(double))
218 case FFI_TYPE_LONGDOUBLE:
219 ele_count = size / sizeof(long double);
220 if (size != ele_count * sizeof(long double))
229 /* Finally, make sure that all scalar elements are the same type. */
230 for (i = 0; elements[i]; ++i)
232 int t = elements[i]->type;
233 if (t == FFI_TYPE_STRUCT || t == FFI_TYPE_COMPLEX)
235 if (!is_hfa1 (elements[i], candidate))
238 else if (t != candidate)
242 /* All tests succeeded. Encode the result. */
/* Encoding packs the scalar kind and the element count (1..4) into one
   AARCH64_RET_* constant: base*4 + (4 - count).  */
244 return candidate * 4 + (4 - (int)ele_count);
247 /* Representation of the procedure call argument marshalling
250 The terse state variable names match the names used in the AARCH64
255 unsigned ngrn; /* Next general-purpose register number. */
256 unsigned nsrn; /* Next vector register number. */
257 size_t nsaa; /* Next stack offset. */
/* Apple's arm64 ABI passes variadic arguments on the stack with natural
   packing; this flag switches allocate_to_stack into that mode.  */
259 #if defined (__APPLE__)
260 unsigned allocating_variadic;
264 /* Initialize a procedure call argument marshalling state. */
266 arg_init (struct arg_state *state)
/* (The ngrn/nsrn/nsaa zeroing lines are elided in this listing.)  */
271 #if defined (__APPLE__)
272 state->allocating_variadic = 0;
276 /* Allocate an aligned slot on the stack and return a pointer to it. */
278 allocate_to_stack (struct arg_state *state, void *stack,
279 size_t alignment, size_t size)
281 size_t nsaa = state->nsaa;
283 /* Round up the NSAA to the larger of 8 or the natural
284 alignment of the argument's type. */
/* Apple variadic mode: do NOT promote alignment to 8 — arguments are
   packed at their natural alignment.  (The clamping statements between
   these lines are elided in this listing.)  */
285 #if defined (__APPLE__)
286 if (state->allocating_variadic && alignment < 8)
293 nsaa = FFI_ALIGN (nsaa, alignment);
/* Reserve SIZE bytes and return the address of the newly claimed slot.  */
294 state->nsaa = nsaa + size;
296 return (char *)stack + nsaa;
/* Load an integer of the given FFI type from SOURCE and widen it to a
   register-sized ffi_arg, applying zero- or sign-extension as the type
   dictates.  Used when placing integer arguments into X registers.  */
300 extend_integer_type (void *source, int type)
305 return *(UINT8 *) source;
307 return *(SINT8 *) source;
308 case FFI_TYPE_UINT16:
309 return *(UINT16 *) source;
310 case FFI_TYPE_SINT16:
311 return *(SINT16 *) source;
312 case FFI_TYPE_UINT32:
313 return *(UINT32 *) source;
315 case FFI_TYPE_SINT32:
316 return *(SINT32 *) source;
/* 64-bit values need no extension; copied through unchanged.  */
317 case FFI_TYPE_UINT64:
318 case FFI_TYPE_SINT64:
319 return *(UINT64 *) source;
321 case FFI_TYPE_POINTER:
322 return *(uintptr_t *) source;
/* MSVC cannot compile the GCC-style inline asm below, so for that toolchain
   this is only a prototype; the body lives in an assembly file.  */
328 #if defined(_MSC_VER)
329 void extend_hfa_type (void *dest, void *src, int h);
/* Scatter a packed HFA at SRC into per-register slots at DEST (one v
   register image per element).  H is the AARCH64_RET_* HFA code; F indexes
   into the computed-goto style jump table below (12 bytes of code per
   case), selecting how many elements of which width to load.  */
332 extend_hfa_type (void *dest, void *src, int h)
334 ssize_t f = h - AARCH64_RET_S4;
/* Each labeled group loads 1-4 elements of S/D/Q width into v16-v19,
   then falls into the common store sequence at labels 4..1 below.  */
341 "0: ldp s16, s17, [%3]\n" /* S4 */
342 " ldp s18, s19, [%3, #8]\n"
344 " ldp s16, s17, [%3]\n" /* S3 */
345 " ldr s18, [%3, #8]\n"
347 " ldp s16, s17, [%3]\n" /* S2 */
350 " ldr s16, [%3]\n" /* S1 */
353 " ldp d16, d17, [%3]\n" /* D4 */
354 " ldp d18, d19, [%3, #16]\n"
356 " ldp d16, d17, [%3]\n" /* D3 */
357 " ldr d18, [%3, #16]\n"
359 " ldp d16, d17, [%3]\n" /* D2 */
362 " ldr d16, [%3]\n" /* D1 */
365 " ldp q16, q17, [%3]\n" /* Q4 */
366 " ldp q18, q19, [%3, #32]\n"
368 " ldp q16, q17, [%3]\n" /* Q3 */
369 " ldr q18, [%3, #32]\n"
371 " ldp q16, q17, [%3]\n" /* Q2 */
374 " ldr q16, [%3]\n" /* Q1 */
/* Common tail: store up to four full 16-byte register images to DEST.  */
376 "4: str q19, [%2, #48]\n"
377 "3: str q18, [%2, #32]\n"
378 "2: str q17, [%2, #16]\n"
/* %0 = f*12 selects the entry point; v16-v19 are clobbered scratch.  */
381 : "r"(f * 12), "r"(dest), "r"(src)
382 : "memory", "v16", "v17", "v18", "v19");
/* MSVC: prototype only, body provided in assembly (see extend_hfa_type).  */
386 #if defined(_MSC_VER)
387 void* compress_hfa_type (void *dest, void *src, int h);
/* Inverse of extend_hfa_type: gather an HFA spread across consecutive
   16-byte register images at REG back into packed layout at DEST.
   Returns the destination pointer.  Element 0 of each loaded vector
   register is stored interleaved via st2/st3/st4.  */
390 compress_hfa_type (void *dest, void *reg, int h)
/* Single-float case is a plain scalar copy; multi-element S cases use
   lane-0 interleaved stores.  */
402 *(float *)dest = *(float *)reg;
405 asm ("ldp q16, q17, [%1]\n\t"
406 "st2 { v16.s, v17.s }[0], [%0]"
407 : : "r"(dest), "r"(reg) : "memory", "v16", "v17");
410 asm ("ldp q16, q17, [%1]\n\t"
411 "ldr q18, [%1, #32]\n\t"
412 "st3 { v16.s, v17.s, v18.s }[0], [%0]"
413 : : "r"(dest), "r"(reg) : "memory", "v16", "v17", "v18");
416 asm ("ldp q16, q17, [%1]\n\t"
417 "ldp q18, q19, [%1, #32]\n\t"
418 "st4 { v16.s, v17.s, v18.s, v19.s }[0], [%0]"
419 : : "r"(dest), "r"(reg) : "memory", "v16", "v17", "v18", "v19");
/* Double-width variants mirror the float cases using .d lanes.  */
430 *(double *)dest = *(double *)reg;
433 asm ("ldp q16, q17, [%1]\n\t"
434 "st2 { v16.d, v17.d }[0], [%0]"
435 : : "r"(dest), "r"(reg) : "memory", "v16", "v17");
438 asm ("ldp q16, q17, [%1]\n\t"
439 "ldr q18, [%1, #32]\n\t"
440 "st3 { v16.d, v17.d, v18.d }[0], [%0]"
441 : : "r"(dest), "r"(reg) : "memory", "v16", "v17", "v18");
444 asm ("ldp q16, q17, [%1]\n\t"
445 "ldp q18, q19, [%1, #32]\n\t"
446 "st4 { v16.d, v17.d, v18.d, v19.d }[0], [%0]"
447 : : "r"(dest), "r"(reg) : "memory", "v16", "v17", "v18", "v19");
/* Quad (long double) elements already occupy full 16-byte images, so the
   "compressed" form is a straight memcpy of (4 - (h&3)) elements.  */
452 return memcpy (dest, reg, 16 * (4 - (h & 3)));
459 /* Either allocate an appropriate register for the argument type, or if
460 none are available, allocate a stack slot and return a pointer
461 to the allocated space. */
464 allocate_int_to_reg_or_stack (struct call_context *context,
465 struct arg_state *state,
466 void *stack, size_t size)
/* Prefer the next free X register; once exhausted, permanently mark the
   X registers full and fall back to a naturally aligned stack slot.  */
468 if (state->ngrn < N_X_ARG_REG)
469 return &context->x[state->ngrn++];
471 state->ngrn = N_X_ARG_REG;
472 return allocate_to_stack (state, stack, size, size);
/* Machine-dependent CIF preparation: classify the return type into an
   AARCH64_RET_* flags word, note whether any argument travels in vector
   registers, and round the outgoing stack area to 16 bytes.  */
475 ffi_status FFI_HIDDEN
476 ffi_prep_cif_machdep (ffi_cif *cif)
478 ffi_type *rtype = cif->rtype;
479 size_t bytes = cif->bytes;
/* Scalar return classification.  (The switch head and break lines are
   elided in this listing.)  */
485 flags = AARCH64_RET_VOID;
488 flags = AARCH64_RET_UINT8;
490 case FFI_TYPE_UINT16:
491 flags = AARCH64_RET_UINT16;
493 case FFI_TYPE_UINT32:
494 flags = AARCH64_RET_UINT32;
497 flags = AARCH64_RET_SINT8;
499 case FFI_TYPE_SINT16:
500 flags = AARCH64_RET_SINT16;
503 case FFI_TYPE_SINT32:
504 flags = AARCH64_RET_SINT32;
506 case FFI_TYPE_SINT64:
507 case FFI_TYPE_UINT64:
508 flags = AARCH64_RET_INT64;
/* Pointers follow the platform word size (ILP32 vs LP64).  */
510 case FFI_TYPE_POINTER:
511 flags = (sizeof(void *) == 4 ? AARCH64_RET_UINT32 : AARCH64_RET_INT64);
/* Aggregates/fp: HFAs return in vector registers (nonzero is_vfp_type);
   otherwise small structs return in x0/x1 and larger ones via the hidden
   in-memory result pointer.  */
515 case FFI_TYPE_DOUBLE:
516 case FFI_TYPE_LONGDOUBLE:
517 case FFI_TYPE_STRUCT:
518 case FFI_TYPE_COMPLEX:
519 flags = is_vfp_type (rtype);
522 size_t s = rtype->size;
525 flags = AARCH64_RET_VOID | AARCH64_RET_IN_MEM;
529 flags = AARCH64_RET_INT128;
531 flags = AARCH64_RET_INT64;
/* Sizes needing a bounce buffer get NEED_COPY so the caller copies the
   raw register image back into the user's rvalue.  */
533 flags = AARCH64_RET_INT128 | AARCH64_RET_NEED_COPY;
/* Record whether any argument is VFP-classified so the assembly stub
   knows to save/restore the vector argument registers.  */
541 for (i = 0, n = cif->nargs; i < n; i++)
542 if (is_vfp_type (cif->arg_types[i]))
544 flags |= AARCH64_FLAG_ARG_V;
548 /* Round the stack up to a multiple of the stack alignment requirement. */
549 cif->bytes = (unsigned) FFI_ALIGN(bytes, 16);
551 #if defined (__APPLE__)
552 cif->aarch64_nfixedargs = 0;
558 #if defined (__APPLE__)
559 /* Perform Apple-specific cif processing for variadic calls */
560 ffi_status FFI_HIDDEN
561 ffi_prep_cif_machdep_var(ffi_cif *cif, unsigned int nfixedargs,
562 unsigned int ntotalargs)
/* Apple: remember how many leading args are fixed so marshalling can
   switch to stack-packed mode for the variadic tail.  */
564 ffi_status status = ffi_prep_cif_machdep (cif);
565 cif->aarch64_nfixedargs = nfixedargs;
/* Non-Apple variant (the intervening #else is elided in this listing):
   just tag the cif so ffi_call_int knows the call is variadic
   (needed for the Win64 varargs HFA-in-X-registers rule).  */
569 ffi_status FFI_HIDDEN
570 ffi_prep_cif_machdep_var(ffi_cif *cif, unsigned int nfixedargs, unsigned int ntotalargs)
572 ffi_status status = ffi_prep_cif_machdep (cif);
573 cif->flags |= AARCH64_FLAG_VARARG;
576 #endif /* __APPLE__ */
/* Assembly entry point (sysv.S / win64_armasm.S) that installs the
   marshalled register context and performs the actual call.  */
578 extern void ffi_call_SYSV (struct call_context *context, void *frame,
579 void (*fn)(void), void *rvalue, int flags,
580 void *closure) FFI_HIDDEN;
582 /* Call a function with the provided arguments and capture the return
585 ffi_call_int (ffi_cif *cif, void (*fn)(void), void *orig_rvalue,
586 void **avalue, void *closure)
588 struct call_context *context;
589 void *stack, *frame, *rvalue;
590 struct arg_state state;
591 size_t stack_bytes, rtype_size, rsize;
592 int i, nargs, flags, isvariadic = 0;
597 rtype_size = rtype->size;
598 stack_bytes = cif->bytes;
/* Consume the variadic marker locally; the assembly stub does not
   understand AARCH64_FLAG_VARARG.  */
600 if (flags & AARCH64_FLAG_VARARG)
603 flags &= ~AARCH64_FLAG_VARARG;
606 /* If the target function returns a structure via hidden pointer,
607 then we cannot allow a null rvalue. Otherwise, mash a null
608 rvalue to void return type. */
610 if (flags & AARCH64_RET_IN_MEM)
612 if (orig_rvalue == NULL)
615 else if (orig_rvalue == NULL)
616 flags &= AARCH64_FLAG_ARG_V;
617 else if (flags & AARCH64_RET_NEED_COPY)
620 /* Allocate consectutive stack for everything we'll need. */
/* Layout: [call_context][outgoing stack args][32-byte frame][rsize bounce
   buffer].  All carved from one alloca so lifetime ends with this call.  */
621 context = alloca (sizeof(struct call_context) + stack_bytes + 32 + rsize);
623 frame = (void*)((uintptr_t)stack + (uintptr_t)stack_bytes);
624 rvalue = (rsize ? (void*)((uintptr_t)frame + 32) : orig_rvalue);
/* Marshal each argument into X/V registers or the outgoing stack area
   per AAPCS64 classification.  */
627 for (i = 0, nargs = cif->nargs; i < nargs; i++)
629 ffi_type *ty = cif->arg_types[i];
641 /* If the argument is a basic type the argument is allocated to an
642 appropriate register, or if none are available, to the stack. */
646 case FFI_TYPE_UINT16:
647 case FFI_TYPE_SINT16:
648 case FFI_TYPE_UINT32:
649 case FFI_TYPE_SINT32:
650 case FFI_TYPE_UINT64:
651 case FFI_TYPE_SINT64:
652 case FFI_TYPE_POINTER:
655 ffi_arg ext = extend_integer_type (a, t);
656 if (state.ngrn < N_X_ARG_REG)
657 context->x[state.ngrn++] = ext;
660 void *d = allocate_to_stack (&state, stack, ty->alignment, s);
661 state.ngrn = N_X_ARG_REG;
662 /* Note that the default abi extends each argument
663 to a full 64-bit slot, while the iOS abi allocates
664 only enough space. */
675 case FFI_TYPE_DOUBLE:
676 case FFI_TYPE_LONGDOUBLE:
677 case FFI_TYPE_STRUCT:
678 case FFI_TYPE_COMPLEX:
682 h = is_vfp_type (ty);
685 int elems = 4 - (h & 3);
/* Win64 varargs: HFAs go in X registers (or stack), never V regs.  */
686 if (cif->abi == FFI_WIN64 && isvariadic)
688 if (state.ngrn + elems <= N_X_ARG_REG)
690 dest = &context->x[state.ngrn];
692 extend_hfa_type(dest, a, h);
695 state.ngrn = N_X_ARG_REG;
696 dest = allocate_to_stack(&state, stack, ty->alignment, s);
/* Normal path: one V register per HFA element, else the stack.  */
700 if (state.nsrn + elems <= N_V_ARG_REG)
702 dest = &context->v[state.nsrn];
704 extend_hfa_type (dest, a, h);
707 state.nsrn = N_V_ARG_REG;
708 dest = allocate_to_stack (&state, stack, ty->alignment, s);
713 /* If the argument is a composite type that is larger than 16
714 bytes, then the argument has been copied to memory, and
715 the argument is replaced by a pointer to the copy. */
717 t = FFI_TYPE_POINTER;
/* Small composites (<=16 bytes): copy into consecutive X registers.  */
723 size_t n = (s + 7) / 8;
724 if (state.ngrn + n <= N_X_ARG_REG)
726 /* If the argument is a composite type and the size in
727 double-words is not more than the number of available
728 X registers, then the argument is copied into
729 consecutive X registers. */
730 dest = &context->x[state.ngrn];
731 state.ngrn += (unsigned int)n;
735 /* Otherwise, there are insufficient X registers. Further
736 X register allocations are prevented, the NSAA is
737 adjusted and the argument is copied to memory at the
739 state.ngrn = N_X_ARG_REG;
740 dest = allocate_to_stack (&state, stack, ty->alignment, s);
/* Apple ABI: after the last fixed argument all remaining args are
   variadic — force them onto the stack in packed mode.  */
751 #if defined (__APPLE__)
752 if (i + 1 == cif->aarch64_nfixedargs)
754 state.ngrn = N_X_ARG_REG;
755 state.nsrn = N_V_ARG_REG;
756 state.allocating_variadic = 1;
761 ffi_call_SYSV (context, frame, fn, rvalue, flags, closure);
/* Copy out of the bounce buffer when the return needed one.  */
763 if (flags & AARCH64_RET_NEED_COPY)
764 memcpy (orig_rvalue, rvalue, rtype_size);
/* Public entry point: plain call, no closure environment.  */
768 ffi_call (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue)
770 ffi_call_int (cif, fn, rvalue, avalue, NULL);
773 #ifdef FFI_GO_CLOSURES
/* Go-closure entry point: passes the Go closure pointer through so the
   callee can recover its environment register.  */
775 ffi_call_go (ffi_cif *cif, void (*fn) (void), void *rvalue,
776 void **avalue, void *closure)
778 ffi_call_int (cif, fn, rvalue, avalue, closure);
780 #endif /* FFI_GO_CLOSURES */
782 /* Build a trampoline. */
784 extern void ffi_closure_SYSV (void) FFI_HIDDEN;
785 extern void ffi_closure_SYSV_V (void) FFI_HIDDEN;
786 #if defined(FFI_EXEC_STATIC_TRAMP)
787 extern void ffi_closure_SYSV_alt (void) FFI_HIDDEN;
788 extern void ffi_closure_SYSV_V_alt (void) FFI_HIDDEN;
/* Prepare CLOSURE so that calling codeloc invokes fun(cif, ...).  Picks
   the _V assembly stub when vector registers must be preserved, then
   installs either a trampoline-table config entry (Apple), a static
   trampoline, or a patched inline trampoline.  */
792 ffi_prep_closure_loc (ffi_closure *closure,
794 void (*fun)(ffi_cif*,void*,void**,void*),
798 if (cif->abi != FFI_SYSV)
803 if (cif->flags & AARCH64_FLAG_ARG_V)
804 start = ffi_closure_SYSV_V;
806 start = ffi_closure_SYSV;
808 #if FFI_EXEC_TRAMPOLINE_TABLE
/* Apple arm64e: codeloc may carry a pointer-auth signature; strip it
   before doing address arithmetic.  */
811 codeloc = ptrauth_strip (codeloc, ptrauth_key_asia);
/* The config page sits one page below the executable trampoline page.  */
813 void **config = (void **)((uint8_t *)codeloc - PAGE_MAX_SIZE);
/* Inline trampoline template: load target from tramp+16, pass tramp
   address in x17 so the stub can locate the closure, then branch.
   NOTE(review): only 12 of the declared 16 bytes are visible here;
   a line of this initializer appears elided in the listing.  */
818 static const unsigned char trampoline[16] = {
819 0x90, 0x00, 0x00, 0x58, /* ldr x16, tramp+16 */
820 0xf1, 0xff, 0xff, 0x10, /* adr x17, tramp+0 */
821 0x00, 0x02, 0x1f, 0xd6 /* br x16 */
823 char *tramp = closure->tramp;
825 #if defined(FFI_EXEC_STATIC_TRAMP)
826 if (ffi_tramp_is_present(closure))
828 /* Initialize the static trampoline's parameters. */
/* Static trampolines use the _alt stubs, which expect their parameters
   in the static-trampoline parameter block rather than at tramp+16.  */
829 if (start == ffi_closure_SYSV_V)
830 start = ffi_closure_SYSV_V_alt;
832 start = ffi_closure_SYSV_alt;
833 ffi_tramp_set_parms (closure->ftramp, start, closure);
838 /* Initialize the dynamic trampoline. */
839 memcpy (tramp, trampoline, sizeof(trampoline));
841 *(UINT64 *)(tramp + 16) = (uintptr_t)start;
843 ffi_clear_cache(tramp, tramp + FFI_TRAMPOLINE_SIZE);
845 /* Also flush the cache for code mapping. */
847 // Not using dlmalloc.c for Windows ARM64 builds
848 // so calling ffi_data_to_code_pointer() isn't necessary
849 unsigned char *tramp_code = tramp;
/* Non-Windows: the writable data page and executable code page may be
   distinct mappings; flush the code-side alias too.  */
851 unsigned char *tramp_code = ffi_data_to_code_pointer (tramp);
853 ffi_clear_cache (tramp_code, tramp_code + FFI_TRAMPOLINE_SIZE);
859 closure->user_data = user_data;
864 #ifdef FFI_GO_CLOSURES
865 extern void ffi_go_closure_SYSV (void) FFI_HIDDEN;
866 extern void ffi_go_closure_SYSV_V (void) FFI_HIDDEN;
/* Go closures need no generated trampoline: the stub address is stored
   directly in the ffi_go_closure, and Go passes the closure pointer in
   the platform's static-chain register.  */
869 ffi_prep_go_closure (ffi_go_closure *closure, ffi_cif* cif,
870 void (*fun)(ffi_cif*,void*,void**,void*))
874 if (cif->abi != FFI_SYSV)
877 if (cif->flags & AARCH64_FLAG_ARG_V)
878 start = ffi_go_closure_SYSV_V;
880 start = ffi_go_closure_SYSV;
882 closure->tramp = start;
888 #endif /* FFI_GO_CLOSURES */
890 /* Primary handler to setup and invoke a function within a closure.
892 A closure when invoked enters via the assembler wrapper
893 ffi_closure_SYSV(). The wrapper allocates a call context on the
894 stack, saves the interesting registers (from the perspective of
895 the calling convention) into the context then passes control to
896 ffi_closure_SYSV_inner() passing the saved context and a pointer to
897 the stack at the point ffi_closure_SYSV() was invoked.
899 On the return path the assembler wrapper will reload call context
902 ffi_closure_SYSV_inner() marshalls the call context into ffi value
903 descriptors, invokes the wrapped function, then marshalls the return
904 value back into the call context. */
907 ffi_closure_SYSV_inner (ffi_cif *cif,
908 void (*fun)(ffi_cif*,void*,void**,void*),
910 struct call_context *context,
911 void *stack, void *rvalue, void *struct_rvalue)
913 void **avalue = (void**) alloca (cif->nargs * sizeof (void*));
914 int i, h, nargs, flags;
915 struct arg_state state;
/* Reverse-marshal: for each argument, point avalue[i] at where the
   caller left it — in the saved X/V register context or on the stack.  */
919 for (i = 0, nargs = cif->nargs; i < nargs; i++)
921 ffi_type *ty = cif->arg_types[i];
923 size_t n, s = ty->size;
934 case FFI_TYPE_UINT16:
935 case FFI_TYPE_SINT16:
936 case FFI_TYPE_UINT32:
937 case FFI_TYPE_SINT32:
938 case FFI_TYPE_UINT64:
939 case FFI_TYPE_SINT64:
940 case FFI_TYPE_POINTER:
941 avalue[i] = allocate_int_to_reg_or_stack (context, &state, stack, s);
945 case FFI_TYPE_DOUBLE:
946 case FFI_TYPE_LONGDOUBLE:
947 case FFI_TYPE_STRUCT:
948 case FFI_TYPE_COMPLEX:
949 h = is_vfp_type (ty);
/* Win64 varargs: HFAs arrived in X registers, mirroring the forward
   marshalling rule in ffi_call_int.  */
953 #ifdef _WIN32 /* for handling armasm calling convention */
954 if (cif->is_variadic)
956 if (state.ngrn + n <= N_X_ARG_REG)
958 void *reg = &context->x[state.ngrn];
959 state.ngrn += (unsigned int)n;
961 /* Eeek! We need a pointer to the structure, however the
962 homogeneous float elements are being passed in individual
963 registers, therefore for float and double the structure
964 is not represented as a contiguous sequence of bytes in
965 our saved register context. We don't need the original
966 contents of the register storage, so we reformat the
967 structure into the same memory. */
968 avalue[i] = compress_hfa_type(reg, reg, h);
972 state.ngrn = N_X_ARG_REG;
973 state.nsrn = N_V_ARG_REG;
974 avalue[i] = allocate_to_stack(&state, stack,
980 #endif /* for handling armasm calling convention */
/* Normal path: HFA elements arrived one-per-V-register; repack them
   in place so avalue[i] sees a contiguous struct.  */
981 if (state.nsrn + n <= N_V_ARG_REG)
983 void *reg = &context->v[state.nsrn];
984 state.nsrn += (unsigned int)n;
985 avalue[i] = compress_hfa_type(reg, reg, h);
989 state.nsrn = N_V_ARG_REG;
990 avalue[i] = allocate_to_stack(&state, stack,
993 #ifdef _WIN32 /* for handling armasm calling convention */
995 #endif /* for handling armasm calling convention */
999 /* Replace Composite type of size greater than 16 with a
/* Large composites were passed by reference; dereference the pointer
   slot to reach the actual copy.  */
1001 avalue[i] = *(void **)
1002 allocate_int_to_reg_or_stack (context, &state, stack,
/* Small composites occupy consecutive X register slots directly.  */
1008 if (state.ngrn + n <= N_X_ARG_REG)
1010 avalue[i] = &context->x[state.ngrn];
1011 state.ngrn += (unsigned int)n;
1015 state.ngrn = N_X_ARG_REG;
1016 avalue[i] = allocate_to_stack(&state, stack,
/* Apple ABI: everything after the fixed args is variadic / stack-packed,
   matching the forward direction in ffi_call_int.  */
1026 #if defined (__APPLE__)
1027 if (i + 1 == cif->aarch64_nfixedargs)
1029 state.ngrn = N_X_ARG_REG;
1030 state.nsrn = N_V_ARG_REG;
1031 state.allocating_variadic = 1;
/* In-memory returns write through the hidden pointer (x8) captured by
   the assembly wrapper as struct_rvalue.  */
1037 if (flags & AARCH64_RET_IN_MEM)
1038 rvalue = struct_rvalue;
1040 fun (cif, rvalue, avalue, user_data);
1045 #if defined(FFI_EXEC_STATIC_TRAMP)
/* Report this architecture's static-trampoline geometry to the generic
   static-trampoline allocator and hand back the shared code table.  */
1047 ffi_tramp_arch (size_t *tramp_size, size_t *map_size)
1049 extern void *trampoline_code_table;
1051 *tramp_size = AARCH64_TRAMP_SIZE;
1052 *map_size = AARCH64_TRAMP_MAP_SIZE;
1053 return &trampoline_code_table;
1057 #endif /* (__aarch64__) || defined(__arm64__)|| defined (_M_ARM64)*/