-<?xml version="1.0" encoding="utf-8"?>\r
-<Project DefaultTargets="Build" ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">\r
- <ItemGroup Label="ProjectConfigurations">\r
- <ProjectConfiguration Include="Debug|ARM64">\r
- <Configuration>Debug</Configuration>\r
- <Platform>ARM64</Platform>\r
- </ProjectConfiguration>\r
- <ProjectConfiguration Include="Release|ARM64">\r
- <Configuration>Release</Configuration>\r
- <Platform>ARM64</Platform>\r
- </ProjectConfiguration>\r
- </ItemGroup>\r
- <PropertyGroup Label="Globals">\r
- <VCProjectVersion>15.0</VCProjectVersion>\r
- <ProjectGuid>{115502C0-BE05-4767-BF19-5C87D805FAD6}</ProjectGuid>\r
- <Keyword>Win32Proj</Keyword>\r
- <RootNamespace>FfistaticLib</RootNamespace>\r
- <WindowsTargetPlatformVersion>10.0.17763.0</WindowsTargetPlatformVersion>\r
- <ProjectName>Ffi_staticLib_arm64</ProjectName>\r
- </PropertyGroup>\r
- <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />\r
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|ARM64'" Label="Configuration">\r
- <ConfigurationType>StaticLibrary</ConfigurationType>\r
- <UseDebugLibraries>true</UseDebugLibraries>\r
- <PlatformToolset>v141</PlatformToolset>\r
- <CharacterSet>Unicode</CharacterSet>\r
- </PropertyGroup>\r
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|ARM64'" Label="Configuration">\r
- <ConfigurationType>StaticLibrary</ConfigurationType>\r
- <UseDebugLibraries>false</UseDebugLibraries>\r
- <PlatformToolset>v141</PlatformToolset>\r
- <WholeProgramOptimization>true</WholeProgramOptimization>\r
- <CharacterSet>Unicode</CharacterSet>\r
- </PropertyGroup>\r
- <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />\r
- <ImportGroup Label="ExtensionSettings">\r
- </ImportGroup>\r
- <ImportGroup Label="Shared">\r
- </ImportGroup>\r
- <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|ARM64'" Label="PropertySheets">\r
- <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
- </ImportGroup>\r
- <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|ARM64'" Label="PropertySheets">\r
- <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
- </ImportGroup>\r
- <PropertyGroup Label="UserMacros" />\r
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|ARM64'">\r
- <LinkIncremental>true</LinkIncremental>\r
- </PropertyGroup>\r
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|ARM64'">\r
- <LinkIncremental>false</LinkIncremental>\r
- </PropertyGroup>\r
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|ARM64'">\r
- <ClCompile>\r
- <PrecompiledHeader>NotUsing</PrecompiledHeader>\r
- <WarningLevel>Level3</WarningLevel>\r
- <Optimization>Disabled</Optimization>\r
- <SDLCheck>true</SDLCheck>\r
- <PreprocessorDefinitions>FFI_BUILDING_DLL;_DEBUG;_LIB;USE_DL_PREFIX;ARM64;_M_ARM64;NDEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
- <ConformanceMode>true</ConformanceMode>\r
- <AdditionalIncludeDirectories>..\..\include;.\aarch64_include;..\..\src\aarch64;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
- <IgnoreStandardIncludePath>false</IgnoreStandardIncludePath>\r
- <BrowseInformation>true</BrowseInformation>\r
- <OmitFramePointers>\r
- </OmitFramePointers>\r
- <WholeProgramOptimization>false</WholeProgramOptimization>\r
- </ClCompile>\r
- <Link>\r
- <SubSystem>Windows</SubSystem>\r
- <GenerateDebugInformation>true</GenerateDebugInformation>\r
- </Link>\r
- </ItemDefinitionGroup>\r
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|ARM64'">\r
- <ClCompile>\r
- <PrecompiledHeader>NotUsing</PrecompiledHeader>\r
- <WarningLevel>Level3</WarningLevel>\r
- <Optimization>MaxSpeed</Optimization>\r
- <FunctionLevelLinking>true</FunctionLevelLinking>\r
- <IntrinsicFunctions>true</IntrinsicFunctions>\r
- <SDLCheck>true</SDLCheck>\r
- <PreprocessorDefinitions>FFI_BUILDING_DLL;USE_DL_PREFIX;ARM64;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
- <ConformanceMode>true</ConformanceMode>\r
- <AdditionalIncludeDirectories>..\..\include;.\aarch64_include;..\..\src\aarch64;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
- <OmitFramePointers>true</OmitFramePointers>\r
- <FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>\r
- <EnableFiberSafeOptimizations>true</EnableFiberSafeOptimizations>\r
- <AdditionalUsingDirectories>..\..\src;..\..\src\aarch64;%(AdditionalUsingDirectories)</AdditionalUsingDirectories>\r
- </ClCompile>\r
- <Link>\r
- <SubSystem>Windows</SubSystem>\r
- <EnableCOMDATFolding>true</EnableCOMDATFolding>\r
- <OptimizeReferences>true</OptimizeReferences>\r
- <GenerateDebugInformation>true</GenerateDebugInformation>\r
- </Link>\r
- <ProjectReference>\r
- <LinkLibraryDependencies>true</LinkLibraryDependencies>\r
- </ProjectReference>\r
- </ItemDefinitionGroup>\r
- <ItemGroup>\r
- <ClInclude Include=".\aarch64_include\ffi.h" />\r
- <ClInclude Include=".\aarch64_include\fficonfig.h" />\r
- <ClInclude Include="..\..\src\aarch64\ffitarget.h" />\r
- <ClInclude Include="..\include\ffi_cfi.h" />\r
- <ClInclude Include="..\include\ffi_common.h" />\r
- <ClInclude Include="..\..\src\aarch64\internal.h" />\r
- </ItemGroup>\r
- <ItemGroup>\r
- <ClCompile Include="..\..\src\closures.c" />\r
- <ClCompile Include="..\..\src\dlmalloc.c" />\r
- <ClCompile Include="..\..\src\aarch64\ffi.c" />\r
- <ClCompile Include="..\..\src\prep_cif.c" />\r
- <ClCompile Include="..\..\src\types.c" />\r
- </ItemGroup>\r
- <!--ItemGroup>\r
- <Object Include="..\..\..\..\Downloads\libffi-master-win64\src\aarch64\win64_armasm.obj" />\r
- </ItemGroup-->\r
- <ItemGroup>\r
- <CustomBuild Include="..\..\src\aarch64\win64_armasm.S">\r
- <!--ExcludedFromBuild Condition="'$(Platform)'!='ARM64'">true</ExcludedFromBuild -->\r
- <Command>\r
- cl /FA /EP /nologo /I"..\..\include" /I".\aarch64_include" /I"..\..\src\aarch64" "%(FullPath)" > $(IntDir)win64_armasm.i\r
- armasm64 $(IntDir)win64_armasm.i /I"src\" /I"..\..\include" /I"..\..\src\aarch64" -o "$(IntDir)win64_armasm.obj"\r
- </Command>\r
- <Outputs>win64_armasm.obj;%(Outputs)</Outputs>\r
- </CustomBuild>\r
- </ItemGroup>\r
- <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />\r
- <ImportGroup Label="ExtensionTargets">\r
- </ImportGroup>\r
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup Label="ProjectConfigurations">
+ <ProjectConfiguration Include="Debug|ARM64">
+ <Configuration>Debug</Configuration>
+ <Platform>ARM64</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release|ARM64">
+ <Configuration>Release</Configuration>
+ <Platform>ARM64</Platform>
+ </ProjectConfiguration>
+ </ItemGroup>
+ <PropertyGroup Label="Globals">
+ <VCProjectVersion>15.0</VCProjectVersion>
+ <ProjectGuid>{115502C0-BE05-4767-BF19-5C87D805FAD6}</ProjectGuid>
+ <Keyword>Win32Proj</Keyword>
+ <RootNamespace>FfistaticLib</RootNamespace>
+ <WindowsTargetPlatformVersion>10.0.17763.0</WindowsTargetPlatformVersion>
+ <ProjectName>Ffi_staticLib_arm64</ProjectName>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|ARM64'" Label="Configuration">
+ <ConfigurationType>StaticLibrary</ConfigurationType>
+ <UseDebugLibraries>true</UseDebugLibraries>
+ <PlatformToolset>v141</PlatformToolset>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|ARM64'" Label="Configuration">
+ <ConfigurationType>StaticLibrary</ConfigurationType>
+ <UseDebugLibraries>false</UseDebugLibraries>
+ <PlatformToolset>v141</PlatformToolset>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
+ <ImportGroup Label="ExtensionSettings">
+ </ImportGroup>
+ <ImportGroup Label="Shared">
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|ARM64'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|ARM64'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <PropertyGroup Label="UserMacros" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|ARM64'">
+ <LinkIncremental>true</LinkIncremental>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|ARM64'">
+ <LinkIncremental>false</LinkIncremental>
+ </PropertyGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|ARM64'">
+ <ClCompile>
+ <PrecompiledHeader>NotUsing</PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <Optimization>Disabled</Optimization>
+ <SDLCheck>true</SDLCheck>
+ <PreprocessorDefinitions>FFI_BUILDING_DLL;_DEBUG;_LIB;USE_DL_PREFIX;ARM64;_M_ARM64;NDEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <ConformanceMode>true</ConformanceMode>
+ <AdditionalIncludeDirectories>..\..\include;.\aarch64_include;..\..\src\aarch64;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <IgnoreStandardIncludePath>false</IgnoreStandardIncludePath>
+ <BrowseInformation>true</BrowseInformation>
+ <OmitFramePointers>
+ </OmitFramePointers>
+ <WholeProgramOptimization>false</WholeProgramOptimization>
+ </ClCompile>
+ <Link>
+ <SubSystem>Windows</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|ARM64'">
+ <ClCompile>
+ <PrecompiledHeader>NotUsing</PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <Optimization>MaxSpeed</Optimization>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <SDLCheck>true</SDLCheck>
+ <PreprocessorDefinitions>FFI_BUILDING_DLL;USE_DL_PREFIX;ARM64;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <ConformanceMode>true</ConformanceMode>
+ <AdditionalIncludeDirectories>..\..\include;.\aarch64_include;..\..\src\aarch64;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <OmitFramePointers>true</OmitFramePointers>
+ <FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
+ <EnableFiberSafeOptimizations>true</EnableFiberSafeOptimizations>
+ <AdditionalUsingDirectories>..\..\src;..\..\src\aarch64;%(AdditionalUsingDirectories)</AdditionalUsingDirectories>
+ </ClCompile>
+ <Link>
+ <SubSystem>Windows</SubSystem>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <OptimizeReferences>true</OptimizeReferences>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ </Link>
+ <ProjectReference>
+ <LinkLibraryDependencies>true</LinkLibraryDependencies>
+ </ProjectReference>
+ </ItemDefinitionGroup>
+ <ItemGroup>
+ <ClInclude Include=".\aarch64_include\ffi.h" />
+ <ClInclude Include=".\aarch64_include\fficonfig.h" />
+ <ClInclude Include="..\..\src\aarch64\ffitarget.h" />
+ <ClInclude Include="..\include\ffi_cfi.h" />
+ <ClInclude Include="..\include\ffi_common.h" />
+ <ClInclude Include="..\..\src\aarch64\internal.h" />
+ </ItemGroup>
+ <ItemGroup>
+ <ClCompile Include="..\..\src\closures.c" />
+ <ClCompile Include="..\..\src\dlmalloc.c" />
+ <ClCompile Include="..\..\src\aarch64\ffi.c" />
+ <ClCompile Include="..\..\src\prep_cif.c" />
+ <ClCompile Include="..\..\src\types.c" />
+ </ItemGroup>
+ <!--ItemGroup>
+ <Object Include="..\..\..\..\Downloads\libffi-master-win64\src\aarch64\win64_armasm.obj" />
+ </ItemGroup-->
+ <ItemGroup>
+ <CustomBuild Include="..\..\src\aarch64\win64_armasm.S">
+ <!--ExcludedFromBuild Condition="'$(Platform)'!='ARM64'">true</ExcludedFromBuild -->
+ <Command>
+ cl /FA /EP /nologo /I"..\..\include" /I".\aarch64_include" /I"..\..\src\aarch64" "%(FullPath)" > $(IntDir)win64_armasm.i
+ armasm64 $(IntDir)win64_armasm.i /I"src\" /I"..\..\include" /I"..\..\src\aarch64" -o "$(IntDir)win64_armasm.obj"
+ </Command>
+ <Outputs>win64_armasm.obj;%(Outputs)</Outputs>
+ </CustomBuild>
+ </ItemGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+ <ImportGroup Label="ExtensionTargets">
+ </ImportGroup>
</Project>
\ No newline at end of file
-/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd.\r
-Permission is hereby granted, free of charge, to any person obtaining\r
-a copy of this software and associated documentation files (the\r
-``Software''), to deal in the Software without restriction, including\r
-without limitation the rights to use, copy, modify, merge, publish,\r
-distribute, sublicense, and/or sell copies of the Software, and to\r
-permit persons to whom the Software is furnished to do so, subject to\r
-the following conditions:\r
-The above copyright notice and this permission notice shall be\r
-included in all copies or substantial portions of the Software.\r
-THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,\r
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\r
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\r
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\r
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\r
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */\r
-\r
-#define LIBFFI_ASM\r
-#include <fficonfig.h>\r
-#include <ffi.h>\r
-#include <ffi_cfi.h>\r
-#include "internal.h"\r
-\r
- OPT 2 /*disable listing */\r
-/* For some macros to add unwind information */\r
-#include "ksarm64.h"\r
- OPT 1 /*re-enable listing */\r
-\r
-#define BE(X) 0\r
-#define PTR_REG(n) x##n\r
-#define PTR_SIZE 8\r
-\r
- IMPORT ffi_closure_SYSV_inner\r
- EXPORT ffi_call_SYSV\r
- EXPORT ffi_closure_SYSV_V\r
- EXPORT ffi_closure_SYSV\r
- EXPORT extend_hfa_type\r
- EXPORT compress_hfa_type\r
-#ifdef FFI_GO_CLOSURES\r
- EXPORT ffi_go_closure_SYSV_V\r
- EXPORT ffi_go_closure_SYSV\r
-#endif\r
-\r
- TEXTAREA, ALLIGN=8\r
-\r
-/* ffi_call_SYSV\r
- extern void ffi_call_SYSV (void *stack, void *frame,\r
- void (*fn)(void), void *rvalue,\r
- int flags, void *closure);\r
- Therefore on entry we have:\r
- x0 stack\r
- x1 frame\r
- x2 fn\r
- x3 rvalue\r
- x4 flags\r
- x5 closure\r
-*/\r
-\r
- NESTED_ENTRY ffi_call_SYSV_fake\r
-\r
- /* For unwind information, Windows has to store fp and lr */\r
- PROLOG_SAVE_REG_PAIR x29, x30, #-32!\r
-\r
- ALTERNATE_ENTRY ffi_call_SYSV\r
- /* Use a stack frame allocated by our caller. */\r
- stp x29, x30, [x1]\r
- mov x29, x1\r
- mov sp, x0\r
-\r
- mov x9, x2 /* save fn */\r
- mov x8, x3 /* install structure return */\r
-#ifdef FFI_GO_CLOSURES\r
- /*mov x18, x5 install static chain */\r
-#endif\r
- stp x3, x4, [x29, #16] /* save rvalue and flags */\r
- \r
- /* Load the vector argument passing registers, if necessary. */\r
- tbz x4, #AARCH64_FLAG_ARG_V_BIT, ffi_call_SYSV_L1\r
- ldp q0, q1, [sp, #0]\r
- ldp q2, q3, [sp, #32]\r
- ldp q4, q5, [sp, #64]\r
- ldp q6, q7, [sp, #96]\r
-\r
-ffi_call_SYSV_L1\r
- /* Load the core argument passing registers, including\r
- the structure return pointer. */\r
- ldp x0, x1, [sp, #16*N_V_ARG_REG + 0]\r
- ldp x2, x3, [sp, #16*N_V_ARG_REG + 16]\r
- ldp x4, x5, [sp, #16*N_V_ARG_REG + 32]\r
- ldp x6, x7, [sp, #16*N_V_ARG_REG + 48]\r
-\r
- /* Deallocate the context, leaving the stacked arguments. */\r
- add sp, sp, #CALL_CONTEXT_SIZE \r
-\r
- blr x9 /* call fn */\r
-\r
- ldp x3, x4, [x29, #16] /* reload rvalue and flags */\r
-\r
- /* Partially deconstruct the stack frame. */\r
- mov sp, x29 \r
- ldp x29, x30, [x29]\r
-\r
- /* Save the return value as directed. */\r
- adr x5, ffi_call_SYSV_return\r
- and w4, w4, #AARCH64_RET_MASK\r
- add x5, x5, x4, lsl #3\r
- br x5\r
- \r
- /* Note that each table entry is 2 insns, and thus 8 bytes.\r
- For integer data, note that we're storing into ffi_arg\r
- and therefore we want to extend to 64 bits; these types\r
- have two consecutive entries allocated for them. */\r
- ALIGN 4\r
-ffi_call_SYSV_return\r
- ret /* VOID */\r
- nop\r
- str x0, [x3] /* INT64 */\r
- ret\r
- stp x0, x1, [x3] /* INT128 */\r
- ret\r
- brk #1000 /* UNUSED */\r
- ret\r
- brk #1000 /* UNUSED */\r
- ret\r
- brk #1000 /* UNUSED */\r
- ret\r
- brk #1000 /* UNUSED */\r
- ret\r
- brk #1000 /* UNUSED */\r
- ret\r
- st4 { v0.s, v1.s, v2.s, v3.s }[0], [x3] /* S4 */\r
- ret\r
- st3 { v0.s, v1.s, v2.s }[0], [x3] /* S3 */\r
- ret\r
- stp s0, s1, [x3] /* S2 */\r
- ret\r
- str s0, [x3] /* S1 */\r
- ret\r
- st4 { v0.d, v1.d, v2.d, v3.d }[0], [x3] /* D4 */\r
- ret\r
- st3 { v0.d, v1.d, v2.d }[0], [x3] /* D3 */\r
- ret\r
- stp d0, d1, [x3] /* D2 */\r
- ret\r
- str d0, [x3] /* D1 */\r
- ret\r
- str q3, [x3, #48] /* Q4 */\r
- nop\r
- str q2, [x3, #32] /* Q3 */\r
- nop\r
- stp q0, q1, [x3] /* Q2 */\r
- ret\r
- str q0, [x3] /* Q1 */\r
- ret\r
- uxtb w0, w0 /* UINT8 */\r
- str x0, [x3]\r
- ret /* reserved */\r
- nop\r
- uxth w0, w0 /* UINT16 */\r
- str x0, [x3]\r
- ret /* reserved */\r
- nop\r
- mov w0, w0 /* UINT32 */\r
- str x0, [x3]\r
- ret /* reserved */\r
- nop\r
- sxtb x0, w0 /* SINT8 */\r
- str x0, [x3]\r
- ret /* reserved */\r
- nop\r
- sxth x0, w0 /* SINT16 */\r
- str x0, [x3]\r
- ret /* reserved */\r
- nop\r
- sxtw x0, w0 /* SINT32 */\r
- str x0, [x3]\r
- ret /* reserved */\r
- nop\r
- \r
- \r
- NESTED_END ffi_call_SYSV_fake\r
- \r
-\r
-/* ffi_closure_SYSV\r
- Closure invocation glue. This is the low level code invoked directly by\r
- the closure trampoline to setup and call a closure.\r
- On entry x17 points to a struct ffi_closure, x16 has been clobbered\r
- all other registers are preserved.\r
- We allocate a call context and save the argument passing registers,\r
- then invoked the generic C ffi_closure_SYSV_inner() function to do all\r
- the real work, on return we load the result passing registers back from\r
- the call context.\r
-*/\r
-\r
-#define ffi_closure_SYSV_FS (8*2 + CALL_CONTEXT_SIZE + 64)\r
-\r
- NESTED_ENTRY ffi_closure_SYSV_V\r
- PROLOG_SAVE_REG_PAIR x29, x30, #-ffi_closure_SYSV_FS!\r
-\r
- /* Save the argument passing vector registers. */\r
- stp q0, q1, [sp, #16 + 0]\r
- stp q2, q3, [sp, #16 + 32]\r
- stp q4, q5, [sp, #16 + 64]\r
- stp q6, q7, [sp, #16 + 96]\r
-\r
- b ffi_closure_SYSV_save_argument\r
- NESTED_END ffi_closure_SYSV_V\r
-\r
- NESTED_ENTRY ffi_closure_SYSV\r
- PROLOG_SAVE_REG_PAIR x29, x30, #-ffi_closure_SYSV_FS!\r
-\r
-ffi_closure_SYSV_save_argument\r
- /* Save the argument passing core registers. */\r
- stp x0, x1, [sp, #16 + 16*N_V_ARG_REG + 0]\r
- stp x2, x3, [sp, #16 + 16*N_V_ARG_REG + 16]\r
- stp x4, x5, [sp, #16 + 16*N_V_ARG_REG + 32]\r
- stp x6, x7, [sp, #16 + 16*N_V_ARG_REG + 48]\r
-\r
- /* Load ffi_closure_inner arguments. */\r
- ldp PTR_REG(0), PTR_REG(1), [x17, #FFI_TRAMPOLINE_CLOSURE_OFFSET] /* load cif, fn */\r
- ldr PTR_REG(2), [x17, #FFI_TRAMPOLINE_CLOSURE_OFFSET+PTR_SIZE*2] /* load user_data */\r
-\r
-do_closure\r
- add x3, sp, #16 /* load context */\r
- add x4, sp, #ffi_closure_SYSV_FS /* load stack */\r
- add x5, sp, #16+CALL_CONTEXT_SIZE /* load rvalue */\r
- mov x6, x8 /* load struct_rval */\r
-\r
- bl ffi_closure_SYSV_inner\r
-\r
- /* Load the return value as directed. */\r
- adr x1, ffi_closure_SYSV_return_base\r
- and w0, w0, #AARCH64_RET_MASK\r
- add x1, x1, x0, lsl #3\r
- add x3, sp, #16+CALL_CONTEXT_SIZE\r
- br x1\r
-\r
- /* Note that each table entry is 2 insns, and thus 8 bytes. */\r
- ALIGN 8\r
-ffi_closure_SYSV_return_base\r
- b ffi_closure_SYSV_epilog /* VOID */\r
- nop\r
- ldr x0, [x3] /* INT64 */\r
- b ffi_closure_SYSV_epilog\r
- ldp x0, x1, [x3] /* INT128 */\r
- b ffi_closure_SYSV_epilog\r
- brk #1000 /* UNUSED */\r
- nop\r
- brk #1000 /* UNUSED */\r
- nop\r
- brk #1000 /* UNUSED */\r
- nop\r
- brk #1000 /* UNUSED */\r
- nop\r
- brk #1000 /* UNUSED */\r
- nop\r
- ldr s3, [x3, #12] /* S4 */\r
- nop\r
- ldr s2, [x3, #8] /* S3 */\r
- nop\r
- ldp s0, s1, [x3] /* S2 */\r
- b ffi_closure_SYSV_epilog\r
- ldr s0, [x3] /* S1 */\r
- b ffi_closure_SYSV_epilog\r
- ldr d3, [x3, #24] /* D4 */\r
- nop\r
- ldr d2, [x3, #16] /* D3 */\r
- nop\r
- ldp d0, d1, [x3] /* D2 */\r
- b ffi_closure_SYSV_epilog\r
- ldr d0, [x3] /* D1 */\r
- b ffi_closure_SYSV_epilog\r
- ldr q3, [x3, #48] /* Q4 */\r
- nop\r
- ldr q2, [x3, #32] /* Q3 */\r
- nop\r
- ldp q0, q1, [x3] /* Q2 */\r
- b ffi_closure_SYSV_epilog\r
- ldr q0, [x3] /* Q1 */\r
- b ffi_closure_SYSV_epilog\r
- ldrb w0, [x3, #BE(7)] /* UINT8 */\r
- b ffi_closure_SYSV_epilog\r
- brk #1000 /* reserved */\r
- nop\r
- ldrh w0, [x3, #BE(6)] /* UINT16 */\r
- b ffi_closure_SYSV_epilog\r
- brk #1000 /* reserved */\r
- nop\r
- ldr w0, [x3, #BE(4)] /* UINT32 */\r
- b ffi_closure_SYSV_epilog\r
- brk #1000 /* reserved */\r
- nop\r
- ldrsb x0, [x3, #BE(7)] /* SINT8 */\r
- b ffi_closure_SYSV_epilog\r
- brk #1000 /* reserved */\r
- nop\r
- ldrsh x0, [x3, #BE(6)] /* SINT16 */\r
- b ffi_closure_SYSV_epilog\r
- brk #1000 /* reserved */\r
- nop\r
- ldrsw x0, [x3, #BE(4)] /* SINT32 */\r
- nop\r
- /* reserved */\r
-\r
-ffi_closure_SYSV_epilog\r
- EPILOG_RESTORE_REG_PAIR x29, x30, #ffi_closure_SYSV_FS!\r
- EPILOG_RETURN\r
- NESTED_END ffi_closure_SYSV\r
-\r
-\r
-#ifdef FFI_GO_CLOSURES\r
- NESTED_ENTRY ffi_go_closure_SYSV_V\r
- PROLOG_SAVE_REG_PAIR x29, x30, #-ffi_closure_SYSV_FS!\r
-\r
- /* Save the argument passing vector registers. */\r
- stp q0, q1, [sp, #16 + 0]\r
- stp q2, q3, [sp, #16 + 32]\r
- stp q4, q5, [sp, #16 + 64]\r
- stp q6, q7, [sp, #16 + 96]\r
- b ffi_go_closure_SYSV_save_argument\r
- NESTED_END ffi_go_closure_SYSV_V\r
-\r
- NESTED_ENTRY ffi_go_closure_SYSV\r
- PROLOG_SAVE_REG_PAIR x29, x30, #-ffi_closure_SYSV_FS!\r
-\r
-ffi_go_closure_SYSV_save_argument\r
- /* Save the argument passing core registers. */\r
- stp x0, x1, [sp, #16 + 16*N_V_ARG_REG + 0]\r
- stp x2, x3, [sp, #16 + 16*N_V_ARG_REG + 16]\r
- stp x4, x5, [sp, #16 + 16*N_V_ARG_REG + 32]\r
- stp x6, x7, [sp, #16 + 16*N_V_ARG_REG + 48]\r
-\r
- /* Load ffi_closure_inner arguments. */\r
- ldp PTR_REG(0), PTR_REG(1), [x18, #PTR_SIZE]/* load cif, fn */\r
- mov x2, x18 /* load user_data */\r
- b do_closure\r
- NESTED_END ffi_go_closure_SYSV\r
-\r
-#endif /* FFI_GO_CLOSURES */\r
-\r
-\r
-/* void extend_hfa_type (void *dest, void *src, int h) */\r
-\r
- LEAF_ENTRY extend_hfa_type\r
-\r
- adr x3, extend_hfa_type_jump_base\r
- and w2, w2, #AARCH64_RET_MASK\r
- sub x2, x2, #AARCH64_RET_S4\r
- add x3, x3, x2, lsl #4\r
- br x3\r
-\r
- ALIGN 4\r
-extend_hfa_type_jump_base\r
- ldp s16, s17, [x1] /* S4 */\r
- ldp s18, s19, [x1, #8]\r
- b extend_hfa_type_store_4\r
- nop\r
-\r
- ldp s16, s17, [x1] /* S3 */\r
- ldr s18, [x1, #8]\r
- b extend_hfa_type_store_3\r
- nop\r
-\r
- ldp s16, s17, [x1] /* S2 */\r
- b extend_hfa_type_store_2\r
- nop\r
- nop\r
-\r
- ldr s16, [x1] /* S1 */\r
- b extend_hfa_type_store_1\r
- nop\r
- nop\r
-\r
- ldp d16, d17, [x1] /* D4 */\r
- ldp d18, d19, [x1, #16]\r
- b extend_hfa_type_store_4\r
- nop\r
-\r
- ldp d16, d17, [x1] /* D3 */\r
- ldr d18, [x1, #16]\r
- b extend_hfa_type_store_3\r
- nop\r
-\r
- ldp d16, d17, [x1] /* D2 */\r
- b extend_hfa_type_store_2\r
- nop\r
- nop\r
-\r
- ldr d16, [x1] /* D1 */\r
- b extend_hfa_type_store_1\r
- nop\r
- nop\r
-\r
- ldp q16, q17, [x1] /* Q4 */\r
- ldp q18, q19, [x1, #16]\r
- b extend_hfa_type_store_4\r
- nop\r
-\r
- ldp q16, q17, [x1] /* Q3 */\r
- ldr q18, [x1, #16]\r
- b extend_hfa_type_store_3\r
- nop\r
-\r
- ldp q16, q17, [x1] /* Q2 */\r
- b extend_hfa_type_store_2\r
- nop\r
- nop\r
-\r
- ldr q16, [x1] /* Q1 */\r
- b extend_hfa_type_store_1\r
-\r
-extend_hfa_type_store_4\r
- str q19, [x0, #48]\r
-extend_hfa_type_store_3\r
- str q18, [x0, #32]\r
-extend_hfa_type_store_2\r
- str q17, [x0, #16]\r
-extend_hfa_type_store_1\r
- str q16, [x0]\r
- ret\r
-\r
- LEAF_END extend_hfa_type\r
-\r
-\r
-/* void compress_hfa_type (void *dest, void *reg, int h) */\r
-\r
- LEAF_ENTRY compress_hfa_type\r
-\r
- adr x3, compress_hfa_type_jump_base\r
- and w2, w2, #AARCH64_RET_MASK\r
- sub x2, x2, #AARCH64_RET_S4\r
- add x3, x3, x2, lsl #4\r
- br x3\r
-\r
- ALIGN 4\r
-compress_hfa_type_jump_base\r
- ldp q16, q17, [x1] /* S4 */\r
- ldp q18, q19, [x1, #32]\r
- st4 { v16.s, v17.s, v18.s, v19.s }[0], [x0]\r
- ret\r
-\r
- ldp q16, q17, [x1] /* S3 */\r
- ldr q18, [x1, #32]\r
- st3 { v16.s, v17.s, v18.s }[0], [x0]\r
- ret\r
-\r
- ldp q16, q17, [x1] /* S2 */\r
- st2 { v16.s, v17.s }[0], [x0]\r
- ret\r
- nop\r
-\r
- ldr q16, [x1] /* S1 */\r
- st1 { v16.s }[0], [x0]\r
- ret\r
- nop\r
-\r
- ldp q16, q17, [x1] /* D4 */\r
- ldp q18, q19, [x1, #32]\r
- st4 { v16.d, v17.d, v18.d, v19.d }[0], [x0]\r
- ret\r
-\r
- ldp q16, q17, [x1] /* D3 */\r
- ldr q18, [x1, #32]\r
- st3 { v16.d, v17.d, v18.d }[0], [x0]\r
- ret\r
-\r
- ldp q16, q17, [x1] /* D2 */\r
- st2 { v16.d, v17.d }[0], [x0]\r
- ret\r
- nop\r
-\r
- ldr q16, [x1] /* D1 */\r
- st1 { v16.d }[0], [x0]\r
- ret\r
- nop\r
-\r
- ldp q16, q17, [x1] /* Q4 */\r
- ldp q18, q19, [x1, #32]\r
- b compress_hfa_type_store_q4\r
- nop\r
-\r
- ldp q16, q17, [x1] /* Q3 */\r
- ldr q18, [x1, #32]\r
- b compress_hfa_type_store_q3\r
- nop\r
-\r
- ldp q16, q17, [x1] /* Q2 */\r
- stp q16, q17, [x0]\r
- ret\r
- nop\r
-\r
- ldr q16, [x1] /* Q1 */\r
- str q16, [x0]\r
- ret\r
-\r
-compress_hfa_type_store_q4\r
- str q19, [x0, #48]\r
-compress_hfa_type_store_q3\r
- str q18, [x0, #32]\r
- stp q16, q17, [x0]\r
- ret\r
-\r
- LEAF_END compress_hfa_type\r
-\r
+/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd.
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+``Software''), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#define LIBFFI_ASM
+#include <fficonfig.h>
+#include <ffi.h>
+#include <ffi_cfi.h>
+#include "internal.h"
+
+ OPT 2 /*disable listing */
+/* For some macros to add unwind information */
+#include "ksarm64.h"
+ OPT 1 /*re-enable listing */
+
+#define BE(X) 0
+#define PTR_REG(n) x##n
+#define PTR_SIZE 8
+
+ IMPORT ffi_closure_SYSV_inner
+ EXPORT ffi_call_SYSV
+ EXPORT ffi_closure_SYSV_V
+ EXPORT ffi_closure_SYSV
+ EXPORT extend_hfa_type
+ EXPORT compress_hfa_type
+#ifdef FFI_GO_CLOSURES
+ EXPORT ffi_go_closure_SYSV_V
+ EXPORT ffi_go_closure_SYSV
+#endif
+
+ TEXTAREA, ALLIGN=8
+
+/* ffi_call_SYSV
+ extern void ffi_call_SYSV (void *stack, void *frame,
+ void (*fn)(void), void *rvalue,
+ int flags, void *closure);
+ Therefore on entry we have:
+ x0 stack
+ x1 frame
+ x2 fn
+ x3 rvalue
+ x4 flags
+ x5 closure
+*/
+
+ NESTED_ENTRY ffi_call_SYSV_fake
+
+ /* For unwind information, Windows has to store fp and lr */
+ PROLOG_SAVE_REG_PAIR x29, x30, #-32!
+
+ ALTERNATE_ENTRY ffi_call_SYSV
+ /* Use a stack frame allocated by our caller. */
+ stp x29, x30, [x1]
+ mov x29, x1
+ mov sp, x0
+
+ mov x9, x2 /* save fn */
+ mov x8, x3 /* install structure return */
+#ifdef FFI_GO_CLOSURES
+ /*mov x18, x5 install static chain */
+#endif
+ stp x3, x4, [x29, #16] /* save rvalue and flags */
+
+ /* Load the vector argument passing registers, if necessary. */
+ tbz x4, #AARCH64_FLAG_ARG_V_BIT, ffi_call_SYSV_L1
+ ldp q0, q1, [sp, #0]
+ ldp q2, q3, [sp, #32]
+ ldp q4, q5, [sp, #64]
+ ldp q6, q7, [sp, #96]
+
+ffi_call_SYSV_L1
+ /* Load the core argument passing registers, including
+ the structure return pointer. */
+ ldp x0, x1, [sp, #16*N_V_ARG_REG + 0]
+ ldp x2, x3, [sp, #16*N_V_ARG_REG + 16]
+ ldp x4, x5, [sp, #16*N_V_ARG_REG + 32]
+ ldp x6, x7, [sp, #16*N_V_ARG_REG + 48]
+
+ /* Deallocate the context, leaving the stacked arguments. */
+ add sp, sp, #CALL_CONTEXT_SIZE
+
+ blr x9 /* call fn */
+
+ ldp x3, x4, [x29, #16] /* reload rvalue and flags */
+
+ /* Partially deconstruct the stack frame. */
+ mov sp, x29
+ ldp x29, x30, [x29]
+
+ /* Save the return value as directed. */
+ adr x5, ffi_call_SYSV_return
+ and w4, w4, #AARCH64_RET_MASK
+ add x5, x5, x4, lsl #3
+ br x5
+
+ /* Note that each table entry is 2 insns, and thus 8 bytes.
+ For integer data, note that we're storing into ffi_arg
+ and therefore we want to extend to 64 bits; these types
+ have two consecutive entries allocated for them. */
+ ALIGN 4
+ffi_call_SYSV_return
+ ret /* VOID */
+ nop
+ str x0, [x3] /* INT64 */
+ ret
+ stp x0, x1, [x3] /* INT128 */
+ ret
+ brk #1000 /* UNUSED */
+ ret
+ brk #1000 /* UNUSED */
+ ret
+ brk #1000 /* UNUSED */
+ ret
+ brk #1000 /* UNUSED */
+ ret
+ brk #1000 /* UNUSED */
+ ret
+ st4 { v0.s, v1.s, v2.s, v3.s }[0], [x3] /* S4 */
+ ret
+ st3 { v0.s, v1.s, v2.s }[0], [x3] /* S3 */
+ ret
+ stp s0, s1, [x3] /* S2 */
+ ret
+ str s0, [x3] /* S1 */
+ ret
+ st4 { v0.d, v1.d, v2.d, v3.d }[0], [x3] /* D4 */
+ ret
+ st3 { v0.d, v1.d, v2.d }[0], [x3] /* D3 */
+ ret
+ stp d0, d1, [x3] /* D2 */
+ ret
+ str d0, [x3] /* D1 */
+ ret
+ str q3, [x3, #48] /* Q4 */
+ nop
+ str q2, [x3, #32] /* Q3 */
+ nop
+ stp q0, q1, [x3] /* Q2 */
+ ret
+ str q0, [x3] /* Q1 */
+ ret
+ uxtb w0, w0 /* UINT8 */
+ str x0, [x3]
+ ret /* reserved */
+ nop
+ uxth w0, w0 /* UINT16 */
+ str x0, [x3]
+ ret /* reserved */
+ nop
+ mov w0, w0 /* UINT32 */
+ str x0, [x3]
+ ret /* reserved */
+ nop
+ sxtb x0, w0 /* SINT8 */
+ str x0, [x3]
+ ret /* reserved */
+ nop
+ sxth x0, w0 /* SINT16 */
+ str x0, [x3]
+ ret /* reserved */
+ nop
+ sxtw x0, w0 /* SINT32 */
+ str x0, [x3]
+ ret /* reserved */
+ nop
+
+
+ NESTED_END ffi_call_SYSV_fake
+
+
+/* ffi_closure_SYSV
+ Closure invocation glue. This is the low level code invoked directly by
+ the closure trampoline to setup and call a closure.
+ On entry x17 points to a struct ffi_closure, x16 has been clobbered;
+ all other registers are preserved.
+ We allocate a call context and save the argument passing registers,
+ then invoke the generic C ffi_closure_SYSV_inner() function to do all
+ the real work, on return we load the result passing registers back from
+ the call context.
+*/
+
+#define ffi_closure_SYSV_FS (8*2 + CALL_CONTEXT_SIZE + 64)
+
+ NESTED_ENTRY ffi_closure_SYSV_V
+ PROLOG_SAVE_REG_PAIR x29, x30, #-ffi_closure_SYSV_FS!
+
+ /* Save the argument passing vector registers. */
+ stp q0, q1, [sp, #16 + 0]
+ stp q2, q3, [sp, #16 + 32]
+ stp q4, q5, [sp, #16 + 64]
+ stp q6, q7, [sp, #16 + 96]
+
+ b ffi_closure_SYSV_save_argument
+ NESTED_END ffi_closure_SYSV_V
+
+ NESTED_ENTRY ffi_closure_SYSV /* variant that does not save the vector argument registers */
+ PROLOG_SAVE_REG_PAIR x29, x30, #-ffi_closure_SYSV_FS! /* allocate frame, save fp/lr */
+
+ffi_closure_SYSV_save_argument /* common path; ffi_closure_SYSV_V branches here */
+ /* Save the argument passing core registers. */
+ stp x0, x1, [sp, #16 + 16*N_V_ARG_REG + 0]
+ stp x2, x3, [sp, #16 + 16*N_V_ARG_REG + 16]
+ stp x4, x5, [sp, #16 + 16*N_V_ARG_REG + 32]
+ stp x6, x7, [sp, #16 + 16*N_V_ARG_REG + 48]
+
+ /* Load ffi_closure_inner arguments. */
+ ldp PTR_REG(0), PTR_REG(1), [x17, #FFI_TRAMPOLINE_CLOSURE_OFFSET] /* load cif, fn */
+ ldr PTR_REG(2), [x17, #FFI_TRAMPOLINE_CLOSURE_OFFSET+PTR_SIZE*2] /* load user_data */
+
+do_closure /* also reached from ffi_go_closure_SYSV with x0-x2 pre-loaded */
+ add x3, sp, #16 /* load context */
+ add x4, sp, #ffi_closure_SYSV_FS /* load stack */
+ add x5, sp, #16+CALL_CONTEXT_SIZE /* load rvalue */
+ mov x6, x8 /* load struct_rval */
+
+ bl ffi_closure_SYSV_inner /* C worker; returns an AARCH64_RET_* code in w0 */
+
+ /* Load the return value as directed. */
+ adr x1, ffi_closure_SYSV_return_base
+ and w0, w0, #AARCH64_RET_MASK
+ add x1, x1, x0, lsl #3 /* index 8-byte table entries by return code */
+ add x3, sp, #16+CALL_CONTEXT_SIZE /* x3 = rvalue area for the table loads */
+ br x1
+
+ /* Note that each table entry is 2 insns, and thus 8 bytes. */
+ ALIGN 8
+ffi_closure_SYSV_return_base
+ b ffi_closure_SYSV_epilog /* VOID */
+ nop
+ ldr x0, [x3] /* INT64 */
+ b ffi_closure_SYSV_epilog
+ ldp x0, x1, [x3] /* INT128 */
+ b ffi_closure_SYSV_epilog
+ brk #1000 /* UNUSED */
+ nop
+ brk #1000 /* UNUSED */
+ nop
+ brk #1000 /* UNUSED */
+ nop
+ brk #1000 /* UNUSED */
+ nop
+ brk #1000 /* UNUSED */
+ nop
+ ldr s3, [x3, #12] /* S4 */
+ nop /* falls through to the S3 entry */
+ ldr s2, [x3, #8] /* S3 */
+ nop /* falls through to the S2 entry */
+ ldp s0, s1, [x3] /* S2 */
+ b ffi_closure_SYSV_epilog
+ ldr s0, [x3] /* S1 */
+ b ffi_closure_SYSV_epilog
+ ldr d3, [x3, #24] /* D4 */
+ nop /* falls through to the D3 entry */
+ ldr d2, [x3, #16] /* D3 */
+ nop /* falls through to the D2 entry */
+ ldp d0, d1, [x3] /* D2 */
+ b ffi_closure_SYSV_epilog
+ ldr d0, [x3] /* D1 */
+ b ffi_closure_SYSV_epilog
+ ldr q3, [x3, #48] /* Q4 */
+ nop /* falls through to the Q3 entry */
+ ldr q2, [x3, #32] /* Q3 */
+ nop /* falls through to the Q2 entry */
+ ldp q0, q1, [x3] /* Q2 */
+ b ffi_closure_SYSV_epilog
+ ldr q0, [x3] /* Q1 */
+ b ffi_closure_SYSV_epilog
+ ldrb w0, [x3, #BE(7)] /* UINT8 */
+ b ffi_closure_SYSV_epilog
+ brk #1000 /* reserved */
+ nop
+ ldrh w0, [x3, #BE(6)] /* UINT16 */
+ b ffi_closure_SYSV_epilog
+ brk #1000 /* reserved */
+ nop
+ ldr w0, [x3, #BE(4)] /* UINT32 */
+ b ffi_closure_SYSV_epilog
+ brk #1000 /* reserved */
+ nop
+ ldrsb x0, [x3, #BE(7)] /* SINT8 */
+ b ffi_closure_SYSV_epilog
+ brk #1000 /* reserved */
+ nop
+ ldrsh x0, [x3, #BE(6)] /* SINT16 */
+ b ffi_closure_SYSV_epilog
+ brk #1000 /* reserved */
+ nop
+ ldrsw x0, [x3, #BE(4)] /* SINT32 */
+ nop /* last entry: falls through to the epilog */
+ /* reserved */
+
+ffi_closure_SYSV_epilog
+ EPILOG_RESTORE_REG_PAIR x29, x30, #ffi_closure_SYSV_FS!
+ EPILOG_RETURN
+ NESTED_END ffi_closure_SYSV
+
+
+#ifdef FFI_GO_CLOSURES
+ NESTED_ENTRY ffi_go_closure_SYSV_V /* Go-closure entry that also spills the vector arg regs */
+ PROLOG_SAVE_REG_PAIR x29, x30, #-ffi_closure_SYSV_FS! /* allocate frame, save fp/lr */
+
+ /* Save the argument passing vector registers. */
+ stp q0, q1, [sp, #16 + 0]
+ stp q2, q3, [sp, #16 + 32]
+ stp q4, q5, [sp, #16 + 64]
+ stp q6, q7, [sp, #16 + 96]
+ b ffi_go_closure_SYSV_save_argument /* join the common core-register save path */
+ NESTED_END ffi_go_closure_SYSV_V
+
+ NESTED_ENTRY ffi_go_closure_SYSV /* variant that does not save the vector argument registers */
+ PROLOG_SAVE_REG_PAIR x29, x30, #-ffi_closure_SYSV_FS! /* allocate frame, save fp/lr */
+
+ffi_go_closure_SYSV_save_argument /* common path; ffi_go_closure_SYSV_V branches here */
+ /* Save the argument passing core registers. */
+ stp x0, x1, [sp, #16 + 16*N_V_ARG_REG + 0]
+ stp x2, x3, [sp, #16 + 16*N_V_ARG_REG + 16]
+ stp x4, x5, [sp, #16 + 16*N_V_ARG_REG + 32]
+ stp x6, x7, [sp, #16 + 16*N_V_ARG_REG + 48]
+
+ /* Load ffi_closure_inner arguments. */
+ /* NOTE(review): x18 is used as the Go static-chain register here, but on
+    Windows ARM64 x18 is reserved (TEB pointer) — confirm FFI_GO_CLOSURES
+    is never enabled for this target, or rework the chain register. */
+ ldp PTR_REG(0), PTR_REG(1), [x18, #PTR_SIZE] /* load cif, fn */
+ mov x2, x18 /* load user_data */
+ b do_closure /* shared tail in ffi_closure_SYSV */
+ NESTED_END ffi_go_closure_SYSV
+
+#endif /* FFI_GO_CLOSURES */
+
+
+/* void extend_hfa_type (void *dest, void *src, int h) */
+
+ LEAF_ENTRY extend_hfa_type /* expand packed HFA at src into 16-byte reg slots at dest */
+
+ adr x3, extend_hfa_type_jump_base
+ and w2, w2, #AARCH64_RET_MASK
+ sub x2, x2, #AARCH64_RET_S4 /* rebase so the S4 entry is index 0 */
+ add x3, x3, x2, lsl #4 /* 16 bytes (4 insns) per table entry */
+ br x3
+
+ ALIGN 4
+extend_hfa_type_jump_base
+ ldp s16, s17, [x1] /* S4: packed floats at 0/4/8/12 */
+ ldp s18, s19, [x1, #8]
+ b extend_hfa_type_store_4
+ nop
+
+ ldp s16, s17, [x1] /* S3 */
+ ldr s18, [x1, #8]
+ b extend_hfa_type_store_3
+ nop
+
+ ldp s16, s17, [x1] /* S2 */
+ b extend_hfa_type_store_2
+ nop
+ nop
+
+ ldr s16, [x1] /* S1 */
+ b extend_hfa_type_store_1
+ nop
+ nop
+
+ ldp d16, d17, [x1] /* D4: packed doubles at 0/8/16/24 */
+ ldp d18, d19, [x1, #16]
+ b extend_hfa_type_store_4
+ nop
+
+ ldp d16, d17, [x1] /* D3 */
+ ldr d18, [x1, #16]
+ b extend_hfa_type_store_3
+ nop
+
+ ldp d16, d17, [x1] /* D2 */
+ b extend_hfa_type_store_2
+ nop
+ nop
+
+ ldr d16, [x1] /* D1 */
+ b extend_hfa_type_store_1
+ nop
+ nop
+
+ ldp q16, q17, [x1] /* Q4: packed quads at 0/16/32/48 */
+ ldp q18, q19, [x1, #32] /* FIX: was #16, which re-read q16/q17's bytes */
+ b extend_hfa_type_store_4
+ nop
+
+ ldp q16, q17, [x1] /* Q3 */
+ ldr q18, [x1, #32] /* FIX: was #16; third quad lives at offset 32 */
+ b extend_hfa_type_store_3
+ nop
+
+ ldp q16, q17, [x1] /* Q2 */
+ b extend_hfa_type_store_2
+ nop
+ nop
+
+ ldr q16, [x1] /* Q1 */
+ b extend_hfa_type_store_1
+
+extend_hfa_type_store_4 /* store tails cascade: each falls into the next */
+ str q19, [x0, #48]
+extend_hfa_type_store_3
+ str q18, [x0, #32]
+extend_hfa_type_store_2
+ str q17, [x0, #16]
+extend_hfa_type_store_1
+ str q16, [x0]
+ ret
+
+ LEAF_END extend_hfa_type
+
+
+/* void compress_hfa_type (void *dest, void *reg, int h) */
+
+ LEAF_ENTRY compress_hfa_type /* pack HFA from 16-byte reg slots at src into dest */
+
+ adr x3, compress_hfa_type_jump_base
+ and w2, w2, #AARCH64_RET_MASK
+ sub x2, x2, #AARCH64_RET_S4 /* rebase so the S4 entry is index 0 */
+ add x3, x3, x2, lsl #4 /* 16 bytes (4 insns) per table entry */
+ br x3
+
+ ALIGN 4
+compress_hfa_type_jump_base
+ ldp q16, q17, [x1] /* S4: lane-0 stores pack four floats */
+ ldp q18, q19, [x1, #32]
+ st4 { v16.s, v17.s, v18.s, v19.s }[0], [x0]
+ ret
+
+ ldp q16, q17, [x1] /* S3 */
+ ldr q18, [x1, #32]
+ st3 { v16.s, v17.s, v18.s }[0], [x0]
+ ret
+
+ ldp q16, q17, [x1] /* S2 */
+ st2 { v16.s, v17.s }[0], [x0]
+ ret
+ nop
+
+ ldr q16, [x1] /* S1 */
+ st1 { v16.s }[0], [x0]
+ ret
+ nop
+
+ ldp q16, q17, [x1] /* D4: lane-0 stores pack four doubles */
+ ldp q18, q19, [x1, #32]
+ st4 { v16.d, v17.d, v18.d, v19.d }[0], [x0]
+ ret
+
+ ldp q16, q17, [x1] /* D3 */
+ ldr q18, [x1, #32]
+ st3 { v16.d, v17.d, v18.d }[0], [x0]
+ ret
+
+ ldp q16, q17, [x1] /* D2 */
+ st2 { v16.d, v17.d }[0], [x0]
+ ret
+ nop
+
+ ldr q16, [x1] /* D1 */
+ st1 { v16.d }[0], [x0]
+ ret
+ nop
+
+ ldp q16, q17, [x1] /* Q4: quads are already 16 bytes, plain stores */
+ ldp q18, q19, [x1, #32]
+ b compress_hfa_type_store_q4
+ nop
+
+ ldp q16, q17, [x1] /* Q3 */
+ ldr q18, [x1, #32]
+ b compress_hfa_type_store_q3
+ nop
+
+ ldp q16, q17, [x1] /* Q2 */
+ stp q16, q17, [x0]
+ ret
+ nop
+
+ ldr q16, [x1] /* Q1 */
+ str q16, [x0]
+ ret
+
+compress_hfa_type_store_q4 /* store tail shared by Q4/Q3 */
+ str q19, [x0, #48]
+compress_hfa_type_store_q3
+ str q18, [x0, #32]
+ stp q16, q17, [x0]
+ ret
+
+ LEAF_END compress_hfa_type
+
END
\ No newline at end of file