From fde334aec8c594fa2f3d057ea75e7e427b7c0757 Mon Sep 17 00:00:00 2001
From: Aurojit Panda
Date: Sun, 19 Jul 2015 00:27:23 -0700
Subject: [PATCH] Fix build failure when using binutils version 2.25.0

On Linux, when using binutils version 2.25.0, XMM instructions with
memory operands (e.g., movdqa) require the operand size to be stated
explicitly (e.g., xmmword ptr). Without it, the current code fails to
assemble with the error: invalid operand for instruction. This commit
adds the size specifier to the affected movdqa and movdqu occurrences
to fix the problem.
---
 src/debug/di/amd64/floatconversion.S |   2 +-
 src/pal/inc/unixasmmacros.inc        |   2 +-
 src/pal/inc/unixasmmacrosamd64.inc   |   6 +-
 src/vm/amd64/UMThunkStub.asm         | 104 +++++++++++++++++------------------
 src/vm/amd64/umthunkstub.S           |  16 +++---
 src/vm/amd64/unixasmhelpers.S        |  12 ++--
 6 files changed, 71 insertions(+), 71 deletions(-)

diff --git a/src/debug/di/amd64/floatconversion.S b/src/debug/di/amd64/floatconversion.S
index ae02176..f10e816 100644
--- a/src/debug/di/amd64/floatconversion.S
+++ b/src/debug/di/amd64/floatconversion.S
@@ -7,6 +7,6 @@
 #include
 
 LEAF_ENTRY FPFillR8, _TEXT
-    movdqa  xmm0, [rdi]
+    movdqa  xmm0, xmmword ptr [rdi]
     ret
 LEAF_END FPFillR8, _TEXT
diff --git a/src/pal/inc/unixasmmacros.inc b/src/pal/inc/unixasmmacros.inc
index 45a2642..f996f95 100644
--- a/src/pal/inc/unixasmmacros.inc
+++ b/src/pal/inc/unixasmmacros.inc
@@ -60,4 +60,4 @@
 #include "unixasmmacrosamd64.inc"
 #elif defined(_ARM_)
 #include "unixasmmacrosarm.inc"
-#endif
\ No newline at end of file
+#endif
diff --git a/src/pal/inc/unixasmmacrosamd64.inc b/src/pal/inc/unixasmmacrosamd64.inc
index 4689ad1..5ba65e3 100644
--- a/src/pal/inc/unixasmmacrosamd64.inc
+++ b/src/pal/inc/unixasmmacrosamd64.inc
@@ -96,14 +96,14 @@ C_FUNC(\Name\()_End):
 
 .macro save_xmm128_postrsp Reg, Offset
         __Offset = \Offset
-        movdqa  [rsp + __Offset], \Reg
+        movdqa  xmmword ptr [rsp + __Offset], \Reg
         // NOTE: We cannot use ".cfi_rel_offset \Reg, __Offset" here,
         // the xmm registers are not supported by the libunwind
 .endm
 
 .macro restore_xmm128 Reg, ofs
         __Offset = \ofs
-        movdqa  \Reg, [rsp + __Offset]
+        movdqa  \Reg, xmmword ptr [rsp + __Offset]
         // NOTE: We cannot use ".cfi_restore \Reg" here,
         // the xmm registers are not supported by the libunwind
 
@@ -302,4 +302,4 @@ C_FUNC(\Name\()_End):
 
         POP_ARGUMENT_REGISTERS
         POP_CALLEE_SAVED_REGISTERS
-.endm
\ No newline at end of file
+.endm
diff --git a/src/vm/amd64/UMThunkStub.asm b/src/vm/amd64/UMThunkStub.asm
index 9479f3f..3170257 100644
--- a/src/vm/amd64/UMThunkStub.asm
+++ b/src/vm/amd64/UMThunkStub.asm
@@ -83,10 +83,10 @@ endif
         mov     r8, [rsp + TheUMEntryPrestub_STACK_FRAME_SIZE + 18h]
         mov     r9, [rsp + TheUMEntryPrestub_STACK_FRAME_SIZE + 20h]
 
-        movdqa  xmm0, [rsp + TheUMEntryPrestub_XMM_SAVE_OFFSET]
-        movdqa  xmm1, [rsp + TheUMEntryPrestub_XMM_SAVE_OFFSET + 10h]
-        movdqa  xmm2, [rsp + TheUMEntryPrestub_XMM_SAVE_OFFSET + 20h]
-        movdqa  xmm3, [rsp + TheUMEntryPrestub_XMM_SAVE_OFFSET + 30h]
+        movdqa  xmm0, xmmword ptr [rsp + TheUMEntryPrestub_XMM_SAVE_OFFSET]
+        movdqa  xmm1, xmmword ptr [rsp + TheUMEntryPrestub_XMM_SAVE_OFFSET + 10h]
+        movdqa  xmm2, xmmword ptr [rsp + TheUMEntryPrestub_XMM_SAVE_OFFSET + 20h]
+        movdqa  xmm3, xmmword ptr [rsp + TheUMEntryPrestub_XMM_SAVE_OFFSET + 30h]
 
         ;
         ; epilogue
@@ -257,10 +257,10 @@ DoThreadSetup:
         ; @CONSIDER: mark UMEntryThunks that have FP params and only save/restore xmm regs on those calls
         ;            initial measurements indidcate that this could be worth about a 5% savings in reverse
         ;            pinvoke overhead.
-        movdqa  [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h], xmm0
-        movdqa  [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 10h], xmm1
-        movdqa  [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 20h], xmm2
-        movdqa  [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 30h], xmm3
+        movdqa  xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h], xmm0
+        movdqa  xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 10h], xmm1
+        movdqa  xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 20h], xmm2
+        movdqa  xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 30h], xmm3
 
         mov     [rbp + UMThunkStubAMD64_RARE_PATH_SPILL_OFFSET], METHODDESC_REGISTER
         call    CreateThreadBlockThrow
@@ -272,10 +272,10 @@ DoThreadSetup:
         mov     r9, [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 18h]
 
         ; @CONSIDER: mark UMEntryThunks that have FP params and only save/restore xmm regs on those calls
-        movdqa  xmm0, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h]
-        movdqa  xmm1, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 10h]
-        movdqa  xmm2, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 20h]
-        movdqa  xmm3, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 30h]
+        movdqa  xmm0, xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h]
+        movdqa  xmm1, xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 10h]
+        movdqa  xmm2, xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 20h]
+        movdqa  xmm3, xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 30h]
 
         jmp     HaveThread
 
@@ -289,10 +289,10 @@ DoTrapReturningThreadsTHROW:
         ; @CONSIDER: mark UMEntryThunks that have FP params and only save/restore xmm regs on those calls
         ;            initial measurements indidcate that this could be worth about a 5% savings in reverse
         ;            pinvoke overhead.
-        movdqa  [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h], xmm0
-        movdqa  [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 10h], xmm1
-        movdqa  [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 20h], xmm2
-        movdqa  [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 30h], xmm3
+        movdqa  xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h], xmm0
+        movdqa  xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 10h], xmm1
+        movdqa  xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 20h], xmm2
+        movdqa  xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 30h], xmm3
 
         mov     [rbp + UMThunkStubAMD64_RARE_PATH_SPILL_OFFSET], METHODDESC_REGISTER
         mov     rcx, r12                ; Thread* pThread
@@ -306,10 +306,10 @@ DoTrapReturningThreadsTHROW:
         mov     r9, [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 18h]
 
         ; @CONSIDER: mark UMEntryThunks that have FP params and only save/restore xmm regs on those calls
-        movdqa  xmm0, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h]
-        movdqa  xmm1, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 10h]
-        movdqa  xmm2, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 20h]
-        movdqa  xmm3, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 30h]
+        movdqa  xmm0, xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h]
+        movdqa  xmm1, xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 10h]
+        movdqa  xmm2, xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 20h]
+        movdqa  xmm3, xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 30h]
 
         jmp     InCooperativeMode
 
@@ -357,10 +357,10 @@ NotifyHost_ReverseEnterRuntime:
         ; @CONSIDER: mark UMEntryThunks that have FP params and only save/restore xmm regs on those calls
         ;            initial measurements indidcate that this could be worth about a 5% savings in reverse
         ;            pinvoke overhead.
-        movdqa  [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h], xmm0
-        movdqa  [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 10h], xmm1
-        movdqa  [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 20h], xmm2
-        movdqa  [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 30h], xmm3
+        movdqa  xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h], xmm0
+        movdqa  xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 10h], xmm1
+        movdqa  xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 20h], xmm2
+        movdqa  xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 30h], xmm3
 
         mov     rcx, r12
         call    ReverseEnterRuntimeHelper
@@ -372,10 +372,10 @@ NotifyHost_ReverseEnterRuntime:
         mov     r9, [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 18h]
 
         ; @CONSIDER: mark UMEntryThunks that have FP params and only save/restore xmm regs on those calls
-        movdqa  xmm0, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h]
-        movdqa  xmm1, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 10h]
-        movdqa  xmm2, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 20h]
-        movdqa  xmm3, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 30h]
+        movdqa  xmm0, xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h]
+        movdqa  xmm1, xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 10h]
+        movdqa  xmm2, xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 20h]
+        movdqa  xmm3, xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 30h]
 
         mov     METHODDESC_REGISTER, [rbp + UMThunkStubAMD64_RARE_PATH_SPILL_OFFSET]
 
@@ -385,7 +385,7 @@ NotifyHost_ReverseLeaveRuntime:
 
         ; save rax, xmm0
         mov     [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 0h], rax
-        movdqa  [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h], xmm0
+        movdqa  xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h], xmm0
 
         mov     rcx, r12
         call    ReverseLeaveRuntimeHelper
@@ -393,7 +393,7 @@ NotifyHost_ReverseLeaveRuntime:
 
         ; restore rax, xmm0
         mov     rax, [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 0h]
-        movdqa  xmm0, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h]
+        movdqa  xmm0, xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h]
 
         jmp     Done_NotifyHost_ReverseLeaveRuntime
 endif
@@ -410,10 +410,10 @@ WrongAppDomain:
         ;
         ; save off xmm registers
         ;
-        movdqa  [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h], xmm0
-        movdqa  [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 10h], xmm1
-        movdqa  [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 20h], xmm2
-        movdqa  [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 30h], xmm3
+        movdqa  xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h], xmm0
+        movdqa  xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 10h], xmm1
+        movdqa  xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 20h], xmm2
+        movdqa  xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 30h], xmm3
 
         ;
         ; call our helper to perform the AD transtion
@@ -426,7 +426,7 @@ WrongAppDomain:
 
         ; restore return value
         mov     rax, [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 0h]
-        movdqa  xmm0, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h]
+        movdqa  xmm0, xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h]
 
         jmp     PostCall
 
@@ -486,10 +486,10 @@ ArgumentsSetup:
         mov     r8, [rsi + 10h]
         mov     r9, [rsi + 18h]
 
-        movdqa  xmm0, [rsi + UMThunkStubAMD64_XMM_SAVE_OFFSET - UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 0h]
-        movdqa  xmm1, [rsi + UMThunkStubAMD64_XMM_SAVE_OFFSET - UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 10h]
-        movdqa  xmm2, [rsi + UMThunkStubAMD64_XMM_SAVE_OFFSET - UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 20h]
-        movdqa  xmm3, [rsi + UMThunkStubAMD64_XMM_SAVE_OFFSET - UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 30h]
+        movdqa  xmm0, xmmword ptr [rsi + UMThunkStubAMD64_XMM_SAVE_OFFSET - UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 0h]
+        movdqa  xmm1, xmmword ptr [rsi + UMThunkStubAMD64_XMM_SAVE_OFFSET - UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 10h]
+        movdqa  xmm2, xmmword ptr [rsi + UMThunkStubAMD64_XMM_SAVE_OFFSET - UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 20h]
+        movdqa  xmm3, xmmword ptr [rsi + UMThunkStubAMD64_XMM_SAVE_OFFSET - UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 30h]
 
         mov     rax, [METHODDESC_REGISTER + OFFSETOF__UMEntryThunk__m_pUMThunkMarshInfo]       ; rax <- UMThunkMarshInfo*
         mov     rax, [rax + OFFSETOF__UMThunkMarshInfo__m_pILStub]                             ; rax <- Stub*
@@ -497,7 +497,7 @@ ArgumentsSetup:
 
         ; make sure we don't trash the return value
         mov     [rsi + 0h], rax
-        movdqa  [rsi + UMThunkStubAMD64_XMM_SAVE_OFFSET - UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 0h], xmm0
+        movdqa  xmmword ptr [rsi + UMThunkStubAMD64_XMM_SAVE_OFFSET - UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 0h], xmm0
 
         lea     rsp, [rbp - UM2MThunk_WrapperHelper_FRAME_OFFSET + UM2MThunk_WrapperHelper_FIXED_STACK_ALLOC_SIZE]
         pop     rbp
@@ -546,17 +546,17 @@ IsHostHookEnabledHelper_FIXED_STACK_ALLOC_SIZE = 20h + 40h
 
         END_PROLOGUE
 
-        movdqa  [rsp + 20h + 0h], xmm0
-        movdqa  [rsp + 20h + 10h], xmm1
-        movdqa  [rsp + 20h + 20h], xmm2
-        movdqa  [rsp + 20h + 30h], xmm3
+        movdqa  xmmword ptr [rsp + 20h + 0h], xmm0
+        movdqa  xmmword ptr [rsp + 20h + 10h], xmm1
+        movdqa  xmmword ptr [rsp + 20h + 20h], xmm2
+        movdqa  xmmword ptr [rsp + 20h + 30h], xmm3
 
         call    NDirect__IsHostHookEnabled
 
-        movdqa  xmm0, [rsp + 20h + 0h]
-        movdqa  xmm1, [rsp + 20h + 10h]
-        movdqa  xmm2, [rsp + 20h + 20h]
-        movdqa  xmm3, [rsp + 20h + 30h]
+        movdqa  xmm0, xmmword ptr [rsp + 20h + 0h]
+        movdqa  xmm1, xmmword ptr [rsp + 20h + 10h]
+        movdqa  xmm2, xmmword ptr [rsp + 20h + 20h]
+        movdqa  xmm3, xmmword ptr [rsp + 20h + 30h]
 
         ; epilog
         add     rsp, IsHostHookEnabledHelper_FIXED_STACK_ALLOC_SIZE
@@ -591,10 +591,10 @@ NESTED_ENTRY IJWNOADThunk__MakeCall, _TEXT
         mov     rcx, METHODDESC_REGISTER
         call    IJWNOADThunk__FindThunkTarget
 
-        movdqa  xmm0, [rsp + 20h]
-        movdqa  xmm1, [rsp + 30h]
-        movdqa  xmm2, [rsp + 40h]
-        movdqa  xmm3, [rsp + 50h]
+        movdqa  xmm0, xmmword ptr [rsp + 20h]
+        movdqa  xmm1, xmmword ptr [rsp + 30h]
+        movdqa  xmm2, xmmword ptr [rsp + 40h]
+        movdqa  xmm3, xmmword ptr [rsp + 50h]
 
         mov     rcx, [rsp + 70h]
         mov     rdx, [rsp + 78h]
diff --git a/src/vm/amd64/umthunkstub.S b/src/vm/amd64/umthunkstub.S
index 9f1fb6d..a649c2c 100644
--- a/src/vm/amd64/umthunkstub.S
+++ b/src/vm/amd64/umthunkstub.S
@@ -123,14 +123,14 @@ LOCAL_LABEL(UMThunkStub_ArgumentsSetup):
         mov     rcx, [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_INT_ARG_OFFSET + 0x18]
         mov     r8, [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_INT_ARG_OFFSET + 0x20]
         mov     r9, [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_INT_ARG_OFFSET + 0x28]
-        movdqa  xmm0, [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_XMM_SAVE_OFFSET]
-        movdqa  xmm1, [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0x10]
-        movdqa  xmm2, [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0x20]
-        movdqa  xmm3, [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0x30]
-        movdqa  xmm4, [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0x40]
-        movdqa  xmm5, [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0x50]
-        movdqa  xmm6, [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0x60]
-        movdqa  xmm7, [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0x70]
+        movdqa  xmm0, xmmword ptr [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_XMM_SAVE_OFFSET]
+        movdqa  xmm1, xmmword ptr [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0x10]
+        movdqa  xmm2, xmmword ptr [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0x20]
+        movdqa  xmm3, xmmword ptr [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0x30]
+        movdqa  xmm4, xmmword ptr [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0x40]
+        movdqa  xmm5, xmmword ptr [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0x50]
+        movdqa  xmm6, xmmword ptr [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0x60]
+        movdqa  xmm7, xmmword ptr [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0x70]
 
         mov     rax, [r11 + OFFSETOF__UMThunkMarshInfo__m_pILStub]     // rax <- Stub*
         call    rax
diff --git a/src/vm/amd64/unixasmhelpers.S b/src/vm/amd64/unixasmhelpers.S
index aaee562..5055a91 100644
--- a/src/vm/amd64/unixasmhelpers.S
+++ b/src/vm/amd64/unixasmhelpers.S
@@ -137,8 +137,8 @@ NESTED_END NDirectImportThunk, _TEXT
 
 // MOVDQA is not an atomic operation.  You need to call this function in a crst.
 //
 LEAF_ENTRY moveOWord, _TEXT
-        movdqu  xmm0, [rdi]
-        movdqu  [rsi], xmm0
+        movdqu  xmm0, xmmword ptr [rdi]
+        movdqu  xmmword ptr [rsi], xmm0
         ret
 LEAF_END moveOWord, _TEXT
@@ -157,14 +157,14 @@ NESTED_ENTRY JIT_RareDisableHelper, _TEXT, NoHandler
         alloc_stack     0x28
         END_PROLOGUE
         // First float return register
-        movdqa  [rsp], xmm0
+        movdqa  xmmword ptr [rsp], xmm0
         // Second float return register
-        movdqa  [rsp+0x10], xmm1
+        movdqa  xmmword ptr [rsp+0x10], xmm1
 
         call    C_FUNC(JIT_RareDisableHelperWorker)
 
-        movdqa  xmm0, [rsp]
-        movdqa  xmm1, [rsp+0x10]
+        movdqa  xmm0, xmmword ptr [rsp]
+        movdqa  xmm1, xmmword ptr [rsp+0x10]
         free_stack      0x28
         pop_register    rdx
         pop_register    rax
-- 
2.7.4
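
P.S. For anyone who wants to reproduce the assembler behavior in
isolation, a minimal sketch follows. The file name repro.S and the bare
driver invocation are illustrative assumptions, not part of the patch or
of the CoreCLR build; any of the .S files touched above shows the same
failure.

    // repro.S -- illustrative only; assemble with the affected
    // toolchain, e.g.: clang -c repro.S
    .intel_syntax noprefix
    .text
    .globl load_oword
    load_oword:
        // movdqa  xmm0, [rdi]           // rejected: "invalid operand for instruction"
        movdqa  xmm0, xmmword ptr [rdi]  // accepted: 16-byte operand size stated explicitly
        ret

Both spellings should encode to the same instruction; the xmmword ptr
annotation only states the 128-bit operand size explicitly instead of
leaving the assembler to infer it from the xmm register operand.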