On Linux, when using binutils version 2.25.0, XMM instructions (e.g., movdqa) require specifying the size of the
operand (e.g., specifying xmmword ptr). With the current code, the assembler reports an error: invalid operand for instruction.
This commit changes movdqa occurrences to fix the problem.
#include <unixasmmacros.inc>
LEAF_ENTRY FPFillR8, _TEXT
#include <unixasmmacros.inc>
LEAF_ENTRY FPFillR8, _TEXT
+ movdqa xmm0, xmmword ptr [rdi]
ret
LEAF_END FPFillR8, _TEXT
ret
LEAF_END FPFillR8, _TEXT
#include "unixasmmacrosamd64.inc"
#elif defined(_ARM_)
#include "unixasmmacrosarm.inc"
#include "unixasmmacrosamd64.inc"
#elif defined(_ARM_)
#include "unixasmmacrosarm.inc"
-#endif
\ No newline at end of file
.macro save_xmm128_postrsp Reg, Offset
__Offset = \Offset
.macro save_xmm128_postrsp Reg, Offset
__Offset = \Offset
- movdqa [rsp + __Offset], \Reg
+ movdqa xmmword ptr [rsp + __Offset], \Reg
// NOTE: We cannot use ".cfi_rel_offset \Reg, __Offset" here,
// the xmm registers are not supported by the libunwind
.endm
.macro restore_xmm128 Reg, ofs
__Offset = \ofs
// NOTE: We cannot use ".cfi_rel_offset \Reg, __Offset" here,
// the xmm registers are not supported by the libunwind
.endm
.macro restore_xmm128 Reg, ofs
__Offset = \ofs
- movdqa \Reg, [rsp + __Offset]
+ movdqa \Reg, xmmword ptr [rsp + __Offset]
// NOTE: We cannot use ".cfi_restore \Reg" here,
// the xmm registers are not supported by the libunwind
// NOTE: We cannot use ".cfi_restore \Reg" here,
// the xmm registers are not supported by the libunwind
POP_ARGUMENT_REGISTERS
POP_CALLEE_SAVED_REGISTERS
POP_ARGUMENT_REGISTERS
POP_CALLEE_SAVED_REGISTERS
-.endm
\ No newline at end of file
mov r8, [rsp + TheUMEntryPrestub_STACK_FRAME_SIZE + 18h]
mov r9, [rsp + TheUMEntryPrestub_STACK_FRAME_SIZE + 20h]
mov r8, [rsp + TheUMEntryPrestub_STACK_FRAME_SIZE + 18h]
mov r9, [rsp + TheUMEntryPrestub_STACK_FRAME_SIZE + 20h]
- movdqa xmm0, [rsp + TheUMEntryPrestub_XMM_SAVE_OFFSET]
- movdqa xmm1, [rsp + TheUMEntryPrestub_XMM_SAVE_OFFSET + 10h]
- movdqa xmm2, [rsp + TheUMEntryPrestub_XMM_SAVE_OFFSET + 20h]
- movdqa xmm3, [rsp + TheUMEntryPrestub_XMM_SAVE_OFFSET + 30h]
+ movdqa xmm0, xmmword ptr [rsp + TheUMEntryPrestub_XMM_SAVE_OFFSET]
+ movdqa xmm1, xmmword ptr [rsp + TheUMEntryPrestub_XMM_SAVE_OFFSET + 10h]
+ movdqa xmm2, xmmword ptr [rsp + TheUMEntryPrestub_XMM_SAVE_OFFSET + 20h]
+ movdqa xmm3, xmmword ptr [rsp + TheUMEntryPrestub_XMM_SAVE_OFFSET + 30h]
; @CONSIDER: mark UMEntryThunks that have FP params and only save/restore xmm regs on those calls
; initial measurements indidcate that this could be worth about a 5% savings in reverse
; pinvoke overhead.
; @CONSIDER: mark UMEntryThunks that have FP params and only save/restore xmm regs on those calls
; initial measurements indidcate that this could be worth about a 5% savings in reverse
; pinvoke overhead.
- movdqa [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h], xmm0
- movdqa [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 10h], xmm1
- movdqa [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 20h], xmm2
- movdqa [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 30h], xmm3
+ movdqa xmmword ptr[rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h], xmm0
+ movdqa xmmword ptr[rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 10h], xmm1
+ movdqa xmmword ptr[rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 20h], xmm2
+ movdqa xmmword ptr[rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 30h], xmm3
mov [rbp + UMThunkStubAMD64_RARE_PATH_SPILL_OFFSET], METHODDESC_REGISTER
call CreateThreadBlockThrow
mov [rbp + UMThunkStubAMD64_RARE_PATH_SPILL_OFFSET], METHODDESC_REGISTER
call CreateThreadBlockThrow
mov r9, [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 18h]
; @CONSIDER: mark UMEntryThunks that have FP params and only save/restore xmm regs on those calls
mov r9, [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 18h]
; @CONSIDER: mark UMEntryThunks that have FP params and only save/restore xmm regs on those calls
- movdqa xmm0, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h]
- movdqa xmm1, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 10h]
- movdqa xmm2, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 20h]
- movdqa xmm3, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 30h]
+ movdqa xmm0, xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h]
+ movdqa xmm1, xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 10h]
+ movdqa xmm2, xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 20h]
+ movdqa xmm3, xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 30h]
; @CONSIDER: mark UMEntryThunks that have FP params and only save/restore xmm regs on those calls
; initial measurements indidcate that this could be worth about a 5% savings in reverse
; pinvoke overhead.
; @CONSIDER: mark UMEntryThunks that have FP params and only save/restore xmm regs on those calls
; initial measurements indidcate that this could be worth about a 5% savings in reverse
; pinvoke overhead.
- movdqa [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h], xmm0
- movdqa [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 10h], xmm1
- movdqa [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 20h], xmm2
- movdqa [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 30h], xmm3
+ movdqa xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h], xmm0
+ movdqa xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 10h], xmm1
+ movdqa xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 20h], xmm2
+ movdqa xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 30h], xmm3
mov [rbp + UMThunkStubAMD64_RARE_PATH_SPILL_OFFSET], METHODDESC_REGISTER
mov rcx, r12 ; Thread* pThread
mov [rbp + UMThunkStubAMD64_RARE_PATH_SPILL_OFFSET], METHODDESC_REGISTER
mov rcx, r12 ; Thread* pThread
mov r9, [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 18h]
; @CONSIDER: mark UMEntryThunks that have FP params and only save/restore xmm regs on those calls
mov r9, [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 18h]
; @CONSIDER: mark UMEntryThunks that have FP params and only save/restore xmm regs on those calls
- movdqa xmm0, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h]
- movdqa xmm1, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 10h]
- movdqa xmm2, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 20h]
- movdqa xmm3, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 30h]
+ movdqa xmm0, xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h]
+ movdqa xmm1, xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 10h]
+ movdqa xmm2, xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 20h]
+ movdqa xmm3, xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 30h]
; @CONSIDER: mark UMEntryThunks that have FP params and only save/restore xmm regs on those calls
; initial measurements indidcate that this could be worth about a 5% savings in reverse
; pinvoke overhead.
; @CONSIDER: mark UMEntryThunks that have FP params and only save/restore xmm regs on those calls
; initial measurements indidcate that this could be worth about a 5% savings in reverse
; pinvoke overhead.
- movdqa [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h], xmm0
- movdqa [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 10h], xmm1
- movdqa [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 20h], xmm2
- movdqa [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 30h], xmm3
+ movdqa xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h], xmm0
+ movdqa xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 10h], xmm1
+ movdqa xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 20h], xmm2
+ movdqa xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 30h], xmm3
mov rcx, r12
call ReverseEnterRuntimeHelper
mov rcx, r12
call ReverseEnterRuntimeHelper
mov r9, [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 18h]
; @CONSIDER: mark UMEntryThunks that have FP params and only save/restore xmm regs on those calls
mov r9, [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 18h]
; @CONSIDER: mark UMEntryThunks that have FP params and only save/restore xmm regs on those calls
- movdqa xmm0, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h]
- movdqa xmm1, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 10h]
- movdqa xmm2, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 20h]
- movdqa xmm3, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 30h]
+ movdqa xmm0, xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h]
+ movdqa xmm1, xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 10h]
+ movdqa xmm2, xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 20h]
+ movdqa xmm3, xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 30h]
mov METHODDESC_REGISTER, [rbp + UMThunkStubAMD64_RARE_PATH_SPILL_OFFSET]
mov METHODDESC_REGISTER, [rbp + UMThunkStubAMD64_RARE_PATH_SPILL_OFFSET]
; save rax, xmm0
mov [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 0h], rax
; save rax, xmm0
mov [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 0h], rax
- movdqa [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h], xmm0
+ movdqa xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h], xmm0
mov rcx, r12
call ReverseLeaveRuntimeHelper
mov rcx, r12
call ReverseLeaveRuntimeHelper
; restore rax, xmm0
mov rax, [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 0h]
; restore rax, xmm0
mov rax, [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 0h]
- movdqa xmm0, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h]
+ movdqa xmm0, xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h]
jmp Done_NotifyHost_ReverseLeaveRuntime
endif
jmp Done_NotifyHost_ReverseLeaveRuntime
endif
;
; save off xmm registers
;
;
; save off xmm registers
;
- movdqa [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h], xmm0
- movdqa [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 10h], xmm1
- movdqa [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 20h], xmm2
- movdqa [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 30h], xmm3
+ movdqa xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h], xmm0
+ movdqa xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 10h], xmm1
+ movdqa xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 20h], xmm2
+ movdqa xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 30h], xmm3
;
; call our helper to perform the AD transtion
;
; call our helper to perform the AD transtion
; restore return value
mov rax, [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 0h]
; restore return value
mov rax, [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 0h]
- movdqa xmm0, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h]
+ movdqa xmm0, xmmword ptr [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h]
mov r8, [rsi + 10h]
mov r9, [rsi + 18h]
mov r8, [rsi + 10h]
mov r9, [rsi + 18h]
- movdqa xmm0, [rsi + UMThunkStubAMD64_XMM_SAVE_OFFSET - UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 0h]
- movdqa xmm1, [rsi + UMThunkStubAMD64_XMM_SAVE_OFFSET - UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 10h]
- movdqa xmm2, [rsi + UMThunkStubAMD64_XMM_SAVE_OFFSET - UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 20h]
- movdqa xmm3, [rsi + UMThunkStubAMD64_XMM_SAVE_OFFSET - UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 30h]
+ movdqa xmm0, xmmword ptr [rsi + UMThunkStubAMD64_XMM_SAVE_OFFSET - UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 0h]
+ movdqa xmm1, xmmword ptr [rsi + UMThunkStubAMD64_XMM_SAVE_OFFSET - UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 10h]
+ movdqa xmm2, xmmword ptr [rsi + UMThunkStubAMD64_XMM_SAVE_OFFSET - UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 20h]
+ movdqa xmm3, xmmword ptr [rsi + UMThunkStubAMD64_XMM_SAVE_OFFSET - UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 30h]
mov rax, [METHODDESC_REGISTER + OFFSETOF__UMEntryThunk__m_pUMThunkMarshInfo] ; rax <- UMThunkMarshInfo*
mov rax, [rax + OFFSETOF__UMThunkMarshInfo__m_pILStub] ; rax <- Stub*
mov rax, [METHODDESC_REGISTER + OFFSETOF__UMEntryThunk__m_pUMThunkMarshInfo] ; rax <- UMThunkMarshInfo*
mov rax, [rax + OFFSETOF__UMThunkMarshInfo__m_pILStub] ; rax <- Stub*
; make sure we don't trash the return value
mov [rsi + 0h], rax
; make sure we don't trash the return value
mov [rsi + 0h], rax
- movdqa [rsi + UMThunkStubAMD64_XMM_SAVE_OFFSET - UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 0h], xmm0
+ movdqa xmmword ptr [rsi + UMThunkStubAMD64_XMM_SAVE_OFFSET - UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 0h], xmm0
lea rsp, [rbp - UM2MThunk_WrapperHelper_FRAME_OFFSET + UM2MThunk_WrapperHelper_FIXED_STACK_ALLOC_SIZE]
pop rbp
lea rsp, [rbp - UM2MThunk_WrapperHelper_FRAME_OFFSET + UM2MThunk_WrapperHelper_FIXED_STACK_ALLOC_SIZE]
pop rbp
- movdqa [rsp + 20h + 0h], xmm0
- movdqa [rsp + 20h + 10h], xmm1
- movdqa [rsp + 20h + 20h], xmm2
- movdqa [rsp + 20h + 30h], xmm3
+ movdqa xmmword ptr [rsp + 20h + 0h], xmm0
+ movdqa xmmword ptr [rsp + 20h + 10h], xmm1
+ movdqa xmmword ptr [rsp + 20h + 20h], xmm2
+ movdqa xmmword ptr [rsp + 20h + 30h], xmm3
call NDirect__IsHostHookEnabled
call NDirect__IsHostHookEnabled
- movdqa xmm0, [rsp + 20h + 0h]
- movdqa xmm1, [rsp + 20h + 10h]
- movdqa xmm2, [rsp + 20h + 20h]
- movdqa xmm3, [rsp + 20h + 30h]
+ movdqa xmm0, xmmword ptr [rsp + 20h + 0h]
+ movdqa xmm1, xmmword ptr [rsp + 20h + 10h]
+ movdqa xmm2, xmmword ptr [rsp + 20h + 20h]
+ movdqa xmm3, xmmword ptr [rsp + 20h + 30h]
; epilog
add rsp, IsHostHookEnabledHelper_FIXED_STACK_ALLOC_SIZE
; epilog
add rsp, IsHostHookEnabledHelper_FIXED_STACK_ALLOC_SIZE
mov rcx, METHODDESC_REGISTER
call IJWNOADThunk__FindThunkTarget
mov rcx, METHODDESC_REGISTER
call IJWNOADThunk__FindThunkTarget
- movdqa xmm0, [rsp + 20h]
- movdqa xmm1, [rsp + 30h]
- movdqa xmm2, [rsp + 40h]
- movdqa xmm3, [rsp + 50h]
+ movdqa xmm0, xmmword ptr [rsp + 20h]
+ movdqa xmm1, xmmword ptr [rsp + 30h]
+ movdqa xmm2, xmmword ptr [rsp + 40h]
+ movdqa xmm3, xmmword ptr [rsp + 50h]
mov rcx, [rsp + 70h]
mov rdx, [rsp + 78h]
mov rcx, [rsp + 70h]
mov rdx, [rsp + 78h]
mov rcx, [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_INT_ARG_OFFSET + 0x18]
mov r8, [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_INT_ARG_OFFSET + 0x20]
mov r9, [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_INT_ARG_OFFSET + 0x28]
mov rcx, [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_INT_ARG_OFFSET + 0x18]
mov r8, [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_INT_ARG_OFFSET + 0x20]
mov r9, [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_INT_ARG_OFFSET + 0x28]
- movdqa xmm0, [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_XMM_SAVE_OFFSET]
- movdqa xmm1, [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0x10]
- movdqa xmm2, [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0x20]
- movdqa xmm3, [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0x30]
- movdqa xmm4, [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0x40]
- movdqa xmm5, [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0x50]
- movdqa xmm6, [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0x60]
- movdqa xmm7, [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0x70]
+ movdqa xmm0, xmmword ptr [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_XMM_SAVE_OFFSET]
+ movdqa xmm1, xmmword ptr [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0x10]
+ movdqa xmm2, xmmword ptr [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0x20]
+ movdqa xmm3, xmmword ptr [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0x30]
+ movdqa xmm4, xmmword ptr [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0x40]
+ movdqa xmm5, xmmword ptr [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0x50]
+ movdqa xmm6, xmmword ptr [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0x60]
+ movdqa xmm7, xmmword ptr [rbp - UMThunkStubAMD64_RBP_OFFSET + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0x70]
mov rax, [r11 + OFFSETOF__UMThunkMarshInfo__m_pILStub] // rax <- Stub*
call rax
mov rax, [r11 + OFFSETOF__UMThunkMarshInfo__m_pILStub] // rax <- Stub*
call rax
// MOVDQA is not an atomic operation. You need to call this function in a crst.
// </NOTE>
LEAF_ENTRY moveOWord, _TEXT
// MOVDQA is not an atomic operation. You need to call this function in a crst.
// </NOTE>
LEAF_ENTRY moveOWord, _TEXT
- movdqu xmm0, [rdi]
- movdqu [rsi], xmm0
+ movdqu xmm0, xmmword ptr [rdi]
+ movdqu xmmword ptr [rsi], xmm0
ret
LEAF_END moveOWord, _TEXT
ret
LEAF_END moveOWord, _TEXT
alloc_stack 0x28
END_PROLOGUE
// First float return register
alloc_stack 0x28
END_PROLOGUE
// First float return register
+ movdqa xmmword ptr [rsp], xmm0
// Second float return register
// Second float return register
- movdqa [rsp+0x10], xmm1
+ movdqa xmmword ptr [rsp+0x10], xmm1
call C_FUNC(JIT_RareDisableHelperWorker)
call C_FUNC(JIT_RareDisableHelperWorker)
- movdqa xmm0, [rsp]
- movdqa xmm1, [rsp+0x10]
+ movdqa xmm0, xmmword ptr [rsp]
+ movdqa xmm1, xmmword ptr [rsp+0x10]
free_stack 0x28
pop_register rdx
pop_register rax
free_stack 0x28
pop_register rdx
pop_register rax