1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
5 // File: DllImportCallback.cpp
16 #include "dllimportcallback.h"
18 #include "comdelegate.h"
21 #include "dbginterface.h"
23 #include "appdomain.inl"
25 #ifndef CROSSGEN_COMPILE
29 UMEntryThunk *pEntryThunk;
// A simple singly-linked free list used to recycle UMEntryThunk blocks rather
// than returning their executable memory to the heap immediately. Thunks are
// appended at the tail (AddToList) and popped from the head (GetUMEntryThunk),
// and are only handed back out once the list has grown past m_threshold
// entries — this delays reuse so that calls through a stale (collected)
// delegate's thunk are more likely to hit the original thunk and fail loudly.
// NOTE(review): several interior lines (ctor init list, locals, braces) are
// elided in this view; the description above reflects only the visible code.
35 class UMEntryThunkFreeList
// Create the list with the given reuse-delay threshold.
38 UMEntryThunkFreeList(size_t threshold) :
39 m_threshold(threshold),
// Leaf lock, safe to take in any thread mode (preemptive or cooperative).
46 m_crst.Init(CrstLeafLock, CRST_UNSAFE_ANYMODE);
// Pop a recycled thunk from the head, or bail out while the list is still
// below the reuse threshold (the early-return path is elided here —
// presumably it yields NULL; confirm against the full source).
49 UMEntryThunk *GetUMEntryThunk()
53 if (m_count < m_threshold)
// All list manipulation happens under the lock.
56 CrstHolder ch(&m_crst);
58 UMEntryThunk *pThunk = m_pHead;
63 m_pHead = m_pHead->m_pNextFreeThunk;
// Append a freed thunk at the tail of the list.
69 void AddToList(UMEntryThunk *pThunk)
77 CrstHolder ch(&m_crst);
86 m_pTail->m_pNextFreeThunk = pThunk;
// Tail entry always terminates the chain.
90 pThunk->m_pNextFreeThunk = NULL;
96 // Used to delay reusing freed thunks
99 UMEntryThunk *m_pHead;
100 UMEntryThunk *m_pTail;
104 #define DEFAULT_THUNK_FREE_LIST_THRESHOLD 64
106 static UMEntryThunkFreeList s_thunkFreeList(DEFAULT_THUNK_FREE_LIST_THRESHOLD);
108 #if defined(_TARGET_X86_) && !defined(FEATURE_STUBS_AS_IL)
110 EXTERN_C VOID __cdecl UMThunkStubRareDisable();
111 EXTERN_C Thread* __stdcall CreateThreadBlockThrow();
113 // argument stack offsets are multiple of sizeof(SLOT) so we can tag them by OR'ing with 1
114 static_assert_no_msg((sizeof(SLOT) & 1) == 0);
115 #define MAKE_BYVAL_STACK_OFFSET(x) (x)
116 #define MAKE_BYREF_STACK_OFFSET(x) ((x) | 1)
117 #define IS_BYREF_STACK_OFFSET(x) ((x) & 1)
118 #define GET_STACK_OFFSET(x) ((x) & ~1)
121 #define UNUSED_STACK_OFFSET (UINT)-1
// Emits the x86 unmanaged-to-managed (reverse P/Invoke) entry thunk for one
// signature, using the CPUSTUBLINKER to generate machine code.
//
// On entry to the *generated* code, EAX holds the UMEntryThunk*. The emitted
// stub, in order: normalizes thiscall to stdcall, builds an EBP frame,
// obtains (or lazily creates) the Thread, optionally notifies the profiler,
// switches to cooperative GC mode, installs an SEH handler, repushes stack
// arguments / loads register arguments as described by psrcofs/psrcofsregs,
// dispatches to the IL stub, the delegate target, or the managed method,
// saves the return value, then unwinds everything and returns popping
// pInfo->m_cbRetPop bytes.
//
// pInfo        - flags (thiscall, static, skip-stub, FPU return, ...) and
//                src/dst stack sizes for this signature
// psrcofsregs  - per argument register: incoming stack offset to load from,
//                or UNUSED_STACK_OFFSET; low bit tags byref (see
//                MAKE_BYREF_STACK_OFFSET)
// psrcofs      - per outgoing stack slot: incoming stack offset, same tagging
// retbufofs    - offset of the return-buffer pointer arg, or UNUSED_STACK_OFFSET
//
// NOTE(review): many interior lines (braces, else-arms, a few statements) are
// elided in this view; comments added below describe only visible code.
124 VOID UMEntryThunk::CompileUMThunkWorker(UMThunkStubInfo *pInfo,
125 CPUSTUBLINKER *pcpusl,
126 UINT *psrcofsregs, // NUM_ARGUMENT_REGISTERS elements
127 UINT *psrcofs, // pInfo->m_cbDstStack/STACK_ELEM_SIZE elements
128 UINT retbufofs) // the large structure return buffer ptr arg offset (if any)
130 STANDARD_VM_CONTRACT;
// Forward labels for the rare paths (thread creation, GC-disable slow path)
// and their rejoin points in the mainline.
132 CodeLabel* pSetupThreadLabel = pcpusl->NewCodeLabel();
133 CodeLabel* pRejoinThreadLabel = pcpusl->NewCodeLabel();
134 CodeLabel* pDisableGCLabel = pcpusl->NewCodeLabel();
135 CodeLabel* pRejoinGCLabel = pcpusl->NewCodeLabel();
137 // We come into this code with UMEntryThunk in EAX
138 const X86Reg kEAXentryThunk = kEAX;
140 // For ThisCall, we make it look like a normal stdcall so that
141 // the rest of the code (like repushing the arguments) does not
142 // have to worry about it.
144 if (pInfo->m_wFlags & umtmlThisCall)
146 // pop off the return address into EDX
147 pcpusl->X86EmitPopReg(kEDX);
149 if (pInfo->m_wFlags & umtmlThisCallHiddenArg)
151 // exchange ecx ( "this") with the hidden structure return buffer
153 pcpusl->X86EmitOp(0x87, kECX, (X86Reg)4 /*ESP*/);
156 // jam ecx (the "this" param onto stack. Now it looks like a normal stdcall.)
157 pcpusl->X86EmitPushReg(kECX);
159 // push edx - repush the return address
160 pcpusl->X86EmitPushReg(kEDX);
163 // Setup the EBP frame
164 pcpusl->X86EmitPushEBPframe();
// Preserve callee-saved EBX, which the stub uses as the argument base pointer.
167 pcpusl->X86EmitPushReg(kEBX);
169 // Make space for return value - instead of repeatedly doing push eax edx <trash regs> pop edx eax
170 // we will save the return value once and restore it just before returning.
171 pcpusl->X86EmitSubEsp(sizeof(PCONTEXT(NULL)->Eax) + sizeof(PCONTEXT(NULL)->Edx));
173 // Load thread descriptor into ECX
174 const X86Reg kECXthread = kECX;
// Save the UMEntryThunk pointer across the thread-lookup sequence.
177 pcpusl->X86EmitPushReg(kEAXentryThunk);
// EmitSetup branches to pSetupThreadLabel when no Thread exists yet.
179 pcpusl->EmitSetup(pSetupThreadLabel);
181 pcpusl->X86EmitMovRegReg(kECX, kEBX);
// The thread-creation rare path rejoins here with the Thread* in ECX.
183 pcpusl->EmitLabel(pRejoinThreadLabel);
185 // restore UMEntryThunk
186 pcpusl->X86EmitPopReg(kEAXentryThunk);
189 // Save incoming registers
190 pcpusl->X86EmitPushReg(kEAXentryThunk); // UMEntryThunk
191 pcpusl->X86EmitPushReg(kECXthread); // thread descriptor
// Log the unmanaged->managed transition (debug/diagnostic hook); __stdcall
// callee pops its 4-byte argument.
193 pcpusl->X86EmitPushReg(kEAXentryThunk);
194 pcpusl->X86EmitCall(pcpusl->NewExternalCodeLabel((LPVOID) LogUMTransition), 4);
// Restore the registers saved above.
197 pcpusl->X86EmitPopReg(kECXthread);
198 pcpusl->X86EmitPopReg(kEAXentryThunk);
201 #ifdef PROFILING_SUPPORTED
202 // Notify profiler of transition into runtime, before we disable preemptive GC
203 if (CORProfilerTrackTransitions())
205 // Load the methoddesc into EBX (UMEntryThunk->m_pMD)
206 pcpusl->X86EmitIndexRegLoad(kEBX, kEAXentryThunk, UMEntryThunk::GetOffsetOfMethodDesc());
209 pcpusl->X86EmitPushReg(kEAXentryThunk); // UMEntryThunk
210 pcpusl->X86EmitPushReg(kECXthread); // pCurThread
212 // Push arguments and notify profiler
213 pcpusl->X86EmitPushImm32(COR_PRF_TRANSITION_CALL); // Reason
214 pcpusl->X86EmitPushReg(kEBX); // MethodDesc*
215 pcpusl->X86EmitCall(pcpusl->NewExternalCodeLabel((LPVOID)ProfilerUnmanagedToManagedTransitionMD), 8);
218 pcpusl->X86EmitPopReg(kECXthread);
219 pcpusl->X86EmitPopReg(kEAXentryThunk);
221 // Push the MethodDesc* (in EBX) for use by the transition on the way out.
222 pcpusl->X86EmitPushReg(kEBX);
224 #endif // PROFILING_SUPPORTED
// Switch the thread to cooperative GC mode; jumps to pDisableGCLabel on the
// rare path (e.g. a GC is in progress) and rejoins below.
226 pcpusl->EmitDisable(pDisableGCLabel, TRUE, kECXthread);
228 pcpusl->EmitLabel(pRejoinGCLabel);
230 // construct a FrameHandlerExRecord
232 // push [ECX]Thread.m_pFrame - corresponding to FrameHandlerExRecord::m_pEntryFrame
233 pcpusl->X86EmitIndexPush(kECXthread, offsetof(Thread, m_pFrame));
235 // push offset FastNExportExceptHandler
236 pcpusl->X86EmitPushImm32((INT32)(size_t)FastNExportExceptHandler);
// push dword ptr fs:[0] - the previous SEH record
239 const static BYTE codeSEH1[] = { 0x64, 0xFF, 0x35, 0x0, 0x0, 0x0, 0x0};
240 pcpusl->EmitBytes(codeSEH1, sizeof(codeSEH1));
242 // link in the exception frame
243 // mov dword ptr fs:[0], esp
244 const static BYTE codeSEH2[] = { 0x64, 0x89, 0x25, 0x0, 0x0, 0x0, 0x0};
245 pcpusl->EmitBytes(codeSEH2, sizeof(codeSEH2));
247 // EBX will hold address of start of arguments. Calculate here so the AD switch case can access
248 // the arguments at their original location rather than re-copying them to the inner frame.
249 // lea ebx, [ebp + 8]
250 pcpusl->X86EmitIndexLea(kEBX, kEBP, 8);
253 // ----------------------------------------------------------------------------------------------
255 // From this point on (until noted) we might be executing as the result of calling into the
256 // runtime in order to switch AppDomain. In order for the following code to function in both
257 // scenarios it must be careful when making assumptions about the current stack layout (in the AD
258 // switch case a new inner frame has been pushed which is not identical to the original outer
261 // Our guaranteed state at this point is as follows:
262 //   EAX: Pointer to UMEntryThunk
263 //   EBX: Pointer to start of caller's arguments
264 //   ECX: Pointer to current Thread
265 //   EBP: Equals EBX - 8 (no AD switch) or unspecified (AD switch)
269 // +-------------------------+
275 // +-------------------------+
276 // EBX - 20   | Saved Result: EDX/ST(0) |
277 // +- - - - - - - - - - - - -+
278 // EBX - 16   | Saved Result: EAX/ST(0) |
279 // +-------------------------+
280 // EBX - 12   | Caller's EBX            |
281 // +-------------------------+
282 // EBX - 8    | Caller's EBP            |
283 // +-------------------------+
284 // EBX - 4    | Return address          |
285 // +-------------------------+
288 //            | Caller's arguments      |
291 // +-------------------------+
294 // save the thread pointer
295 pcpusl->X86EmitPushReg(kECXthread);
297 // reserve the space for call slot
298 pcpusl->X86EmitSubEsp(4);
300 // remember stack size for offset computations
301 INT iStackSizeAtCallSlot = pcpusl->GetStackSize();
303 if (!(pInfo->m_wFlags & umtmlSkipStub))
305 // save EDI (it's used by the IL stub invocation code)
306 pcpusl->X86EmitPushReg(kEDI);
309 // repush any stack arguments
// Iterate outgoing stack slots last-to-first so pushes land in call order.
310 int arg = pInfo->m_cbDstStack/STACK_ELEM_SIZE;
314 if (IS_BYREF_STACK_OFFSET(psrcofs[arg]))
316 // lea ecx, [ebx + ofs]
317 pcpusl->X86EmitIndexLea(kECX, kEBX, GET_STACK_OFFSET(psrcofs[arg]));
// Byref slot: pass the *address* of the incoming stack slot.
320 pcpusl->X86EmitPushReg(kECX);
324 // push dword ptr [ebx + ofs]
325 pcpusl->X86EmitIndexPush(kEBX, GET_STACK_OFFSET(psrcofs[arg]));
329 // load register arguments
// Expanded once per argument register by ENUM_ARGUMENT_REGISTERS_BACKWARD;
// loads either the incoming slot's address (byref) or its value (byval).
332 #define ARGUMENT_REGISTER(regname) \
333 if (psrcofsregs[regidx] != UNUSED_STACK_OFFSET) \
335 if (IS_BYREF_STACK_OFFSET(psrcofsregs[regidx])) \
337 /* lea reg, [ebx + ofs] */ \
338 pcpusl->X86EmitIndexLea(k##regname, kEBX, GET_STACK_OFFSET(psrcofsregs[regidx])); \
342 /* mov reg, [ebx + ofs] */ \
343 pcpusl->X86EmitIndexRegLoad(k##regname, kEBX, GET_STACK_OFFSET(psrcofsregs[regidx])); \
348 ENUM_ARGUMENT_REGISTERS_BACKWARD();
350 #undef ARGUMENT_REGISTER
// --- Dispatch: three mutually exclusive call targets. ---
352 if (!(pInfo->m_wFlags & umtmlSkipStub))
355 // Call the IL stub which will:
357 // 2) call the managed method
361 // the delegate object is extracted by the stub from UMEntryThunk
362 _ASSERTE(pInfo->m_wFlags & umtmlIsStatic);
364 // mov EDI, [EAX + UMEntryThunk.m_pUMThunkMarshInfo]
365 pcpusl->X86EmitIndexRegLoad(kEDI, kEAXentryThunk, offsetof(UMEntryThunk, m_pUMThunkMarshInfo));
367 // mov EDI, [EDI + UMThunkMarshInfo.m_pILStub]
368 pcpusl->X86EmitIndexRegLoad(kEDI, kEDI, UMThunkMarshInfo::GetOffsetOfStub());
370 // EAX still contains the UMEntryThunk pointer, so we cannot really use SCRATCHREG
371 // we can use EDI, though
373 INT iCallSlotOffset = pcpusl->GetStackSize() - iStackSizeAtCallSlot;
375 // mov [ESP+iCallSlotOffset], EDI
376 pcpusl->X86EmitIndexRegStore((X86Reg)kESP_Unsafe, iCallSlotOffset, kEDI);
378 // call [ESP+iCallSlotOffset]
379 pcpusl->X86EmitOp(0xff, (X86Reg)2, (X86Reg)kESP_Unsafe, iCallSlotOffset);
381 // Emit a NOP so we know that we can call managed code
382 INDEBUG(pcpusl->Emit8(X86_INSTR_NOP));
// Restore EDI saved before the argument repush.
385 pcpusl->X86EmitPopReg(kEDI);
387 else if (!(pInfo->m_wFlags & umtmlIsStatic))
390 // This is call on delegate
393 // mov THIS, [EAX + UMEntryThunk.m_pObjectHandle]
394 pcpusl->X86EmitOp(0x8b, THIS_kREG, kEAXentryThunk, offsetof(UMEntryThunk, m_pObjectHandle));
// Dereference the handle to get the delegate object itself.
397 pcpusl->X86EmitOp(0x8b, THIS_kREG, THIS_kREG);
400 // Inline Delegate.Invoke for perf
403 // mov SCRATCHREG, [THISREG + Delegate.FP] ; Save target stub in register
404 pcpusl->X86EmitIndexRegLoad(SCRATCH_REGISTER_X86REG, THIS_kREG, DelegateObject::GetOffsetOfMethodPtr());
406 // mov THISREG, [THISREG + Delegate.OR] ; replace "this" pointer
407 pcpusl->X86EmitIndexRegLoad(THIS_kREG, THIS_kREG, DelegateObject::GetOffsetOfTarget());
409 INT iCallSlotOffset = pcpusl->GetStackSize() - iStackSizeAtCallSlot;
411 // mov [ESP+iCallSlotOffset], SCRATCHREG
412 pcpusl->X86EmitIndexRegStore((X86Reg)kESP_Unsafe,iCallSlotOffset,SCRATCH_REGISTER_X86REG);
414 // call [ESP+iCallSlotOffset]
415 pcpusl->X86EmitOp(0xff, (X86Reg)2, (X86Reg)kESP_Unsafe, iCallSlotOffset);
417 INDEBUG(pcpusl->Emit8(X86_INSTR_NOP)); // Emit a NOP so we know that we can call managed code
422 // Call the managed method
425 INT iCallSlotOffset = pcpusl->GetStackSize() - iStackSizeAtCallSlot;
427 // mov SCRATCH, [SCRATCH + offsetof(UMEntryThunk.m_pManagedTarget)]
428 pcpusl->X86EmitIndexRegLoad(SCRATCH_REGISTER_X86REG, SCRATCH_REGISTER_X86REG, offsetof(UMEntryThunk, m_pManagedTarget));
430 // mov [ESP+iCallSlotOffset], SCRATCHREG
431 pcpusl->X86EmitIndexRegStore((X86Reg)kESP_Unsafe, iCallSlotOffset, SCRATCH_REGISTER_X86REG);
433 // call [ESP+iCallSlotOffset]
434 pcpusl->X86EmitOp(0xff, (X86Reg)2, (X86Reg)kESP_Unsafe, iCallSlotOffset);
436 INDEBUG(pcpusl->Emit8(X86_INSTR_NOP)); // Emit a NOP so we know that we can call managed code
439 // skip the call slot
440 pcpusl->X86EmitAddEsp(4);
442 // Save the return value to the outer frame
443 if (pInfo->m_wFlags & umtmlFpu)
445 // save FP return value
447 // fstp qword ptr [ebx - 0x8 - 0xc]
448 pcpusl->X86EmitOffsetModRM(0xdd, (X86Reg)3, kEBX, -0x8 /* to outer EBP */ -0xc /* skip saved EBP, EBX */);
453 if (retbufofs == UNUSED_STACK_OFFSET)
// Ordinary EAX:EDX return — stash both into the reserved result slots.
455 pcpusl->X86EmitIndexRegStore(kEBX, -0x8 /* to outer EBP */ -0x8 /* skip saved EBP, EBX */, kEAX);
456 pcpusl->X86EmitIndexRegStore(kEBX, -0x8 /* to outer EBP */ -0xc /* skip saved EBP, EBX, EAX */, kEDX);
460 // pretend that the method returned the ret buf hidden argument
461 // (the structure ptr); C++ compiler seems to rely on this
463 // mov dword ptr eax, [ebx + retbufofs]
464 pcpusl->X86EmitIndexRegLoad(kEAX, kEBX, retbufofs);
466 // save it as the return value
467 pcpusl->X86EmitIndexRegStore(kEBX, -0x8 /* to outer EBP */ -0x8 /* skip saved EBP, EBX */, kEAX);
471 // restore the thread pointer
472 pcpusl->X86EmitPopReg(kECXthread);
475 // Once we reach this point in the code we're back to a single scenario: the outer frame of the
478 // ----------------------------------------------------------------------------------------------
// Return the thread to preemptive GC mode.
481 // move byte ptr [ecx + Thread.m_fPreemptiveGCDisabled],0
482 pcpusl->X86EmitOffsetModRM(0xc6, (X86Reg)0, kECXthread, Thread::GetOffsetOfGCFlag());
485 CodeLabel *pRareEnable, *pEnableRejoin;
486 pRareEnable = pcpusl->NewCodeLabel();
487 pEnableRejoin = pcpusl->NewCodeLabel();
489 // test byte ptr [ecx + Thread.m_State], TS_CatchAtSafePoint
490 pcpusl->X86EmitOffsetModRM(0xf6, (X86Reg)0, kECXthread, Thread::GetOffsetOfState());
491 pcpusl->Emit8(Thread::TS_CatchAtSafePoint);
// Take the rare-enable slow path if the thread must stop at a safe point.
493 pcpusl->X86EmitCondJump(pRareEnable,X86CondCode::kJNZ);
495 pcpusl->EmitLabel(pEnableRejoin);
497 // *** unhook SEH frame
499 // mov edx,[esp] ;;pointer to the next exception record
500 pcpusl->X86EmitEspOffset(0x8B, kEDX, 0);
502 // mov dword ptr fs:[0], edx
503 static const BYTE codeSEH[] = { 0x64, 0x89, 0x15, 0x0, 0x0, 0x0, 0x0 };
504 pcpusl->EmitBytes(codeSEH, sizeof(codeSEH));
506 // deallocate SEH frame
507 pcpusl->X86EmitAddEsp(sizeof(FrameHandlerExRecord));
509 #ifdef PROFILING_SUPPORTED
510 if (CORProfilerTrackTransitions())
512 // Load the MethodDesc* we pushed on the entry transition into EBX.
513 pcpusl->X86EmitPopReg(kEBX);
// Preserve ECX (thread) across the profiler callout.
516 pcpusl->X86EmitPushReg(kECX);
518 // Push arguments and notify profiler
519 pcpusl->X86EmitPushImm32(COR_PRF_TRANSITION_RETURN); // Reason
520 pcpusl->X86EmitPushReg(kEBX); // MethodDesc*
521 pcpusl->X86EmitCall(pcpusl->NewExternalCodeLabel((LPVOID)ProfilerManagedToUnmanagedTransitionMD), 8);
524 pcpusl->X86EmitPopReg(kECX);
526 #endif // PROFILING_SUPPORTED
528 // Load the saved return value
529 if (pInfo->m_wFlags & umtmlFpu)
531 // fld qword ptr [esp]
533 pcpusl->Emit16(0x2404);
// Release the 8-byte FP result slot.
535 pcpusl->X86EmitAddEsp(8);
// Integer result path: restore EDX:EAX from the reserved slots.
539 pcpusl->X86EmitPopReg(kEDX);
540 pcpusl->X86EmitPopReg(kEAX);
543 // Restore EBX, which was saved in prolog
544 pcpusl->X86EmitPopReg(kEBX);
546 pcpusl->X86EmitPopReg(kEBP);
// ret imm16 — pop caller arguments per the calling convention.
549 pcpusl->X86EmitReturn(pInfo->m_cbRetPop);
551 //-------------------------------------------------------------
552 // coming here if the thread is not set up yet
555 pcpusl->EmitLabel(pSetupThreadLabel);
557 // call CreateThreadBlock
558 pcpusl->X86EmitCall(pcpusl->NewExternalCodeLabel((LPVOID) CreateThreadBlockThrow), 0);
// mov ecx, eax — move the freshly created Thread* into ECX.
561 pcpusl->Emit16(0xc189);
563 // jump back into the main code path
564 pcpusl->X86EmitNearJump(pRejoinThreadLabel);
566 //-------------------------------------------------------------
567 // coming here if g_TrapReturningThreads was true
570 pcpusl->EmitLabel(pDisableGCLabel);
572 // call UMThunkStubRareDisable. This may throw if we are not allowed
573 // to enter. Note that we have not set up our SEH yet (deliberately).
574 // This is important to handle the case where we cannot enter the CLR
575 // during shutdown and cannot coordinate with the GC because of
577 pcpusl->X86EmitCall(pcpusl->NewExternalCodeLabel((LPVOID) UMThunkStubRareDisable), 0);
579 // jump back into the main code path
580 pcpusl->X86EmitNearJump(pRejoinGCLabel);
582 //-------------------------------------------------------------
583 // Coming here for rare case when enabling GC pre-emptive mode
586 pcpusl->EmitLabel(pRareEnable);
588 // Thread object is expected to be in EBX. So first save caller's EBX
589 pcpusl->X86EmitPushReg(kEBX);
591 pcpusl->X86EmitMovRegReg(kEBX, kECXthread);
593 pcpusl->EmitRareEnable(NULL);
// Restore the caller's EBX saved just above.
596 pcpusl->X86EmitPopReg(kEBX);
598 // return to mainline of function
599 pcpusl->X86EmitNearJump(pEnableRejoin);
602 // Compiles an unmanaged to managed thunk for the given signature.
// Builds the per-signature x86 reverse-P/Invoke thunk: walks the managed
// signature to compute, for every argument register and outgoing stack slot,
// which incoming (unmanaged) stack offset it should be sourced from — tagging
// byref slots via MAKE_BYREF_STACK_OFFSET — then fills in a UMThunkStubInfo
// and hands it to UMEntryThunk::CompileUMThunkWorker to emit the code.
// Also records m_cbActualArgSize / m_cbRetPop / m_callConv on this
// UMThunkMarshInfo. Returns the linked Stub allocated on pLoaderHeap.
// fNoStub == TRUE means the thunk calls the target directly with no IL
// marshaling stub. NOTE(review): some interior lines (braces, nOffset init,
// the instance-arg handling) are elided in this view.
603 Stub *UMThunkMarshInfo::CompileNExportThunk(LoaderHeap *pLoaderHeap, PInvokeStaticSigInfo* pSigInfo, MetaSig *pMetaSig, BOOL fNoStub)
605 STANDARD_VM_CONTRACT;
607 // stub is always static
608 BOOL fIsStatic = (fNoStub ? pSigInfo->IsStatic() : TRUE);
610 ArgIterator argit(pMetaSig);
612 UINT nStackBytes = argit.SizeOfArgStack();
613 _ASSERTE((nStackBytes % STACK_ELEM_SIZE) == 0);
615 // size of stack passed to us from unmanaged, may be bigger that nStackBytes if there are
616 // parameters with copy constructors where we perform value-to-reference transformation
617 UINT nStackBytesIncoming = nStackBytes;
// Per-slot source-offset table lives on the stack for the duration of the call.
619 UINT *psrcofs = (UINT *)_alloca((nStackBytes / STACK_ELEM_SIZE) * sizeof(UINT));
620 UINT psrcofsregs[NUM_ARGUMENT_REGISTERS];
621 UINT retbufofs = UNUSED_STACK_OFFSET;
623 for (int i = 0; i < NUM_ARGUMENT_REGISTERS; i++)
624 psrcofsregs[i] = UNUSED_STACK_OFFSET;
626 UINT nNumArgs = pMetaSig->NumFixedArgs();
629 int numRegistersUsed = 0;
// Stack slots are filled from the end backwards (see the --numStackSlotsIndex
// decrements below); must land exactly at 0 when done.
630 int numStackSlotsIndex = nStackBytes / STACK_ELEM_SIZE;
635 // just reserve ECX, instance target is special-cased in the thunk compiler
639 // process the return buffer parameter
640 if (argit.HasRetBuffArg())
643 _ASSERTE(numRegistersUsed - 1 < NUM_ARGUMENT_REGISTERS);
644 psrcofsregs[NUM_ARGUMENT_REGISTERS - numRegistersUsed] = nOffset;
647 nOffset += StackElemSize(sizeof(LPVOID));
650 // process ordinary parameters
// Walk args in reverse declaration order (NextArgNormalized iterates that way
// here) accumulating incoming stack offsets in nOffset.
651 for (DWORD i = nNumArgs; i > 0; i--)
653 TypeHandle thValueType;
654 CorElementType type = pMetaSig->NextArgNormalized(&thValueType);
656 UINT cbSize = MetaSig::GetElemSize(type, thValueType);
658 BOOL fPassPointer = FALSE;
659 if (!fNoStub && type == ELEMENT_TYPE_PTR)
661 // this is a copy-constructed argument - get its size
662 TypeHandle thPtr = pMetaSig->GetLastTypeHandleThrowing();
664 _ASSERTE(thPtr.IsPointer());
665 cbSize = thPtr.AsTypeDesc()->GetTypeParam().GetSize();
667 // the incoming stack may be bigger that the outgoing (IL stub) stack
668 nStackBytesIncoming += (StackElemSize(cbSize) - StackElemSize(sizeof(LPVOID)));
672 if (ArgIterator::IsArgumentInRegister(&numRegistersUsed, type))
674 _ASSERTE(numRegistersUsed - 1 < NUM_ARGUMENT_REGISTERS);
675 psrcofsregs[NUM_ARGUMENT_REGISTERS - numRegistersUsed] =
677 MAKE_BYREF_STACK_OFFSET(nOffset) : // the register will get pointer to the incoming stack slot
678 MAKE_BYVAL_STACK_OFFSET(nOffset)); // the register will get the incoming stack slot
680 else if (fPassPointer)
682 // the stack slot will get pointer to the incoming stack slot
683 psrcofs[--numStackSlotsIndex] = MAKE_BYREF_STACK_OFFSET(nOffset);
687 // stack slots will get incoming stack slots (we may need more stack slots for larger parameters)
688 for (UINT nSlotOfs = StackElemSize(cbSize); nSlotOfs > 0; nSlotOfs -= STACK_ELEM_SIZE)
690 // note the reverse order here which is necessary to maintain
691 // the original layout of the structure (it'll be reversed once
692 // more when repushing)
693 psrcofs[--numStackSlotsIndex] = MAKE_BYVAL_STACK_OFFSET(nOffset + nSlotOfs - STACK_ELEM_SIZE);
697 nOffset += StackElemSize(cbSize);
699 _ASSERTE(numStackSlotsIndex == 0);
// Total incoming unmanaged argument bytes (stack + register-sourced slots).
701 UINT cbActualArgSize = nStackBytesIncoming + (numRegistersUsed * STACK_ELEM_SIZE);
706 cbActualArgSize -= StackElemSize(sizeof(LPVOID));
709 m_cbActualArgSize = cbActualArgSize;
711 m_callConv = static_cast<UINT16>(pSigInfo->GetCallConv());
713 UMThunkStubInfo stubInfo;
714 memset(&stubInfo, 0, sizeof(stubInfo));
// Source/destination stack sizes must fit the UINT16 stub-info fields.
716 if (!FitsInU2(m_cbActualArgSize))
717 COMPlusThrow(kMarshalDirectiveException, IDS_EE_SIGTOOCOMPLEX);
719 stubInfo.m_cbSrcStack = static_cast<UINT16>(m_cbActualArgSize);
720 stubInfo.m_cbDstStack = nStackBytes;
// cdecl: caller pops; otherwise the thunk's ret pops the arguments
// (the cdecl arm's body is elided in this view).
722 if (pSigInfo->GetCallConv() == pmCallConvCdecl)
730 m_cbRetPop = static_cast<UINT16>(m_cbActualArgSize);
732 if (pSigInfo->GetCallConv() == pmCallConvThiscall)
734 stubInfo.m_wFlags |= umtmlThisCall;
735 if (argit.HasRetBuffArg())
737 stubInfo.m_wFlags |= umtmlThisCallHiddenArg;
741 stubInfo.m_cbRetPop = m_cbRetPop;
743 if (fIsStatic) stubInfo.m_wFlags |= umtmlIsStatic;
744 if (fNoStub) stubInfo.m_wFlags |= umtmlSkipStub;
746 if (pMetaSig->HasFPReturn()) stubInfo.m_wFlags |= umtmlFpu;
749 CPUSTUBLINKER *pcpusl = &cpusl;
751 // call the worker to emit the actual thunk
752 UMEntryThunk::CompileUMThunkWorker(&stubInfo, pcpusl, psrcofsregs, psrcofs, retbufofs);
// Link the emitted code into a Stub allocated on the supplied loader heap.
754 return pcpusl->Link(pLoaderHeap);
757 #else // _TARGET_X86_ && !FEATURE_STUBS_AS_IL
// Non-x86 (or stubs-as-IL) path: all signatures share the single generic
// assembly helper UMThunkStub, so just return its entry point.
759 PCODE UMThunkMarshInfo::GetExecStubEntryPoint()
761 LIMITED_METHOD_CONTRACT;
763 return GetEEFuncEntryPoint(UMThunkStub);
766 #endif // _TARGET_X86_ && !FEATURE_STUBS_AS_IL
// Per-AppDomain cache of MethodDesc -> UMEntryThunk. The constructor just
// initializes the cache lock and validates the domain pointer (the m_pDomain
// initializer is elided from this view — presumably stored for later
// allocations; confirm in full source).
768 UMEntryThunkCache::UMEntryThunkCache(AppDomain *pDomain) :
769 m_crst(CrstUMEntryThunkCache),
773 _ASSERTE(pDomain != NULL);
// Tear down the cache: every cached thunk exclusively owns its
// UMThunkMarshInfo, so destroy the marshal info first, then free the thunk.
776 UMEntryThunkCache::~UMEntryThunkCache()
780 for (SHash<ThunkSHashTraits>::Iterator i = m_hash.Begin(); i != m_hash.End(); i++)
782 // UMEntryThunks in this cache own UMThunkMarshInfo in 1-1 fashion
783 DestroyMarshInfo(i->m_pThunk->GetUMThunkMarshInfo());
784 UMEntryThunk::FreeUMEntryThunk(i->m_pThunk);
// Returns the (cached or newly created) UMEntryThunk for pMD. On a cache
// miss a new thunk plus its UMThunkMarshInfo are allocated and load-time
// initialized under the cache lock; holders guarantee cleanup if insertion
// throws, and are suppressed only once the element is safely in the hash.
788 UMEntryThunk *UMEntryThunkCache::GetUMEntryThunk(MethodDesc *pMD)
790 CONTRACT (UMEntryThunk *)
795 PRECONDITION(CheckPointer(pMD));
796 POSTCONDITION(CheckPointer(RETVAL));
800 UMEntryThunk *pThunk;
// Lock covers both lookup and insert, so at most one thunk per MethodDesc.
802 CrstHolder ch(&m_crst);
804 const CacheElement *pElement = m_hash.LookupPtr(pMD);
805 if (pElement != NULL)
// Cache hit: reuse the existing thunk.
807 pThunk = pElement->m_pThunk;
811 // cache miss -> create a new thunk
812 pThunk = UMEntryThunk::CreateUMEntryThunk();
// Holder frees the thunk if anything below throws before SuppressRelease.
813 Holder<UMEntryThunk *, DoNothing, UMEntryThunk::FreeUMEntryThunk> umHolder;
814 umHolder.Assign(pThunk);
// Marshal info is allocated from the domain's stub heap; the thunk takes
// 1-1 ownership of it (see the destructor above).
816 UMThunkMarshInfo *pMarshInfo = (UMThunkMarshInfo *)(void *)(m_pDomain->GetStubHeap()->AllocMem(S_SIZE_T(sizeof(UMThunkMarshInfo))));
817 Holder<UMThunkMarshInfo *, DoNothing, UMEntryThunkCache::DestroyMarshInfo> miHolder;
818 miHolder.Assign(pMarshInfo);
820 pMarshInfo->LoadTimeInit(pMD);
821 pThunk->LoadTimeInit(NULL, NULL, pMarshInfo, pMD);
823 // add it to the cache
824 CacheElement element;
826 element.m_pThunk = pThunk;
// Insertion succeeded — ownership now rests with the cache.
829 miHolder.SuppressRelease();
830 umHolder.SuppressRelease();
836 // FailFast if a native callable method invoked directly from managed code.
837 // UMThunkStub.asm check the mode and call this function to failfast.
// Called from UMThunkStub.asm when a NativeCallable method is invoked while
// the thread is already in cooperative (managed) mode — an illegal
// transition. Fails fast with a fatal ExecutionEngine error; never returns.
838 extern "C" VOID STDCALL ReversePInvokeBadTransition()
840 STATIC_CONTRACT_THROWS;
841 STATIC_CONTRACT_GC_TRIGGERS;
843 EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(
844 COR_E_EXECUTIONENGINE,
845 W("Invalid Program: attempted to call a NativeCallable method from runtime-typesafe code.")
849 // Disable from a place that is calling into managed code via a UMEntryThunk.
// Slow path taken by the generated thunk when switching to cooperative GC
// mode requires runtime coordination (e.g. a GC is in progress or a thread
// abort is pending). Synchronizes with the GC, then processes any pending
// thread abort. May throw; the thunk deliberately calls this before its SEH
// frame is installed (see the emitter above).
850 extern "C" VOID STDCALL UMThunkStubRareDisableWorker(Thread *pThread, UMEntryThunk *pUMEntryThunk)
852 STATIC_CONTRACT_THROWS;
853 STATIC_CONTRACT_GC_TRIGGERS;
855 // Do not add a CONTRACT here. We haven't set up SEH.
858 // when we start executing here, we are actually in cooperative mode. But we
859 // haven't synchronized with the barrier to reentry yet. So we are in a highly
860 // dangerous mode. If we call managed code, we will potentially be active in
861 // the GC heap, even as GC's are occurring!
863 // We must do the following in this order, because otherwise we would be constructing
864 // the exception for the abort without synchronizing with the GC. Also, we have no
865 // CLR SEH set up, despite the fact that we may throw a ThreadAbortException.
866 pThread->RareDisablePreemptiveGC();
867 pThread->HandleThreadAbort();
869 #ifdef DEBUGGING_SUPPORTED
870 // If the debugger is attached, we use this opportunity to see if
871 // we're disabling preemptive GC on the way into the runtime from
872 // unmanaged code. We end up here because
873 // Increment/DecrementTraceCallCount() will bump
874 // g_TrapReturningThreads for us.
875 if (CORDebuggerTraceCall())
876 g_pDebugInterface->TraceCall((const BYTE *)pUMEntryThunk->GetManagedTarget());
877 #endif // DEBUGGING_SUPPORTED
// Worker behind the UMEntryThunk prestub: ensures a Thread exists for the
// calling native thread, finishes run-time initialization of the thunk
// (compiling stubs if needed), and returns the real entry point to which the
// prestub should tail-jump.
880 PCODE TheUMEntryPrestubWorker(UMEntryThunk * pUMEntryThunk)
882 STATIC_CONTRACT_THROWS;
883 STATIC_CONTRACT_GC_TRIGGERS;
884 STATIC_CONTRACT_MODE_PREEMPTIVE;
886 Thread * pThread = GetThreadNULLOk();
// Reached only when no Thread exists yet (the guarding check is elided in
// this view) — lazily create one, throwing on failure.
888 pThread = CreateThreadBlockThrow();
890 GCX_COOP_THREAD_EXISTS(pThread);
// Honor a pending abort before running any further runtime code.
892 if (pThread->IsAbortRequested())
893 pThread->HandleThreadAbort();
895 UMEntryThunk::DoRunTimeInit(pUMEntryThunk);
// After init, GetCode() is the fully-initialized target for native callers.
897 return (PCODE)pUMEntryThunk->GetCode();
// LPVOID-taking adapter so DoRunTimeInit can be invoked through a generic
// callback signature (note: parameter actually carries a UMEntryThunk*,
// despite the historical UMThunkMarshInfo* comment in the signature).
900 void RunTimeInit_Wrapper(LPVOID /* UMThunkMarshInfo * */ ptr)
904 UMEntryThunk::DoRunTimeInit((UMEntryThunk*)ptr);
// Completes run-time initialization of a UMEntryThunk (delegates to
// RunTimeInit), wrapped in the managed-exception dispatcher and
// unwind-and-continue handler because this is reached from stubs called by
// managed code and internal exceptions must not leak out.
909 void STDCALL UMEntryThunk::DoRunTimeInit(UMEntryThunk* pUMEntryThunk)
918 PRECONDITION(CheckPointer(pUMEntryThunk));
922 INSTALL_MANAGED_EXCEPTION_DISPATCHER;
923 // this method is called by stubs which are called by managed code,
924 // so we need an unwind and continue handler so that our internal
925 // exceptions don't leak out into managed code.
926 INSTALL_UNWIND_AND_CONTINUE_HANDLER;
930 pUMEntryThunk->RunTimeInit();
933 UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
934 UNINSTALL_MANAGED_EXCEPTION_DISPATCHER;
// Allocates a UMEntryThunk: first tries to recycle one from the global free
// list, and only if that yields nothing (the guarding check is elided in this
// view) allocates fresh executable memory from the global loader allocator's
// executable heap. May throw OOM (see INJECT_FAULT); never returns NULL.
937 UMEntryThunk* UMEntryThunk::CreateUMEntryThunk()
939 CONTRACT (UMEntryThunk*)
944 INJECT_FAULT(COMPlusThrowOM());
945 POSTCONDITION(CheckPointer(RETVAL));
951 p = s_thunkFreeList.GetUMEntryThunk();
954 p = (UMEntryThunk *)(void *)SystemDomain::GetGlobalLoaderAllocator()->GetExecutableHeap()->AllocMem(S_SIZE_T(sizeof(UMEntryThunk)));
// Tears down this thunk's per-instance state: releases the long weak handle
// to the target object (if any) and returns the memory block to the free
// list for delayed reuse instead of freeing it outright.
959 void UMEntryThunk::Terminate()
970 if (GetObjectHandle())
972 DestroyLongWeakHandle(GetObjectHandle());
976 s_thunkFreeList.AddToList(this);
// Frees a UMEntryThunk (body largely elided in this view — presumably it
// calls p->Terminate(), which recycles the block via the free list; confirm
// in full source).
979 VOID UMEntryThunk::FreeUMEntryThunk(UMEntryThunk* p)
986 PRECONDITION(CheckPointer(p));
993 #endif // CROSSGEN_COMPILE
995 //-------------------------------------------------------------------------
996 // This function is used to report error when we call collected delegate.
997 // But memory that was allocated for thunk can be reused, due to it this
998 // function will not be called in all cases of the collected delegate call,
999 // also it may crash while trying to report the problem.
1000 //-------------------------------------------------------------------------
// Fail-fast path invoked when a call arrives through the thunk of a
// garbage-collected delegate: builds a "module!class::method" description of
// the dead target and terminates the process with a fatal error message.
1001 VOID __fastcall UMEntryThunk::ReportViolation(UMEntryThunk* pEntryThunk)
1008 PRECONDITION(CheckPointer(pEntryThunk));
1012 MethodDesc* pMethodDesc = pEntryThunk->GetMethod();
1014 SString namespaceOrClassName;
1018 pMethodDesc->GetMethodInfoNoSig(namespaceOrClassName, methodName);
1019 moduleName.SetUTF8(pMethodDesc->GetModule()->GetSimpleName());
// Compose the diagnostic before failing fast; process does not continue.
1023 message.Printf(W("A callback was made on a garbage collected delegate of type '%s!%s::%s'."),
1024 moduleName.GetUnicode(),
1025 namespaceOrClassName.GetUnicode(),
1026 methodName.GetUnicode());
1028 EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(COR_E_FAILFAST, message.GetUnicode());
// Releases the per-signature exec stub on the x86 non-IL-stubs path (a guard
// for a non-NULL m_pExecStub is elided in this view — confirm in full
// source), then poisons the whole object with 0xcc in debug-style fashion to
// catch use-after-destroy.
1031 UMThunkMarshInfo::~UMThunkMarshInfo()
1041 #if defined(_TARGET_X86_) && !defined(FEATURE_STUBS_AS_IL)
1043 m_pExecStub->DecRef();
1047 FillMemory(this, sizeof(*this), 0xcc);
// Creates the reverse-interop IL marshaling stub MethodDesc for the given
// target/signature, forcing NDIRECTSTUB_FL_REVERSE_INTEROP and, when the
// debugger requests debuggable code, NDIRECTSTUB_FL_GENERATEDEBUGGABLEIL.
// pInvokeMD may be NULL (pure signature-based stub).
1051 MethodDesc* UMThunkMarshInfo::GetILStubMethodDesc(MethodDesc* pInvokeMD, PInvokeStaticSigInfo* pSigInfo, DWORD dwStubFlags)
1053 STANDARD_VM_CONTRACT;
1055 MethodDesc* pStubMD = NULL;
1056 dwStubFlags |= NDIRECTSTUB_FL_REVERSE_INTEROP; // could be either delegate interop or not--that info is passed in from the caller
1058 #if defined(DEBUGGING_SUPPORTED)
1059 // Combining the next two lines, and eliminating jitDebuggerFlags, leads to bad codegen in x86 Release builds using Visual C++ 19.00.24215.1.
1060 CORJIT_FLAGS jitDebuggerFlags = GetDebuggerCompileFlags(pSigInfo->GetModule(), CORJIT_FLAGS());
1061 if (jitDebuggerFlags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_DEBUG_CODE))
1063 dwStubFlags |= NDIRECTSTUB_FL_GENERATEDEBUGGABLEIL;
1065 #endif // DEBUGGING_SUPPORTED
1067 pStubMD = NDirect::CreateCLRToNativeILStub(
1070 pInvokeMD // may be NULL
1076 //----------------------------------------------------------
1077 // This initializer is called during load time.
1078 // It does not do any stub initialization or sigparsing.
1079 // The RunTimeInit() must be called subsequently to fully
1080 // UMThunkMarshInfo.
1081 //----------------------------------------------------------
// Convenience overload: load-time initialization driven entirely by the
// MethodDesc (its signature and module). No stub creation or sig parsing
// happens here — RunTimeInit() must be called later to finish.
1082 VOID UMThunkMarshInfo::LoadTimeInit(MethodDesc* pMD)
1084 LIMITED_METHOD_CONTRACT;
1085 PRECONDITION(pMD != NULL);
1087 LoadTimeInit(pMD->GetSignature(), pMD->GetModule(), pMD);
// Cheap load-time initialization: zeroes the structure, records the
// signature/module identity, and marks m_pILStub with the sentinel value 1,
// which RunTimeInit() later swaps for the real stub address (or NULL) via
// interlocked compare-exchange.
1090 VOID UMThunkMarshInfo::LoadTimeInit(Signature sig, Module * pModule, MethodDesc * pMD)
1092 LIMITED_METHOD_CONTRACT;
1094 FillMemory(this, sizeof(UMThunkMarshInfo), 0); // Prevent problems with partial deletes
1096 // This will be overwritten by the actual code pointer (or NULL) at the end of UMThunkMarshInfo::RunTimeInit()
1097 m_pILStub = (PCODE)1;
1100 m_pModule = pModule;
1103 #if defined(_TARGET_X86_) && !defined(FEATURE_STUBS_AS_IL)
// Debug canary: m_cbRetPop must be overwritten by RunTimeInit before use.
1104 INDEBUG(m_cbRetPop = 0xcccc;)
1108 #ifndef CROSSGEN_COMPILE
1109 //----------------------------------------------------------
1110 // This initializer finishes the init started by LoadTimeInit.
1111 // It does stub creation and can throw an exception.
1113 // It can safely be called multiple times and by concurrent
1115 //----------------------------------------------------------
// Finishes initialization started by LoadTimeInit: locates or compiles the
// IL marshaling stub (preferring an NGen'd stub for delegate Invoke), on
// x86/non-IL-stubs additionally compiles the per-signature exec thunk, and
// computes the stack bookkeeping (m_cbActualArgSize, m_cbRetPop,
// m_cbStackArgSize). Safe to call multiple times and from multiple threads:
// publication races are resolved with interlocked compare-exchange, and the
// sentinel m_pILStub==1 is replaced last. NOTE(review): several interior
// lines (braces, else-arms, some locals like `offs` and `sig`) are elided
// in this view; comments below cover only visible code.
1116 VOID UMThunkMarshInfo::RunTimeInit()
1118 STANDARD_VM_CONTRACT;
1120 // Nothing to do if already inited
1121 if (IsCompletelyInited())
1124 PCODE pFinalILStub = NULL;
1125 MethodDesc* pStubMD = NULL;
1127 MethodDesc * pMD = GetMethod();
1129 // Lookup NGened stub - currently we only support ngening of reverse delegate invoke interop stubs
1130 if (pMD != NULL && pMD->IsEEImpl())
1132 DWORD dwStubFlags = NDIRECTSTUB_FL_NGENEDSTUB | NDIRECTSTUB_FL_REVERSE_INTEROP | NDIRECTSTUB_FL_DELEGATE;
1134 #if defined(DEBUGGING_SUPPORTED)
1135 // Combining the next two lines, and eliminating jitDebuggerFlags, leads to bad codegen in x86 Release builds using Visual C++ 19.00.24215.1.
1136 CORJIT_FLAGS jitDebuggerFlags = GetDebuggerCompileFlags(GetModule(), CORJIT_FLAGS());
1137 if (jitDebuggerFlags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_DEBUG_CODE))
1139 dwStubFlags |= NDIRECTSTUB_FL_GENERATEDEBUGGABLEIL;
1141 #endif // DEBUGGING_SUPPORTED
1143 pFinalILStub = GetStubForInteropMethod(pMD, dwStubFlags, &pStubMD);
// --- x86 without IL stubs: build the per-signature exec thunk as well. ---
1146 #if defined(_TARGET_X86_) && !defined(FEATURE_STUBS_AS_IL)
1147 PInvokeStaticSigInfo sigInfo;
// Placement-new into the local depending on whether we have a MethodDesc
// (the selecting condition/else is elided in this view).
1150 new (&sigInfo) PInvokeStaticSigInfo(pMD);
1152 new (&sigInfo) PInvokeStaticSigInfo(GetSignature(), GetModule());
1154 Stub *pFinalExecStub = NULL;
1156 // we will always emit the argument-shuffling thunk, m_cbActualArgSize is set inside
1157 LoaderHeap *pHeap = (pMD == NULL ? NULL : pMD->GetLoaderAllocator()->GetStubHeap());
1159 if (pFinalILStub != NULL ||
1160 NDirect::MarshalingRequired(pMD, GetSignature().GetRawSig(), GetModule()))
1162 if (pFinalILStub == NULL)
1164 DWORD dwStubFlags = 0;
1166 if (sigInfo.IsDelegateInterop())
1167 dwStubFlags |= NDIRECTSTUB_FL_DELEGATE;
// No NGen'd stub available — create and JIT the IL stub now.
1169 pStubMD = GetILStubMethodDesc(pMD, &sigInfo, dwStubFlags);
1170 pFinalILStub = JitILStub(pStubMD);
// Thunk targets the IL stub (fNoStub == FALSE).
1173 MetaSig msig(pStubMD);
1174 pFinalExecStub = CompileNExportThunk(pHeap, &sigInfo, &msig, FALSE);
// No marshaling needed — thunk calls the target directly (fNoStub == TRUE).
1178 MetaSig msig(GetSignature(), GetModule(), NULL);
1179 pFinalExecStub = CompileNExportThunk(pHeap, &sigInfo, &msig, TRUE);
// Publish the exec stub; on losing the race, discard our duplicate.
1182 if (FastInterlockCompareExchangePointer(&m_pExecStub,
1187 // Some thread swooped in and set us. Our stub is now a
1188 // duplicate, so throw it away.
1190 pFinalExecStub->DecRef();
1193 #else // _TARGET_X86_ && !FEATURE_STUBS_AS_IL
1195 if (pFinalILStub == NULL)
1197 if (pMD != NULL && !pMD->IsEEImpl() &&
1198 !NDirect::MarshalingRequired(pMD, GetSignature().GetRawSig(), GetModule()))
1200 // Call the method directly in no-delegate case if possible. This is important to avoid JITing
1201 // for stubs created via code:ICLRRuntimeHost2::CreateDelegate during coreclr startup.
1202 pFinalILStub = pMD->GetMultiCallableAddrOfCode();
1206 // For perf, it is important to avoid expensive initialization of
1207 // PInvokeStaticSigInfo if we have NGened stub.
1208 PInvokeStaticSigInfo sigInfo;
1211 new (&sigInfo) PInvokeStaticSigInfo(pMD);
1213 new (&sigInfo) PInvokeStaticSigInfo(GetSignature(), GetModule());
1215 DWORD dwStubFlags = 0;
1217 if (sigInfo.IsDelegateInterop())
1218 dwStubFlags |= NDIRECTSTUB_FL_DELEGATE;
1220 pStubMD = GetILStubMethodDesc(pMD, &sigInfo, dwStubFlags);
1221 pFinalILStub = JitILStub(pStubMD);
// --- x86 with IL stubs: compute stack sizes from the managed signature. ---
1226 #if defined(_TARGET_X86_)
1228 int numRegistersUsed = 0;
1229 UINT16 cbRetPop = 0;
1232 // cbStackArgSize represents the number of arg bytes for the MANAGED signature
1234 UINT32 cbStackArgSize = 0;
1239 if (HasRetBuffArgUnmanagedFixup(&sig))
1241 // callee should pop retbuf
1242 numRegistersUsed += 1;
1243 offs += STACK_ELEM_SIZE;
1244 cbRetPop += STACK_ELEM_SIZE;
1246 #endif // UNIX_X86_ABI
1248 for (UINT i = 0 ; i < sig.NumFixedArgs(); i++)
1250 TypeHandle thValueType;
1251 CorElementType type = sig.NextArgNormalized(&thValueType);
1252 int cbSize = sig.GetElemSize(type, thValueType);
1253 if (ArgIterator::IsArgumentInRegister(&numRegistersUsed, type))
// Register-passed args still occupy one element in the unmanaged layout.
1255 offs += STACK_ELEM_SIZE;
1259 offs += StackElemSize(cbSize);
1260 cbStackArgSize += StackElemSize(cbSize);
1263 m_cbStackArgSize = cbStackArgSize;
// Prefer the IL stub's own native arg-size computation when available.
1264 m_cbActualArgSize = (pStubMD != NULL) ? pStubMD->AsDynamicMethodDesc()->GetNativeStackArgSize() : offs;
1266 PInvokeStaticSigInfo sigInfo;
1268 new (&sigInfo) PInvokeStaticSigInfo(pMD);
1270 new (&sigInfo) PInvokeStaticSigInfo(GetSignature(), GetModule());
// cdecl: caller cleans up the stack args; callee pops only the retbuf fixup.
1271 if (sigInfo.GetCallConv() == pmCallConvCdecl)
1273 m_cbRetPop = cbRetPop;
1277 // For all the other calling convention except cdecl, callee pops the stack arguments
1278 m_cbRetPop = cbRetPop + static_cast<UINT16>(m_cbActualArgSize);
1280 #else // _TARGET_X86_
1282 // m_cbActualArgSize gets the number of arg bytes for the NATIVE signature
1285 (pStubMD != NULL) ? pStubMD->AsDynamicMethodDesc()->GetNativeStackArgSize() : pMD->SizeOfArgStack();
1287 #endif // _TARGET_X86_
1289 #endif // _TARGET_X86_ && !FEATURE_STUBS_AS_IL
1291 // Must be the last thing we set!
1292 InterlockedCompareExchangeT<PCODE>(&m_pILStub, pFinalILStub, (PCODE)1);
1295 #if defined(_TARGET_X86_) && defined(FEATURE_STUBS_AS_IL)
1296 VOID UMThunkMarshInfo::SetupArguments(char *pSrc, ArgumentRegisters *pArgRegs, char *pDst)
1298 MethodDesc *pMD = GetMethod();
1303 // x86 native uses the following stack layout:
1305 // | --------- | <- CFA
1311 // x86 managed, however, uses a bit different stack layout:
1313 // | --------- | <- CFA
1314 // | stkarg M | (NATIVE/MANAGE may have different number of stack arguments)
1319 // This stub bridges the gap between them.
1321 char *pCurSrc = pSrc;
1322 char *pCurDst = pDst + m_cbStackArgSize;
1326 int numRegistersUsed = 0;
1329 if (HasRetBuffArgUnmanagedFixup(&sig))
1331 // Pass retbuf via Ecx
1332 numRegistersUsed += 1;
1333 pArgRegs->Ecx = *((UINT32 *)pCurSrc);
1334 pCurSrc += STACK_ELEM_SIZE;
1336 #endif // UNIX_X86_ABI
1338 for (UINT i = 0 ; i < sig.NumFixedArgs(); i++)
1340 TypeHandle thValueType;
1341 CorElementType type = sig.NextArgNormalized(&thValueType);
1342 int cbSize = sig.GetElemSize(type, thValueType);
1343 int elemSize = StackElemSize(cbSize);
1345 if (ArgIterator::IsArgumentInRegister(&numRegistersUsed, type))
1347 _ASSERTE(elemSize == STACK_ELEM_SIZE);
1349 if (numRegistersUsed == 1)
1350 pArgRegs->Ecx = *((UINT32 *)pCurSrc);
1351 else if (numRegistersUsed == 2)
1352 pArgRegs->Edx = *((UINT32 *)pCurSrc);
1356 pCurDst -= elemSize;
1357 memcpy(pCurDst, pCurSrc, elemSize);
1360 pCurSrc += elemSize;
1363 _ASSERTE(pDst == pCurDst);
1366 EXTERN_C VOID STDCALL UMThunkStubSetupArgumentsWorker(UMThunkMarshInfo *pMarshInfo,
1368 UMThunkMarshInfo::ArgumentRegisters *pArgRegs,
1371 pMarshInfo->SetupArguments(pSrc, pArgRegs, pDst);
1373 #endif // _TARGET_X86_ && FEATURE_STUBS_AS_IL
1376 void STDCALL LogUMTransition(UMEntryThunk* thunk)
1384 if (GetThread()) MODE_PREEMPTIVE; else MODE_ANY;
1386 PRECONDITION(CheckPointer(thunk));
1387 PRECONDITION((GetThread() != NULL) ? (!GetThread()->PreemptiveGCDisabled()) : TRUE);
1391 BEGIN_ENTRYPOINT_VOIDRET;
1393 void** retESP = ((void**) &thunk) + 4;
1395 MethodDesc* method = thunk->GetMethod();
1398 LOG((LF_STUBS, LL_INFO1000000, "UNMANAGED -> MANAGED Stub To Method = %s::%s SIG %s Ret Address ESP = 0x%x ret = 0x%x\n",
1399 method->m_pszDebugClassName,
1400 method->m_pszDebugMethodName,
1401 method->m_pszDebugMethodSignature, retESP, *retESP));
1404 END_ENTRYPOINT_VOIDRET;
1409 #endif // CROSSGEN_COMPILE