1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
8 // Provides an abstraction over platform specific calling conventions (specifically, the calling convention
9 // utilized by the JIT on that platform). The caller enumerates each argument of a signature in turn, and is
10 // provided with information mapping that argument into registers and/or stack locations.
13 #ifndef __CALLING_CONVENTION_INCLUDED
14 #define __CALLING_CONVENTION_INCLUDED
16 BOOL IsRetBuffPassedAsFirstArg();
18 // Describes how a single argument is laid out in registers and/or stack locations when given as an input to a
19 // managed method as part of a larger signature.
21 // Locations are split into floating point registers, general registers and stack offsets. Registers are
22 // obviously architecture dependent but are represented as a zero-based index into the usual sequence in which
23 // such registers are allocated for input on the platform in question. For instance:
24 // X86: 0 == ecx, 1 == edx
25 // ARM: 0 == r0, 1 == r1, 2 == r2 etc.
27 // Stack locations are represented as offsets from the stack pointer (at the point of the call). The offset is
28 // given as an index of a pointer sized slot. Similarly the size of data on the stack is given in slot-sized
29 // units. For instance, given an index of 2 and a size of 3:
30 // X86: argument starts at [ESP + 8] and is 12 bytes long
31 // AMD64: argument starts at [RSP + 16] and is 24 bytes long
33 // The structure is flexible enough to describe an argument that is split over several (consecutive) registers
34 // and possibly on to the stack as well.
// Describes where a single argument lives: a (possibly split) combination of floating point
// registers, general purpose registers and stack slots. Register indices are zero-based in the
// platform's argument-register allocation order; stack indices/counts are in pointer-sized slots.
struct ArgLocDesc
{
    int     m_idxFloatReg;        // First floating point register used (or -1)
    int     m_cFloatReg;          // Count of floating point registers used (or 0)

    int     m_idxGenReg;          // First general register used (or -1)
    int     m_cGenReg;            // Count of general registers used (or 0)

    int     m_idxStack;           // First stack slot used (or -1)
    int     m_cStack;             // Count of stack slots used (or 0)

#if defined(UNIX_AMD64_ABI)

    EEClass* m_eeClass;           // For structs passed in register, it points to the EEClass of the struct

#endif // UNIX_AMD64_ABI

#if defined(_TARGET_ARM64_)
    bool    m_isSinglePrecision;  // For determining if HFA is single or double
                                  // precision
#endif // defined(_TARGET_ARM64_)

#if defined(_TARGET_ARM_)
    BOOL    m_fRequires64BitAlignment; // True if the argument should always be aligned (in registers or on the stack)
#endif

    ArgLocDesc()
    {
        Init();
    }

    // Initialize to represent a non-placed argument (no register or stack slots referenced).
    void Init()
    {
        m_idxFloatReg = -1;
        m_cFloatReg = 0;
        m_idxGenReg = -1;
        m_cGenReg = 0;
        m_idxStack = -1;
        m_cStack = 0;
#if defined(_TARGET_ARM_)
        m_fRequires64BitAlignment = FALSE;
#endif
#if defined(_TARGET_ARM64_)
        m_isSinglePrecision = FALSE;
#endif // defined(_TARGET_ARM64_)
#if defined(UNIX_AMD64_ABI)
        m_eeClass = NULL;
#endif
    }
};
// TransitionBlock is the layout of the stack frame of a method call: the saved argument registers and saved
// callee-saved registers. Even though not all fields are used all the time, a uniform layout is used for simplicity.
91 struct TransitionBlock
93 #if defined(_TARGET_X86_)
94 ArgumentRegisters m_argumentRegisters;
95 CalleeSavedRegisters m_calleeSavedRegisters;
96 TADDR m_ReturnAddress;
97 #elif defined(_TARGET_AMD64_)
99 ArgumentRegisters m_argumentRegisters;
101 CalleeSavedRegisters m_calleeSavedRegisters;
102 TADDR m_ReturnAddress;
103 #elif defined(_TARGET_ARM_)
105 CalleeSavedRegisters m_calleeSavedRegisters;
106 // alias saved link register as m_ReturnAddress
108 INT32 r4, r5, r6, r7, r8, r9, r10;
110 TADDR m_ReturnAddress;
113 ArgumentRegisters m_argumentRegisters;
114 #elif defined(_TARGET_ARM64_)
116 CalleeSavedRegisters m_calleeSavedRegisters;
118 INT64 x29; // frame pointer
119 TADDR m_ReturnAddress;
120 INT64 x19, x20, x21, x22, x23, x24, x25, x26, x27, x28;
123 TADDR padding; // Keep size of TransitionBlock as multiple of 16-byte. Simplifies code in PROLOG_WITH_TRANSITION_BLOCK
124 INT64 m_x8RetBuffReg;
125 ArgumentRegisters m_argumentRegisters;
127 PORTABILITY_ASSERT("TransitionBlock");
130 // The transition block should define everything pushed by callee. The code assumes in number of places that
131 // end of the transition block is caller's stack pointer.
133 static int GetOffsetOfReturnAddress()
135 LIMITED_METHOD_CONTRACT;
136 return offsetof(TransitionBlock, m_ReturnAddress);
139 #ifdef _TARGET_ARM64_
140 static int GetOffsetOfRetBuffArgReg()
142 LIMITED_METHOD_CONTRACT;
143 return offsetof(TransitionBlock, m_x8RetBuffReg);
147 static BYTE GetOffsetOfArgs()
149 LIMITED_METHOD_CONTRACT;
151 // Offset of the stack args (which are after the TransitionBlock)
152 return sizeof(TransitionBlock);
155 static int GetOffsetOfArgumentRegisters()
157 LIMITED_METHOD_CONTRACT;
159 #if defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI)
160 offs = sizeof(TransitionBlock);
162 offs = offsetof(TransitionBlock, m_argumentRegisters);
167 static BOOL IsStackArgumentOffset(int offset)
169 LIMITED_METHOD_CONTRACT;
171 #if defined(UNIX_AMD64_ABI)
172 return offset >= sizeof(TransitionBlock);
174 int ofsArgRegs = GetOffsetOfArgumentRegisters();
176 return offset >= (int) (ofsArgRegs + ARGUMENTREGISTERS_SIZE);
180 static BOOL IsArgumentRegisterOffset(int offset)
182 LIMITED_METHOD_CONTRACT;
184 int ofsArgRegs = GetOffsetOfArgumentRegisters();
186 return offset >= ofsArgRegs && offset < (int) (ofsArgRegs + ARGUMENTREGISTERS_SIZE);
190 static UINT GetArgumentIndexFromOffset(int offset)
192 LIMITED_METHOD_CONTRACT;
194 #if defined(UNIX_AMD64_ABI)
195 _ASSERTE(offset != TransitionBlock::StructInRegsOffset);
197 return (offset - GetOffsetOfArgumentRegisters()) / TARGET_POINTER_SIZE;
200 static UINT GetStackArgumentIndexFromOffset(int offset)
202 LIMITED_METHOD_CONTRACT;
204 return (offset - TransitionBlock::GetOffsetOfArgs()) / STACK_ELEM_SIZE;
209 #ifdef CALLDESCR_FPARGREGS
210 static BOOL IsFloatArgumentRegisterOffset(int offset)
212 LIMITED_METHOD_CONTRACT;
213 #if defined(UNIX_AMD64_ABI)
214 return (offset != TransitionBlock::StructInRegsOffset) && (offset < 0);
220 // Check if an argument has floating point register, that means that it is
221 // either a floating point argument or a struct passed in registers that
222 // has a floating point member.
223 static BOOL HasFloatRegister(int offset, ArgLocDesc* argLocDescForStructInRegs)
225 LIMITED_METHOD_CONTRACT;
226 #if defined(UNIX_AMD64_ABI)
227 if (offset == TransitionBlock::StructInRegsOffset)
229 return argLocDescForStructInRegs->m_cFloatReg > 0;
235 static int GetOffsetOfFloatArgumentRegisters()
237 LIMITED_METHOD_CONTRACT;
238 return -GetNegSpaceSize();
240 #endif // CALLDESCR_FPARGREGS
242 static int GetOffsetOfCalleeSavedRegisters()
244 LIMITED_METHOD_CONTRACT;
245 return offsetof(TransitionBlock, m_calleeSavedRegisters);
248 static int GetNegSpaceSize()
250 LIMITED_METHOD_CONTRACT;
251 int negSpaceSize = 0;
252 #ifdef CALLDESCR_FPARGREGS
253 negSpaceSize += sizeof(FloatArgumentRegisters);
256 negSpaceSize += TARGET_POINTER_SIZE; // padding to make FloatArgumentRegisters address 8-byte aligned
261 static const int InvalidOffset = -1;
262 #if defined(UNIX_AMD64_ABI)
263 // Special offset value to represent struct passed in registers. Such a struct can span both
264 // general purpose and floating point registers, so it can have two different offsets.
265 static const int StructInRegsOffset = -2;
269 //-----------------------------------------------------------------------
270 // ArgIterator is helper for dealing with calling conventions.
271 // It is tightly coupled with TransitionBlock. It uses offsets into
272 // TransitionBlock to represent argument locations for efficiency
273 // reasons. Alternatively, it can also return ArgLocDesc for less
274 // performance critical code.
276 // The ARGITERATOR_BASE argument of the template is provider of the parsed
277 // method signature. Typically, the arg iterator works on top of MetaSig.
// Reflection invoke uses an alternative implementation to save signature parsing
// time because it has the parsed signature available.
280 //-----------------------------------------------------------------------
281 template<class ARGITERATOR_BASE>
282 class ArgIteratorTemplate : public ARGITERATOR_BASE
285 //------------------------------------------------------------
287 //------------------------------------------------------------
288 ArgIteratorTemplate()
294 UINT SizeOfArgStack()
297 if (!(m_dwFlags & SIZE_OF_ARG_STACK_COMPUTED))
299 _ASSERTE((m_dwFlags & SIZE_OF_ARG_STACK_COMPUTED) != 0);
300 return m_nSizeOfArgStack;
303 // For use with ArgIterator. This function computes the amount of additional
304 // memory required above the TransitionBlock. The parameter offsets
305 // returned by ArgIteratorTemplate::GetNextOffset are relative to a
306 // FramedMethodFrame, and may be in either of these regions.
307 UINT SizeOfFrameArgumentArray()
311 UINT size = SizeOfArgStack();
313 #if defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI)
314 // The argument registers are not included in the stack size on AMD64
315 size += ARGUMENTREGISTERS_SIZE;
321 //------------------------------------------------------------------------
328 if (this->IsVarArg())
331 return SizeOfArgStack();
335 // Is there a hidden parameter for the return parameter?
340 if (!(m_dwFlags & RETURN_FLAGS_COMPUTED))
341 ComputeReturnFlags();
342 return (m_dwFlags & RETURN_HAS_RET_BUFFER);
345 UINT GetFPReturnSize()
348 if (!(m_dwFlags & RETURN_FLAGS_COMPUTED))
349 ComputeReturnFlags();
350 return m_dwFlags >> RETURN_FP_SIZE_SHIFT;
#ifdef _TARGET_X86_
    //=========================================================================
    // Indicates whether an argument is to be put in a register using the
    // default IL calling convention. This should be called on each parameter
    // in the order it appears in the call signature. For a non-static method,
    // this function should also be called once for the "this" argument, prior
    // to calling it for the "real" arguments. Pass in a typ of ELEMENT_TYPE_CLASS.
    //
    //  *pNumRegistersUsed:  [in,out]: keeps track of the number of argument
    //                       registers assigned previously. The caller should
    //                       initialize this variable to 0 - then each call
    //                       will update it.
    //
    //  typ:                 the signature type
    //=========================================================================
    static BOOL IsArgumentInRegister(int * pNumRegistersUsed, CorElementType typ)
    {
        LIMITED_METHOD_CONTRACT;
        if ( (*pNumRegistersUsed) < NUM_ARGUMENT_REGISTERS) {
            if (gElementTypeInfo[typ].m_enregister) {
                (*pNumRegistersUsed)++;
                return TRUE;
            }
        }
        return FALSE;
    }
#endif // _TARGET_X86_
382 #if defined(ENREGISTERED_PARAMTYPE_MAXSIZE)
384 // Note that this overload does not handle varargs
385 static BOOL IsArgPassedByRef(TypeHandle th)
387 LIMITED_METHOD_CONTRACT;
389 _ASSERTE(!th.IsNull());
391 // This method only works for valuetypes. It includes true value types,
392 // primitives, enums and TypedReference.
393 _ASSERTE(th.IsValueType());
395 size_t size = th.GetSize();
396 #ifdef _TARGET_AMD64_
397 return IsArgPassedByRef(size);
398 #elif defined(_TARGET_ARM64_)
399 // Composites greater than 16 bytes are passed by reference
400 return ((size > ENREGISTERED_PARAMTYPE_MAXSIZE) && !th.IsHFA());
402 PORTABILITY_ASSERT("ArgIteratorTemplate::IsArgPassedByRef");
#ifdef _TARGET_AMD64_
    // This overload should only be used in AMD64-specific code only.
    static BOOL IsArgPassedByRef(size_t size)
    {
        LIMITED_METHOD_CONTRACT;

#ifdef UNIX_AMD64_ABI
        // No arguments are passed by reference on AMD64 on Unix
        return FALSE;
#else
        // If the size is bigger than ENREGISTERED_PARAM_TYPE_MAXSIZE, or if the size is NOT a power of 2, then
        // the argument is passed by reference.
        return (size > ENREGISTERED_PARAMTYPE_MAXSIZE) || ((size & (size-1)) != 0);
#endif
    }
#endif // _TARGET_AMD64_
424 // This overload should be used for varargs only.
425 static BOOL IsVarArgPassedByRef(size_t size)
427 LIMITED_METHOD_CONTRACT;
429 #ifdef _TARGET_AMD64_
430 #ifdef UNIX_AMD64_ABI
431 PORTABILITY_ASSERT("ArgIteratorTemplate::IsVarArgPassedByRef");
433 #else // UNIX_AMD64_ABI
434 return IsArgPassedByRef(size);
435 #endif // UNIX_AMD64_ABI
438 return (size > ENREGISTERED_PARAMTYPE_MAXSIZE);
442 BOOL IsArgPassedByRef()
444 LIMITED_METHOD_CONTRACT;
446 #ifdef _TARGET_AMD64_
447 return IsArgPassedByRef(m_argSize);
448 #elif defined(_TARGET_ARM64_)
449 if (m_argType == ELEMENT_TYPE_VALUETYPE)
451 _ASSERTE(!m_argTypeHandle.IsNull());
452 return ((m_argSize > ENREGISTERED_PARAMTYPE_MAXSIZE) && (!m_argTypeHandle.IsHFA() || this->IsVarArg()));
456 PORTABILITY_ASSERT("ArgIteratorTemplate::IsArgPassedByRef");
461 #endif // ENREGISTERED_PARAMTYPE_MAXSIZE
463 //------------------------------------------------------------
464 // Return the offsets of the special arguments
465 //------------------------------------------------------------
467 static int GetThisOffset();
469 int GetRetBuffArgOffset();
470 int GetVASigCookieOffset();
471 int GetParamTypeArgOffset();
473 //------------------------------------------------------------
474 // Each time this is called, this returns a byte offset of the next
475 // argument from the TransitionBlock* pointer.
477 // Returns TransitionBlock::InvalidOffset once you've hit the end
479 //------------------------------------------------------------
482 CorElementType GetArgType(TypeHandle *pTypeHandle = NULL)
484 LIMITED_METHOD_CONTRACT;
485 if (pTypeHandle != NULL)
487 *pTypeHandle = m_argTypeHandle;
494 LIMITED_METHOD_CONTRACT;
501 // Accessors for built in argument descriptions of the special implicit parameters not mentioned directly
502 // in signatures (this pointer and the like). Whether or not these can be used successfully before all the
503 // explicit arguments have been scanned is platform dependent.
504 void GetThisLoc(ArgLocDesc * pLoc) { WRAPPER_NO_CONTRACT; GetSimpleLoc(GetThisOffset(), pLoc); }
505 void GetParamTypeLoc(ArgLocDesc * pLoc) { WRAPPER_NO_CONTRACT; GetSimpleLoc(GetParamTypeArgOffset(), pLoc); }
506 void GetVASigCookieLoc(ArgLocDesc * pLoc) { WRAPPER_NO_CONTRACT; GetSimpleLoc(GetVASigCookieOffset(), pLoc); }
508 #ifndef CALLDESCR_RETBUFFARGREG
509 void GetRetBuffArgLoc(ArgLocDesc * pLoc) { WRAPPER_NO_CONTRACT; GetSimpleLoc(GetRetBuffArgOffset(), pLoc); }
512 #endif // !_TARGET_X86_
514 ArgLocDesc* GetArgLocDescForStructInRegs()
516 #if defined(UNIX_AMD64_ABI) || defined (_TARGET_ARM64_)
517 return m_hasArgLocDescForStructInRegs ? &m_argLocDescForStructInRegs : NULL;
#ifdef _TARGET_ARM_
    // Get layout information for the argument that the ArgIterator is currently visiting.
    void GetArgLoc(int argOffset, ArgLocDesc *pLoc)
    {
        LIMITED_METHOD_CONTRACT;

        pLoc->Init();

        pLoc->m_fRequires64BitAlignment = m_fRequires64BitAlignment;

        // ARM stack slots and argument registers are 4 bytes.
        int cSlots = (GetArgSize() + 3) / 4;

        if (TransitionBlock::IsFloatArgumentRegisterOffset(argOffset))
        {
            pLoc->m_idxFloatReg = (argOffset - TransitionBlock::GetOffsetOfFloatArgumentRegisters()) / 4;
            pLoc->m_cFloatReg = cSlots;
            return;
        }

        if (!TransitionBlock::IsStackArgumentOffset(argOffset))
        {
            pLoc->m_idxGenReg = TransitionBlock::GetArgumentIndexFromOffset(argOffset);

            if (cSlots <= (4 - pLoc->m_idxGenReg))
            {
                pLoc->m_cGenReg = cSlots;
            }
            else
            {
                // The AAPCS permits a non-FP argument to be split between the last
                // general registers and the first stack slots.
                pLoc->m_cGenReg = 4 - pLoc->m_idxGenReg;

                pLoc->m_idxStack = 0;
                pLoc->m_cStack = cSlots - pLoc->m_cGenReg;
            }
        }
        else
        {
            pLoc->m_idxStack = TransitionBlock::GetStackArgumentIndexFromOffset(argOffset);
            pLoc->m_cStack = cSlots;
        }
    }
#endif // _TARGET_ARM_
#ifdef _TARGET_ARM64_
    // Get layout information for the argument that the ArgIterator is currently visiting.
    void GetArgLoc(int argOffset, ArgLocDesc *pLoc)
    {
        LIMITED_METHOD_CONTRACT;

        pLoc->Init();

        if (TransitionBlock::IsFloatArgumentRegisterOffset(argOffset))
        {
            // Dividing by 8 as size of each register in FloatArgumentRegisters is 8 bytes.
            pLoc->m_idxFloatReg = (argOffset - TransitionBlock::GetOffsetOfFloatArgumentRegisters()) / 8;

            if (!m_argTypeHandle.IsNull() && m_argTypeHandle.IsHFA())
            {
                CorElementType type = m_argTypeHandle.GetHFAType();
                bool isFloatType = (type == ELEMENT_TYPE_R4);

                // An HFA occupies one FP register per element.
                pLoc->m_cFloatReg = isFloatType ? GetArgSize()/sizeof(float): GetArgSize()/sizeof(double);
                pLoc->m_isSinglePrecision = isFloatType;
            }
            else
            {
                pLoc->m_cFloatReg = 1;
            }
            return;
        }

        // ARM64 stack slots and general registers are 8 bytes.
        int cSlots = (GetArgSize() + 7)/ 8;

        // Composites greater than 16bytes are passed by reference, so they only
        // occupy a single pointer-sized slot.
        if (GetArgType() == ELEMENT_TYPE_VALUETYPE && GetArgSize() > ENREGISTERED_PARAMTYPE_MAXSIZE)
        {
            cSlots = 1;
        }

        // Sanity check to make sure no caller is trying to get an ArgLocDesc that
        // describes the return buffer reg field that's in the TransitionBlock.
        _ASSERTE(argOffset != TransitionBlock::GetOffsetOfRetBuffArgReg());

        if (!TransitionBlock::IsStackArgumentOffset(argOffset))
        {
            pLoc->m_idxGenReg = TransitionBlock::GetArgumentIndexFromOffset(argOffset);
            pLoc->m_cGenReg = cSlots;
        }
        else
        {
            pLoc->m_idxStack = TransitionBlock::GetStackArgumentIndexFromOffset(argOffset);
            pLoc->m_cStack = cSlots;
        }
    }
#endif // _TARGET_ARM64_
#if defined(_TARGET_AMD64_) && defined(UNIX_AMD64_ABI)
    // Get layout information for the argument that the ArgIterator is currently visiting.
    void GetArgLoc(int argOffset, ArgLocDesc* pLoc)
    {
        LIMITED_METHOD_CONTRACT;

        if (m_hasArgLocDescForStructInRegs)
        {
            // A struct split across register files already has its location computed.
            *pLoc = m_argLocDescForStructInRegs;
            return;
        }

        if (argOffset == TransitionBlock::StructInRegsOffset)
        {
            // We always already have argLocDesc for structs passed in registers, we
            // compute it in the GetNextOffset for those since it is always needed.
            _ASSERTE(false);
            return;
        }

        pLoc->Init();

        if (TransitionBlock::IsFloatArgumentRegisterOffset(argOffset))
        {
            // Dividing by 16 as size of each register in FloatArgumentRegisters is 16 bytes.
            pLoc->m_idxFloatReg = (argOffset - TransitionBlock::GetOffsetOfFloatArgumentRegisters()) / 16;
            pLoc->m_cFloatReg = 1;
        }
        else if (!TransitionBlock::IsStackArgumentOffset(argOffset))
        {
            pLoc->m_idxGenReg = TransitionBlock::GetArgumentIndexFromOffset(argOffset);
            pLoc->m_cGenReg = 1;
        }
        else
        {
            pLoc->m_idxStack = TransitionBlock::GetStackArgumentIndexFromOffset(argOffset);
            pLoc->m_cStack = (GetArgSize() + STACK_ELEM_SIZE - 1) / STACK_ELEM_SIZE;
        }
    }
#endif // _TARGET_AMD64_ && UNIX_AMD64_ABI
665 DWORD m_dwFlags; // Cached flags
666 int m_nSizeOfArgStack; // Cached value of SizeOfArgStack
670 // Cached information about last argument
671 CorElementType m_argType;
673 TypeHandle m_argTypeHandle;
674 #if (defined(_TARGET_AMD64_) && defined(UNIX_AMD64_ABI)) || defined(_TARGET_ARM64_)
675 ArgLocDesc m_argLocDescForStructInRegs;
676 bool m_hasArgLocDescForStructInRegs;
677 #endif // (_TARGET_AMD64_ && UNIX_AMD64_ABI) || _TARGET_ARM64_
680 int m_curOfs; // Current position of the stack iterator
681 int m_numRegistersUsed;
684 #ifdef _TARGET_AMD64_
685 #ifdef UNIX_AMD64_ABI
686 int m_idxGenReg; // Next general register to be assigned a value
687 int m_idxStack; // Next stack slot to be assigned a value
688 int m_idxFPReg; // Next floating point register to be assigned a value
689 bool m_fArgInRegisters; // Indicates that the current argument is stored in registers
691 int m_curOfs; // Current position of the stack iterator
696 int m_idxGenReg; // Next general register to be assigned a value
697 int m_idxStack; // Next stack slot to be assigned a value
699 WORD m_wFPRegs; // Bitmask of available floating point argument registers (s0-s15/d0-d7)
700 bool m_fRequires64BitAlignment; // Cached info about the current arg
703 #ifdef _TARGET_ARM64_
704 int m_idxGenReg; // Next general register to be assigned a value
705 int m_idxStack; // Next stack slot to be assigned a value
706 int m_idxFPReg; // Next FP register to be assigned a value
710 ITERATION_STARTED = 0x0001, // Started iterating over arguments
711 SIZE_OF_ARG_STACK_COMPUTED = 0x0002,
712 RETURN_FLAGS_COMPUTED = 0x0004,
713 RETURN_HAS_RET_BUFFER = 0x0008, // Cached value of HasRetBuffArg
716 PARAM_TYPE_REGISTER_MASK = 0x0030,
717 PARAM_TYPE_REGISTER_STACK = 0x0010,
718 PARAM_TYPE_REGISTER_ECX = 0x0020,
719 PARAM_TYPE_REGISTER_EDX = 0x0030,
722 METHOD_INVOKE_NEEDS_ACTIVATION = 0x0040, // Flag used by ArgIteratorForMethodInvoke
724 RETURN_FP_SIZE_SHIFT = 8, // The rest of the flags is cached value of GetFPReturnSize
727 void ComputeReturnFlags();
730 void GetSimpleLoc(int offset, ArgLocDesc * pLoc)
734 #ifdef CALLDESCR_RETBUFFARGREG
735 // Codepaths where this could happen have been removed. If this occurs, something
736 // has been missed and this needs another look.
737 _ASSERTE(offset != TransitionBlock::GetOffsetOfRetBuffArgReg());
741 pLoc->m_idxGenReg = TransitionBlock::GetArgumentIndexFromOffset(offset);
748 template<class ARGITERATOR_BASE>
749 int ArgIteratorTemplate<ARGITERATOR_BASE>::GetThisOffset()
753 // This pointer is in the first argument register by default
754 int ret = TransitionBlock::GetOffsetOfArgumentRegisters();
757 // x86 is special as always
758 ret += offsetof(ArgumentRegisters, ECX);
764 template<class ARGITERATOR_BASE>
765 int ArgIteratorTemplate<ARGITERATOR_BASE>::GetRetBuffArgOffset()
769 _ASSERTE(this->HasRetBuffArg());
771 // RetBuf arg is in the second argument register by default
772 int ret = TransitionBlock::GetOffsetOfArgumentRegisters();
775 // x86 is special as always
776 ret += this->HasThis() ? offsetof(ArgumentRegisters, EDX) : offsetof(ArgumentRegisters, ECX);
778 ret = TransitionBlock::GetOffsetOfRetBuffArgReg();
781 ret += TARGET_POINTER_SIZE;
787 template<class ARGITERATOR_BASE>
788 int ArgIteratorTemplate<ARGITERATOR_BASE>::GetVASigCookieOffset()
792 _ASSERTE(this->IsVarArg());
794 #if defined(_TARGET_X86_)
795 // x86 is special as always
796 return sizeof(TransitionBlock);
798 // VaSig cookie is after this and retbuf arguments by default.
799 int ret = TransitionBlock::GetOffsetOfArgumentRegisters();
803 ret += TARGET_POINTER_SIZE;
806 if (this->HasRetBuffArg() && IsRetBuffPassedAsFirstArg())
808 ret += TARGET_POINTER_SIZE;
815 //-----------------------------------------------------------
816 // Get the extra param offset for shared generic code
817 //-----------------------------------------------------------
818 template<class ARGITERATOR_BASE>
819 int ArgIteratorTemplate<ARGITERATOR_BASE>::GetParamTypeArgOffset()
824 if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
825 if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
826 if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
831 _ASSERTE(this->HasParamType());
834 // x86 is special as always
835 if (!(m_dwFlags & SIZE_OF_ARG_STACK_COMPUTED))
838 switch (m_dwFlags & PARAM_TYPE_REGISTER_MASK)
840 case PARAM_TYPE_REGISTER_ECX:
841 return TransitionBlock::GetOffsetOfArgumentRegisters() + offsetof(ArgumentRegisters, ECX);
842 case PARAM_TYPE_REGISTER_EDX:
843 return TransitionBlock::GetOffsetOfArgumentRegisters() + offsetof(ArgumentRegisters, EDX);
848 // The param type arg is last stack argument otherwise
849 return sizeof(TransitionBlock);
851 // The hidden arg is after this and retbuf arguments by default.
852 int ret = TransitionBlock::GetOffsetOfArgumentRegisters();
856 ret += TARGET_POINTER_SIZE;
859 if (this->HasRetBuffArg() && IsRetBuffPassedAsFirstArg())
861 ret += TARGET_POINTER_SIZE;
868 // To avoid corner case bugs, limit maximum size of the arguments with sufficient margin
869 #define MAX_ARG_SIZE 0xFFFFFF
871 //------------------------------------------------------------
872 // Each time this is called, this returns a byte offset of the next
873 // argument from the Frame* pointer. This offset can be positive *or* negative.
875 // Returns TransitionBlock::InvalidOffset once you've hit the end of the list.
876 //------------------------------------------------------------
877 template<class ARGITERATOR_BASE>
878 int ArgIteratorTemplate<ARGITERATOR_BASE>::GetNextOffset()
883 if (!(m_dwFlags & ITERATION_STARTED))
885 int numRegistersUsed = 0;
890 if (this->HasRetBuffArg() && IsRetBuffPassedAsFirstArg())
893 _ASSERTE(!this->IsVarArg() || !this->HasParamType());
896 if (this->IsVarArg() || this->HasParamType())
903 if (this->IsVarArg())
905 numRegistersUsed = NUM_ARGUMENT_REGISTERS; // Nothing else gets passed in registers for varargs
908 #ifdef FEATURE_INTERPRETER
909 BYTE callconv = CallConv();
912 case IMAGE_CEE_CS_CALLCONV_C:
913 case IMAGE_CEE_CS_CALLCONV_STDCALL:
914 m_numRegistersUsed = NUM_ARGUMENT_REGISTERS;
915 m_curOfs = TransitionBlock::GetOffsetOfArgs() + numRegistersUsed * sizeof(void *);
916 m_fUnmanagedCallConv = true;
919 case IMAGE_CEE_CS_CALLCONV_THISCALL:
920 case IMAGE_CEE_CS_CALLCONV_FASTCALL:
921 _ASSERTE_MSG(false, "Unsupported calling convention.");
924 m_fUnmanagedCallConv = false;
925 m_numRegistersUsed = numRegistersUsed;
926 m_curOfs = TransitionBlock::GetOffsetOfArgs() + SizeOfArgStack();
929 m_numRegistersUsed = numRegistersUsed;
930 m_curOfs = TransitionBlock::GetOffsetOfArgs() + SizeOfArgStack();
933 #elif defined(_TARGET_AMD64_)
934 #ifdef UNIX_AMD64_ABI
935 m_idxGenReg = numRegistersUsed;
939 m_curOfs = TransitionBlock::GetOffsetOfArgs() + numRegistersUsed * sizeof(void *);
941 #elif defined(_TARGET_ARM_)
942 m_idxGenReg = numRegistersUsed;
946 #elif defined(_TARGET_ARM64_)
947 m_idxGenReg = numRegistersUsed;
952 PORTABILITY_ASSERT("ArgIteratorTemplate::GetNextOffset");
957 m_dwFlags |= ITERATION_STARTED;
960 // We're done going through the args for this MetaSig
961 if (m_argNum == this->NumFixedArgs())
962 return TransitionBlock::InvalidOffset;
964 TypeHandle thValueType;
965 CorElementType argType = this->GetNextArgumentType(m_argNum++, &thValueType);
967 int argSize = MetaSig::GetElemSize(argType, thValueType);
971 m_argTypeHandle = thValueType;
973 #if defined(UNIX_AMD64_ABI)
974 m_hasArgLocDescForStructInRegs = false;
978 #ifdef FEATURE_INTERPRETER
979 if (m_fUnmanagedCallConv)
981 int argOfs = m_curOfs;
982 m_curOfs += StackElemSize(argSize);
986 if (IsArgumentInRegister(&m_numRegistersUsed, argType))
988 return TransitionBlock::GetOffsetOfArgumentRegisters() + (NUM_ARGUMENT_REGISTERS - m_numRegistersUsed) * sizeof(void *);
991 m_curOfs -= StackElemSize(argSize);
992 _ASSERTE(m_curOfs >= TransitionBlock::GetOffsetOfArgs());
994 #elif defined(_TARGET_AMD64_)
995 #ifdef UNIX_AMD64_ABI
997 m_fArgInRegisters = true;
1001 int cbArg = StackElemSize(argSize);
1006 case ELEMENT_TYPE_R4:
1007 // 32-bit floating point argument.
1011 case ELEMENT_TYPE_R8:
1012 // 64-bit floating point argument.
1016 case ELEMENT_TYPE_VALUETYPE:
1018 #ifdef UNIX_AMD64_ABI
1019 MethodTable *pMT = m_argTypeHandle.AsMethodTable();
1020 if (pMT->IsRegPassedStruct())
1022 EEClass* eeClass = pMT->GetClass();
1024 for (int i = 0; i < eeClass->GetNumberEightBytes(); i++)
1026 switch (eeClass->GetEightByteClassification(i))
1028 case SystemVClassificationTypeInteger:
1029 case SystemVClassificationTypeIntegerReference:
1030 case SystemVClassificationTypeIntegerByRef:
1033 case SystemVClassificationTypeSSE:
1042 // Check if we have enough registers available for the struct passing
1043 if ((cFPRegs + m_idxFPReg <= NUM_FLOAT_ARGUMENT_REGISTERS) && (cGenRegs + m_idxGenReg) <= NUM_ARGUMENT_REGISTERS)
1045 m_argLocDescForStructInRegs.Init();
1046 m_argLocDescForStructInRegs.m_cGenReg = cGenRegs;
1047 m_argLocDescForStructInRegs.m_cFloatReg = cFPRegs;
1048 m_argLocDescForStructInRegs.m_idxGenReg = m_idxGenReg;
1049 m_argLocDescForStructInRegs.m_idxFloatReg = m_idxFPReg;
1050 m_argLocDescForStructInRegs.m_eeClass = eeClass;
1052 m_hasArgLocDescForStructInRegs = true;
1054 m_idxGenReg += cGenRegs;
1055 m_idxFPReg += cFPRegs;
1057 return TransitionBlock::StructInRegsOffset;
1061 // Set the register counts to indicate that this argument will not be passed in registers
1065 #else // UNIX_AMD64_ABI
1066 argSize = sizeof(TADDR);
1067 #endif // UNIX_AMD64_ABI
1073 cGenRegs = cbArg / 8; // GP reg size
1077 if ((cFPRegs > 0) && (cFPRegs + m_idxFPReg <= NUM_FLOAT_ARGUMENT_REGISTERS))
1079 int argOfs = TransitionBlock::GetOffsetOfFloatArgumentRegisters() + m_idxFPReg * 16;
1080 m_idxFPReg += cFPRegs;
1083 else if ((cGenRegs > 0) && (m_idxGenReg + cGenRegs <= NUM_ARGUMENT_REGISTERS))
1085 int argOfs = TransitionBlock::GetOffsetOfArgumentRegisters() + m_idxGenReg * 8;
1086 m_idxGenReg += cGenRegs;
1090 #if defined(UNIX_AMD64_ABI)
1091 m_fArgInRegisters = false;
1094 int argOfs = TransitionBlock::GetOffsetOfArgs() + m_idxStack * STACK_ELEM_SIZE;
1096 int cArgSlots = cbArg / STACK_ELEM_SIZE;
1097 m_idxStack += cArgSlots;
1101 // Each argument takes exactly one slot on AMD64 on Windows
1102 int argOfs = m_curOfs;
1103 m_curOfs += sizeof(void *);
1106 #elif defined(_TARGET_ARM_)
1107 // First look at the underlying type of the argument to determine some basic properties:
1108 // 1) The size of the argument in bytes (rounded up to the stack slot size of 4 if necessary).
1109 // 2) Whether the argument represents a floating point primitive (ELEMENT_TYPE_R4 or ELEMENT_TYPE_R8).
1110 // 3) Whether the argument requires 64-bit alignment (anything that contains a Int64/UInt64).
1112 bool fFloatingPoint = false;
1113 bool fRequiresAlign64Bit = false;
1117 case ELEMENT_TYPE_I8:
1118 case ELEMENT_TYPE_U8:
1119 // 64-bit integers require 64-bit alignment on ARM.
1120 fRequiresAlign64Bit = true;
1123 case ELEMENT_TYPE_R4:
1124 // 32-bit floating point argument.
1125 fFloatingPoint = true;
1128 case ELEMENT_TYPE_R8:
1129 // 64-bit floating point argument.
1130 fFloatingPoint = true;
1131 fRequiresAlign64Bit = true;
1134 case ELEMENT_TYPE_VALUETYPE:
1136 // Value type case: extract the alignment requirement, note that this has to handle
1137 // the interop "native value types".
1138 fRequiresAlign64Bit = thValueType.RequiresAlign8();
1141 // Handle HFAs: packed structures of 1-4 floats or doubles that are passed in FP argument
1142 // registers if possible.
1143 if (thValueType.IsHFA())
1145 fFloatingPoint = true;
1153 // The default is are 4-byte arguments (or promoted to 4 bytes), non-FP and don't require any
1154 // 64-bit alignment.
1158 // Now attempt to place the argument into some combination of floating point or general registers and
1161 // Save the alignment requirement
1162 m_fRequires64BitAlignment = fRequiresAlign64Bit;
1164 int cbArg = StackElemSize(argSize);
1165 int cArgSlots = cbArg / 4;
1167 // Ignore floating point argument placement in registers if we're dealing with a vararg function (the ABI
1168 // specifies this so that vararg processing on the callee side is simplified).
1170 if (fFloatingPoint && !this->IsVarArg())
1172 // Handle floating point (primitive) arguments.
1174 // First determine whether we can place the argument in VFP registers. There are 16 32-bit
1175 // and 8 64-bit argument registers that share the same register space (e.g. D0 overlaps S0 and
1176 // S1). The ABI specifies that VFP values will be passed in the lowest sequence of registers that
1177 // haven't been used yet and have the required alignment. So the sequence (float, double, float)
1178 // would be mapped to (S0, D1, S1) or (S0, S2/S3, S1).
1180 // We use a 16-bit bitmap to record which registers have been used so far.
1182 // So we can use the same basic loop for each argument type (float, double or HFA struct) we set up
1183 // the following input parameters based on the size and alignment requirements of the arguments:
1184 // wAllocMask : bitmask of the number of 32-bit registers we need (1 for 1, 3 for 2, 7 for 3 etc.)
1185 // cSteps : number of loop iterations it'll take to search the 16 registers
1186 // cShift : how many bits to shift the allocation mask on each attempt
1188 WORD wAllocMask = (1 << (cbArg / 4)) - 1;
1189 WORD cSteps = (WORD)(fRequiresAlign64Bit ? 9 - (cbArg / 8) : 17 - (cbArg / 4));
1190 WORD cShift = fRequiresAlign64Bit ? 2 : 1;
1192 // Look through the availability bitmask for a free register or register pair.
1193 for (WORD i = 0; i < cSteps; i++)
1195 if ((m_wFPRegs & wAllocMask) == 0)
1197 // We found one, mark the register or registers as used.
1198 m_wFPRegs |= wAllocMask;
1200 // Indicate the registers used to the caller and return.
1201 return TransitionBlock::GetOffsetOfFloatArgumentRegisters() + (i * cShift * 4);
1203 wAllocMask <<= cShift;
1206 // The FP argument is going to live on the stack. Once this happens the ABI demands we mark all FP
1207 // registers as unavailable.
1210 // Doubles or HFAs containing doubles need the stack aligned appropriately.
1211 if (fRequiresAlign64Bit)
1212 m_idxStack = (int)ALIGN_UP(m_idxStack, 2);
1214 // Indicate the stack location of the argument to the caller.
1215 int argOfs = TransitionBlock::GetOffsetOfArgs() + m_idxStack * 4;
1217 // Record the stack usage.
1218 m_idxStack += cArgSlots;
1222 #endif // ARM_SOFTFP
1225 // Handle the non-floating point case.
1228 if (m_idxGenReg < 4)
1230 if (fRequiresAlign64Bit)
1232 // The argument requires 64-bit alignment. Align either the next general argument register if
1233 // we have any left. See step C.3 in the algorithm in the ABI spec.
1234 m_idxGenReg = (int)ALIGN_UP(m_idxGenReg, 2);
1237 int argOfs = TransitionBlock::GetOffsetOfArgumentRegisters() + m_idxGenReg * 4;
1239 int cRemainingRegs = 4 - m_idxGenReg;
1240 if (cArgSlots <= cRemainingRegs)
1242 // Mark the registers just allocated as used.
1243 m_idxGenReg += cArgSlots;
1247 // The ABI supports splitting a non-FP argument across registers and the stack. But this is
1248 // disabled if the FP arguments already overflowed onto the stack (i.e. the stack index is not
1249 // zero). The following code marks the general argument registers as exhausted if this condition
1250 // holds. See steps C.5 in the algorithm in the ABI spec.
1254 if (m_idxStack == 0)
1256 m_idxStack += cArgSlots - cRemainingRegs;
1261 if (fRequiresAlign64Bit)
1263 // The argument requires 64-bit alignment. If it is going to be passed on the stack, align
1264 // the next stack slot. See step C.6 in the algorithm in the ABI spec.
1265 m_idxStack = (int)ALIGN_UP(m_idxStack, 2);
1268 int argOfs = TransitionBlock::GetOffsetOfArgs() + m_idxStack * 4;
1270 // Advance the stack pointer over the argument just placed.
1271 m_idxStack += cArgSlots;
1274 #elif defined(_TARGET_ARM64_)
1281 case ELEMENT_TYPE_R4:
1282 // 32-bit floating point argument.
1286 case ELEMENT_TYPE_R8:
1287 // 64-bit floating point argument.
1291 case ELEMENT_TYPE_VALUETYPE:
1293 // Handle HFAs: packed structures of 2-4 floats or doubles that are passed in FP argument
1294 // registers if possible.
1295 if (thValueType.IsHFA())
1297 CorElementType type = thValueType.GetHFAType();
1298 bool isFloatType = (type == ELEMENT_TYPE_R4);
1300 cFPRegs = (type == ELEMENT_TYPE_R4)? (argSize/sizeof(float)): (argSize/sizeof(double));
1302 m_argLocDescForStructInRegs.Init();
1303 m_argLocDescForStructInRegs.m_cFloatReg = cFPRegs;
1304 m_argLocDescForStructInRegs.m_idxFloatReg = m_idxFPReg;
1306 m_argLocDescForStructInRegs.m_isSinglePrecision = isFloatType;
1308 m_hasArgLocDescForStructInRegs = true;
1312 // Composites larger than 16 bytes should be passed by reference
1313 if (argSize > ENREGISTERED_PARAMTYPE_MAXSIZE)
1315 argSize = sizeof(TADDR);
1326 int cbArg = StackElemSize(argSize);
1327 int cArgSlots = cbArg / STACK_ELEM_SIZE;
1329 if (cFPRegs>0 && !this->IsVarArg())
1331 if (cFPRegs + m_idxFPReg <= 8)
1333 int argOfs = TransitionBlock::GetOffsetOfFloatArgumentRegisters() + m_idxFPReg * 8;
1334 m_idxFPReg += cFPRegs;
1344 // Only x0-x7 are valid argument registers (x8 is always the return buffer)
1345 if (m_idxGenReg + cArgSlots <= 8)
1347 // The entirety of the arg fits in the register slots.
1349 int argOfs = TransitionBlock::GetOffsetOfArgumentRegisters() + m_idxGenReg * 8;
1350 m_idxGenReg += cArgSlots;
1356 if (this->IsVarArg() && m_idxGenReg < 8)
1358 // Address the Windows ARM64 varargs case where an arg is split between regs and stack.
1359 // This can happen in the varargs case because the first 64 bytes of the stack are loaded
1360 // into x0-x7, and any remaining stack arguments are placed normally.
1361 int argOfs = TransitionBlock::GetOffsetOfArgumentRegisters() + m_idxGenReg * 8;
1363 // Increase m_idxStack to account for the space used for the remainder of the arg after
1364 // register slots are filled.
1365 m_idxStack += (m_idxGenReg + cArgSlots - 8);
1367 // We used up the remaining reg slots.
1375 // Don't use reg slots for this. It will be passed purely on the stack arg space.
1381 int argOfs = TransitionBlock::GetOffsetOfArgs() + m_idxStack * 8;
1382 m_idxStack += cArgSlots;
1385 PORTABILITY_ASSERT("ArgIteratorTemplate::GetNextOffset");
1386 return TransitionBlock::InvalidOffset;
// Computes the RETURN_* flag bits describing how this signature's return value
// reaches the caller: directly in integer/FP registers, or through a hidden
// byref return buffer (RETURN_HAS_RET_BUFFER). When a floating-point payload is
// returned in registers, its size is encoded above RETURN_FP_SIZE_SHIFT.
1390 template<class ARGITERATOR_BASE>
1391 void ArgIteratorTemplate<ARGITERATOR_BASE>::ComputeReturnFlags()
// Contract relaxes to NOTHROW/GC_NOTRIGGER/FORBID_FAULT while the loader runs
// in its restricted (FORBIDGC) mode.
1396 if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
1397 if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
1398 if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
// thValueType receives the value-type handle when the return type is a struct.
1403 TypeHandle thValueType;
1404 CorElementType type = this->GetReturnType(&thValueType);
// Start with the "flags are valid" marker; the cases below OR in the details.
1406 DWORD flags = RETURN_FLAGS_COMPUTED;
// TypedByRef: use a return buffer unless it fits in the integer return
// registers (platforms without enregistered returns always use the buffer).
1409 case ELEMENT_TYPE_TYPEDBYREF:
1410 #ifdef ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE
1411 if (sizeof(TypedByRef) > ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE)
1412 flags |= RETURN_HAS_RET_BUFFER;
1414 flags |= RETURN_HAS_RET_BUFFER;
// Primitive FP returns: record the floating point size in the flags.
1418 case ELEMENT_TYPE_R4:
1420 flags |= sizeof(float) << RETURN_FP_SIZE_SHIFT;
1424 case ELEMENT_TYPE_R8:
1426 flags |= sizeof(double) << RETURN_FP_SIZE_SHIFT;
1430 case ELEMENT_TYPE_VALUETYPE:
1431 #ifdef ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE
1433 _ASSERTE(!thValueType.IsNull());
1435 #if defined(UNIX_AMD64_ABI)
// System V AMD64: a register-passed struct is returned in up to two eightbytes,
// each classified as either integer or SSE.
1436 MethodTable *pMT = thValueType.AsMethodTable();
1437 if (pMT->IsRegPassedStruct())
1439 EEClass* eeClass = pMT->GetClass();
1441 if (eeClass->GetNumberEightBytes() == 1)
1443 // Structs occupying just one eightbyte are treated as int / double
1444 if (eeClass->GetEightByteClassification(0) == SystemVClassificationTypeSSE)
1446 flags |= sizeof(double) << RETURN_FP_SIZE_SHIFT;
1451 // Size of the struct is 16 bytes
1452 flags |= (16 << RETURN_FP_SIZE_SHIFT);
1453 // The lowest two bits of the size encode the order of the int and SSE fields
1454 if (eeClass->GetEightByteClassification(0) == SystemVClassificationTypeSSE)
1456 flags |= (1 << RETURN_FP_SIZE_SHIFT);
1459 if (eeClass->GetEightByteClassification(1) == SystemVClassificationTypeSSE)
1461 flags |= (2 << RETURN_FP_SIZE_SHIFT);
1467 #else // UNIX_AMD64_ABI
// HFA returns (excluded for varargs) come back in FP registers; the size
// recorded covers the maximum HFA payload of 4 floats or 4 doubles.
1470 if (thValueType.IsHFA() && !this->IsVarArg())
1472 CorElementType hfaType = thValueType.GetHFAType();
1474 flags |= (hfaType == ELEMENT_TYPE_R4) ?
1475 ((4 * sizeof(float)) << RETURN_FP_SIZE_SHIFT) :
1476 ((4 * sizeof(double)) << RETURN_FP_SIZE_SHIFT);
1482 size_t size = thValueType.GetSize();
1484 #if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
1485 // Return value types whose size is not a power of 2 use a RetBuffArg
1486 if ((size & (size-1)) != 0)
1488 flags |= RETURN_HAS_RET_BUFFER;
// Small structs that fit the integer return registers are returned directly.
1493 if (size <= ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE)
1495 #endif // UNIX_AMD64_ABI
1497 #endif // ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE
1499 // Value types are returned using return buffer by default
1500 flags |= RETURN_HAS_RET_BUFFER;
// Walks the whole signature eagerly (without the caller iterating arguments) to
// compute the total outgoing stack space, m_nSizeOfArgStack. Must run before
// normal iteration starts; sets SIZE_OF_ARG_STACK_COMPUTED when done.
1510 template<class ARGITERATOR_BASE>
1511 void ArgIteratorTemplate<ARGITERATOR_BASE>::ForceSigWalk()
1516 if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
1517 if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
1518 if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
1523 // This can be only used before the actual argument iteration started
1524 _ASSERTE((m_dwFlags & ITERATION_STARTED) == 0);
1528 // x86 is special as always
// x86 path: accumulate directly — count register-eligible args, and add a
// slot-rounded size for everything that goes on the stack.
1531 int numRegistersUsed = 0;
1532 int nSizeOfArgStack = 0;
// 'this' and (when applicable) the return buffer consume registers first.
1534 if (this->HasThis())
1537 if (this->HasRetBuffArg() && IsRetBuffPassedAsFirstArg())
1540 if (this->IsVarArg())
1542 nSizeOfArgStack += sizeof(void *);
1543 numRegistersUsed = NUM_ARGUMENT_REGISTERS; // Nothing else gets passed in registers for varargs
1546 #ifdef FEATURE_INTERPRETER
1547 BYTE callconv = CallConv();
// Unmanaged C / stdcall conventions pass everything on the stack.
1550 case IMAGE_CEE_CS_CALLCONV_C:
1551 case IMAGE_CEE_CS_CALLCONV_STDCALL:
1552 numRegistersUsed = NUM_ARGUMENT_REGISTERS;
1553 nSizeOfArgStack = TransitionBlock::GetOffsetOfArgs() + numRegistersUsed * sizeof(void *);
1556 case IMAGE_CEE_CS_CALLCONV_THISCALL:
1557 case IMAGE_CEE_CS_CALLCONV_FASTCALL:
1558 _ASSERTE_MSG(false, "Unsupported calling convention.");
1561 #endif // FEATURE_INTERPRETER
// Each fixed argument either takes a register or contributes its slot-rounded
// size to the stack total.
1563 DWORD nArgs = this->NumFixedArgs();
1564 for (DWORD i = 0; i < nArgs; i++)
1566 TypeHandle thValueType;
1567 CorElementType type = this->GetNextArgumentType(i, &thValueType);
1569 if (!IsArgumentInRegister(&numRegistersUsed, type))
1571 int structSize = MetaSig::GetElemSize(type, thValueType);
1573 nSizeOfArgStack += StackElemSize(structSize);
1575 #ifndef DACCESS_COMPILE
// Reject absurdly large argument areas rather than risk overflow downstream.
1576 if (nSizeOfArgStack > MAX_ARG_SIZE)
1579 // We should never throw an exception in the "FORBIDGC_LOADER_USE_ENABLED" mode.
1580 // The contract violation is required to work around a bug in the static contract analyzer.
1581 _ASSERTE(!FORBIDGC_LOADER_USE_ENABLED());
1582 CONTRACT_VIOLATION(ThrowsViolation);
1584 COMPlusThrow(kNotSupportedException);
// The hidden instantiation (param type) argument goes in the first free
// register, or on the stack when ECX and EDX are both taken.
1590 if (this->HasParamType())
1592 DWORD paramTypeFlags = 0;
1593 if (numRegistersUsed < NUM_ARGUMENT_REGISTERS)
1596 paramTypeFlags = (numRegistersUsed == 1) ?
1597 PARAM_TYPE_REGISTER_ECX : PARAM_TYPE_REGISTER_EDX;
1601 nSizeOfArgStack += sizeof(void *);
1602 paramTypeFlags = PARAM_TYPE_REGISTER_STACK;
1604 m_dwFlags |= paramTypeFlags;
1607 #else // _TARGET_X86_
// Non-x86 path: drive the regular GetNextOffset() iteration and track the
// highest stack offset any argument reaches.
1609 int maxOffset = TransitionBlock::GetOffsetOfArgs();
1612 while (TransitionBlock::InvalidOffset != (ofs = GetNextOffset()))
1616 #ifdef _TARGET_AMD64_
1617 #ifdef UNIX_AMD64_ABI
1618 if (m_fArgInRegisters)
1620 // Arguments passed in registers don't consume any stack
1624 stackElemSize = StackElemSize(GetArgSize());
1625 #else // UNIX_AMD64_ABI
1626 // All stack arguments take just one stack slot on AMD64 because arguments bigger
1627 // than a stack slot are passed by reference.
1628 stackElemSize = STACK_ELEM_SIZE;
1629 #endif // UNIX_AMD64_ABI
1630 #else // _TARGET_AMD64_
1631 stackElemSize = StackElemSize(GetArgSize());
1632 #if defined(ENREGISTERED_PARAMTYPE_MAXSIZE)
// A by-ref argument occupies one pointer-sized slot regardless of value size.
1633 if (IsArgPassedByRef())
1634 stackElemSize = STACK_ELEM_SIZE;
1636 #endif // _TARGET_AMD64_
1638 int endOfs = ofs + stackElemSize;
1639 if (endOfs > maxOffset)
1641 #if !defined(DACCESS_COMPILE)
1642 if (endOfs > MAX_ARG_SIZE)
1645 // We should never throw an exception in the "FORBIDGC_LOADER_USE_ENABLED" mode.
1646 // The contract violation is required to work around a bug in the static contract analyzer.
1647 _ASSERTE(!FORBIDGC_LOADER_USE_ENABLED());
1648 CONTRACT_VIOLATION(ThrowsViolation);
1650 COMPlusThrow(kNotSupportedException);
1656 // Clear the iterator started flag
1657 m_dwFlags &= ~ITERATION_STARTED;
1659 int nSizeOfArgStack = maxOffset - TransitionBlock::GetOffsetOfArgs();
1661 #if defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI)
// Windows AMD64: the transition block includes space for the argument
// registers; exclude that from the reported stack size.
1662 nSizeOfArgStack = (nSizeOfArgStack > (int)sizeof(ArgumentRegisters)) ?
1663 (nSizeOfArgStack - sizeof(ArgumentRegisters)) : 0;
1666 #endif // _TARGET_X86_
// Publish the result and mark it computed so it is not recalculated.
1669 m_nSizeOfArgStack = nSizeOfArgStack;
1670 m_dwFlags |= SIZE_OF_ARG_STACK_COMPUTED;
// Base class supplying the signature-specific queries consumed by
// ArgIteratorTemplate; all answers come from the wrapped MetaSig (m_pSig).
1675 class ArgIteratorBase
// Returns the normalized return type; *pthValueType receives the value-type
// handle only on builds with ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE defined.
1680 FORCEINLINE CorElementType GetReturnType(TypeHandle * pthValueType)
1682 WRAPPER_NO_CONTRACT;
1683 #ifdef ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE
1684 return m_pSig->GetReturnTypeNormalized(pthValueType);
1686 return m_pSig->GetReturnTypeNormalized();
// Returns the normalized type of argument iArg. Arguments must be requested
// strictly in order — asserted against the MetaSig's internal cursor.
1690 FORCEINLINE CorElementType GetNextArgumentType(DWORD iArg, TypeHandle * pthValueType)
1692 WRAPPER_NO_CONTRACT;
1693 _ASSERTE(iArg == m_pSig->GetArgNum());
1694 CorElementType et = m_pSig->PeekArgNormalized(pthValueType);
// Reset() — restarts argument enumeration. NOTE(review): the body is elided in
// this excerpt; presumably it rewinds m_pSig — confirm against the full file.
1699 FORCEINLINE void Reset()
1701 WRAPPER_NO_CONTRACT;
// Whether the method has an implicit 'this' argument.
1708 LIMITED_METHOD_CONTRACT;
1709 return m_pSig->HasThis();
// Whether the method takes a hidden instantiation (param type) argument.
1714 LIMITED_METHOD_CONTRACT;
1715 return m_pSig->GetCallingConventionInfo() & CORINFO_CALLCONV_PARAMTYPE;
// Whether the signature is vararg, or is marked to be treated as vararg.
1720 LIMITED_METHOD_CONTRACT;
1721 return m_pSig->IsVarArg() || m_pSig->IsTreatAsVarArg();
// Number of explicit (fixed) arguments in the signature.
1724 DWORD NumFixedArgs()
1726 LIMITED_METHOD_CONTRACT;
1727 return m_pSig->NumFixedArgs();
1730 #ifdef FEATURE_INTERPRETER
// Raw calling-convention byte from the signature (interpreter builds only).
1733 return m_pSig->GetCallingConvention();
1735 #endif // FEATURE_INTERPRETER
1738 // The following is used by the profiler to dig into the iterator for
1739 // discovering if the method has a This pointer or a return buffer.
1740 // Do not use this to re-initialize the signature, use the exposed Init()
1741 // method in this class.
1743 MetaSig *GetSig(void)
// Concrete argument iterator over a MetaSig: combines the platform-independent
// ArgIteratorTemplate machinery with the MetaSig-backed ArgIteratorBase.
1749 class ArgIterator : public ArgIteratorTemplate<ArgIteratorBase>
1752 ArgIterator(MetaSig * pSig)
1757 // This API returns true if we are returning a structure in registers instead of using a byref return buffer
1758 BOOL HasNonStandardByvalReturn()
1760 WRAPPER_NO_CONTRACT;
1762 #ifdef ENREGISTERED_RETURNTYPE_MAXSIZE
// A struct or TypedByRef return with no ret-buff argument means the value
// comes back in registers.
1763 CorElementType type = m_pSig->GetReturnTypeNormalized();
1764 return (type == ELEMENT_TYPE_VALUETYPE || type == ELEMENT_TYPE_TYPEDBYREF) && !HasRetBuffArg();
1771 // Convenience helper
// Returns TRUE if pSig's return value requires a hidden return buffer
// argument. Builds a temporary ArgIterator just to answer the query.
1772 inline BOOL HasRetBuffArg(MetaSig * pSig)
1774 WRAPPER_NO_CONTRACT;
1775 ArgIterator argit(pSig);
1776 return argit.HasRetBuffArg();
1780 // For UNIX_X86_ABI unmanaged functions, a RetBuf is always needed when the return type is VALUETYPE
1781 inline BOOL HasRetBuffArgUnmanagedFixup(MetaSig * pSig)
1783 WRAPPER_NO_CONTRACT;
1784 // We cannot just use pSig->GetReturnType() here since it would return ELEMENT_TYPE_VALUETYPE for enums
1785 CorElementType type = pSig->GetRetTypeHandleThrowing().GetVerifierCorElementType();
1786 return type == ELEMENT_TYPE_VALUETYPE;
// Returns whether the hidden return-buffer pointer occupies the first argument
// register position. On ARM64 it does not — a dedicated register (x8, per the
// "x8 is always the return buffer" note earlier in this file) is used instead.
// NOTE(review): the return statements are elided in this excerpt.
1790 inline BOOL IsRetBuffPassedAsFirstArg()
1792 WRAPPER_NO_CONTRACT;
1793 #ifndef _TARGET_ARM64_
1800 #endif // __CALLING_CONVENTION_INCLUDED