1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
7 #ifdef FEATURE_INTERPRETER
9 #include "interpreter.h"
10 #include "interpreter.hpp"
16 #include "gcheaputilities.h"
18 #include "jitinterface.h"
20 #include "exceptmacros.h"
21 #include "runtimeexceptionkind.h"
22 #include "runtimehandles.h"
24 #include "cycletimer.h"
26 inline CORINFO_CALLINFO_FLAGS combine(CORINFO_CALLINFO_FLAGS flag1, CORINFO_CALLINFO_FLAGS flag2)
28 return (CORINFO_CALLINFO_FLAGS) (flag1 | flag2);
31 static CorInfoType asCorInfoType(CORINFO_CLASS_HANDLE clsHnd)
33 TypeHandle typeHnd(clsHnd);
34 return CEEInfo::asCorInfoType(typeHnd.GetInternalCorElementType(), typeHnd, NULL);
// Builds the interpreter's per-method metadata from the JIT-EE interface's
// CORINFO_METHOD_INFO: caches IL code bounds, arg/local counts and return
// type; computes the per-method flag bits (implicit "this", ret buff,
// varargs, generics context); and fills in a LocalDesc for every IL local,
// packing "large struct" locals into a dedicated offset area.
// NOTE(review): several brace/boilerplate lines are elided in this view;
// the code tokens below are unchanged.
InterpreterMethodInfo::InterpreterMethodInfo(CEEInfo* comp, CORINFO_METHOD_INFO* methInfo)
    : m_method(methInfo->ftn),
      m_module(methInfo->scope),
      m_ILCode(methInfo->ILCode),
      m_ILCodeEnd(methInfo->ILCode + methInfo->ILCodeSize),
      m_maxStack(methInfo->maxStack),
      // Profiling counters: total IL instructions executed, and the max
      // observed for a single invocation.
      m_totIlInstructionsExeced(0),
      m_maxIlInstructionsExeced(0),
      m_ehClauseCount(methInfo->EHcount),
      // NO_VA_ARGNUM until/unless a vararg cookie argument is discovered.
      m_varArgHandleArgNum(NO_VA_ARGNUM),
      m_numArgs(methInfo->args.numArgs),
      m_numLocals(methInfo->locals.numArgs),
      m_returnType(methInfo->args.retType),

    // Overflow sanity check. (Can ILCodeSize ever be zero?)
    _ASSERTE(m_ILCode <= m_ILCodeEnd);

    // Does the calling convention indicate an implicit "this" (first arg) or generic type context arg (last arg)?
    SetFlag<Flag_hasThisArg>((methInfo->args.callConv & CORINFO_CALLCONV_HASTHIS) != 0);
    if (GetFlag<Flag_hasThisArg>())
        // "this" is an object pointer unless the declaring class is a value
        // class, in which case it is passed byref.
        CORINFO_CLASS_HANDLE methClass = comp->getMethodClass(methInfo->ftn);
        DWORD attribs = comp->getClassAttribs(methClass);
        SetFlag<Flag_thisArgIsObjPtr>((attribs & CORINFO_FLG_VALUECLASS) == 0);

#if INTERP_PROFILE || defined(_DEBUG)
        // Record method/class names for profiling and debug dumps. The two
        // assignments below presumably come from alternative (elided)
        // preprocessor branches — TODO confirm against the full source.
        m_methName = ::eeGetMethodFullName(comp, methInfo->ftn, &clsName);
        m_methName = comp->getMethodNameFromMetadata(methInfo->ftn, &clsName, NULL, NULL);
        // Take a private copy of the class name; the EE-returned string's
        // lifetime is not owned by us.
        char* myClsName = new char[strlen(clsName) + 1];
        strcpy(myClsName, clsName);
        m_clsName = myClsName;
#endif // INTERP_PROFILE

    // Do we have a ret buff? If its a struct or refany, then *maybe*, depending on architecture...
    bool hasRetBuff = (methInfo->args.retType == CORINFO_TYPE_VALUECLASS || methInfo->args.retType == CORINFO_TYPE_REFANY);
#if defined(FEATURE_HFA)
    // ... unless its an HFA type (and not varargs)...
    if (hasRetBuff && (comp->getHFAType(methInfo->args.retTypeClass) != CORINFO_HFA_ELEM_NONE) && methInfo->args.getCallConv() != CORINFO_CALLCONV_VARARG)
#if defined(UNIX_AMD64_ABI) || defined(HOST_LOONGARCH64) || defined(HOST_RISCV64)
    // ...or it fits into two registers.
    if (hasRetBuff && getClassSize(methInfo->args.retTypeClass) <= 2 * sizeof(void*))
#elif defined(HOST_ARM) || defined(HOST_AMD64)|| defined(HOST_ARM64)
    // ...or it fits into one register.
    if (hasRetBuff && getClassSize(methInfo->args.retTypeClass) <= sizeof(void*))
    SetFlag<Flag_hasRetBuffArg>(hasRetBuff);

    MetaSig sig(reinterpret_cast<MethodDesc*>(methInfo->ftn));
    SetFlag<Flag_hasGenericsContextArg>((methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0);
    SetFlag<Flag_isVarArg>((methInfo->args.callConv & CORINFO_CALLCONV_VARARG) != 0);
    SetFlag<Flag_typeHasGenericArgs>(methInfo->args.sigInst.classInstCount > 0);
    SetFlag<Flag_methHasGenericArgs>(methInfo->args.sigInst.methInstCount > 0);
    // A generics-context arg only makes sense when the type or the method is
    // generic (and, for the type case, there is no usable object "this").
    _ASSERTE_MSG(!GetFlag<Flag_hasGenericsContextArg>()
                 || ((GetFlag<Flag_typeHasGenericArgs>() && !(GetFlag<Flag_hasThisArg>() && GetFlag<Flag_thisArgIsObjPtr>())) || GetFlag<Flag_methHasGenericArgs>()),
                 "If the method takes a generic parameter, is a static method of generic class (or meth of a value class), and/or itself takes generic parameters");

    // Count the extra non-signature arguments (bodies of these ifs are
    // elided in this view; presumably each increments the arg count).
    if (GetFlag<Flag_hasThisArg>())
    if (GetFlag<Flag_hasRetBuffArg>())
    if (GetFlag<Flag_isVarArg>())
    if (GetFlag<Flag_hasGenericsContextArg>())

    // Argument descriptors; filled in later by InitArgInfo.
    m_argDescs = new ArgDesc[m_numArgs];

    // Now we'll do the locals.
    m_localDescs = new LocalDesc[m_numLocals];
    // Allocate space for the pinning reference bits (lazily).
    m_localIsPinningRefBits = NULL;

    // Now look at each local.
    CORINFO_ARG_LIST_HANDLE localsPtr = methInfo->locals.args;
    CORINFO_CLASS_HANDLE vcTypeRet;
    // Running size of the area reserved for "large struct" locals.
    unsigned curLargeStructOffset = 0;
    for (unsigned k = 0; k < methInfo->locals.numArgs; k++)
        // TODO: if this optimization succeeds, the switch below on localType
        // can become much simpler.
        m_localDescs[k].m_offset = 0;

        CorInfoTypeWithMod localTypWithMod = comp->getArgType(&methInfo->locals, localsPtr, &vcTypeRet);
        // If the local vars is a pinning reference, set the bit to indicate this.
        if ((localTypWithMod & CORINFO_TYPE_MOD_PINNED) != 0)
        CorInfoType localType = strip(localTypWithMod);
        case CORINFO_TYPE_VALUECLASS:
        case CORINFO_TYPE_REFANY: // Just a special case: vcTypeRet is handle for TypedReference in this case...
            InterpreterType tp = InterpreterType(comp, vcTypeRet);
            unsigned size = static_cast<unsigned>(tp.Size(comp));
            // Every local occupies at least one pointer-sized slot.
            size = max(size, sizeof(void*));
            m_localDescs[k].m_type = tp;
            // Large structs get an offset in the dedicated large-struct area.
            if (tp.IsLargeStruct(comp))
                m_localDescs[k].m_offset = curLargeStructOffset;
                curLargeStructOffset += size;
        case CORINFO_TYPE_VAR:
            NYI_INTERP("argument of generic parameter type");  // Should not happen;
            m_localDescs[k].m_type = InterpreterType(localType);
        // Cache the stack-normalized form so it need not be recomputed.
        m_localDescs[k].m_typeStackNormal = m_localDescs[k].m_type.StackNormalize();
        localsPtr = comp->getArgNext(localsPtr);
    // Total size of the large-struct local area.
    m_largeStructLocalSize = curLargeStructOffset;
// Fills in m_argDescs for every argument in "canonical" order:
//   [this] sigArgs [retBuff] [genCtxt-or-vararg-cookie]
// recording, for each, its interpreter type, its stack-normalized type, its
// native-calling-convention offset (from argOffsets_), and its offset for
// direct invocation via MethodDesc::CallDescr().
// NOTE(review): several brace/boilerplate lines are elided in this view;
// the code tokens below are unchanged.
void InterpreterMethodInfo::InitArgInfo(CEEInfo* comp, CORINFO_METHOD_INFO* methInfo, short* argOffsets_)
    unsigned numSigArgsPlusThis = methInfo->args.numArgs;
    if (GetFlag<Flag_hasThisArg>())
        numSigArgsPlusThis++;

    // The m_argDescs array is constructed in the following "canonical" order:
    // 2. signature arguments
    // 4. type parameter -or- vararg cookie
    // argOffsets_ is passed in this order, and serves to establish the offsets to arguments
    // when the interpreter is invoked using the native calling convention (i.e., not directly).
    // When the interpreter is invoked directly, the arguments will appear in the same order
    // and form as arguments passed to MethodDesc::CallDescr(). This ordering is as follows:
    // 3. signature arguments
    // MethodDesc::CallDescr() does not support generic parameters or varargs functions.

    _ASSERTE_MSG((methInfo->args.callConv & (CORINFO_CALLCONV_EXPLICITTHIS)) == 0,
                 "Don't yet handle EXPLICITTHIS calling convention modifier.");
    switch (methInfo->args.callConv & CORINFO_CALLCONV_MASK)
        case CORINFO_CALLCONV_DEFAULT:
        case CORINFO_CALLCONV_VARARG:
            // Running cursor into the direct-call ARG_SLOT array, plus
            // reserved direct offsets for the non-IL-addressable extras.
            ARG_SLOT* directOffset = NULL;
            short directRetBuffOffset = 0;
            short directVarArgOffset = 0;
            short directTypeParamOffset = 0;

            // If there's a "this" argument, handle it.
            if (GetFlag<Flag_hasThisArg>())
                // UNDEF marks "not yet determined"; refined below.
                m_argDescs[k].m_type = InterpreterType(CORINFO_TYPE_UNDEF);
#ifdef FEATURE_INSTANTIATINGSTUB_AS_IL
                MethodDesc *pMD = reinterpret_cast<MethodDesc*>(methInfo->ftn);
                // The signature of the ILStubs may be misleading.
                // If a StubTarget is ever set, we'll find the correct type by inspecting the
                // target, rather than the stub.
                if (pMD->AsDynamicMethodDesc()->IsUnboxingILStub())
                    // This is an unboxing stub where the thisptr is passed as a boxed VT.
                    m_argDescs[k].m_type = InterpreterType(CORINFO_TYPE_CLASS);
                MethodDesc *pTargetMD = pMD->AsDynamicMethodDesc()->GetILStubResolver()->GetStubTargetMethodDesc();
                if (pTargetMD != NULL)
                    // Value-type target: "this" is a byref; otherwise an object.
                    if (pTargetMD->GetMethodTable()->IsValueType())
                        m_argDescs[k].m_type = InterpreterType(CORINFO_TYPE_BYREF);
                    m_argDescs[k].m_type = InterpreterType(CORINFO_TYPE_CLASS);
#endif // FEATURE_INSTANTIATINGSTUB_AS_IL
                // Still undetermined: classify from the declaring class.
                if (m_argDescs[k].m_type == InterpreterType(CORINFO_TYPE_UNDEF))
                    CORINFO_CLASS_HANDLE cls = comp->getMethodClass(methInfo->ftn);
                    DWORD attribs = comp->getClassAttribs(cls);
                    if (attribs & CORINFO_FLG_VALUECLASS)
                        m_argDescs[k].m_type = InterpreterType(CORINFO_TYPE_BYREF);
                    m_argDescs[k].m_type = InterpreterType(CORINFO_TYPE_CLASS);
                // "this" is already stack-normal.
                m_argDescs[k].m_typeStackNormal = m_argDescs[k].m_type;
                m_argDescs[k].m_nativeOffset = argOffsets_[k];
                m_argDescs[k].m_directOffset = static_cast<short>(reinterpret_cast<intptr_t>(ArgSlotEndiannessFixup(directOffset, sizeof(void*))));

            // If there is a return buffer, it will appear next in the arguments list for a direct call.
            // Reserve its offset now, for use after the explicit arguments.
#if defined(HOST_ARM)
            // On ARM, for direct calls we always treat HFA return types as having ret buffs.
            // So figure out if we have an HFA return type.
                methInfo->args.retType == CORINFO_TYPE_VALUECLASS
                && (comp->getHFAType(methInfo->args.retTypeClass) != CORINFO_HFA_ELEM_NONE)
                && methInfo->args.getCallConv() != CORINFO_CALLCONV_VARARG;
#endif // defined(HOST_ARM)

            if (GetFlag<Flag_hasRetBuffArg>()
#if defined(HOST_ARM)
                // On ARM, for direct calls we always treat HFA return types as having ret buffs.
#endif // defined(HOST_ARM)
                directRetBuffOffset = static_cast<short>(reinterpret_cast<intptr_t>(ArgSlotEndiannessFixup(directOffset, sizeof(void*))));
#if defined(HOST_AMD64)
            if (GetFlag<Flag_isVarArg>())
                directVarArgOffset = static_cast<short>(reinterpret_cast<intptr_t>(ArgSlotEndiannessFixup(directOffset, sizeof(void*))));
            if (GetFlag<Flag_hasGenericsContextArg>())
                directTypeParamOffset = static_cast<short>(reinterpret_cast<intptr_t>(ArgSlotEndiannessFixup(directOffset, sizeof(void*))));

            // Now record the argument types for the rest of the arguments.
            CORINFO_CLASS_HANDLE vcTypeRet;
            CORINFO_ARG_LIST_HANDLE argPtr = methInfo->args.args;
            for (; k < numSigArgsPlusThis; k++)
                CorInfoTypeWithMod argTypWithMod = comp->getArgType(&methInfo->args, argPtr, &vcTypeRet);
                CorInfoType argType = strip(argTypWithMod);
                case CORINFO_TYPE_VALUECLASS:
                case CORINFO_TYPE_REFANY: // Just a special case: vcTypeRet is handle for TypedReference in this case...
                    it = InterpreterType(comp, vcTypeRet);
                    // Everything else is just encoded as a shifted CorInfoType.
                    it = InterpreterType(argType);
                m_argDescs[k].m_type = it;
                m_argDescs[k].m_typeStackNormal = it.StackNormalize();
                m_argDescs[k].m_nativeOffset = argOffsets_[k];
                // When invoking the interpreter directly, large value types are always passed by reference.
                if (it.IsLargeStruct(comp))
                    m_argDescs[k].m_directOffset = static_cast<short>(reinterpret_cast<intptr_t>(ArgSlotEndiannessFixup(directOffset, sizeof(void*))));
                    m_argDescs[k].m_directOffset = static_cast<short>(reinterpret_cast<intptr_t>(ArgSlotEndiannessFixup(directOffset, it.Size(comp))));
                argPtr = comp->getArgNext(argPtr);

            if (GetFlag<Flag_hasRetBuffArg>())
                // The return buffer is passed as a byref.
                m_argDescs[k].m_type = InterpreterType(CORINFO_TYPE_BYREF);
                m_argDescs[k].m_typeStackNormal = m_argDescs[k].m_type;
                m_argDescs[k].m_nativeOffset = argOffsets_[k];
                m_argDescs[k].m_directOffset = directRetBuffOffset;
            if (GetFlag<Flag_hasGenericsContextArg>())
                // The generic type context is an unmanaged pointer (native int).
                m_argDescs[k].m_type = InterpreterType(CORINFO_TYPE_NATIVEINT);
                m_argDescs[k].m_typeStackNormal = m_argDescs[k].m_type;
                m_argDescs[k].m_nativeOffset = argOffsets_[k];
                m_argDescs[k].m_directOffset = directTypeParamOffset;
            if (GetFlag<Flag_isVarArg>())
                // The vararg cookie is an unmanaged pointer (native int).
                m_argDescs[k].m_type = InterpreterType(CORINFO_TYPE_NATIVEINT);
                m_argDescs[k].m_typeStackNormal = m_argDescs[k].m_type;
                m_argDescs[k].m_nativeOffset = argOffsets_[k];
                m_argDescs[k].m_directOffset = directVarArgOffset;

        // Native calling conventions are not yet supported by the interpreter.
        case IMAGE_CEE_CS_CALLCONV_C:
            NYI_INTERP("InterpreterMethodInfo::InitArgInfo -- IMAGE_CEE_CS_CALLCONV_C");
        case IMAGE_CEE_CS_CALLCONV_STDCALL:
            NYI_INTERP("InterpreterMethodInfo::InitArgInfo -- IMAGE_CEE_CS_CALLCONV_STDCALL");
        case IMAGE_CEE_CS_CALLCONV_THISCALL:
            NYI_INTERP("InterpreterMethodInfo::InitArgInfo -- IMAGE_CEE_CS_CALLCONV_THISCALL");
        case IMAGE_CEE_CS_CALLCONV_FASTCALL:
            NYI_INTERP("InterpreterMethodInfo::InitArgInfo -- IMAGE_CEE_CS_CALLCONV_FASTCALL");
        case CORINFO_CALLCONV_FIELD:
            NYI_INTERP("InterpreterMethodInfo::InitArgInfo -- CORINFO_CALLCONV_FIELD");
        case CORINFO_CALLCONV_LOCAL_SIG:
            NYI_INTERP("InterpreterMethodInfo::InitArgInfo -- CORINFO_CALLCONV_LOCAL_SIG");
        case CORINFO_CALLCONV_PROPERTY:
            NYI_INTERP("InterpreterMethodInfo::InitArgInfo -- CORINFO_CALLCONV_PROPERTY");
        case CORINFO_CALLCONV_UNMANAGED:
            NYI_INTERP("InterpreterMethodInfo::InitArgInfo -- CORINFO_CALLCONV_UNMANAGED");
        case CORINFO_CALLCONV_NATIVEVARARG:
            NYI_INTERP("InterpreterMethodInfo::InitArgInfo -- CORINFO_CALLCONV_NATIVEVARARG");
            _ASSERTE_ALL_BUILDS(false); // shouldn't get here
// Releases the lazily-created IL-offset-to-item cache, if one was allocated.
// (m_methodCache is stored type-erased; it is cast back to its concrete type
// so the proper destructor runs.)
InterpreterMethodInfo::~InterpreterMethodInfo()
    if (m_methodCache != NULL)
        delete reinterpret_cast<ILOffsetToItemCache*>(m_methodCache);
443 void InterpreterMethodInfo::AllocPinningBitsIfNeeded()
445 if (m_localIsPinningRefBits != NULL)
448 unsigned numChars = (m_numLocals + 7) / 8;
449 m_localIsPinningRefBits = new char[numChars];
450 for (unsigned i = 0; i < numChars; i++)
452 m_localIsPinningRefBits[i] = char(0);
457 void InterpreterMethodInfo::SetPinningBit(unsigned locNum)
459 _ASSERTE_MSG(locNum < m_numLocals, "Precondition");
460 AllocPinningBitsIfNeeded();
462 unsigned ind = locNum / 8;
463 unsigned bitNum = locNum - (ind * 8);
464 m_localIsPinningRefBits[ind] |= (1 << bitNum);
// Returns whether local number 'locNum' was marked as a pinning reference.
// NOTE(review): the body of the NULL-guard (presumably an early
// "return false;") is elided in this view — confirm against the full source.
bool InterpreterMethodInfo::GetPinningBit(unsigned locNum)
    _ASSERTE_MSG(locNum < m_numLocals, "Precondition");
    // No bits allocated means no pinning bit was ever set.
    if (m_localIsPinningRefBits == NULL)
    // Test bit (locNum % 8) of byte (locNum / 8).
    unsigned ind = locNum / 8;
    unsigned bitNum = locNum - (ind * 8);
    return (m_localIsPinningRefBits[ind] & (1 << bitNum)) != 0;
// Assigns a register or stack location to the argument at 'canonIndex',
// updating the per-architecture register/stack accounting. 'numSlots' is the
// argument's size in pointer slots; 'noReg' forces a stack location;
// 'twoSlotAlign' requests two-slot alignment (ARM model only).
// NOTE(review): this function is a dense #if ladder and several branch
// bodies/#else lines are elided in this view; code tokens are unchanged.
void Interpreter::ArgState::AddArg(unsigned canonIndex, short numSlots, bool noReg, bool twoSlotAlign)
#if defined(HOST_AMD64)
    // AMD64 has its own helper that also handles the shadow-slot model.
    _ASSERTE(!twoSlotAlign);
    AddArgAmd64(canonIndex, numSlots, /*isFloatingType*/false);
#if defined(HOST_X86) || defined(HOST_ARM64)
    _ASSERTE(!twoSlotAlign); // Shouldn't use this flag on x86 (it wouldn't work right in the stack, at least).
    // If the argument requires two-slot alignment, make sure we have it. This is the
    // ARM model: both in regs and on the stack.
    if (!noReg && numRegArgs < NumberOfIntegerRegArgs())
        // Pad to an even register / stack slot when alignment requires it.
        if ((numRegArgs % 2) != 0)
        if ((callerArgStackSlots % 2) != 0)
            callerArgStackSlots++;
#if defined(HOST_ARM64)
    // On ARM64 we're not going to place an argument 'partially' on the stack
    // if all slots fits into registers, they go into registers, otherwise they go into stack.
    if (!noReg && numRegArgs+numSlots <= NumberOfIntegerRegArgs())
    if (!noReg && numRegArgs < NumberOfIntegerRegArgs())
        argIsReg[canonIndex] = ARS_IntReg;
        argOffsets[canonIndex] = numRegArgs * sizeof(void*);
        numRegArgs += numSlots;
        // If we overflowed the regs, we consume some stack arg space.
        if (numRegArgs > NumberOfIntegerRegArgs())
            callerArgStackSlots += (numRegArgs - NumberOfIntegerRegArgs());
#if defined(HOST_X86)
    // On X86, stack args are pushed in order. We will add the total size of the arguments to this offset,
    // so we set this to a negative number relative to the SP before the first arg push.
    callerArgStackSlots += numSlots;
    ClrSafeInt<short> offset(-callerArgStackSlots);
#elif defined(HOST_ARM) || defined(HOST_ARM64)
    // On ARM, args are pushed in *reverse* order. So we will create an offset relative to the address
    // of the first stack arg; later, we will add the size of the non-stack arguments.
    ClrSafeInt<short> offset(callerArgStackSlots);
#elif defined(HOST_LOONGARCH64)
    callerArgStackSlots += numSlots;
    ClrSafeInt<short> offset(-callerArgStackSlots);
#elif defined(HOST_RISCV64)
    callerArgStackSlots += numSlots;
    ClrSafeInt<short> offset(-callerArgStackSlots);
    // Convert from slot units to bytes, with overflow checking.
    offset *= static_cast<short>(sizeof(void*));
    _ASSERTE(!offset.IsOverflow());
    argOffsets[canonIndex] = offset.Value();
#if defined(HOST_ARM) || defined(HOST_ARM64)
    // Account for the stack slots only after recording the offset (reverse push order).
    callerArgStackSlots += numSlots;
#endif // !HOST_AMD64
554 #if defined(HOST_AMD64)
556 #if defined(UNIX_AMD64_ABI)
// System V AMD64 variant: places the argument in an FP register (single
// slot floats/doubles), in integer registers (up to two slots, if they all
// fit), or on the caller's stack.
// NOTE(review): closing braces and possibly other lines are elided in this
// view; code tokens are unchanged.
void Interpreter::ArgState::AddArgAmd64(unsigned canonIndex, unsigned short numSlots, bool isFloatingType)
    // Combined reg-slot cursor: FP and integer reg slots share one offset space.
    int regSlots = numFPRegArgSlots + numRegArgs;
    if (isFloatingType && numFPRegArgSlots + 1 < MaxNumFPRegArgSlots)
        _ASSERTE(numSlots == 1);
        argIsReg[canonIndex] = ARS_FloatReg;
        argOffsets[canonIndex] = regSlots * sizeof(void*);
        // Mark this FP slot as consumed in the bitmask.
        fpArgsUsed |= (0x1 << regSlots);
        numFPRegArgSlots += 1;
    // Integer/struct args of 1-2 slots go in integer registers if they fit whole.
    else if (numSlots < 3 && (numRegArgs + numSlots <= NumberOfIntegerRegArgs()))
        argIsReg[canonIndex] = ARS_IntReg;
        argOffsets[canonIndex] = regSlots * sizeof(void*);
        numRegArgs += numSlots;
        // Otherwise the argument is passed on the stack.
        argIsReg[canonIndex] = ARS_NotReg;
        ClrSafeInt<short> offset(callerArgStackSlots * sizeof(void*));
        _ASSERTE(!offset.IsOverflow());
        argOffsets[canonIndex] = offset.Value();
        callerArgStackSlots += numSlots;
// Windows AMD64 calling convention allows any type that can be contained in 64 bits to be passed in registers,
// if not contained or they are of a size not a power of 2, then they are passed by reference on the stack.
// RCX, RDX, R8, R9 are the int arg registers. XMM0-3 overlap with the integer registers and are used
// for floating point arguments.
// NOTE(review): closing braces and possibly other lines are elided in this
// view; code tokens are unchanged.
void Interpreter::ArgState::AddArgAmd64(unsigned canonIndex, unsigned short numSlots, bool isFloatingType)
    // If floating type and there are slots use a float reg slot.
    if (isFloatingType && (numFPRegArgSlots < MaxNumFPRegArgSlots))
        _ASSERTE(numSlots == 1);
        argIsReg[canonIndex] = ARS_FloatReg;
        argOffsets[canonIndex] = numFPRegArgSlots * sizeof(void*);
        // NOTE(review): shift uses (numFPRegArgSlots + 1) rather than the
        // slot index used by the UNIX variant — confirm intended bit layout.
        fpArgsUsed |= (0x1 << (numFPRegArgSlots + 1));
        numFPRegArgSlots += 1;
        numRegArgs += 1; // Increment int reg count due to shadowing.
    // If we have an integer/aligned-struct arg or a reference of a struct that got copied on
    // to the stack, it would go into a register or a stack slot.
    if (numRegArgs != NumberOfIntegerRegArgs())
        argIsReg[canonIndex] = ARS_IntReg;
        argOffsets[canonIndex] = numRegArgs * sizeof(void*);
        numFPRegArgSlots += 1; // Increment FP reg count due to shadowing.
        // No integer registers left: the argument goes on the stack.
        argIsReg[canonIndex] = ARS_NotReg;
        ClrSafeInt<short> offset(callerArgStackSlots * sizeof(void*));
        _ASSERTE(!offset.IsOverflow());
        argOffsets[canonIndex] = offset.Value();
        callerArgStackSlots += 1;
621 #endif //UNIX_AMD64_ABI
// Assigns a floating-point register (or, where required, stack) location to
// the FP argument at 'canonIndex'. Per-architecture: AMD64 defers to
// AddArgAmd64; x86 never calls this (FP goes on the stack); ARM implements
// the AAPCS back-filling model for single-precision args; ARM64/LoongArch64/
// RISC-V64 allocate consecutive FP register slots.
// NOTE(review): several branch bodies/#else lines are elided in this view;
// code tokens are unchanged.
void Interpreter::ArgState::AddFPArg(unsigned canonIndex, unsigned short numSlots, bool twoSlotAlign)
#if defined(HOST_AMD64)
    _ASSERTE(!twoSlotAlign);
    _ASSERTE(numSlots == 1);
    AddArgAmd64(canonIndex, numSlots, /*isFloatingType*/ true);
#elif defined(HOST_X86)
    _ASSERTE(false); // Don't call this on x86; we pass all FP on the stack.
#elif defined(HOST_ARM)
    // We require "numSlots" alignment.
    _ASSERTE(numFPRegArgSlots + numSlots <= MaxNumFPRegArgSlots);
    argIsReg[canonIndex] = ARS_FloatReg;

    // If we require two slot alignment, the number of slots must be a multiple of two.
    _ASSERTE((numSlots % 2) == 0);

    // Skip a slot if necessary.
    if ((numFPRegArgSlots % 2) != 0)

    // We always use new slots for two slot aligned args precision...
    argOffsets[canonIndex] = numFPRegArgSlots * sizeof(void*);
    for (unsigned short i = 0; i < numSlots/2; i++)
        // Mark each double-wide register (two slots) as used.
        fpArgsUsed |= (0x3 << (numFPRegArgSlots + i));
    numFPRegArgSlots += numSlots;

    // A single-precision (float) argument. We must do "back-filling" where possible, searching
    // for previous unused registers.
    while (slot < 32 && (fpArgsUsed & (1 << slot))) slot++;
    _ASSERTE(slot < 32); // Search succeeded.
    _ASSERTE(slot <= numFPRegArgSlots); // No bits at or above numFPRegArgSlots are set (regs used).
    argOffsets[canonIndex] = slot * sizeof(void*);
    fpArgsUsed |= (0x1 << slot);
    if (slot == numFPRegArgSlots)
        numFPRegArgSlots += numSlots;

    // We can always allocate at after the last used slot.
    // NOTE(review): this stores into argOffsets[numFPRegArgSlots], not
    // argOffsets[canonIndex] — confirm this index is intentional.
    argOffsets[numFPRegArgSlots] = numFPRegArgSlots * sizeof(void*);
    for (unsigned i = 0; i < numSlots; i++)
        fpArgsUsed |= (0x1 << (numFPRegArgSlots + i));
    numFPRegArgSlots += numSlots;
#elif defined(HOST_ARM64)
    _ASSERTE(numFPRegArgSlots + numSlots <= MaxNumFPRegArgSlots);
    _ASSERTE(!twoSlotAlign);
    argIsReg[canonIndex] = ARS_FloatReg;
    argOffsets[canonIndex] = numFPRegArgSlots * sizeof(void*);
    for (unsigned i = 0; i < numSlots; i++)
        fpArgsUsed |= (0x1 << (numFPRegArgSlots + i));
    numFPRegArgSlots += numSlots;
#elif defined(HOST_LOONGARCH64)
    assert(numFPRegArgSlots + numSlots <= MaxNumFPRegArgSlots);
    assert(!twoSlotAlign);
    argIsReg[canonIndex] = ARS_FloatReg;
    argOffsets[canonIndex] = numFPRegArgSlots * sizeof(void*);
    for (unsigned i = 0; i < numSlots; i++)
        fpArgsUsed |= (0x1 << (numFPRegArgSlots + i));
    numFPRegArgSlots += numSlots;
#elif defined(HOST_RISCV64)
    assert(numFPRegArgSlots + numSlots <= MaxNumFPRegArgSlots);
    assert(!twoSlotAlign);
    argIsReg[canonIndex] = ARS_FloatReg;
    argOffsets[canonIndex] = numFPRegArgSlots * sizeof(void*);
    for (unsigned i = 0; i < numSlots; i++)
        fpArgsUsed |= (0x1 << (numFPRegArgSlots + i));
    numFPRegArgSlots += numSlots;
#error "Unsupported architecture"
724 CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp,
725 CORINFO_METHOD_INFO* info,
726 /*OUT*/ BYTE **nativeEntry,
727 /*OUT*/ ULONG *nativeSizeOfCode,
728 InterpreterMethodInfo** ppInterpMethodInfo,
732 // First, ensure that the compiler-specific statics are initialized.
735 InitializeCompilerStatics(comp);
738 // Next, use switches and IL scanning to determine whether to interpret this method.
742 #define TRACE_SKIPPED(cls, meth, reason) \
743 if (s_DumpInterpreterStubsFlag.val(CLRConfig::INTERNAL_DumpInterpreterStubs)) { \
744 fprintf(GetLogFile(), "Skipping %s:%s (%s).\n", cls, meth, reason); \
747 #define TRACE_SKIPPED(cls, meth, reason)
751 // If jmpCall, we only need to do computations involving method info.
755 const char* methName = comp->getMethodNameFromMetadata(info->ftn, &clsName, NULL, NULL);
756 if ( !s_InterpretMeths.contains(methName, clsName, info->args.pSig)
757 || s_InterpretMethsExclude.contains(methName, clsName, info->args.pSig))
759 TRACE_SKIPPED(clsName, methName, "not in set of methods to interpret");
760 return CORJIT_SKIPPED;
763 unsigned methHash = comp->getMethodHash(info->ftn);
764 if ( methHash < s_InterpretMethHashMin.val(CLRConfig::INTERNAL_InterpreterMethHashMin)
765 || methHash > s_InterpretMethHashMax.val(CLRConfig::INTERNAL_InterpreterMethHashMax))
767 TRACE_SKIPPED(clsName, methName, "hash not within range to interpret");
768 return CORJIT_SKIPPED;
771 MethodDesc* pMD = reinterpret_cast<MethodDesc*>(info->ftn);
776 TRACE_SKIPPED(clsName, methName, "interop stubs not supported");
777 return CORJIT_SKIPPED;
780 #endif // !INTERP_ILSTUBS
782 if (!s_InterpreterDoLoopMethods && MethodMayHaveLoop(info->ILCode, info->ILCodeSize))
784 TRACE_SKIPPED(clsName, methName, "has loop, not interpreting loop methods.");
785 return CORJIT_SKIPPED;
788 s_interpreterStubNum++;
791 if (s_interpreterStubNum < s_InterpreterStubMin.val(CLRConfig::INTERNAL_InterpreterStubMin)
792 || s_interpreterStubNum > s_InterpreterStubMax.val(CLRConfig::INTERNAL_InterpreterStubMax))
794 TRACE_SKIPPED(clsName, methName, "stub num not in range, not interpreting.");
795 return CORJIT_SKIPPED;
798 if (s_DumpInterpreterStubsFlag.val(CLRConfig::INTERNAL_DumpInterpreterStubs))
800 unsigned hash = comp->getMethodHash(info->ftn);
801 fprintf(GetLogFile(), "Generating interpretation stub (# %d = 0x%x, hash = 0x%x) for %s:%s.\n",
802 s_interpreterStubNum, s_interpreterStubNum, hash, clsName, methName);
803 fflush(GetLogFile());
809 // Finally, generate an interpreter entry-point stub.
812 // @TODO: this structure clearly needs some sort of lifetime management. It is the moral equivalent
813 // of compiled code, and should be associated with an app domain. In addition, when I get to it, we should
814 // delete it when/if we actually compile the method. (Actually, that's complicated, since there may be
815 // VSD stubs still bound to the interpreter stub. The check there will get to the jitted code, but we want
816 // to eventually clean those up at some safe point...)
817 InterpreterMethodInfo* interpMethInfo = new InterpreterMethodInfo(comp, info);
818 if (ppInterpMethodInfo != nullptr)
820 *ppInterpMethodInfo = interpMethInfo;
822 interpMethInfo->m_stubNum = s_interpreterStubNum;
823 MethodDesc* methodDesc = reinterpret_cast<MethodDesc*>(info->ftn);
826 interpMethInfo = RecordInterpreterMethodInfoForMethodHandle(info->ftn, interpMethInfo);
829 #if FEATURE_INTERPRETER_DEADSIMPLE_OPT
831 if (IsDeadSimpleGetter(comp, methodDesc, &offsetOfLd))
833 interpMethInfo->SetFlag<InterpreterMethodInfo::Flag_methIsDeadSimpleGetter>(true);
834 if (offsetOfLd == ILOffsetOfLdFldInDeadSimpleInstanceGetterDbg)
836 interpMethInfo->SetFlag<InterpreterMethodInfo::Flag_methIsDeadSimpleGetterIsDbgForm>(true);
840 _ASSERTE(offsetOfLd == ILOffsetOfLdFldInDeadSimpleInstanceGetterOpt);
843 #endif // FEATURE_INTERPRETER_DEADSIMPLE_OPT
845 // Used to initialize the arg offset information.
848 // We assume that the stack contains (with addresses growing upwards, assuming a downwards-growing stack):
852 // [Non-reg arg <# of reg args>]
855 // Then push the register args to get:
859 // [Non-reg arg <# of reg args>]
861 // [reg arg <# of reg args>-1]
865 // Pass the address of this argument array, and the MethodDesc pointer for the method, as arguments to
868 // So the structure of the code will look like this (in the non-ILstub case):
870 #if defined(HOST_X86) || defined(HOST_AMD64)
873 // [if there are register arguments in ecx or edx, push them]
874 // ecx := addr of InterpretMethodInfo for the method to be interpreted.
875 // edx = esp /*pointer to argument structure*/
876 // call to Interpreter::InterpretMethod
877 // [if we pushed register arguments, increment esp by the right amount.]
879 // ret <n> ; where <n> is the number of argument stack slots in the call to the stub.
880 #elif defined (HOST_ARM)
884 // TODO: much of the interpreter stub code should be is shareable. In the non-IL stub case,
885 // at least, we could have a small per-method stub that puts the address of the method-specific
886 // InterpreterMethodInfo into eax, and then branches to a shared part. Probably we would want to
887 // always push all integer args on x86, as we do already on ARM. On ARM, we'd need several versions
888 // of the shared stub, for different numbers of floating point register args, cross different kinds of
889 // HFA return values. But these could still be shared, and the per-method stub would decide which of
892 // In the IL stub case, which uses eax, it would be problematic to do this sharing.
895 MethodDesc* pMD = reinterpret_cast<MethodDesc*>(info->ftn);
899 #if defined(HOST_X86) || defined(HOST_AMD64)
900 #if defined(HOST_X86)
901 sl.X86EmitPushReg(kEBP);
902 sl.X86EmitMovRegReg(kEBP, static_cast<X86Reg>(kESP_Unsafe));
904 #elif defined(HOST_ARM)
905 // On ARM we use R12 as a "scratch" register -- callee-trashed, not used
907 ThumbReg r11 = ThumbReg(11);
908 ThumbReg r12 = ThumbReg(12);
910 #elif defined(HOST_ARM64)
911 // x8 through x15 are scratch registers on ARM64.
912 IntReg x8 = IntReg(8);
913 IntReg x9 = IntReg(9);
914 #error unsupported platform
918 MetaSig sig(methodDesc);
920 unsigned totalArgs = info->args.numArgs;
921 unsigned sigArgsPlusThis = totalArgs;
922 bool hasThis = false;
923 bool hasRetBuff = false;
924 bool isVarArg = false;
925 bool hasGenericsContextArg = false;
927 // Below, we will increment "totalArgs" for any of the "this" argument,
928 // a ret buff argument, and/or a generics context argument.
930 // There will be four arrays allocated below, each with this increased "totalArgs" elements:
931 // argOffsets, argIsReg, argPerm, and, later, m_argDescs.
933 // They will be indexed in the order (0-based, [] indicating optional)
935 // [this] sigArgs [retBuff] [VASigCookie] [genCtxt]
937 // We will call this "canonical order". It is architecture-independent, and
938 // does not necessarily correspond to the architecture-dependent physical order
939 // in which the registers are actually passed. (That's actually the purpose of
940 // "argPerm": to record the correspondence between canonical order and physical
941 // order.) We could have chosen any order for the first three of these, but it's
942 // simplest to let m_argDescs have all the passed IL arguments passed contiguously
943 // at the beginning, allowing it to be indexed by IL argument number.
945 int genericsContextArgIndex = 0;
946 int retBuffArgIndex = 0;
947 int vaSigCookieIndex = 0;
951 _ASSERTE(info->args.callConv & CORINFO_CALLCONV_HASTHIS);
953 totalArgs++; sigArgsPlusThis++;
956 if (methodDesc->HasRetBuffArg())
959 retBuffArgIndex = totalArgs;
963 if (sig.GetCallingConventionInfo() & CORINFO_CALLCONV_VARARG)
966 vaSigCookieIndex = totalArgs;
970 if (sig.GetCallingConventionInfo() & CORINFO_CALLCONV_PARAMTYPE)
972 _ASSERTE(info->args.callConv & CORINFO_CALLCONV_PARAMTYPE);
973 hasGenericsContextArg = true;
974 genericsContextArgIndex = totalArgs;
978 // The non-this sig args have indices starting after these.
980 // We will first encode the arg offsets as *negative* offsets from the address above the first
981 // stack arg, and later add in the total size of the stack args to get a positive offset.
982 // The first sigArgsPlusThis elements are the offsets of the IL-addressable arguments. After that,
983 // there may be up to two more: generics context arg, if present, and return buff pointer, if present.
984 // (Note that the latter is actually passed after the "this" pointer, or else first if no "this" pointer
985 // is present. We re-arrange to preserve the easy IL-addressability.)
986 ArgState argState(totalArgs);
988 // This is the permutation that translates from an index in the argOffsets/argIsReg arrays to
989 // the platform-specific order in which the arguments are passed.
990 unsigned* argPerm = new unsigned[totalArgs];
992 // The number of register argument slots we end up pushing.
993 unsigned short regArgsFound = 0;
995 unsigned physArgIndex = 0;
997 #if defined(HOST_ARM)
998 // The stub linker has a weird little limitation: all stubs it's used
999 // for on ARM push some callee-saved register, so the unwind info
1000 // code was written assuming at least one would be pushed. I don't know how to
1001 // fix it, so I'm meeting this requirement, by pushing one callee-save.
1002 #define STUB_LINK_EMIT_PROLOG_REQUIRES_CALLEE_SAVE_PUSH 1
1004 #if STUB_LINK_EMIT_PROLOG_REQUIRES_CALLEE_SAVE_PUSH
1005 const int NumberOfCalleeSaveRegsToPush = 1;
1007 const int NumberOfCalleeSaveRegsToPush = 0;
1009 // The "1" here is for the return address.
1010 const int NumberOfFixedPushes = 1 + NumberOfCalleeSaveRegsToPush;
1011 #elif defined(HOST_ARM64)
1013 const int NumberOfFixedPushes = 2;
1016 #if defined(FEATURE_HFA)
1017 #if defined(HOST_ARM) || defined(HOST_ARM64)
1018 // On ARM, a non-retBuffArg method that returns a struct type might be an HFA return. Figure
1020 unsigned HFARetTypeSize = 0;
1022 #if defined(HOST_ARM64)
1023 unsigned cHFAVars = 0;
1025 if (info->args.retType == CORINFO_TYPE_VALUECLASS
1026 && (comp->getHFAType(info->args.retTypeClass) != CORINFO_HFA_ELEM_NONE)
1027 && info->args.getCallConv() != CORINFO_CALLCONV_VARARG)
1029 HFARetTypeSize = getClassSize(info->args.retTypeClass);
1030 #if defined(HOST_ARM)
1031 // Round up to a double boundary;
1032 HFARetTypeSize = ((HFARetTypeSize+ sizeof(double) - 1) / sizeof(double)) * sizeof(double);
1033 #elif defined(HOST_ARM64)
1034 // We don't need to round it up to double. Unlike ARM, whether it's a float or a double each field will
1035 // occupy one slot. We'll handle the stack alignment in the prolog where we have all the information about
1036 // what is going to be pushed on the stack.
1037 // Instead on ARM64 we'll need to know how many slots we'll need.
1038 // for instance a VT with two float fields will have the same size as a VT with 1 double field. (ARM64TODO: Verify it)
1039 // It works on ARM because the overlapping layout of the floating point registers
1040 // but it won't work on ARM64.
1041 cHFAVars = (comp->getHFAType(info->args.retTypeClass) == CORINFO_HFA_ELEM_FLOAT) ? HFARetTypeSize/sizeof(float) : HFARetTypeSize/sizeof(double);
1045 #endif // defined(FEATURE_HFA)
1047 _ASSERTE_MSG((info->args.callConv & (CORINFO_CALLCONV_EXPLICITTHIS)) == 0,
1048 "Don't yet handle EXPLICITTHIS calling convention modifier.");
1050 switch (info->args.callConv & CORINFO_CALLCONV_MASK)
1052 case CORINFO_CALLCONV_DEFAULT:
1053 case CORINFO_CALLCONV_VARARG:
1055 unsigned firstSigArgIndex = 0;
1058 argPerm[0] = physArgIndex; physArgIndex++;
1065 argPerm[retBuffArgIndex] = physArgIndex; physArgIndex++;
1066 argState.AddArg(retBuffArgIndex);
1071 argPerm[vaSigCookieIndex] = physArgIndex; physArgIndex++;
1072 interpMethInfo->m_varArgHandleArgNum = vaSigCookieIndex;
1073 argState.AddArg(vaSigCookieIndex);
1076 #if defined(HOST_ARM) || defined(HOST_AMD64) || defined(HOST_ARM64)
1077 // Generics context comes before args on ARM. Would be better if I factored this out as a call,
1078 // to avoid large swaths of duplicate code.
1079 if (hasGenericsContextArg)
1081 argPerm[genericsContextArgIndex] = physArgIndex; physArgIndex++;
1082 argState.AddArg(genericsContextArgIndex);
1084 #endif // HOST_ARM || HOST_AMD64 || HOST_ARM64
1086 CORINFO_ARG_LIST_HANDLE argPtr = info->args.args;
1087 // Some arguments have been passed in registers, some in memory. We must generate code that
1088 // moves the register arguments to memory, and determines a pointer into the stack from which all
1089 // the arguments can be accessed, according to the offsets in "argOffsets."
1091 // In the first pass over the arguments, we will label and count the register arguments, and
1092 // initialize entries in "argOffsets" for the non-register arguments -- relative to the SP at the
1093 // time of the call. Then when we have counted the number of register arguments, we will adjust
1094 // the offsets for the non-register arguments to account for those. Then, in the second pass, we
1095 // will push the register arguments on the stack, and capture the final stack pointer value as
1096 // the argument vector pointer.
1097 CORINFO_CLASS_HANDLE vcTypeRet;
1098 // This iteration starts at the first signature argument, and iterates over all the
1099 // canonical indices for the signature arguments.
1100 for (unsigned k = firstSigArgIndex; k < sigArgsPlusThis; k++)
1102 argPerm[k] = physArgIndex; physArgIndex++;
1104 CorInfoTypeWithMod argTypWithMod = comp->getArgType(&info->args, argPtr, &vcTypeRet);
1105 CorInfoType argType = strip(argTypWithMod);
1108 case CORINFO_TYPE_UNDEF:
1109 case CORINFO_TYPE_VOID:
1110 case CORINFO_TYPE_VAR:
1111 _ASSERTE_ALL_BUILDS(false); // Should not happen;
1114 // One integer slot arguments:
1115 case CORINFO_TYPE_BOOL:
1116 case CORINFO_TYPE_CHAR:
1117 case CORINFO_TYPE_BYTE:
1118 case CORINFO_TYPE_UBYTE:
1119 case CORINFO_TYPE_SHORT:
1120 case CORINFO_TYPE_USHORT:
1121 case CORINFO_TYPE_INT:
1122 case CORINFO_TYPE_UINT:
1123 case CORINFO_TYPE_NATIVEINT:
1124 case CORINFO_TYPE_NATIVEUINT:
1125 case CORINFO_TYPE_BYREF:
1126 case CORINFO_TYPE_CLASS:
1127 case CORINFO_TYPE_STRING:
1128 case CORINFO_TYPE_PTR:
1132 // Two integer slot arguments.
1133 case CORINFO_TYPE_LONG:
1134 case CORINFO_TYPE_ULONG:
1135 #if defined(HOST_X86)
1136 // Longs are always passed on the stack -- with no obvious alignment.
1137 argState.AddArg(k, 2, /*noReg*/true);
1138 #elif defined(HOST_ARM)
1139 // LONGS have 2-reg alignment; inc reg if necessary.
1140 argState.AddArg(k, 2, /*noReg*/false, /*twoSlotAlign*/true);
1141 #elif defined(HOST_AMD64) || defined(HOST_ARM64) || defined(HOST_LOONGARCH64) || defined(HOST_RISCV64)
1144 #error unknown platform
1148 // One float slot args:
1149 case CORINFO_TYPE_FLOAT:
1150 #if defined(HOST_X86)
1151 argState.AddArg(k, 1, /*noReg*/true);
1152 #elif defined(HOST_ARM)
1153 argState.AddFPArg(k, 1, /*twoSlotAlign*/false);
1154 #elif defined(HOST_AMD64) || defined(HOST_ARM64) || defined(HOST_LOONGARCH64) || defined(HOST_RISCV64)
1155 argState.AddFPArg(k, 1, false);
1157 #error unknown platform
1161 // Two float slot args
1162 case CORINFO_TYPE_DOUBLE:
1163 #if defined(HOST_X86)
1164 argState.AddArg(k, 2, /*noReg*/true);
1165 #elif defined(HOST_ARM)
1166 argState.AddFPArg(k, 2, /*twoSlotAlign*/true);
1167 #elif defined(HOST_AMD64) || defined(HOST_ARM64) || defined(HOST_LOONGARCH64) || defined(HOST_RISCV64)
1168 argState.AddFPArg(k, 1, false);
1170 #error unknown platform
1174 // Value class args:
1175 case CORINFO_TYPE_VALUECLASS:
1176 case CORINFO_TYPE_REFANY:
1178 unsigned sz = getClassSize(vcTypeRet);
1179 unsigned szSlots = max(1, sz / sizeof(void*));
1180 #if defined(HOST_X86)
1181 argState.AddArg(k, static_cast<short>(szSlots), /*noReg*/true);
1182 #elif defined(HOST_AMD64)
1183 argState.AddArg(k, static_cast<short>(szSlots));
1184 #elif defined(HOST_ARM) || defined(HOST_ARM64)
1185 // TODO: handle Vector64, Vector128 types
1186 CorInfoHFAElemType hfaType = comp->getHFAType(vcTypeRet);
1187 if (CorInfoTypeIsFloatingPoint(hfaType))
1189 argState.AddFPArg(k, szSlots,
1190 #if defined(HOST_ARM)
1191 /*twoSlotAlign*/ (hfaType == CORINFO_HFA_ELEM_DOUBLE)
1192 #elif defined(HOST_ARM64)
1193 /*twoSlotAlign*/ false // unlike ARM32 FP args always consume 1 slot on ARM64
1199 unsigned align = comp->getClassAlignmentRequirement(vcTypeRet, FALSE);
1200 argState.AddArg(k, static_cast<short>(szSlots), /*noReg*/false,
1201 #if defined(HOST_ARM)
1202 /*twoSlotAlign*/ (align == 8)
1203 #elif defined(HOST_ARM64)
1204 /*twoSlotAlign*/ false
1208 #elif defined(HOST_LOONGARCH64)
1209 argState.AddArg(k, static_cast<short>(szSlots));
1210 #elif defined(HOST_RISCV64)
1211 argState.AddArg(k, static_cast<short>(szSlots));
1213 #error unknown platform
1220 _ASSERTE_MSG(false, "should not reach here, unknown arg type");
1222 argPtr = comp->getArgNext(argPtr);
1225 #if defined(HOST_X86)
1226 // Generics context comes last on HOST_X86. Would be better if I factored this out as a call,
1227 // to avoid large swatches of duplicate code.
1228 if (hasGenericsContextArg)
1230 argPerm[genericsContextArgIndex] = physArgIndex; physArgIndex++;
1231 argState.AddArg(genericsContextArgIndex);
1234 // Now we have counted the number of register arguments, so we can update the offsets for the
1235 // non-register arguments. "+ 2" below is to account for the return address from the call, and
1237 unsigned short stackArgBaseOffset = (argState.numRegArgs + 2 + argState.callerArgStackSlots) * sizeof(void*);
1238 unsigned intRegArgBaseOffset = 0;
1240 #elif defined(HOST_ARM)
1242 // We're choosing to always push all arg regs on ARM -- this is the only option
1243 // that ThumbEmitProlog currently gives.
1244 argState.numRegArgs = 4;
1246 // On ARM, we push the (integer) arg regs before we push the return address, so we don't add an
1247 // extra constant. And the offset is the address of the last pushed argument, which is the first
1248 // stack argument in signature order.
1250 // Round up to a double boundary...
1251 unsigned fpStackSlots = ((argState.numFPRegArgSlots + 1) / 2) * 2;
1252 unsigned intRegArgBaseOffset = (fpStackSlots + NumberOfFixedPushes) * sizeof(void*);
1253 unsigned short stackArgBaseOffset = intRegArgBaseOffset + (argState.numRegArgs) * sizeof(void*);
1254 #elif defined(HOST_ARM64)
1256 // See StubLinkerCPU::EmitProlog for the layout of the stack
1257 unsigned intRegArgBaseOffset = (argState.numFPRegArgSlots) * sizeof(void*);
1258 unsigned short stackArgBaseOffset = (unsigned short) ((argState.numRegArgs + argState.numFPRegArgSlots) * sizeof(void*));
1259 #elif defined(UNIX_AMD64_ABI)
1260 unsigned intRegArgBaseOffset = 0;
1261 unsigned short stackArgBaseOffset = (2 + argState.numRegArgs + argState.numFPRegArgSlots) * sizeof(void*);
1262 #elif defined(HOST_AMD64)
1263 unsigned short stackArgBaseOffset = (argState.numRegArgs) * sizeof(void*);
1264 #elif defined(HOST_LOONGARCH64)
1265 // See StubLinkerCPU::EmitProlog for the layout of the stack
1266 unsigned intRegArgBaseOffset = (argState.numFPRegArgSlots) * sizeof(void*);
1267 unsigned short stackArgBaseOffset = (unsigned short) ((argState.numRegArgs + argState.numFPRegArgSlots) * sizeof(void*));
1268 #elif defined(HOST_RISCV64)
1269 unsigned intRegArgBaseOffset = (argState.numFPRegArgSlots) * sizeof(void*);
1270 unsigned short stackArgBaseOffset = (unsigned short) ((argState.numRegArgs + argState.numFPRegArgSlots) * sizeof(void*));
1272 #error unsupported platform
1275 #if defined(HOST_ARM)
1276 WORD regArgMask = 0;
1277 #endif // defined(HOST_ARM)
1278 // argPerm maps from an index into the argOffsets/argIsReg arrays to
1279 // the order that the arguments are passed.
1280 unsigned* argPermInverse = new unsigned[totalArgs];
1281 for (unsigned t = 0; t < totalArgs; t++)
1283 argPermInverse[argPerm[t]] = t;
1286 for (unsigned kk = 0; kk < totalArgs; kk++)
1288 // Let "k" be the index of the kk'th input in the argOffsets and argIsReg arrays.
1289 // To compute "k" we need to invert argPerm permutation -- determine the "k" such
1290 // that argPerm[k] == kk.
1291 unsigned k = argPermInverse[kk];
1293 _ASSERTE(k < totalArgs);
1295 if (argState.argIsReg[k] == ArgState::ARS_IntReg)
1298 // If any int reg args are used on ARM, we push them all (in ThumbEmitProlog)
1299 #if defined(HOST_X86)
1300 if (regArgsFound == 1)
1302 if (!jmpCall) { sl.X86EmitPushReg(kECX); }
1303 argState.argOffsets[k] = (argState.numRegArgs - regArgsFound)*sizeof(void*); // General form, good for general # of reg args.
1307 _ASSERTE(regArgsFound == 2);
1308 if (!jmpCall) { sl.X86EmitPushReg(kEDX); }
1309 argState.argOffsets[k] = (argState.numRegArgs - regArgsFound)*sizeof(void*);
1311 #elif defined(HOST_ARM) || defined(HOST_ARM64) || defined(UNIX_AMD64_ABI)
1312 argState.argOffsets[k] += intRegArgBaseOffset;
1313 #elif defined(HOST_AMD64)
1314 // First home the register arguments in the stack space allocated by the caller.
1315 // Refer to Stack Allocation on x64 [http://msdn.microsoft.com/en-US/library/ew5tede7(v=vs.80).aspx]
1316 X86Reg argRegs[] = { kECX, kEDX, kR8, kR9 };
1317 if (!jmpCall) { sl.X86EmitIndexRegStoreRSP(regArgsFound * sizeof(void*), argRegs[regArgsFound - 1]); }
1318 argState.argOffsets[k] = (regArgsFound - 1) * sizeof(void*);
1319 #elif defined(HOST_LOONGARCH64)
1320 argState.argOffsets[k] += intRegArgBaseOffset;
1321 #elif defined(HOST_RISCV64)
1322 argState.argOffsets[k] += intRegArgBaseOffset;
1324 #error unsupported platform
1327 #if defined(HOST_AMD64) && !defined(UNIX_AMD64_ABI)
1328 else if (argState.argIsReg[k] == ArgState::ARS_FloatReg)
1330 // Increment regArgsFound since float/int arguments have overlapping registers.
1332 // Home the float arguments.
1333 X86Reg argRegs[] = { kXMM0, kXMM1, kXMM2, kXMM3 };
1334 if (!jmpCall) { sl.X64EmitMovSDToMem(argRegs[regArgsFound - 1], static_cast<X86Reg>(kESP_Unsafe), regArgsFound * sizeof(void*)); }
1335 argState.argOffsets[k] = (regArgsFound - 1) * sizeof(void*);
1338 else if (argState.argIsReg[k] == ArgState::ARS_NotReg)
1340 argState.argOffsets[k] += stackArgBaseOffset;
1342 // So far, x86 doesn't have any FP reg args, and ARM and ARM64 puts them at offset 0, so no
1343 // adjustment is necessary (yet) for arguments passed in those registers.
1345 delete[] argPermInverse;
1349 case IMAGE_CEE_CS_CALLCONV_C:
1350 NYI_INTERP("GenerateInterpreterStub -- IMAGE_CEE_CS_CALLCONV_C");
1353 case IMAGE_CEE_CS_CALLCONV_STDCALL:
1354 NYI_INTERP("GenerateInterpreterStub -- IMAGE_CEE_CS_CALLCONV_STDCALL");
1357 case IMAGE_CEE_CS_CALLCONV_THISCALL:
1358 NYI_INTERP("GenerateInterpreterStub -- IMAGE_CEE_CS_CALLCONV_THISCALL");
1361 case IMAGE_CEE_CS_CALLCONV_FASTCALL:
1362 NYI_INTERP("GenerateInterpreterStub -- IMAGE_CEE_CS_CALLCONV_FASTCALL");
1365 case CORINFO_CALLCONV_FIELD:
1366 NYI_INTERP("GenerateInterpreterStub -- CORINFO_CALLCONV_FIELD");
1369 case CORINFO_CALLCONV_LOCAL_SIG:
1370 NYI_INTERP("GenerateInterpreterStub -- CORINFO_CALLCONV_LOCAL_SIG");
1373 case CORINFO_CALLCONV_PROPERTY:
1374 NYI_INTERP("GenerateInterpreterStub -- CORINFO_CALLCONV_PROPERTY");
1377 case CORINFO_CALLCONV_UNMANAGED:
1378 NYI_INTERP("GenerateInterpreterStub -- CORINFO_CALLCONV_UNMANAGED");
1381 case CORINFO_CALLCONV_NATIVEVARARG:
1382 NYI_INTERP("GenerateInterpreterStub -- CORINFO_CALLCONV_NATIVEVARARG");
1386 _ASSERTE_ALL_BUILDS(false); // shouldn't get here
1391 PCODE interpretMethodFunc;
1394 switch (info->args.retType)
1396 case CORINFO_TYPE_FLOAT:
1397 interpretMethodFunc = reinterpret_cast<PCODE>(&InterpretMethodFloat);
1399 case CORINFO_TYPE_DOUBLE:
1400 interpretMethodFunc = reinterpret_cast<PCODE>(&InterpretMethodDouble);
1403 interpretMethodFunc = reinterpret_cast<PCODE>(&InterpretMethod);
1406 // The argument registers have been pushed by now, so we can use them.
1407 #if defined(HOST_X86)
1408 // First arg is pointer to the base of the ILargs arr -- i.e., the current stack value.
1409 sl.X86EmitMovRegReg(kEDX, static_cast<X86Reg>(kESP_Unsafe));
1410 // InterpretMethod uses F_CALL_CONV == __fastcall; pass 2 args in regs.
1412 if (pMD->IsILStub())
1414 // Third argument is stubcontext, in eax.
1415 sl.X86EmitPushReg(kEAX);
1420 // For a non-ILStub method, push NULL as the StubContext argument.
1421 sl.X86EmitZeroOutReg(kECX);
1422 sl.X86EmitPushReg(kECX);
1424 // sl.X86EmitAddReg(kECX, reinterpret_cast<UINT>(interpMethInfo));
1425 sl.X86EmitRegLoad(kECX, reinterpret_cast<UINT>(interpMethInfo));
1426 sl.X86EmitCall(sl.NewExternalCodeLabel(interpretMethodFunc), 0);
1427 // Now we will deallocate the stack slots we pushed to hold register arguments.
1428 if (argState.numRegArgs > 0)
1430 sl.X86EmitAddEsp(argState.numRegArgs * sizeof(void*));
1432 sl.X86EmitPopReg(kEBP);
1433 sl.X86EmitReturn(static_cast<WORD>(argState.callerArgStackSlots * sizeof(void*)));
1434 #elif defined(UNIX_AMD64_ABI)
1435 bool hasTowRetSlots = info->args.retType == CORINFO_TYPE_VALUECLASS &&
1436 getClassSize(info->args.retTypeClass) == 16;
1438 int fixedTwoSlotSize = 16;
1440 int argSize = (argState.numFPRegArgSlots + argState.numRegArgs) * sizeof(void*);
1442 int stackSize = argSize + fixedTwoSlotSize; // Fixed two slot for possible "retbuf", access address by "m_ilArgs-16"
1444 if (stackSize % 16 == 0) { // for $rsp align requirement
1448 sl.X86EmitSubEsp(stackSize);
1450 X86Reg intArgsRegs[] = {ARGUMENT_kREG1, ARGUMENT_kREG2, kRDX, kRCX, kR8, kR9};
1453 for (int i = 0; i < argState.numRegArgs + argState.numFPRegArgSlots; i++)
1455 int offs = i * sizeof(void*) + 16;
1456 if (argState.fpArgsUsed & (1 << i))
1458 sl.X64EmitMovSDToMem(static_cast<X86Reg>(indexFP), static_cast<X86Reg>(kESP_Unsafe), offs);
1463 sl.X86EmitIndexRegStoreRSP(offs, intArgsRegs[indexGP]);
1468 // Pass "ilArgs", i.e. just the point where registers have been homed, as 2nd arg.
1469 sl.X86EmitIndexLeaRSP(ARGUMENT_kREG2, static_cast<X86Reg>(kESP_Unsafe), fixedTwoSlotSize);
1471 // If we have IL stubs pass the stub context in R10 or else pass NULL.
1473 if (pMD->IsILStub())
1475 sl.X86EmitMovRegReg(kRDX, kR10);
1480 // For a non-ILStub method, push NULL as the StubContext argument.
1481 sl.X86EmitZeroOutReg(ARGUMENT_kREG1);
1482 sl.X86EmitMovRegReg(kRDX, ARGUMENT_kREG1);
1484 sl.X86EmitRegLoad(ARGUMENT_kREG1, reinterpret_cast<UINT_PTR>(interpMethInfo));
1486 sl.X86EmitCall(sl.NewExternalCodeLabel(interpretMethodFunc), 0);
1487 if (hasTowRetSlots) {
1488 sl.X86EmitEspOffset(0x8b, kRAX, 0);
1489 sl.X86EmitEspOffset(0x8b, kRDX, 8);
1491 sl.X86EmitAddEsp(stackSize);
1492 sl.X86EmitReturn(0);
1493 #elif defined(HOST_AMD64)
1494 // Pass "ilArgs", i.e. just the point where registers have been homed, as 2nd arg
1495 sl.X86EmitIndexLeaRSP(ARGUMENT_kREG2, static_cast<X86Reg>(kESP_Unsafe), 8);
1497 // Allocate space for homing callee's (InterpretMethod's) arguments.
1498 // Calling convention requires a default allocation space of 4,
1499 // but to double align the stack frame, we'd allocate 5.
1500 int interpMethodArgSize = 5 * sizeof(void*);
1501 sl.X86EmitSubEsp(interpMethodArgSize);
1503 // If we have IL stubs pass the stub context in R10 or else pass NULL.
1505 if (pMD->IsILStub())
1507 sl.X86EmitMovRegReg(kR8, kR10);
1512 // For a non-ILStub method, push NULL as the StubContext argument.
1513 sl.X86EmitZeroOutReg(ARGUMENT_kREG1);
1514 sl.X86EmitMovRegReg(kR8, ARGUMENT_kREG1);
1516 sl.X86EmitRegLoad(ARGUMENT_kREG1, reinterpret_cast<UINT_PTR>(interpMethInfo));
1517 sl.X86EmitCall(sl.NewExternalCodeLabel(interpretMethodFunc), 0);
1518 sl.X86EmitAddEsp(interpMethodArgSize);
1519 sl.X86EmitReturn(0);
1520 #elif defined(HOST_ARM)
1522 // We have to maintain 8-byte stack alignment. So if the number of
1523 // slots we would normally push is not a multiple of two, add a random
1524 // register. (We will not pop this register, but rather, increment
1525 // sp by an amount that includes it.)
1526 bool oddPushes = (((argState.numRegArgs + NumberOfFixedPushes) % 2) != 0);
1528 UINT stackFrameSize = 0;
1529 if (oddPushes) stackFrameSize = sizeof(void*);
1530 // Now, if any FP regs are used as arguments, we will copy those to the stack; reserve space for that here.
1531 // (We push doubles to keep the stack aligned...)
1532 unsigned short doublesToPush = (argState.numFPRegArgSlots + 1)/2;
1533 stackFrameSize += (doublesToPush*2*sizeof(void*));
1535 // The last argument here causes this to generate code to push all int arg regs.
1536 sl.ThumbEmitProlog(/*cCalleeSavedRegs*/NumberOfCalleeSaveRegsToPush, /*cbStackFrame*/stackFrameSize, /*fPushArgRegs*/TRUE);
1538 // Now we will generate code to copy the floating point registers to the stack frame.
1539 if (doublesToPush > 0)
1541 sl.ThumbEmitStoreMultipleVFPDoubleReg(ThumbVFPDoubleReg(0), thumbRegSp, doublesToPush*2);
1545 if (pMD->IsILStub())
1547 // Third argument is stubcontext, in r12.
1548 sl.ThumbEmitMovRegReg(ThumbReg(2), ThumbReg(12));
1553 // For a non-ILStub method, push NULL as the third StubContext argument.
1554 sl.ThumbEmitMovConstant(ThumbReg(2), 0);
1556 // Second arg is pointer to the base of the ILargs arr -- i.e., the current stack value.
1557 sl.ThumbEmitMovRegReg(ThumbReg(1), thumbRegSp);
1559 // First arg is the pointer to the interpMethInfo structure.
1560 sl.ThumbEmitMovConstant(ThumbReg(0), reinterpret_cast<int>(interpMethInfo));
1562 // If there's an HFA return, add space for that.
1563 if (HFARetTypeSize > 0)
1565 sl.ThumbEmitSubSp(HFARetTypeSize);
1568 // Now we can call the right method.
1569 // No "direct call" instruction, so load into register first. Can use R3.
1570 sl.ThumbEmitMovConstant(ThumbReg(3), static_cast<int>(interpretMethodFunc));
1571 sl.ThumbEmitCallRegister(ThumbReg(3));
1573 // If there's an HFA return, copy to FP regs, and deallocate the stack space.
1574 if (HFARetTypeSize > 0)
1576 sl.ThumbEmitLoadMultipleVFPDoubleReg(ThumbVFPDoubleReg(0), thumbRegSp, HFARetTypeSize/sizeof(void*));
1577 sl.ThumbEmitAddSp(HFARetTypeSize);
1580 sl.ThumbEmitEpilog();
1582 #elif defined(HOST_ARM64)
1584 UINT stackFrameSize = argState.numFPRegArgSlots;
1586 sl.EmitProlog(argState.numRegArgs, argState.numFPRegArgSlots, 0 /*cCalleeSavedRegs*/, static_cast<unsigned short>(cHFAVars*sizeof(void*)));
1589 if (pMD->IsILStub())
1591 // Third argument is stubcontext, in x12 (METHODDESC_REGISTER)
1592 sl.EmitMovReg(IntReg(2), IntReg(12));
1597 // For a non-ILStub method, push NULL as the third stubContext argument
1598 sl.EmitMovConstant(IntReg(2), 0);
1601 // Second arg is pointer to the base of the ILArgs -- i.e., the current stack value
1602 sl.EmitAddImm(IntReg(1), RegSp, sl.GetSavedRegArgsOffset());
1604 // First arg is the pointer to the interpMethodInfo structure
1606 if (!pMD->IsILStub())
1609 // interpMethodInfo is already in x8, so copy it from x8
1610 sl.EmitMovReg(IntReg(0), IntReg(8));
1615 // We didn't do the short-circuiting, therefore interpMethInfo is
1616 // not stored in a register (x8) before. so do it now.
1617 sl.EmitMovConstant(IntReg(0), reinterpret_cast<UINT64>(interpMethInfo));
1621 sl.EmitCallLabel(sl.NewExternalCodeLabel((LPVOID)interpretMethodFunc), FALSE, FALSE);
1623 // If there's an HFA return, copy to FP regs
1626 for (unsigned i=0; i<=(cHFAVars/2)*2;i+=2)
1627 sl.EmitLoadStoreRegPairImm(StubLinkerCPU::eLOAD, VecReg(i), VecReg(i+1), RegSp, i*sizeof(void*));
1628 if ((cHFAVars % 2) == 1)
1629 sl.EmitLoadStoreRegImm(StubLinkerCPU::eLOAD,VecReg(cHFAVars-1), RegSp, cHFAVars*sizeof(void*));
1635 #elif defined(HOST_LOONGARCH64)
1636 assert(!"unimplemented on LOONGARCH yet");
1637 #elif defined(HOST_RISCV64)
1638 assert(!"unimplemented on RISCV64 yet");
1640 #error unsupported platform
1642 stub = sl.Link(SystemDomain::GetGlobalLoaderAllocator()->GetStubHeap());
1644 *nativeSizeOfCode = static_cast<ULONG>(stub->GetNumCodeBytes());
1645 // TODO: manage reference count of interpreter stubs. Look for examples...
1646 *nativeEntry = dac_cast<BYTE*>(stub->GetEntryPoint());
1649 // Initialize the arg offset information.
1650 interpMethInfo->InitArgInfo(comp, info, argState.argOffsets);
1653 AddInterpMethInfo(interpMethInfo);
1657 // Remember the mapping between code address and MethodDesc*.
1658 RecordInterpreterStubForMethodDesc(info->ftn, *nativeEntry);
1662 #undef TRACE_SKIPPED
// Returns the number of bytes of interpreter frame memory needed for "interpMethInfo":
// the method's local-variable memory plus one operand-stack entry per m_maxStack slot.
// (Callers add sizeof(GSCookie) on top of this; see ExecuteMethodWrapper.)
1665 size_t Interpreter::GetFrameSize(InterpreterMethodInfo* interpMethInfo)
1667 size_t sz = interpMethInfo->LocalMemSize();
1668 #if COMBINE_OPSTACK_VAL_TYPE
// Combined layout: each operand-stack slot holds value and type in one struct.
1669 sz += (interpMethInfo->m_maxStack * sizeof(OpStackValAndType));
// Split layout: a value slot (INT64) plus a separate type-pointer slot per entry.
1671 sz += (interpMethInfo->m_maxStack * (sizeof(INT64) + sizeof(InterpreterType*)));
// Runs one interpreted invocation of "interpMethInfo".
// Allocates the interpreter frame (plus a GSCookie) on the machine stack via _alloca,
// constructs the Interpreter instance and an InterpreterFrame (so GC stack scans see it),
// bumps the invocation count, gives the JIT a chance to take over, and executes the method.
// On return, *pDoJmpCall tells the caller whether the method requested a tail "jmpCall";
// if so, pResolvedToken is filled in with the resolved target method token.
1677 ARG_SLOT Interpreter::ExecuteMethodWrapper(struct InterpreterMethodInfo* interpMethInfo, bool directCall, BYTE* ilArgs, void* stubContext, _Out_ bool* pDoJmpCall, CORINFO_RESOLVED_TOKEN* pResolvedToken)
1679 #define INTERP_DYNAMIC_CONTRACTS 1
1680 #if INTERP_DYNAMIC_CONTRACTS
1687 // Dynamic contract occupies too much stack.
1688 STATIC_CONTRACT_THROWS;
1689 STATIC_CONTRACT_GC_TRIGGERS;
1690 STATIC_CONTRACT_MODE_COOPERATIVE;
// Frame memory is stack-allocated, so it is released automatically when this wrapper returns.
1693 size_t sizeWithGS = GetFrameSize(interpMethInfo) + sizeof(GSCookie);
1694 BYTE* frameMemoryGS = static_cast<BYTE*>(_alloca(sizeWithGS));
1696 ARG_SLOT retVal = 0;
1697 unsigned jmpCallToken = 0;
1699 Interpreter interp(interpMethInfo, directCall, ilArgs, stubContext, frameMemoryGS);
1701 // Make sure we can do a GC Scan properly.
1702 FrameWithCookie<InterpreterFrame> interpFrame(&interp);
1704 // Update the interpretation count.
1705 InterlockedIncrement(reinterpret_cast<LONG *>(&interpMethInfo->m_invocations));
1707 // Need to wait until this point to do this JITting, since it may trigger a GC.
1708 JitMethodIfAppropriate(interpMethInfo);
1710 // Pass buffers to get jmpCall flag and the token, if necessary.
1711 interp.ExecuteMethod(&retVal, pDoJmpCall, &jmpCallToken);
// NOTE(review): the guard around this resolve (presumably "if (*pDoJmpCall)") is elided in
// this excerpt — token resolution is only meaningful when a jmpCall was requested; confirm.
1716 interp.ResolveToken(pResolvedToken, jmpCallToken, CORINFO_TOKENKIND_Method InterpTracingArg(RTK_Call));
1723 // TODO: Add GSCookie checks
// Top-level driver for interpreting a method body, including IL "jmp"-style tail calls:
// executes the current method via ExecuteMethodWrapper, and if that execution requests a
// jmpCall, resolves the target method, obtains (or generates) its InterpreterMethodInfo,
// and loops to execute the target in place of the caller.
1726 inline ARG_SLOT Interpreter::InterpretMethodBody(struct InterpreterMethodInfo* interpMethInfo, bool directCall, BYTE* ilArgs, void* stubContext)
1728 #if INTERP_DYNAMIC_CONTRACTS
1735 // Dynamic contract occupies too much stack.
1736 STATIC_CONTRACT_THROWS;
1737 STATIC_CONTRACT_GC_TRIGGERS;
1738 STATIC_CONTRACT_MODE_COOPERATIVE;
// jitInfo is allocated only when a jmpCall target has no interpreter method info yet.
1741 CEEInfo* jitInfo = NULL;
1742 for (bool doJmpCall = true; doJmpCall; )
1744 unsigned jmpCallToken = 0;
1745 CORINFO_RESOLVED_TOKEN methTokPtr;
1746 ARG_SLOT retVal = ExecuteMethodWrapper(interpMethInfo, directCall, ilArgs, stubContext, &doJmpCall, &methTokPtr);
1747 // Clear any allocated jitInfo.
1750 // Nothing to do if the recent method asks not to do a jmpCall.
1756 // The recently executed method wants us to perform a jmpCall.
1757 MethodDesc* pMD = GetMethod(methTokPtr.hMethod);
// Re-point interpMethInfo at the jmpCall target; the loop re-enters with the same args.
1758 interpMethInfo = MethodHandleToInterpreterMethInfoPtr(CORINFO_METHOD_HANDLE(pMD));
1760 // Allocate a new jitInfo and also a new interpMethInfo.
1761 if (interpMethInfo == NULL)
1763 _ASSERTE(doJmpCall);
1764 jitInfo = new CEEInfo(pMD, true);
1766 CORINFO_METHOD_INFO methInfo;
1769 jitInfo->getMethodInfo(CORINFO_METHOD_HANDLE(pMD), &methInfo, NULL);
// Generating a stub as a side effect also produces the interpMethInfo for the target.
1770 GenerateInterpreterStub(jitInfo, &methInfo, NULL, 0, &interpMethInfo, true);
// Promotes a method from interpretation to JIT-compiled code once it has been invoked more
// than the configured threshold (or unconditionally when "force" is true). The promotion is
// requested through the tiered-compilation manager rather than by JITting synchronously.
1776 void Interpreter::JitMethodIfAppropriate(InterpreterMethodInfo* interpMethInfo, bool force)
1784 unsigned int MaxInterpretCount = s_InterpreterJITThreshold.val(CLRConfig::INTERNAL_InterpreterJITThreshold);
1785 bool scheduleTieringBackgroundWork = false;
1786 TieredCompilationManager *tieredCompilationManager = GetAppDomain()->GetTieredCompilationManager();
1788 if (force || interpMethInfo->m_invocations > MaxInterpretCount)
1791 MethodDesc *md = reinterpret_cast<MethodDesc *>(interpMethInfo->m_method);
1792 PCODE stub = md->GetNativeCode();
// Only proceed while the method's "native code" is still the interpretation stub, i.e.
// no real JITted code has been published yet.
1794 if (InterpretationStubToMethodInfo(stub) == md)
1797 if (s_TraceInterpreterJITTransitionFlag.val(CLRConfig::INTERNAL_TraceInterpreterJITTransition))
1799 fprintf(GetLogFile(), "JITting method %s:%s.\n", md->m_pszDebugClassName, md->m_pszDebugMethodName);
1801 #endif // INTERP_TRACING
1802 CORJIT_FLAGS jitFlags(CORJIT_FLAGS::CORJIT_FLAG_MAKEFINALCODE);
1803 NewHolder<COR_ILMETHOD_DECODER> pDecoder(NULL);
1804 // Dynamic methods (e.g., IL stubs) do not have an IL decoder but may
1805 // require additional flags. Ordinary methods require the opposite.
1806 if (md->IsDynamicMethod())
1808 jitFlags.Add(md->AsDynamicMethodDesc()->GetILStubResolver()->GetJitFlags());
1812 COR_ILMETHOD_DECODER::DecoderStatus status;
1813 pDecoder = new COR_ILMETHOD_DECODER(md->GetILHeader(TRUE),
1817 // This used to be a synchronous jit and could be made so again if desired,
1818 // but using ASP .NET MusicStore as an example scenario the performance is
1819 // better doing the JIT asynchronously. Given the not-on-by-default nature of the
1820 // interpreter I didn't wring my hands too much trying to determine the ideal
1822 #ifdef FEATURE_TIERED_COMPILATION
// Under the code-version lock, ask for tier-1 promotion unless an optimized version
// already exists (or is final) for the active IL code version.
1823 CodeVersionManager::LockHolder _lockHolder;
1824 NativeCodeVersion activeCodeVersion = md->GetCodeVersionManager()->GetActiveILCodeVersion(md).GetActiveNativeCodeVersion(md);
1825 ILCodeVersion ilCodeVersion = activeCodeVersion.GetILCodeVersion();
1826 if (!activeCodeVersion.IsFinalTier() &&
1827 !ilCodeVersion.HasAnyOptimizedNativeCodeVersion(activeCodeVersion))
1829 tieredCompilationManager->AsyncPromoteToTier1(activeCodeVersion, &scheduleTieringBackgroundWork);
1832 #error FEATURE_INTERPRETER depends on FEATURE_TIERED_COMPILATION now
// The background worker must be scheduled outside the code-version lock scope above.
1837 if (scheduleTieringBackgroundWork)
1839 tieredCompilationManager->TryScheduleBackgroundWorkerWithoutGCTrigger_Locked();
// FCALL entry point used by generated interpreter stubs for methods returning float.
// Sets up a helper-method frame, runs the interpreter, and reinterprets the low bits
// of the ARG_SLOT result as a float (with endianness fixup).
1844 HCIMPL3(float, InterpretMethodFloat, struct InterpreterMethodInfo* interpMethInfo, BYTE* ilArgs, void* stubContext)
1848 ARG_SLOT retVal = 0;
1850 HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2);
1851 retVal = (ARG_SLOT)Interpreter::InterpretMethodBody(interpMethInfo, false, ilArgs, stubContext);
1852 HELPER_METHOD_FRAME_END();
1854 return *reinterpret_cast<float*>(ArgSlotEndiannessFixup(&retVal, sizeof(float)));
// FCALL entry point used by generated interpreter stubs for methods returning double.
// Mirrors InterpretMethodFloat but reinterprets the full 8-byte ARG_SLOT as a double.
1859 HCIMPL3(double, InterpretMethodDouble, struct InterpreterMethodInfo* interpMethInfo, BYTE* ilArgs, void* stubContext)
1863 ARG_SLOT retVal = 0;
1865 HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2);
1866 retVal = Interpreter::InterpretMethodBody(interpMethInfo, false, ilArgs, stubContext);
1867 HELPER_METHOD_FRAME_END();
1869 return *reinterpret_cast<double*>(ArgSlotEndiannessFixup(&retVal, sizeof(double)));
// FCALL entry point used by generated interpreter stubs for all non-floating-point
// return types: the ARG_SLOT result is returned as a 64-bit integer.
1874 HCIMPL3(INT64, InterpretMethod, struct InterpreterMethodInfo* interpMethInfo, BYTE* ilArgs, void* stubContext)
1878 ARG_SLOT retVal = 0;
1880 HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2);
1881 retVal = Interpreter::InterpretMethodBody(interpMethInfo, false, ilArgs, stubContext);
1882 HELPER_METHOD_FRAME_END();
1884 return static_cast<INT64>(retVal);
// Returns true if "stackPtr" lies below this interpreter frame's local memory (past the
// GSCookie guard), i.e. within the stack region belonging to this frame's callees.
1888 bool Interpreter::IsInCalleesFrames(void* stackPtr)
1890 // We assume a downward-growing stack, so "below" means a numerically smaller address.
1891 return stackPtr < (m_localVarMemory - sizeof(GSCookie));
1894 // I want an enumeration with values for the second byte of 2-byte opcodes.
1896 #define OPDEF(c,s,pop,push,args,type,l,s1,s2,ctrl) TWOBYTE_##c = unsigned(s2),
1897 #include "opcode.def"
1901 // Optimize the interpreter loop for speed.
1903 #pragma optimize("t", on)
1906 // Duplicating code from JitHelpers for MonEnter,MonExit,MonEnter_Static,
1907 // MonExit_Static because it sets up helper frame for the JIT.
// Acquires the object monitor for "obj" on behalf of a synchronized instance method,
// recording acquisition in *pbLockTaken (when non-null) so the exit path can pair it.
// NOTE(review): the null check guarding the COMPlusThrow below is elided in this excerpt;
// presumably it throws ArgumentNull when objRef is NULL — confirm against the full source.
1908 static void MonitorEnter(Object* obj, BYTE* pbLockTaken)
1911 OBJECTREF objRef = ObjectToOBJECTREF(obj);
1915 COMPlusThrow(kArgumentNullException);
// Protect the interior pointer to the lock-taken byte across any GC triggered below.
1917 GCPROTECT_BEGININTERIOR(pbLockTaken);
1919 if (GET_THREAD()->CatchAtSafePointOpportunistic())
// Give the GC a chance to run before (possibly) blocking on the monitor.
1921 GET_THREAD()->PulseGCMode();
1923 objRef->EnterObjMonitor();
// Record the acquisition only after EnterObjMonitor succeeds.
1925 if (pbLockTaken != 0) *pbLockTaken = 1;
// Releases the object monitor for "obj" for a synchronized instance method and clears
// *pbLockTaken (when non-null). Throws SynchronizationLockException if the monitor is
// not owned; honors any pending thread-abort request afterwards.
1930 static void MonitorExit(Object* obj, BYTE* pbLockTaken)
1932 OBJECTREF objRef = ObjectToOBJECTREF(obj);
// NOTE(review): the null check guarding this throw is elided in this excerpt — confirm.
1935 COMPlusThrow(kArgumentNullException);
1937 if (!objRef->LeaveObjMonitor())
1938 COMPlusThrow(kSynchronizationLockException);
1940 if (pbLockTaken != 0) *pbLockTaken = 0;
1942 if (GET_THREAD()->IsAbortRequested()) {
1943 GET_THREAD()->HandleThreadAbort();
// Acquires the class-level AwareLock for a synchronized static method, then records the
// acquisition via MONHELPER_STATE. NOTE(review): the lock->Enter() call itself appears to
// be elided in this excerpt — confirm against the full source.
1947 static void MonitorEnterStatic(AwareLock *lock, BYTE* pbLockTaken)
1950 MONHELPER_STATE(*pbLockTaken = 1;)
// Releases the class-level AwareLock for a synchronized static method; throws
// SynchronizationLockException on a failed leave, then honors any pending thread abort.
1953 static void MonitorExitStatic(AwareLock *lock, BYTE* pbLockTaken)
1955 // Error, yield or contention
1957 COMPlusThrow(kSynchronizationLockException);
1959 if (GET_THREAD()->IsAbortRequested()) {
1960 GET_THREAD()->HandleThreadAbort();
// Returns the AwareLock used to synchronize a static synchronized method: the monitor of
// the declaring type's managed Class object. For generic code, the exact type must be
// recovered from the precise generics context (class-param or method-param lookup).
1965 AwareLock* Interpreter::GetMonitorForStaticMethod()
1967 MethodDesc* pMD = reinterpret_cast<MethodDesc*>(m_methInfo->m_method);
1968 CORINFO_LOOKUP_KIND kind;
1971 m_interpCeeInfo.getLocationOfThisType(m_methInfo->m_method, &kind);
1973 if (!kind.needsRuntimeLookup)
// Non-generic (or statically known) case: lock on the declaring type's class object.
1975 OBJECTREF ref = pMD->GetMethodTable()->GetManagedClassObject();
1976 return (AwareLock*) ref->GetSyncBlock()->GetMonitor();
1980 CORINFO_CLASS_HANDLE classHnd = nullptr;
1981 switch (kind.runtimeLookupKind)
1983 case CORINFO_LOOKUP_CLASSPARAM:
// The precise context is the class handle itself, tagged with the CLASS flag bits.
1985 CORINFO_CONTEXT_HANDLE ctxHnd = GetPreciseGenericsContext();
1986 _ASSERTE_MSG((((size_t)ctxHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS), "Precise context not class context");
1987 classHnd = (CORINFO_CLASS_HANDLE) ((size_t)ctxHnd & ~CORINFO_CONTEXTFLAGS_CLASS);
1990 case CORINFO_LOOKUP_METHODPARAM:
// The precise context is a method handle; its method table gives the exact class.
// NOTE: this local pMD intentionally shadows the outer pMD within the case scope.
1992 CORINFO_CONTEXT_HANDLE ctxHnd = GetPreciseGenericsContext();
1993 _ASSERTE_MSG((((size_t)ctxHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD), "Precise context not method context");
1994 MethodDesc* pMD = (MethodDesc*) (CORINFO_METHOD_HANDLE) ((size_t)ctxHnd & ~CORINFO_CONTEXTFLAGS_METHOD);
1995 classHnd = (CORINFO_CLASS_HANDLE) pMD->GetMethodTable();
1999 NYI_INTERP("Unknown lookup for synchronized methods");
2002 MethodTable* pMT = GetMethodTableFromClsHnd(classHnd);
2003 OBJECTREF ref = pMT->GetManagedClassObject();
2005 return (AwareLock*) ref->GetSyncBlock()->GetMonitor();
// On entry to a synchronized method, acquires the appropriate monitor: the class-level
// lock for static methods, or the "this" object's monitor for instance methods.
// m_monAcquired records the acquisition so DoMonitorExitWork can release it symmetrically.
2009 void Interpreter::DoMonitorEnterWork()
2011 MethodDesc* pMD = reinterpret_cast<MethodDesc*>(m_methInfo->m_method);
2012 if (pMD->IsSynchronized())
2014 if (pMD->IsStatic())
2016 AwareLock* lock = GetMonitorForStaticMethod();
2017 MonitorEnterStatic(lock, &m_monAcquired);
2021 MonitorEnter((Object*) m_thisArg, &m_monAcquired);
2026 void Interpreter::DoMonitorExitWork()
2028 MethodDesc* pMD = reinterpret_cast<MethodDesc*>(m_methInfo->m_method);
2029 if (pMD->IsSynchronized())
2031 if (pMD->IsStatic())
2033 AwareLock* lock = GetMonitorForStaticMethod();
2034 MonitorExitStatic(lock, &m_monAcquired);
2038 MonitorExit((Object*) m_thisArg, &m_monAcquired);
// The heart of the interpreter: executes the current method's IL in a
// fetch/dispatch loop over *m_ILCodePtr until CEE_RET (writes the result into
// *retVal), a tail 'jmp' is requested (*pDoJmpCall/*pJmpCallToken tell the
// caller which method to jump to), or an exception escapes.
// NOTE(review): this excerpt elides many original lines (braces, most 'case'
// labels, and 'break's); comments below describe only what the visible lines
// establish.
2044 void Interpreter::ExecuteMethod(ARG_SLOT* retVal, _Out_ bool* pDoJmpCall, _Out_ unsigned* pJmpCallToken)
// Dynamic contracts are replaced by static ones here to keep frames small —
// this function recurses through DoCall for every interpreted callee.
2046 #if INTERP_DYNAMIC_CONTRACTS
2053 // Dynamic contract occupies too much stack.
2054 STATIC_CONTRACT_THROWS;
2055 STATIC_CONTRACT_GC_TRIGGERS;
2056 STATIC_CONTRACT_MODE_COOPERATIVE;
2059 *pDoJmpCall = false;
2061 // Normally I'd prefer to declare these in small case-block scopes, but most C++ compilers
2062 // do not realize that their lifetimes do not overlap, so that makes for a large stack frame.
2063 // So I avoid that by outside declarations (sigh).
2065 unsigned char argNumc;
2066 unsigned short argNums;
2074 // Make sure that the .cctor for the current method's class has been run.
2075 MethodDesc* pMD = reinterpret_cast<MethodDesc*>(m_methInfo->m_method);
2076 EnsureClassInit(pMD->GetMethodTable());
2079 const char* methName = eeGetMethodFullName(m_methInfo->m_method);
2080 unsigned ilOffset = 0;
// Entry tracing: global invocation counter plus optional per-entry logging.
2082 unsigned curInvocation = InterlockedIncrement(&s_totalInvocations);
2083 if (s_TraceInterpreterEntriesFlag.val(CLRConfig::INTERNAL_TraceInterpreterEntries))
2085 fprintf(GetLogFile(), "Entering method #%d (= 0x%x): %s.\n", curInvocation, curInvocation, methName);
2086 fprintf(GetLogFile(), " arguments:\n");
2089 #endif // INTERP_TRACING
2091 #if LOOPS_VIA_INSTRS
2092 unsigned instrs = 0;
2095 unsigned instrs = 0;
2101 // Catch any exceptions raised.
2103 // Optional features...
2104 #define INTERPRETER_CHECK_LARGE_STRUCT_STACK_HEIGHT 1
2106 #if INTERP_ILCYCLE_PROFILE
2107 m_instr = CEE_COUNT; // Flag to indicate first instruction.
2109 #endif // INTERP_ILCYCLE_PROFILE
// Acquire the monitor up front if the method is 'synchronized'.
2111 DoMonitorEnterWork();
2113 INTERPLOG("START %d, %s\n", m_methInfo->m_stubNum, methName);
// ---- Main dispatch loop starts here ----
2116 // TODO: verify that m_ILCodePtr is legal, and we haven't walked off the end of the IL array? (i.e., bad IL).
2117 // Note that ExecuteBranch() should be called for every branch. That checks that we aren't either before or
2118 // after the IL range. Here, we would only need to check that we haven't gone past the end (not before the beginning)
2119 // because everything that doesn't call ExecuteBranch() should only add to m_ILCodePtr.
2122 ilOffset = CurOffset();
2125 if (s_TraceInterpreterOstackFlag.val(CLRConfig::INTERNAL_TraceInterpreterOstack))
2129 #if INTERPRETER_CHECK_LARGE_STRUCT_STACK_HEIGHT
2130 _ASSERTE_MSG(LargeStructStackHeightIsValid(), "Large structure stack height invariant violated."); // Check the large struct stack invariant.
2132 if (s_TraceInterpreterILFlag.val(CLRConfig::INTERNAL_TraceInterpreterIL))
2134 fprintf(GetLogFile(), " %#4x: %s\n", ilOffset, ILOp(m_ILCodePtr));
2135 fflush(GetLogFile());
2137 #endif // INTERP_TRACING
2138 #if LOOPS_VIA_INSTRS
// Per-opcode execution counters (opt-in profiling build).
2146 #if INTERP_ILINSTR_PROFILE
2147 #if INTERP_ILCYCLE_PROFILE
2149 #endif // INTERP_ILCYCLE_PROFILE
2151 InterlockedIncrement(&s_ILInstrExecs[*m_ILCodePtr]);
2152 #endif // INTERP_ILINSTR_PROFILE
// Dispatch on the 1-byte opcode; two-byte opcodes go through CEE_PREFIX1 below.
2154 switch (*m_ILCodePtr)
2159 case CEE_BREAK: // TODO: interact with the debugger?
// ldarg/starg/ldloc/stloc short forms: operand is an inline byte (argNumc).
2201 argNumc = *m_ILCodePtr;
2206 argNumc = *m_ILCodePtr;
2211 argNumc = *m_ILCodePtr;
2215 argNumc = *(m_ILCodePtr + 1);
2221 argNumc = *m_ILCodePtr;
2225 argNumc = *(m_ILCodePtr + 1);
// Constant loads: operand bytes are read little-endian from the IL stream.
2264 valc = getI1(m_ILCodePtr + 1);
2269 vali = getI4LittleEndian(m_ILCodePtr + 1);
2274 vall = getI8LittleEndian(m_ILCodePtr + 1);
2279 // We use I4 here because we just care about the bit pattern.
2280 // LdR4Con will push the right InterpreterType.
2281 vali = getI4LittleEndian(m_ILCodePtr + 1);
2286 // We use I4 here because we just care about the bit pattern.
2287 // LdR8Con will push the right InterpreterType.
2288 vall = getI8LittleEndian(m_ILCodePtr + 1);
// dup: large structs get fresh space on the large-struct stack; everything
// else is copied as a 64-bit slot.
2293 _ASSERTE(m_curStackHt > 0);
2294 it = OpStackTypeGet(m_curStackHt - 1);
2295 OpStackTypeSet(m_curStackHt, it);
2296 if (it.IsLargeStruct(&m_interpCeeInfo))
2298 sz = it.Size(&m_interpCeeInfo);
2299 void* dest = LargeStructOperandStackPush(sz);
2300 memcpy(dest, OpStackGet<void*>(m_curStackHt - 1), sz);
2301 OpStackSet<void*>(m_curStackHt, dest);
2305 OpStackSet<INT64>(m_curStackHt, OpStackGet<INT64>(m_curStackHt - 1));
// pop: must also unwind the large-struct side stack when the top is a large struct.
2310 _ASSERTE(m_curStackHt > 0);
2312 it = OpStackTypeGet(m_curStackHt);
2313 if (it.IsLargeStruct(&m_interpCeeInfo))
2315 LargeStructOperandStackPop(it.Size(&m_interpCeeInfo), OpStackGet<void*>(m_curStackHt));
// jmp: hand the target method token back to the caller rather than executing here.
2320 *pJmpCallToken = getU4LittleEndian(m_ILCodePtr + sizeof(BYTE));
// call / callvirt dispatch.
2325 DoCall(/*virtualCall*/false);
2327 if (s_TraceInterpreterILFlag.val(CLRConfig::INTERNAL_TraceInterpreterIL))
2329 fprintf(GetLogFile(), " Returning to method %s, stub num %d.\n", methName, m_methInfo->m_stubNum);
2331 #endif // INTERP_TRACING
2335 DoCall(/*virtualCall*/true);
2337 if (s_TraceInterpreterILFlag.val(CLRConfig::INTERNAL_TraceInterpreterIL))
2339 fprintf(GetLogFile(), " Returning to method %s, stub num %d.\n", methName, m_methInfo->m_stubNum);
2341 #endif // INTERP_TRACING
// ---- CEE_RET: marshal the operand-stack top into *retVal / the return buffer ----
2350 if (m_methInfo->m_returnType == CORINFO_TYPE_VOID)
2352 _ASSERTE(m_curStackHt == 0);
2356 _ASSERTE(m_curStackHt == 1);
2357 InterpreterType retValIt = OpStackTypeGet(0);
// "Loose rules" permit the stack type to differ from the declared return type
// within integral (or pointer-as-integral) and floating-point families.
2358 bool looseInt = s_InterpreterLooseRules &&
2359 CorInfoTypeIsIntegral(m_methInfo->m_returnType) &&
2360 (CorInfoTypeIsIntegral(retValIt.ToCorInfoType()) || CorInfoTypeIsPointer(retValIt.ToCorInfoType())) &&
2361 (m_methInfo->m_returnType != retValIt.ToCorInfoType());
2363 bool looseFloat = s_InterpreterLooseRules &&
2364 CorInfoTypeIsFloatingPoint(m_methInfo->m_returnType) &&
2365 CorInfoTypeIsFloatingPoint(retValIt.ToCorInfoType()) &&
2366 (m_methInfo->m_returnType != retValIt.ToCorInfoType());
2368 // Make sure that the return value "matches" (which allows certain relaxations) the declared return type.
2369 _ASSERTE((m_methInfo->m_returnType == CORINFO_TYPE_VALUECLASS && retValIt.ToCorInfoType() == CORINFO_TYPE_VALUECLASS) ||
2370 (m_methInfo->m_returnType == CORINFO_TYPE_REFANY && retValIt.ToCorInfoType() == CORINFO_TYPE_VALUECLASS) ||
2371 (m_methInfo->m_returnType == CORINFO_TYPE_REFANY && retValIt.ToCorInfoType() == CORINFO_TYPE_REFANY) ||
2372 (looseInt || looseFloat) ||
2373 InterpreterType(m_methInfo->m_returnType).StackNormalize().Matches(retValIt, &m_interpCeeInfo));
2375 size_t sz = retValIt.Size(&m_interpCeeInfo);
2376 #if defined(FEATURE_HFA)
2377 CorInfoHFAElemType cit = CORINFO_HFA_ELEM_NONE;
2380 if(m_methInfo->m_returnType == CORINFO_TYPE_VALUECLASS)
2381 cit = m_interpCeeInfo.getHFAType(retValIt.ToClassHandle());
// Case 1: caller supplied a hidden return buffer — copy the value class
// (or TypedByRef) into m_retBufArg, using GC-safe CopyValueClassUnchecked.
2384 if (m_methInfo->GetFlag<InterpreterMethodInfo::Flag_hasRetBuffArg>())
2386 _ASSERTE((m_methInfo->m_returnType == CORINFO_TYPE_VALUECLASS && retValIt.ToCorInfoType() == CORINFO_TYPE_VALUECLASS) ||
2387 (m_methInfo->m_returnType == CORINFO_TYPE_REFANY && retValIt.ToCorInfoType() == CORINFO_TYPE_VALUECLASS) ||
2388 (m_methInfo->m_returnType == CORINFO_TYPE_REFANY && retValIt.ToCorInfoType() == CORINFO_TYPE_REFANY));
2389 if (retValIt.ToCorInfoType() == CORINFO_TYPE_REFANY)
2391 InterpreterType typedRefIT = GetTypedRefIT(&m_interpCeeInfo);
2392 TypedByRef* ptr = OpStackGet<TypedByRef*>(0);
2393 *((TypedByRef*) m_retBufArg) = *ptr;
2395 else if (retValIt.IsLargeStruct(&m_interpCeeInfo))
2397 MethodTable* clsMt = GetMethodTableFromClsHnd(retValIt.ToClassHandle());
2398 // The ostack value is a pointer to the struct value.
2399 CopyValueClassUnchecked(m_retBufArg, OpStackGet<void*>(0), clsMt);
2403 MethodTable* clsMt = GetMethodTableFromClsHnd(retValIt.ToClassHandle());
2404 // The ostack value *is* the struct value.
2405 CopyValueClassUnchecked(m_retBufArg, OpStackGetAddr(0, sz), clsMt);
2408 #if defined(FEATURE_HFA)
// Case 2 (HFA ABIs): non-vararg HFA value classes return in FP registers,
// staged through the HFA return buffer.
2410 else if (m_methInfo->m_returnType == CORINFO_TYPE_VALUECLASS
2411 && (cit != CORINFO_HFA_ELEM_NONE)
2412 && (MetaSig(reinterpret_cast<MethodDesc*>(m_methInfo->m_method)).GetCallingConventionInfo() & CORINFO_CALLCONV_VARARG) == 0)
2414 if (retValIt.IsLargeStruct(&m_interpCeeInfo))
2416 // The ostack value is a pointer to the struct value.
2417 memcpy(GetHFARetBuffAddr(static_cast<unsigned>(sz)), OpStackGet<void*>(0), sz);
2421 // The ostack value *is* the struct value.
2422 memcpy(GetHFARetBuffAddr(static_cast<unsigned>(sz)), OpStackGetAddr(0, sz), sz);
2425 #elif defined(UNIX_AMD64_ABI)
2426 // Is it an struct contained in $rax and $rdx
2427 else if (m_methInfo->m_returnType == CORINFO_TYPE_VALUECLASS
2430 //The Fixed Two slot return buffer address
2431 memcpy(m_ilArgs-16, OpStackGet<void*>(0), sz);
// Case 3: floating-point return — normalize through double then narrow if
// the declared type is float.
2434 else if (CorInfoTypeIsFloatingPoint(m_methInfo->m_returnType) &&
2435 CorInfoTypeIsFloatingPoint(retValIt.ToCorInfoType()))
2437 double val = (sz <= sizeof(INT32)) ? OpStackGet<float>(0) : OpStackGet<double>(0);
2438 if (m_methInfo->m_returnType == CORINFO_TYPE_DOUBLE)
2440 memcpy(retVal, &val, sizeof(double));
2444 float val2 = (float) val;
2445 memcpy(retVal, &val2, sizeof(float));
// Case 4: everything else fits in the ARG_SLOT as a 32- or 64-bit integer.
2450 if (sz <= sizeof(INT32))
2452 *retVal = OpStackGet<INT32>(0);
2456 // If looseInt is true, we are relying on auto-downcast in case *retVal
2457 // is small (but this is guaranteed not to happen by def'n of ARG_SLOT.)
2459 // Note structs of size 5, 6, 7 may be returned as 8 byte ints.
2460 _ASSERTE(sz <= sizeof(INT64));
2461 *retVal = OpStackGet<INT64>(0);
2468 // We're not capturing instructions executed in a method that terminates via exception,
2470 m_methInfo->RecordExecInstrs(instrs);
2473 // We keep this live until we leave.
2475 #endif // INTERP_TRACING
2477 #if INTERP_ILCYCLE_PROFILE
2478 // Finish off accounting for the "RET" before we return
2480 #endif // INTERP_ILCYCLE_PROFILE
// ---- Branch opcodes (short forms, 1-byte offsets) ----
2486 offsetc = *m_ILCodePtr;
2487 // The offset is wrt the beginning of the following instruction, so the +1 is to get to that
2488 // m_ILCodePtr value before adding the offset.
2489 ExecuteBranch(m_ILCodePtr + offsetc + 1);
2490 continue; // Skip the default m_ILCodePtr++ at bottom of loop.
2493 // LEAVE empties the operand stack.
2495 m_largeStructOperandStackHt = 0;
2496 offsetc = getI1(m_ILCodePtr + 1);
2499 // The offset is wrt the beginning of the following instruction, so the +2 is to get to that
2500 // m_ILCodePtr value before adding the offset.
2501 BYTE* leaveTarget = m_ILCodePtr + offsetc + 2;
2502 unsigned leaveOffset = CurOffset();
// Record the leave so covering finallys can run; if none covers this leave,
// branch straight to the target.
2503 m_leaveInfoStack.Push(LeaveInfo(leaveOffset, leaveTarget));
2504 if (!SearchForCoveringFinally())
2506 m_leaveInfoStack.Pop();
2507 ExecuteBranch(leaveTarget);
2510 continue; // Skip the default m_ILCodePtr++ at bottom of loop.
2512 // Abstract the next pair out to something common with templates.
2514 BrOnValue<false, 1>();
2518 BrOnValue<true, 1>();
2522 BrOnComparison<CO_EQ, false, 1>();
// For ordered comparisons the III.1.5 tables differ for floats (unordered
// operands): hence the float/double cases pick the *_UN comparison variant.
2525 _ASSERTE(m_curStackHt >= 2);
2526 // ECMA spec gives different semantics for different operand types:
2527 switch (OpStackTypeGet(m_curStackHt-1).ToCorInfoType())
2529 case CORINFO_TYPE_FLOAT:
2530 case CORINFO_TYPE_DOUBLE:
2531 BrOnComparison<CO_LT_UN, true, 1>();
2534 BrOnComparison<CO_LT, true, 1>();
2539 BrOnComparison<CO_GT, false, 1>();
2542 _ASSERTE(m_curStackHt >= 2);
2543 // ECMA spec gives different semantics for different operand types:
2544 switch (OpStackTypeGet(m_curStackHt-1).ToCorInfoType())
2546 case CORINFO_TYPE_FLOAT:
2547 case CORINFO_TYPE_DOUBLE:
2548 BrOnComparison<CO_GT_UN, true, 1>();
2551 BrOnComparison<CO_GT, true, 1>();
2556 BrOnComparison<CO_LT, false, 1>();
2559 BrOnComparison<CO_EQ, true, 1>();
2562 _ASSERTE(m_curStackHt >= 2);
2563 // ECMA spec gives different semantics for different operand types:
2564 switch (OpStackTypeGet(m_curStackHt-1).ToCorInfoType())
2566 case CORINFO_TYPE_FLOAT:
2567 case CORINFO_TYPE_DOUBLE:
2568 BrOnComparison<CO_LT, true, 1>();
2571 BrOnComparison<CO_LT_UN, true, 1>();
2576 BrOnComparison<CO_GT_UN, false, 1>();
2579 _ASSERTE(m_curStackHt >= 2);
2580 // ECMA spec gives different semantics for different operand types:
2581 switch (OpStackTypeGet(m_curStackHt-1).ToCorInfoType())
2583 case CORINFO_TYPE_FLOAT:
2584 case CORINFO_TYPE_DOUBLE:
2585 BrOnComparison<CO_GT, true, 1>();
2588 BrOnComparison<CO_GT_UN, true, 1>();
2593 BrOnComparison<CO_LT_UN, false, 1>();
// ---- Long-form (4-byte offset) branches ----
2598 vali = getI4LittleEndian(m_ILCodePtr);
2599 vali += 4; // +4 for the length of the offset.
2600 ExecuteBranch(m_ILCodePtr + vali);
2603 // Backwards branch -- enable caching.
2604 BackwardsBranchActions(vali);
2610 // LEAVE empties the operand stack.
2612 m_largeStructOperandStackHt = 0;
2613 vali = getI4LittleEndian(m_ILCodePtr + 1);
2616 // The offset is wrt the beginning of the following instruction, so the +5 is to get to that
2617 // m_ILCodePtr value before adding the offset.
2618 BYTE* leaveTarget = m_ILCodePtr + (vali + 5);
2619 unsigned leaveOffset = CurOffset();
2620 m_leaveInfoStack.Push(LeaveInfo(leaveOffset, leaveTarget));
2621 if (!SearchForCoveringFinally())
2623 (void)m_leaveInfoStack.Pop();
2626 // Backwards branch -- enable caching.
2627 BackwardsBranchActions(vali);
2629 ExecuteBranch(leaveTarget);
2632 continue; // Skip the default m_ILCodePtr++ at bottom of loop.
2635 BrOnValue<false, 4>();
2638 BrOnValue<true, 4>();
2642 BrOnComparison<CO_EQ, false, 4>();
2645 _ASSERTE(m_curStackHt >= 2);
2646 // ECMA spec gives different semantics for different operand types:
2647 switch (OpStackTypeGet(m_curStackHt-1).ToCorInfoType())
2649 case CORINFO_TYPE_FLOAT:
2650 case CORINFO_TYPE_DOUBLE:
2651 BrOnComparison<CO_LT_UN, true, 4>();
2654 BrOnComparison<CO_LT, true, 4>();
2659 BrOnComparison<CO_GT, false, 4>();
2662 _ASSERTE(m_curStackHt >= 2);
2663 // ECMA spec gives different semantics for different operand types:
2664 switch (OpStackTypeGet(m_curStackHt-1).ToCorInfoType())
2666 case CORINFO_TYPE_FLOAT:
2667 case CORINFO_TYPE_DOUBLE:
2668 BrOnComparison<CO_GT_UN, true, 4>();
2671 BrOnComparison<CO_GT, true, 4>();
2676 BrOnComparison<CO_LT, false, 4>();
2679 BrOnComparison<CO_EQ, true, 4>();
2682 _ASSERTE(m_curStackHt >= 2);
2683 // ECMA spec gives different semantics for different operand types:
2684 switch (OpStackTypeGet(m_curStackHt-1).ToCorInfoType())
2686 case CORINFO_TYPE_FLOAT:
2687 case CORINFO_TYPE_DOUBLE:
2688 BrOnComparison<CO_LT, true, 4>();
2691 BrOnComparison<CO_LT_UN, true, 4>();
2696 BrOnComparison<CO_GT_UN, false, 4>();
2699 _ASSERTE(m_curStackHt >= 2);
2700 // ECMA spec gives different semantics for different operand types:
2701 switch (OpStackTypeGet(m_curStackHt-1).ToCorInfoType())
2703 case CORINFO_TYPE_FLOAT:
2704 case CORINFO_TYPE_DOUBLE:
2705 BrOnComparison<CO_GT, true, 4>();
2708 BrOnComparison<CO_GT_UN, true, 4>();
2713 BrOnComparison<CO_LT_UN, false, 4>();
// ---- CEE_SWITCH: jump-table dispatch on an integer selector ----
2718 _ASSERTE(m_curStackHt > 0);
2720 #if defined(_DEBUG) || defined(HOST_AMD64)
2721 CorInfoType cit = OpStackTypeGet(m_curStackHt).ToCorInfoType();
2722 #endif // _DEBUG || HOST_AMD64
2724 _ASSERTE(cit == CORINFO_TYPE_INT || cit == CORINFO_TYPE_UINT || cit == CORINFO_TYPE_NATIVEINT);
2726 #if defined(HOST_AMD64)
2727 UINT32 val = (cit == CORINFO_TYPE_NATIVEINT) ? (INT32) OpStackGet<NativeInt>(m_curStackHt)
2728 : OpStackGet<INT32>(m_curStackHt);
2730 UINT32 val = OpStackGet<INT32>(m_curStackHt);
// n = table entry count; instruction size = opcode byte + count dword + n targets.
2732 UINT32 n = getU4LittleEndian(m_ILCodePtr + 1);
2733 UINT32 instrSize = 1 + (n + 1)*4;
2736 vali = getI4LittleEndian(m_ILCodePtr + (5 + val * 4));
2737 ExecuteBranch(m_ILCodePtr + instrSize + vali);
// Out-of-range selector falls through past the whole switch instruction.
2741 m_ILCodePtr += instrSize;
// ---- Indirect loads (ldind.*) ----
2747 LdIndShort<INT8, /*isUnsigned*/false>();
2750 LdIndShort<UINT8, /*isUnsigned*/true>();
2753 LdIndShort<INT16, /*isUnsigned*/false>();
2756 LdIndShort<UINT16, /*isUnsigned*/true>();
2759 LdInd<INT32, CORINFO_TYPE_INT>();
2762 LdInd<UINT32, CORINFO_TYPE_INT>();
2765 LdInd<INT64, CORINFO_TYPE_LONG>();
2768 LdInd<NativeInt, CORINFO_TYPE_NATIVEINT>();
2771 LdInd<float, CORINFO_TYPE_FLOAT>();
2774 LdInd<double, CORINFO_TYPE_DOUBLE>();
2777 LdInd<Object*, CORINFO_TYPE_CLASS>();
// ---- Arithmetic / bitwise / shift ----
2801 BinaryArithOp<BA_Add>();
2805 BinaryArithOp<BA_Sub>();
2808 BinaryArithOp<BA_Mul>();
2811 BinaryArithOp<BA_Div>();
2814 BinaryIntOp<BIO_DivUn>();
2817 BinaryArithOp<BA_Rem>();
2820 BinaryIntOp<BIO_RemUn>();
2823 BinaryIntOp<BIO_And>();
2826 BinaryIntOp<BIO_Or>();
2829 BinaryIntOp<BIO_Xor>();
2838 ShiftOp<CEE_SHR_UN>();
// ---- Non-checking conversions (conv.*) ----
2847 Conv<INT8, /*TIsUnsigned*/false, /*TCanHoldPtr*/false, /*TIsShort*/true, CORINFO_TYPE_INT>();
2850 Conv<INT16, /*TIsUnsigned*/false, /*TCanHoldPtr*/false, /*TIsShort*/true, CORINFO_TYPE_INT>();
2853 Conv<INT32, /*TIsUnsigned*/false, /*TCanHoldPtr*/false, /*TIsShort*/false, CORINFO_TYPE_INT>();
2856 Conv<INT64, /*TIsUnsigned*/false, /*TCanHoldPtr*/true, /*TIsShort*/false, CORINFO_TYPE_LONG>();
2859 Conv<float, /*TIsUnsigned*/false, /*TCanHoldPtr*/false, /*TIsShort*/false, CORINFO_TYPE_FLOAT>();
2862 Conv<double, /*TIsUnsigned*/false, /*TCanHoldPtr*/false, /*TIsShort*/false, CORINFO_TYPE_DOUBLE>();
2865 Conv<UINT32, /*TIsUnsigned*/true, /*TCanHoldPtr*/false, /*TIsShort*/false, CORINFO_TYPE_INT>();
2868 Conv<UINT64, /*TIsUnsigned*/true, /*TCanHoldPtr*/true, /*TIsShort*/false, CORINFO_TYPE_LONG>();
2883 if (s_TraceInterpreterILFlag.val(CLRConfig::INTERNAL_TraceInterpreterIL))
2885 fprintf(GetLogFile(), " Returning to method %s, stub num %d.\n", methName, m_methInfo->m_stubNum);
2887 #endif // INTERP_TRACING
// ---- Overflow-checking conversions from unsigned sources (conv.ovf.*.un) ----
2925 case CEE_CONV_OVF_I1_UN:
2926 ConvOvfUn<INT8, SCHAR_MIN, SCHAR_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
2928 case CEE_CONV_OVF_I2_UN:
2929 ConvOvfUn<INT16, SHRT_MIN, SHRT_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
2931 case CEE_CONV_OVF_I4_UN:
2932 ConvOvfUn<INT32, INT_MIN, INT_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
2934 case CEE_CONV_OVF_I8_UN:
2935 ConvOvfUn<INT64, _I64_MIN, _I64_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_LONG>();
2937 case CEE_CONV_OVF_U1_UN:
2938 ConvOvfUn<UINT8, 0, UCHAR_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
2940 case CEE_CONV_OVF_U2_UN:
2941 ConvOvfUn<UINT16, 0, USHRT_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
2943 case CEE_CONV_OVF_U4_UN:
2944 ConvOvfUn<UINT32, 0, UINT_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
2946 case CEE_CONV_OVF_U8_UN:
2947 ConvOvfUn<UINT64, 0, _UI64_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_LONG>();
// native-int widths are resolved at compile time via sizeof.
2949 case CEE_CONV_OVF_I_UN:
2950 if (sizeof(NativeInt) == 4)
2952 ConvOvfUn<NativeInt, INT_MIN, INT_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_NATIVEINT>();
2956 _ASSERTE(sizeof(NativeInt) == 8);
2957 ConvOvfUn<NativeInt, _I64_MIN, _I64_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_NATIVEINT>();
2960 case CEE_CONV_OVF_U_UN:
2961 if (sizeof(NativeUInt) == 4)
2963 ConvOvfUn<NativeUInt, 0, UINT_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_NATIVEINT>();
2967 _ASSERTE(sizeof(NativeUInt) == 8);
2968 ConvOvfUn<NativeUInt, 0, _UI64_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_NATIVEINT>();
// ---- Array element loads/stores (ldelem.*/stelem.*) ----
2981 LdElem</*takeAddr*/true>();
2984 LdElemWithType<INT8, false, CORINFO_TYPE_INT>();
2987 LdElemWithType<UINT8, false, CORINFO_TYPE_INT>();
2990 LdElemWithType<INT16, false, CORINFO_TYPE_INT>();
2993 LdElemWithType<UINT16, false, CORINFO_TYPE_INT>();
2996 LdElemWithType<INT32, false, CORINFO_TYPE_INT>();
2999 LdElemWithType<UINT32, false, CORINFO_TYPE_INT>();
3002 LdElemWithType<INT64, false, CORINFO_TYPE_LONG>();
3004 // Note that the ECMA spec defines a "LDELEM_U8", but it is the same instruction number as LDELEM_I8 (since
3005 // when loading to the widest width, signed/unsigned doesn't matter).
3007 LdElemWithType<NativeInt, false, CORINFO_TYPE_NATIVEINT>();
3010 LdElemWithType<float, false, CORINFO_TYPE_FLOAT>();
3013 LdElemWithType<double, false, CORINFO_TYPE_DOUBLE>();
3015 case CEE_LDELEM_REF:
3016 LdElemWithType<Object*, true, CORINFO_TYPE_CLASS>();
3019 StElemWithType<NativeInt, false>();
3022 StElemWithType<INT8, false>();
3025 StElemWithType<INT16, false>();
3028 StElemWithType<INT32, false>();
3031 StElemWithType<INT64, false>();
3034 StElemWithType<float, false>();
3037 StElemWithType<double, false>();
3039 case CEE_STELEM_REF:
3040 StElemWithType<Object*, true>();
3043 LdElem</*takeAddr*/false>();
// ---- Overflow-checking conversions from signed sources (conv.ovf.*) ----
3051 case CEE_CONV_OVF_I1:
3052 ConvOvf<INT8, SCHAR_MIN, SCHAR_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
3054 case CEE_CONV_OVF_U1:
3055 ConvOvf<UINT8, 0, UCHAR_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
3057 case CEE_CONV_OVF_I2:
3058 ConvOvf<INT16, SHRT_MIN, SHRT_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
3060 case CEE_CONV_OVF_U2:
3061 ConvOvf<UINT16, 0, USHRT_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
3063 case CEE_CONV_OVF_I4:
3064 ConvOvf<INT32, INT_MIN, INT_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
3066 case CEE_CONV_OVF_U4:
3067 ConvOvf<UINT32, 0, UINT_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
3069 case CEE_CONV_OVF_I8:
3070 ConvOvf<INT64, _I64_MIN, _I64_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_LONG>();
3072 case CEE_CONV_OVF_U8:
3073 ConvOvf<UINT64, 0, _UI64_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_LONG>();
3088 Conv<UINT16, /*TIsUnsigned*/true, /*TCanHoldPtr*/false, /*TIsShort*/true, CORINFO_TYPE_INT>();
3091 Conv<UINT8, /*TIsUnsigned*/true, /*TCanHoldPtr*/false, /*TIsShort*/true, CORINFO_TYPE_INT>();
3094 Conv<NativeInt, /*TIsUnsigned*/false, /*TCanHoldPtr*/true, /*TIsShort*/false, CORINFO_TYPE_NATIVEINT>();
3096 case CEE_CONV_OVF_I:
3097 if (sizeof(NativeInt) == 4)
3099 ConvOvf<NativeInt, INT_MIN, INT_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_NATIVEINT>();
3103 _ASSERTE(sizeof(NativeInt) == 8);
3104 ConvOvf<NativeInt, _I64_MIN, _I64_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_NATIVEINT>();
3107 case CEE_CONV_OVF_U:
3108 if (sizeof(NativeUInt) == 4)
3110 ConvOvf<NativeUInt, 0, UINT_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_NATIVEINT>();
3114 _ASSERTE(sizeof(NativeUInt) == 8);
3115 ConvOvf<NativeUInt, 0, _UI64_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_NATIVEINT>();
// ---- Overflow-checking arithmetic (add/mul/sub .ovf[.un]) ----
3119 BinaryArithOvfOp<BA_Add, /*asUnsigned*/false>();
3121 case CEE_ADD_OVF_UN:
3122 BinaryArithOvfOp<BA_Add, /*asUnsigned*/true>();
3125 BinaryArithOvfOp<BA_Mul, /*asUnsigned*/false>();
3127 case CEE_MUL_OVF_UN:
3128 BinaryArithOvfOp<BA_Mul, /*asUnsigned*/true>();
3131 BinaryArithOvfOp<BA_Sub, /*asUnsigned*/false>();
3133 case CEE_SUB_OVF_UN:
3134 BinaryArithOvfOp<BA_Sub, /*asUnsigned*/true>();
3136 case CEE_ENDFINALLY:
3137 // We have just ended a finally.
3138 // If we were called during exception dispatch,
3139 // rethrow the exception on our way out.
3140 if (m_leaveInfoStack.IsEmpty())
3142 Object* finallyException = NULL;
3146 _ASSERTE(m_inFlightException != NULL);
3147 finallyException = m_inFlightException;
3148 INTERPLOG("endfinally handling for %s, %p, %p\n", methName, m_methInfo, finallyException);
3149 m_inFlightException = NULL;
3152 COMPlusThrow(ObjectToOBJECTREF(finallyException));
3155 // Otherwise, see if there's another finally block to
3156 // execute as part of processing the current LEAVE...
3157 else if (!SearchForCoveringFinally())
3159 // No, there isn't -- go to the leave target.
3160 _ASSERTE(!m_leaveInfoStack.IsEmpty());
3161 LeaveInfo li = m_leaveInfoStack.Pop();
3162 ExecuteBranch(li.m_target);
3164 // Yes, there, is, and SearchForCoveringFinally set us up to start executing it.
3165 continue; // Skip the default m_ILCodePtr++ at bottom of loop.
3171 Conv<NativeUInt, /*TIsUnsigned*/true, /*TCanHoldPtr*/true, /*TIsShort*/false, CORINFO_TYPE_NATIVEINT>();
// Reserved prefix opcodes are not valid IL here.
3174 NYI_INTERP("Unimplemented opcode: CEE_PREFIX7");
3177 NYI_INTERP("Unimplemented opcode: CEE_PREFIX6");
3180 NYI_INTERP("Unimplemented opcode: CEE_PREFIX5");
3183 NYI_INTERP("Unimplemented opcode: CEE_PREFIX4");
3186 NYI_INTERP("Unimplemented opcode: CEE_PREFIX3");
3189 NYI_INTERP("Unimplemented opcode: CEE_PREFIX2");
3192 // This is the prefix for all the 2-byte opcodes.
3193 // Figure out the second byte of the 2-byte opcode.
3194 ops = *(m_ILCodePtr + 1);
3195 #if INTERP_ILINSTR_PROFILE
3196 // Take one away from PREFIX1, which we won't count.
3197 InterlockedDecrement(&s_ILInstrExecs[CEE_PREFIX1]);
3198 // Credit instead to the 2-byte instruction index.
3199 InterlockedIncrement(&s_ILInstr2ByteExecs[ops]);
3200 #endif // INTERP_ILINSTR_PROFILE
// ---- Two-byte (0xFE-prefixed) opcodes ----
3203 case TWOBYTE_CEE_ARGLIST:
3204 // NYI_INTERP("Unimplemented opcode: TWOBYTE_CEE_ARGLIST");
3205 _ASSERTE(m_methInfo->m_varArgHandleArgNum != NO_VA_ARGNUM);
3206 LdArgA(m_methInfo->m_varArgHandleArgNum);
3210 case TWOBYTE_CEE_CEQ:
3214 case TWOBYTE_CEE_CGT:
3218 case TWOBYTE_CEE_CGT_UN:
3219 CompareOp<CO_GT_UN>();
3222 case TWOBYTE_CEE_CLT:
3226 case TWOBYTE_CEE_CLT_UN:
3227 CompareOp<CO_LT_UN>();
// Long-form arg/local access: operand is a 2-byte little-endian index.
3231 case TWOBYTE_CEE_LDARG:
3233 argNums = getU2LittleEndian(m_ILCodePtr);
3237 case TWOBYTE_CEE_LDARGA:
3239 argNums = getU2LittleEndian(m_ILCodePtr);
3243 case TWOBYTE_CEE_STARG:
3245 argNums = getU2LittleEndian(m_ILCodePtr);
3250 case TWOBYTE_CEE_LDLOC:
3252 argNums = getU2LittleEndian(m_ILCodePtr);
3256 case TWOBYTE_CEE_LDLOCA:
3258 argNums = getU2LittleEndian(m_ILCodePtr);
3262 case TWOBYTE_CEE_STLOC:
3264 argNums = getU2LittleEndian(m_ILCodePtr);
3269 case TWOBYTE_CEE_CONSTRAINED:
3270 RecordConstrainedCall();
3273 case TWOBYTE_CEE_VOLATILE:
3274 // Set a flag that causes a memory barrier to be associated with the next load or store.
3275 m_volatileFlag = true;
3279 case TWOBYTE_CEE_LDFTN:
3283 case TWOBYTE_CEE_INITOBJ:
3287 case TWOBYTE_CEE_LOCALLOC:
3292 case TWOBYTE_CEE_LDVIRTFTN:
3296 case TWOBYTE_CEE_SIZEOF:
3300 case TWOBYTE_CEE_RETHROW:
3304 case TWOBYTE_CEE_READONLY:
3305 m_readonlyFlag = true;
3307 // A comment in importer.cpp indicates that READONLY may also apply to calls. We'll see.
3308 _ASSERTE_MSG(*m_ILCodePtr == CEE_LDELEMA, "According to the ECMA spec, READONLY may only precede LDELEMA");
3311 case TWOBYTE_CEE_INITBLK:
3315 case TWOBYTE_CEE_CPBLK:
3319 case TWOBYTE_CEE_ENDFILTER:
3323 case TWOBYTE_CEE_UNALIGNED:
3324 // Nothing to do here.
3328 case TWOBYTE_CEE_TAILCALL:
3329 // TODO: Needs revisiting when implementing tail call.
3330 // NYI_INTERP("Unimplemented opcode: TWOBYTE_CEE_TAILCALL");
3334 case TWOBYTE_CEE_REFANYTYPE:
3345 NYI_INTERP("Unimplemented opcode: CEE_PREFIXREF");
3356 INTERPLOG("DONE %d, %s\n", m_methInfo->m_stubNum, m_methInfo->m_methName);
// ---- Exception catch path for the whole dispatch loop ----
3360 INTERPLOG("EXCEPTION %d (throw), %s\n", m_methInfo->m_stubNum, m_methInfo->m_methName);
3362 bool handleException = false;
3363 OBJECTREF orThrowable = NULL;
3366 orThrowable = GET_THROWABLE();
3368 if (m_filterNextScan != 0)
3370 // We are in the middle of a filter scan and an exception is thrown inside
3371 // a filter. We are supposed to swallow it and assume the filter did not
3372 // handle the exception.
3374 m_largeStructOperandStackHt = 0;
3377 handleException = true;
3381 // orThrowable must be protected. MethodHandlesException() will place orThrowable
3382 // into the operand stack (a permanently protected area) if it returns true.
3383 GCPROTECT_BEGIN(orThrowable);
3384 handleException = MethodHandlesException(orThrowable);
3388 if (handleException)
3390 GetThread()->SafeSetThrowables(orThrowable
3391 DEBUG_ARG(ThreadExceptionState::STEC_CurrentTrackerEqualNullOkForInterpreter));
3396 INTERPLOG("EXCEPTION %d (rethrow), %s\n", m_methInfo->m_stubNum, m_methInfo->m_methName);
3400 EX_END_CATCH(RethrowTransientExceptions)
3404 #pragma optimize("", on)
// Handles the 'endfilter' instruction: the filter's result (top of operand
// stack) decides whether to branch into the associated handler, keep scanning
// later EH clauses for another filter/handler, or rethrow the in-flight
// exception when nothing in this method will handle it.
3407 void Interpreter::EndFilter()
3409 unsigned handles = OpStackGet<unsigned>(0);
3410 // If the filter decides to handle the exception, then go to the handler offset.
3413 // We decided to handle the exception, so give all EH entries a chance to
3414 // handle future exceptions. Clear scan.
3415 m_filterNextScan = 0;
3416 ExecuteBranch(m_methInfo->m_ILCode + m_filterHandlerOffset);
3418 // The filter decided not to handle the exception, ask if there is some other filter
3419 // lined up to try to handle it or some other catch/finally handlers will handle it.
3420 // If no one handles the exception, rethrow and be done with it.
3423 bool handlesEx = false;
// m_inFlightException holds the raw throwable saved when the filter began;
// it must be GC-protected while we walk the EH table again.
3425 OBJECTREF orThrowable = ObjectToOBJECTREF(m_inFlightException);
3426 GCPROTECT_BEGIN(orThrowable);
3427 handlesEx = MethodHandlesException(orThrowable);
3432 // Just clear scan before rethrowing to give any EH entry a chance to handle
3434 m_filterNextScan = 0;
3435 Object* filterException = NULL;
3438 _ASSERTE(m_inFlightException != NULL);
3439 filterException = m_inFlightException;
3440 INTERPLOG("endfilter handling for %s, %p, %p\n", m_methInfo->m_methName, m_methInfo, filterException);
// Clear the in-flight slot before throwing so it is not double-reported.
3441 m_inFlightException = NULL;
3444 COMPlusThrow(ObjectToOBJECTREF(filterException));
3449 // Let it do another round of filter:end-filter or handler block.
3450 // During the next end filter, we will reuse m_filterNextScan and
3451 // continue searching where we left off. Note however, while searching,
3452 // any of the filters could throw an exception. But this is supposed to
3453 // be swallowed and endfilter should be called with a value of 0 on the
// Determines whether the current method has an EH clause covering the current
// IL offset that will handle 'orThrowable'. On a match it primes the
// interpreter to run the handler (pushes the throwable or saves it in
// m_inFlightException, empties the operand stacks, and branches to the
// handler/filter offset) and returns true; otherwise returns false so the
// caller rethrows to the next frame.
3459 bool Interpreter::MethodHandlesException(OBJECTREF orThrowable)
3467 bool handlesEx = false;
3469 if (orThrowable != NULL)
3471 // Don't catch ThreadAbort and other uncatchable exceptions
3472 if (!IsUncatchable(&orThrowable))
3474 // Does the current method catch this? The clauses are defined by offsets, so get that.
3475 // However, if we are in the middle of a filter scan, make sure we get the offset of the
3476 // excepting code, rather than the offset of the filter body.
3477 DWORD curOffset = (m_filterNextScan != 0) ? m_filterExcILOffset : CurOffset();
3478 TypeHandle orThrowableTH = TypeHandle(orThrowable->GetMethodTable());
3480 GCPROTECT_BEGIN(orThrowable);
3483 // Perform a filter scan or regular walk of the EH Table. Filter scan is performed when
3484 // we are evaluating a series of filters to handle the exception until the first handler
3485 // (filter's or otherwise) that will handle the exception.
3486 for (unsigned XTnum = m_filterNextScan; XTnum < m_methInfo->m_ehClauseCount; XTnum++)
3488 CORINFO_EH_CLAUSE clause;
3489 m_interpCeeInfo.getEHinfo(m_methInfo->m_method, XTnum, &clause);
3490 _ASSERTE(clause.HandlerLength != (unsigned)-1); // @DEPRECATED
3492 // First, is the current offset in the try block?
3493 if (clause.TryOffset <= curOffset && curOffset < clause.TryOffset + clause.TryLength)
3495 unsigned handlerOffset = 0;
3496 // CORINFO_EH_CLAUSE_NONE represents 'catch' blocks
3497 if (clause.Flags == CORINFO_EH_CLAUSE_NONE)
3499 // Now, does the catch block handle the thrown exception type?
3500 CORINFO_CLASS_HANDLE excType = FindClass(clause.ClassToken InterpTracingArg(RTK_CheckHandlesException));
3501 if (ExceptionIsOfRightType(TypeHandle::FromPtr(excType), orThrowableTH))
// Catch clause matched: hand the exception object to the handler as the
// sole operand-stack entry.
3504 // Push the exception object onto the operand stack.
3505 OpStackSet<OBJECTREF>(0, orThrowable);
3506 OpStackTypeSet(0, InterpreterType(CORINFO_TYPE_CLASS));
3508 m_largeStructOperandStackHt = 0;
3509 handlerOffset = clause.HandlerOffset;
3511 m_filterNextScan = 0;
3516 // Handle a wrapped exception.
3517 OBJECTREF orUnwrapped = PossiblyUnwrapThrowable(orThrowable, GetMethodDesc()->GetAssembly());
3518 if (ExceptionIsOfRightType(TypeHandle::FromPtr(excType), orUnwrapped->GetTypeHandle()))
3520 // Push the exception object onto the operand stack.
3521 OpStackSet<OBJECTREF>(0, orUnwrapped);
3522 OpStackTypeSet(0, InterpreterType(CORINFO_TYPE_CLASS));
3524 m_largeStructOperandStackHt = 0;
3525 handlerOffset = clause.HandlerOffset;
3527 m_filterNextScan = 0;
// Filter clause: run the filter body; remember where to resume the scan
// (XTnum + 1) and the handler to branch to if the filter answers 'handle'.
3531 else if (clause.Flags == CORINFO_EH_CLAUSE_FILTER)
3534 // Push the exception object onto the operand stack.
3535 OpStackSet<OBJECTREF>(0, orThrowable);
3536 OpStackTypeSet(0, InterpreterType(CORINFO_TYPE_CLASS));
3538 m_largeStructOperandStackHt = 0;
3539 handlerOffset = clause.FilterOffset;
3540 m_inFlightException = OBJECTREFToObject(orThrowable);
3542 m_filterHandlerOffset = clause.HandlerOffset;
3543 m_filterNextScan = XTnum + 1;
3544 m_filterExcILOffset = curOffset;
// Fault/finally clause: run the handler, keeping the exception in flight so
// 'endfinally' can rethrow it afterwards.
3546 else if (clause.Flags == CORINFO_EH_CLAUSE_FAULT ||
3547 clause.Flags == CORINFO_EH_CLAUSE_FINALLY)
3550 // Save the exception object to rethrow.
3551 m_inFlightException = OBJECTREFToObject(orThrowable);
3552 // Empty the operand stack.
3554 m_largeStructOperandStackHt = 0;
3555 handlerOffset = clause.HandlerOffset;
3557 m_filterNextScan = 0;
3560 // Reset the interpreter loop in preparation of calling the handler.
3563 // Set the IL offset of the handler.
3564 ExecuteBranch(m_methInfo->m_ILCode + handlerOffset);
3566 // If an exception occurs while attempting to leave a protected scope,
3567 // we empty the 'leave' info stack upon entering the handler.
3568 while (!m_leaveInfoStack.IsEmpty())
3570 m_leaveInfoStack.Pop();
3573 // Some things are set up before a call, and must be cleared on an exception caught be the caller.
3574 // A method that returns a struct allocates local space for the return value, and "registers" that
3575 // space and the type so that it's scanned if a GC happens. "Unregister" it if we throw an exception
3576 // in the call, and handle it in the caller. (If it's not handled by the caller, the Interpreter is
3577 // deallocated, so it's value doesn't matter.)
3578 m_structRetValITPtr = NULL;
3579 m_callThisArg = NULL;
// NOTE(review): in this excerpt the next line appears after the clause walk;
// it releases the synchronized-method monitor on the exception path —
// confirm its exact enclosing scope against the full source.
3590 DoMonitorExitWork();
// Maps an IL opcode's operand-format kind (from opcode.def's "params" column)
// to the number of extra operand bytes the instruction carries beyond its
// base length; formats sized dynamically (e.g. switch) return 0 and are
// handled specially by the caller.
3596 static unsigned OpFormatExtraSize(opcode_format_t format) {
3604     case InlineBrTarget:
3620         return 0; // We'll handle this specially.
3622     case ShortInlineVar:
3624     case ShortInlineBrTarget:
// Total encoded size (opcode byte(s) + fixed operand bytes) of each IL
// instruction, indexed by opcode number; filled lazily by OpSizes1ByteInit.
3635 static unsigned opSizes1Byte[CEE_COUNT];
// One-shot initialization guard for opSizes1Byte.
3636 static bool opSizes1ByteInit = false;
// Lazily populates opSizes1Byte from opcode.def: each entry is the opcode's
// declared length plus the extra operand bytes implied by its operand format
// (see OpFormatExtraSize).
// NOTE(review): the guard is a plain, unsynchronized bool; concurrent first
// calls would race, though the table contents computed are deterministic --
// confirm callers serialize the first call.
3638 static void OpSizes1ByteInit()
3640     if (opSizes1ByteInit) return;
3641 #define OPDEF(name, stringname, stackpop, stackpush, params, kind, len, byte1, byte2, ctrl) \
3642     opSizes1Byte[name] = len + OpFormatExtraSize(params);
3643 #include "opcode.def"
3645     opSizes1ByteInit = true;
// Conservative, single linear pass over the method's IL that returns true iff
// any branch (including any switch arm) has a negative displacement -- i.e.
// the method may contain a loop. No control-flow graph is built; forward-only
// branching methods return false.
3649 bool Interpreter::MethodMayHaveLoop(BYTE* ilCode, unsigned codeSize)
3653     BYTE* ilCodeLim = ilCode + codeSize;
3654     while (ilCode < ilCodeLim)
3656         unsigned op = *ilCode;
// Short-form branches: 1-byte signed displacement immediately after the opcode.
3659           case CEE_BR_S: case CEE_BRFALSE_S: case CEE_BRTRUE_S:
3660           case CEE_BEQ_S: case CEE_BGE_S: case CEE_BGT_S: case CEE_BLE_S: case CEE_BLT_S:
3661           case CEE_BNE_UN_S: case CEE_BGE_UN_S: case CEE_BGT_UN_S: case CEE_BLE_UN_S: case CEE_BLT_UN_S:
3663             delta = getI1(ilCode + 1);
3664             if (delta < 0) return true;
// Long-form branches: 4-byte little-endian signed displacement.
3668           case CEE_BR: case CEE_BRFALSE: case CEE_BRTRUE:
3669           case CEE_BEQ: case CEE_BGE: case CEE_BGT: case CEE_BLE: case CEE_BLT:
3670           case CEE_BNE_UN: case CEE_BGE_UN: case CEE_BGT_UN: case CEE_BLE_UN: case CEE_BLT_UN:
3672             delta = getI4LittleEndian(ilCode + 1);
3673             if (delta < 0) return true;
// switch: a 4-byte arm count N followed by N 4-byte little-endian targets;
// any backwards target makes the method loop-suspect.
3679             UINT32 n = getU4LittleEndian(ilCode + 1);
3680             UINT32 instrSize = 1 + (n + 1)*4;
3681             for (unsigned i = 0; i < n; i++) {
3682                 delta = getI4LittleEndian(ilCode + (5 + i * 4));
3683                 if (delta < 0) return true;
3685             ilCode += instrSize;
// Two-byte (0xFE-prefixed) opcodes: second byte selects into the 0x100 range
// of the size table.
3690             op = *(ilCode + 1) + 0x100;
3691             _ASSERTE(op < CEE_COUNT);  // Bounds check for below.
3692             // deliberate fall-through here.
3695             // For the rest of the 1-byte instructions, we'll use a table-driven approach.
3696             ilCode += opSizes1Byte[op];
// Hook invoked when the interpreter takes a backwards branch. Currently a
// no-op: the intended GC-poll on loop back-edges is not yet implemented.
3704 void Interpreter::BackwardsBranchActions(int offset)
3706     // TODO: Figure out how to do a GC poll.
// Called while executing a "leave": resumes scanning this method's EH clauses
// at the index recorded in the top LeaveInfo, looking for the next finally
// clause whose try region covers the leave's offset and whose branch target
// lies outside that try region (a finally whose target stays inside its own
// try need not run). On a hit, points m_ILCodePtr at the finally handler and
// records where to resume the scan on the next call; if none is found, the
// caller pops the leave-info stack.
3709 bool Interpreter::SearchForCoveringFinally()
3717     _ASSERTE_MSG(!m_leaveInfoStack.IsEmpty(), "precondition");
3719     LeaveInfo& li = m_leaveInfoStack.PeekRef();
3723     for (unsigned XTnum = li.m_nextEHIndex; XTnum < m_methInfo->m_ehClauseCount; XTnum++)
3725         CORINFO_EH_CLAUSE clause;
3726         m_interpCeeInfo.getEHinfo(m_methInfo->m_method, XTnum, &clause);
3727         _ASSERTE(clause.HandlerLength != (unsigned)-1); // @DEPRECATED
3729         // First, is the offset of the leave instruction in the try block?
3730         unsigned tryEndOffset = clause.TryOffset + clause.TryLength;
3731         if (clause.TryOffset <= li.m_offset && li.m_offset < tryEndOffset)
3733             // Yes: is it a finally, and is its target outside the try block?
3734             size_t targOffset = (li.m_target - m_methInfo->m_ILCode);
3735             if (clause.Flags == CORINFO_EH_CLAUSE_FINALLY
3736                 && !(clause.TryOffset <= targOffset && targOffset < tryEndOffset))
// Transfer control to the finally handler; remember to resume the clause scan
// just past this one when the handler's endfinally re-enters this search.
3738                 m_ILCodePtr = m_methInfo->m_ILCode + clause.HandlerOffset;
3739                 li.m_nextEHIndex = XTnum + 1;
3745     // Caller will handle popping the leave info stack.
// promote_func-compatible static thunk: recovers the Interpreter instance
// from the opaque context pointer and forwards to the instance GCScanRoots.
3750 void Interpreter::GCScanRoots(promote_func* pf, ScanContext* sc, void* interp0)
3752     Interpreter* interp = reinterpret_cast<Interpreter*>(interp0);
3753     interp->GCScanRoots(pf, sc);
// Reports every GC root held by this interpreter frame to the GC: incoming
// arguments, the "this" pointer, any in-flight exception, the return buffer,
// the struct-return temp, the security object, all locals, the current
// operand stack, and any outgoing arguments of a call in progress.
3756 void Interpreter::GCScanRoots(promote_func* pf, ScanContext* sc)
3758     // Report inbound arguments, if the interpreter has not been invoked directly.
3759     // (In the latter case, the arguments are reported by the calling method.)
3762     for (unsigned i = 0; i < m_methInfo->m_numArgs; i++)
3764         GCScanRootAtLoc(reinterpret_cast<Object**>(GetArgAddr(i)), GetArgType(i), pf, sc);
// "this" is scanned as an object pointer for reference types, and as an
// interior (byref) pointer for value-type instance methods.
3768     if (m_methInfo->GetFlag<InterpreterMethodInfo::Flag_hasThisArg>())
3770         if (m_methInfo->GetFlag<InterpreterMethodInfo::Flag_thisArgIsObjPtr>())
3772             GCScanRootAtLoc(&m_thisArg, InterpreterType(CORINFO_TYPE_CLASS), pf, sc);
3776             GCScanRootAtLoc(&m_thisArg, InterpreterType(CORINFO_TYPE_BYREF), pf, sc);
3780     // This is the "this" argument passed in to DoCallWork.  (Note that we treat this as a byref; it
3781     // might be, for a struct instance method, and this covers the object pointer case as well.)
3782     GCScanRootAtLoc(reinterpret_cast<Object**>(&m_callThisArg), InterpreterType(CORINFO_TYPE_BYREF), pf, sc);
3784     // Scan the exception object that we'll rethrow at the end of the finally block.
3785     GCScanRootAtLoc(reinterpret_cast<Object**>(&m_inFlightException), InterpreterType(CORINFO_TYPE_CLASS), pf, sc);
3787     // A retBufArg, may, in some cases, be a byref into the heap.
3788     if (m_retBufArg != NULL)
3790         GCScanRootAtLoc(reinterpret_cast<Object**>(&m_retBufArg), InterpreterType(CORINFO_TYPE_BYREF), pf, sc);
// If a callee's struct return value has been "registered" (see the exception
// path that clears m_structRetValITPtr), scan its temp space with its type.
3793     if (m_structRetValITPtr != NULL)
3795         GCScanRootAtLoc(reinterpret_cast<Object**>(m_structRetValTempSpace), *m_structRetValITPtr, pf, sc);
3798     // We'll conservatively assume that we might have a security object.
3799     GCScanRootAtLoc(reinterpret_cast<Object**>(&m_securityObject), InterpreterType(CORINFO_TYPE_CLASS), pf, sc);
// Scan locals. Large-struct locals live out of line: the fixed-size slot
// holds a pointer to the actual storage, so scan through that pointer.
3802     for (unsigned i = 0; i < m_methInfo->m_numLocals; i++)
3804         InterpreterType it = m_methInfo->m_localDescs[i].m_type;
3805         void* localPtr = NULL;
3806         if (it.IsLargeStruct(&m_interpCeeInfo))
3808             void* structPtr = ArgSlotEndiannessFixup(reinterpret_cast<ARG_SLOT*>(FixedSizeLocalSlot(i)), sizeof(void**));
3809             localPtr = *reinterpret_cast<void**>(structPtr);
3813             localPtr = ArgSlotEndiannessFixup(reinterpret_cast<ARG_SLOT*>(FixedSizeLocalSlot(i)), it.Size(&m_interpCeeInfo));
3815         GCScanRootAtLoc(reinterpret_cast<Object**>(localPtr), it, pf, sc, m_methInfo->GetPinningBit(i));
3818     // Do current ostack.
3819     for (unsigned i = 0; i < m_curStackHt; i++)
3821         InterpreterType it = OpStackTypeGet(i);
3822         if (it.IsLargeStruct(&m_interpCeeInfo))
3824             Object** structPtr = reinterpret_cast<Object**>(OpStackGet<void*>(i));
3825             // If the ostack value is a pointer to a local var value, don't scan, since we already
3826             // scanned the variable value above.
3827             if (!IsInLargeStructLocalArea(structPtr))
3829                 GCScanRootAtLoc(structPtr, it, pf, sc);
3834             void* stackPtr = OpStackGetAddr(i, it.Size(&m_interpCeeInfo));
3835             GCScanRootAtLoc(reinterpret_cast<Object**>(stackPtr), it, pf, sc);
3839     // Any outgoing arguments for a call in progress.
3840     for (unsigned i = 0; i < m_argsSize; i++)
3842         // If a call has a large struct argument, we'll have pushed a pointer to the entry for that argument on the
3843         // largeStructStack of the current Interpreter.  That will be scanned by the code above, so just skip it.
3844         InterpreterType undef(CORINFO_TYPE_UNDEF);
3845         InterpreterType it = m_argTypes[i];
3846         if (it != undef && !it.IsLargeStruct(&m_interpCeeInfo))
3848             BYTE* argPtr = ArgSlotEndiannessFixup(&m_args[i], it.Size(&m_interpCeeInfo));
3849             GCScanRootAtLoc(reinterpret_cast<Object**>(argPtr), it, pf, sc);
// Reports a single root at "loc" to the GC according to its interpreter type:
// object references are promoted directly, byrefs/refanys are promoted as
// interior pointers, value classes are scanned field-by-field, and all other
// (non-pointer) types contain no roots. "pinningRef" adds GC_CALL_PINNED for
// pinned locals; it is asserted impossible for value classes and non-pointer
// types.
3854 void Interpreter::GCScanRootAtLoc(Object** loc, InterpreterType it, promote_func* pf, ScanContext* sc, bool pinningRef)
3856     switch (it.ToCorInfoType())
3858     case CORINFO_TYPE_CLASS:
3859     case CORINFO_TYPE_STRING:
3862         if (pinningRef) flags |= GC_CALL_PINNED;
3863         (*pf)(loc, sc, flags);
// Byrefs and refanys may point into the middle of an object, so report them
// as interior pointers.
3867     case CORINFO_TYPE_BYREF:
3868     case CORINFO_TYPE_REFANY:
3870         DWORD flags = GC_CALL_INTERIOR;
3871         if (pinningRef) flags |= GC_CALL_PINNED;
3872         (*pf)(loc, sc, flags);
3876     case CORINFO_TYPE_VALUECLASS:
3877         _ASSERTE(!pinningRef);
3878         GCScanValueClassRootAtLoc(loc, it.ToClassHandle(), pf, sc);
3882         _ASSERTE(!pinningRef);
// Reports all GC pointers embedded in the value-class instance at "loc",
// using the value class's MethodTable to locate its reference fields.
3887 void Interpreter::GCScanValueClassRootAtLoc(Object** loc, CORINFO_CLASS_HANDLE valueClsHnd, promote_func* pf, ScanContext* sc)
3889     MethodTable* valClsMT = GetMethodTableFromClsHnd(valueClsHnd);
3890     ReportPointersFromValueType(pf, sc, valClsMT, loc);
3893 // Returns "true" iff "cit" is "stack-normal": all integer types with byte size less than 4
3894 // are folded to CORINFO_TYPE_INT; all remaining unsigned types are folded to their signed counterparts.
3895 bool IsStackNormalType(CorInfoType cit)
3897     LIMITED_METHOD_CONTRACT;
// Not stack-normal: small/unsigned integers, STRING, and PTR all fold to a
// wider/signed representation on the operand stack (see
// CorInfoTypeStackNormalize); UNDEF/VOID/VAR never appear on the ostack.
3901     case CORINFO_TYPE_UNDEF:
3902     case CORINFO_TYPE_VOID:
3903     case CORINFO_TYPE_BOOL:
3904     case CORINFO_TYPE_CHAR:
3905     case CORINFO_TYPE_BYTE:
3906     case CORINFO_TYPE_UBYTE:
3907     case CORINFO_TYPE_SHORT:
3908     case CORINFO_TYPE_USHORT:
3909     case CORINFO_TYPE_UINT:
3910     case CORINFO_TYPE_NATIVEUINT:
3911     case CORINFO_TYPE_ULONG:
3912     case CORINFO_TYPE_VAR:
3913     case CORINFO_TYPE_STRING:
3914     case CORINFO_TYPE_PTR:
// Stack-normal types: these appear on the operand stack as-is.
3917     case CORINFO_TYPE_INT:
3918     case CORINFO_TYPE_NATIVEINT:
3919     case CORINFO_TYPE_BYREF:
3920     case CORINFO_TYPE_CLASS:
3921     case CORINFO_TYPE_LONG:
3922     case CORINFO_TYPE_VALUECLASS:
3923     case CORINFO_TYPE_REFANY:
3924         // I chose to consider both float and double stack-normal; together these comprise
3925         // the "F" type of the ECMA spec.  This means I have to consider these to freely
3927     case CORINFO_TYPE_FLOAT:
3928     case CORINFO_TYPE_DOUBLE:
// Folds a CorInfoType to its stack-normal form: sub-4-byte and unsigned ints
// fold to INT, NATIVEUINT/PTR to NATIVEINT, ULONG to LONG, STRING to CLASS;
// already-normal types are returned unchanged. VOID and VAR can never be on
// the operand stack and assert.
3936 CorInfoType CorInfoTypeStackNormalize(CorInfoType cit)
3938     LIMITED_METHOD_CONTRACT;
3942     case CORINFO_TYPE_UNDEF:
3943         return CORINFO_TYPE_UNDEF;
3945     case CORINFO_TYPE_VOID:
3946     case CORINFO_TYPE_VAR:
3947         _ASSERTE_MSG(false, "Type that cannot be on the ostack.");
3948         return CORINFO_TYPE_UNDEF;
3950     case CORINFO_TYPE_BOOL:
3951     case CORINFO_TYPE_CHAR:
3952     case CORINFO_TYPE_BYTE:
3953     case CORINFO_TYPE_UBYTE:
3954     case CORINFO_TYPE_SHORT:
3955     case CORINFO_TYPE_USHORT:
3956     case CORINFO_TYPE_UINT:
3957         return CORINFO_TYPE_INT;
3959     case CORINFO_TYPE_NATIVEUINT:
3960     case CORINFO_TYPE_PTR:
3961         return CORINFO_TYPE_NATIVEINT;
3963     case CORINFO_TYPE_ULONG:
3964         return CORINFO_TYPE_LONG;
3966     case CORINFO_TYPE_STRING:
3967         return CORINFO_TYPE_CLASS;
// The remaining types are already stack-normal and pass through unchanged.
3969     case CORINFO_TYPE_INT:
3970     case CORINFO_TYPE_NATIVEINT:
3971     case CORINFO_TYPE_BYREF:
3972     case CORINFO_TYPE_CLASS:
3973     case CORINFO_TYPE_LONG:
3974     case CORINFO_TYPE_VALUECLASS:
3975     case CORINFO_TYPE_REFANY:
3976         // I chose to consider both float and double stack-normal; together these comprise
3977         // the "F" type of the ECMA spec.  This means I have to consider these to freely
3979     case CORINFO_TYPE_FLOAT:
3980     case CORINFO_TYPE_DOUBLE:
3981         _ASSERTE(IsStackNormalType(cit));
// InterpreterType analogue of CorInfoTypeStackNormalize: returns the
// stack-normal InterpreterType for this type, and *this unchanged when it is
// already stack-normal; types that can never be on the ostack assert.
3989 InterpreterType InterpreterType::StackNormalize() const
3991     LIMITED_METHOD_CONTRACT;
3993     switch (ToCorInfoType())
3995     case CORINFO_TYPE_BOOL:
3996     case CORINFO_TYPE_CHAR:
3997     case CORINFO_TYPE_BYTE:
3998     case CORINFO_TYPE_UBYTE:
3999     case CORINFO_TYPE_SHORT:
4000     case CORINFO_TYPE_USHORT:
4001     case CORINFO_TYPE_UINT:
4002         return InterpreterType(CORINFO_TYPE_INT);
4004     case CORINFO_TYPE_NATIVEUINT:
4005     case CORINFO_TYPE_PTR:
4006         return InterpreterType(CORINFO_TYPE_NATIVEINT);
4008     case CORINFO_TYPE_ULONG:
4009         return InterpreterType(CORINFO_TYPE_LONG);
4011     case CORINFO_TYPE_STRING:
4012         return InterpreterType(CORINFO_TYPE_CLASS);
// Already stack-normal: hand back this very InterpreterType (preserving any
// class handle it carries).
4014     case CORINFO_TYPE_INT:
4015     case CORINFO_TYPE_NATIVEINT:
4016     case CORINFO_TYPE_BYREF:
4017     case CORINFO_TYPE_CLASS:
4018     case CORINFO_TYPE_LONG:
4019     case CORINFO_TYPE_VALUECLASS:
4020     case CORINFO_TYPE_REFANY:
4021     case CORINFO_TYPE_FLOAT:
4022     case CORINFO_TYPE_DOUBLE:
4023         return *const_cast<InterpreterType*>(this);
4025     case CORINFO_TYPE_UNDEF:
4026     case CORINFO_TYPE_VOID:
4027     case CORINFO_TYPE_VAR:
4029         _ASSERTE_MSG(false, "should not reach here");
4030         return *const_cast<InterpreterType*>(this);
// Loose type-compatibility check (used by verification/assertion paths):
// besides exact equality, it accepts same-size value classes, byref vs.
// nativeint (unsafe code), primitives vs. same-size single-field structs
// reported as primitives by the VM, and value-class synonyms of primitive
// types (after stack-normalization).
4035 bool InterpreterType::MatchesWork(const InterpreterType it2, CEEInfo* info) const
4043     if (*this == it2) return true;
4046     CorInfoType cit1 = ToCorInfoType();
4047     CorInfoType cit2 = it2.ToCorInfoType();
4051     // An approximation: valueclasses of the same size match.
4052     if (cit1 == CORINFO_TYPE_VALUECLASS &&
4053         cit2 == CORINFO_TYPE_VALUECLASS &&
4054         Size(info) == it2.Size(info))
4059     // NativeInt matches byref.  (In unsafe code).
4060     if ((cit1 == CORINFO_TYPE_BYREF && cit2 == CORINFO_TYPE_NATIVEINT))
4063     // apparently the VM may do the optimization of reporting the return type of a method that
4064     // returns a struct of a single nativeint field *as* nativeint; and similarly with at least some other primitive types.
4065     // So weaken this check to allow that.
4066     // (The check is actually a little weaker still, since I don't want to crack the return type and make sure
4067     // that it has only a single nativeint member -- so I just ensure that the total size is correct).
4070     case CORINFO_TYPE_NATIVEINT:
4071     case CORINFO_TYPE_NATIVEUINT:
4072         _ASSERTE(sizeof(NativeInt) == sizeof(NativeUInt));
4073         if (it2.Size(info) == sizeof(NativeInt))
4077     case CORINFO_TYPE_INT:
4078     case CORINFO_TYPE_UINT:
4079         _ASSERTE(sizeof(INT32) == sizeof(UINT32));
4080         if (it2.Size(info) == sizeof(INT32))
4088     // See if the second is a value type synonym for a primitive.
4089     if (cit2 == CORINFO_TYPE_VALUECLASS)
4091         CorInfoType cit2prim = info->getTypeForPrimitiveValueClass(it2.ToClassHandle());
4092         if (cit2prim != CORINFO_TYPE_UNDEF)
4094             InterpreterType it2prim(cit2prim);
4095             if (*this == it2prim.StackNormalize())
// Byte size of each CorInfoType, indexed by the enum's numeric value (noted
// in the per-entry comments). A 0 entry (UNDEF, VOID, VALUECLASS, VAR) means
// the size is not determined by the CorInfoType alone.
4106 size_t CorInfoTypeSizeArray[] =
4108     /*CORINFO_TYPE_UNDEF           = 0x0*/0,
4109     /*CORINFO_TYPE_VOID            = 0x1*/0,
4110     /*CORINFO_TYPE_BOOL            = 0x2*/1,
4111     /*CORINFO_TYPE_CHAR            = 0x3*/2,
4112     /*CORINFO_TYPE_BYTE            = 0x4*/1,
4113     /*CORINFO_TYPE_UBYTE           = 0x5*/1,
4114     /*CORINFO_TYPE_SHORT           = 0x6*/2,
4115     /*CORINFO_TYPE_USHORT          = 0x7*/2,
4116     /*CORINFO_TYPE_INT             = 0x8*/4,
4117     /*CORINFO_TYPE_UINT            = 0x9*/4,
4118     /*CORINFO_TYPE_LONG            = 0xa*/8,
4119     /*CORINFO_TYPE_ULONG           = 0xb*/8,
4120     /*CORINFO_TYPE_NATIVEINT       = 0xc*/sizeof(void*),
4121     /*CORINFO_TYPE_NATIVEUINT      = 0xd*/sizeof(void*),
4122     /*CORINFO_TYPE_FLOAT           = 0xe*/4,
4123     /*CORINFO_TYPE_DOUBLE          = 0xf*/8,
4124     /*CORINFO_TYPE_STRING          = 0x10*/sizeof(void*),
4125     /*CORINFO_TYPE_PTR             = 0x11*/sizeof(void*),
4126     /*CORINFO_TYPE_BYREF           = 0x12*/sizeof(void*),
4127     /*CORINFO_TYPE_VALUECLASS      = 0x13*/0,
4128     /*CORINFO_TYPE_CLASS           = 0x14*/sizeof(void*),
4129     /*CORINFO_TYPE_REFANY          = 0x15*/sizeof(void*)*2,
4130     /*CORINFO_TYPE_VAR             = 0x16*/0,
// Returns true for the unsigned integral CorInfoTypes (CHAR counts: it is an
// unsigned 16-bit code unit).
4133 bool CorInfoTypeIsUnsigned(CorInfoType cit)
4135     LIMITED_METHOD_CONTRACT;
4139     case CORINFO_TYPE_UINT:
4140     case CORINFO_TYPE_NATIVEUINT:
4141     case CORINFO_TYPE_ULONG:
4142     case CORINFO_TYPE_UBYTE:
4143     case CORINFO_TYPE_USHORT:
4144     case CORINFO_TYPE_CHAR:
// Returns true for all integral CorInfoTypes, signed or unsigned, including
// BOOL; floating-point, reference, and byref types are not integral.
4152 bool CorInfoTypeIsIntegral(CorInfoType cit)
4154     LIMITED_METHOD_CONTRACT;
4158     case CORINFO_TYPE_UINT:
4159     case CORINFO_TYPE_NATIVEUINT:
4160     case CORINFO_TYPE_ULONG:
4161     case CORINFO_TYPE_UBYTE:
4162     case CORINFO_TYPE_USHORT:
4163     case CORINFO_TYPE_INT:
4164     case CORINFO_TYPE_NATIVEINT:
4165     case CORINFO_TYPE_LONG:
4166     case CORINFO_TYPE_BYTE:
4167     case CORINFO_TYPE_BOOL:
4168     case CORINFO_TYPE_SHORT:
// Returns true iff the CorInfoType is a floating-point type (FLOAT or DOUBLE).
4176 bool CorInfoTypeIsFloatingPoint(CorInfoType cit)
4178     return cit == CORINFO_TYPE_FLOAT || cit == CORINFO_TYPE_DOUBLE;
// HFA-element overload: true iff the element kind is FLOAT or DOUBLE (vector
// element kinds are not considered floating-point here).
4181 bool CorInfoTypeIsFloatingPoint(CorInfoHFAElemType cihet)
4183     return cihet == CORINFO_HFA_ELEM_FLOAT || cihet == CORINFO_HFA_ELEM_DOUBLE;
// Returns true for the unsigned CorElementTypes (U1..U8 and native U).
4186 bool CorElemTypeIsUnsigned(CorElementType cet)
4188     LIMITED_METHOD_CONTRACT;
4192     case ELEMENT_TYPE_U1:
4193     case ELEMENT_TYPE_U2:
4194     case ELEMENT_TYPE_U4:
4195     case ELEMENT_TYPE_U8:
4196     case ELEMENT_TYPE_U:
// Returns true iff a value of this CorInfoType may legitimately hold a
// pointer: PTR/BYREF/NATIVEINT/NATIVEUINT always; explicitly-sized integer
// types only when their size equals the platform pointer size (see comment
// below on managed C++).
4204 bool CorInfoTypeIsPointer(CorInfoType cit)
4206     LIMITED_METHOD_CONTRACT;
4209     case CORINFO_TYPE_PTR:
4210     case CORINFO_TYPE_BYREF:
4211     case CORINFO_TYPE_NATIVEINT:
4212     case CORINFO_TYPE_NATIVEUINT:
4215         // It seems like the ECMA spec doesn't allow this, but (at least) the managed C++
4216         // compiler expects the explicitly-sized pointer type of the platform pointer size to work:
4217     case CORINFO_TYPE_INT:
4218     case CORINFO_TYPE_UINT:
4219         return sizeof(NativeInt) == sizeof(INT32);
4220     case CORINFO_TYPE_LONG:
4221     case CORINFO_TYPE_ULONG:
4222         return sizeof(NativeInt) == sizeof(INT64);
// ldarg: pushes the value of argument "argNum" onto the operand stack, with
// its declared argument type.
4229 void Interpreter::LdArg(int argNum)
4237     LdFromMemAddr(GetArgAddr(argNum), GetArgType(argNum));
// ldarga: pushes the address of argument "argNum" onto the operand stack as
// a BYREF.
4240 void Interpreter::LdArgA(int argNum)
4248     OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_BYREF));
4249     OpStackSet<void*>(m_curStackHt, reinterpret_cast<void*>(GetArgAddr(argNum)));
// starg: pops the top of the operand stack into argument "argNum".
4253 void Interpreter::StArg(int argNum)
4261     StToLocalMemAddr(GetArgAddr(argNum), GetArgType(argNum));
// ldloca: pushes the address of local "locNum" as a BYREF. For large-struct
// locals the fixed-size local slot holds a pointer to the out-of-line
// storage, so the pushed address is that pointer's target, not the slot.
4265 void Interpreter::LdLocA(int locNum)
4273     InterpreterType tp = m_methInfo->m_localDescs[locNum].m_type;
4275     if (tp.IsLargeStruct(&m_interpCeeInfo))
4277         void* structPtr = ArgSlotEndiannessFixup(reinterpret_cast<ARG_SLOT*>(FixedSizeLocalSlot(locNum)), sizeof(void**));
4278         addr = *reinterpret_cast<void**>(structPtr);
4282         addr = ArgSlotEndiannessFixup(reinterpret_cast<ARG_SLOT*>(FixedSizeLocalSlot(locNum)), tp.Size(&m_interpCeeInfo));
4284     // The "addr" above, while a byref, is never a heap pointer, so we're robust if
4285     // any of these were to cause a GC.
4286     OpStackSet<void*>(m_curStackHt, addr);
4287     OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_BYREF));
// ldc.i4: pushes the 32-bit integer constant "c" typed as INT.
4291 void Interpreter::LdIcon(INT32 c)
4299     OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_INT));
4300     OpStackSet<INT32>(m_curStackHt, c);
// ldc.r4: pushes a float constant supplied as its raw 32-bit bit pattern;
// the slot is typed FLOAT but written as INT32 to avoid any FP conversion.
4304 void Interpreter::LdR4con(INT32 c)
4312     OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_FLOAT));
4313     OpStackSet<INT32>(m_curStackHt, c);
// ldc.i8: pushes the 64-bit integer constant "c" typed as LONG.
4317 void Interpreter::LdLcon(INT64 c)
4325     OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_LONG));
4326     OpStackSet<INT64>(m_curStackHt, c);
// ldc.r8: pushes a double constant supplied as its raw 64-bit bit pattern;
// the slot is typed DOUBLE but written as INT64 to avoid any FP conversion.
4330 void Interpreter::LdR8con(INT64 c)
4338     OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_DOUBLE));
4339     OpStackSet<INT64>(m_curStackHt, c);
// ldnull: pushes a null object reference typed as CLASS.
4343 void Interpreter::LdNull()
4351     OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_CLASS));
4352     OpStackSet<void*>(m_curStackHt, NULL);
// ldind.*: replaces the pointer on top of the operand stack with the T value
// it points to, retyped as the (stack-normal) CorInfoType "cit". Throws on a
// null/invalid pointer, then applies a memory barrier if the instruction
// carried a volatile. prefix (BarrierIfVolatile).
4356 template<typename T, CorInfoType cit>
4357 void Interpreter::LdInd()
4359     _ASSERTE(TOSIsPtr());
4360     _ASSERTE(IsStackNormalType(cit));
4361     unsigned curStackInd = m_curStackHt-1;
4362     T* ptr = OpStackGet<T*>(curStackInd);
4363     ThrowOnInvalidPointer(ptr);
4364     OpStackSet<T>(curStackInd, *ptr);
4365     OpStackTypeSet(curStackInd, InterpreterType(cit));
4366     BarrierIfVolatile();
// ldind for sub-4-byte integer types: loads *ptr and widens it to 32 bits --
// zero-extended when "isUnsigned", sign-extended otherwise -- then pushes it
// as INT, the stack-normal type for all short integers.
4369 template<typename T, bool isUnsigned>
4370 void Interpreter::LdIndShort()
4372     _ASSERTE(TOSIsPtr());
4373     _ASSERTE(sizeof(T) < 4);
4374     unsigned curStackInd = m_curStackHt-1;
4375     T* ptr = OpStackGet<T*>(curStackInd);
4376     ThrowOnInvalidPointer(ptr);
4379         OpStackSet<UINT32>(curStackInd, *ptr);
4383         OpStackSet<INT32>(curStackInd, *ptr);
4385     // All short integers are normalized to INT as their stack type.
4386     OpStackTypeSet(curStackInd, InterpreterType(CORINFO_TYPE_INT));
4387     BarrierIfVolatile();
// stind.*: pops a value (top of stack) and a pointer (just below it) and
// stores the value through the pointer. The barrier for a volatile. prefix is
// issued before the store; a tracing hook runs in INTERP_TRACING builds.
4390 template<typename T>
4391 void Interpreter::StInd()
4393     _ASSERTE(m_curStackHt >= 2);
4394     _ASSERTE(CorInfoTypeIsPointer(OpStackTypeGet(m_curStackHt-2).ToCorInfoType()));
4395     BarrierIfVolatile();
4396     unsigned stackInd0 = m_curStackHt-2;
4397     unsigned stackInd1 = m_curStackHt-1;
4398     T val = OpStackGet<T>(stackInd1);
4399     T* ptr = OpStackGet<T*>(stackInd0);
4400     ThrowOnInvalidPointer(ptr);
4405     if (s_TraceInterpreterILFlag.val(CLRConfig::INTERNAL_TraceInterpreterIL) &&
4410 #endif // INTERP_TRACING
// stind.ref: store-indirect specialized for object references -- uses
// SetObjectReference so the GC write barrier is applied to the store.
4413 void Interpreter::StInd_Ref()
4415     _ASSERTE(m_curStackHt >= 2);
4416     _ASSERTE(CorInfoTypeIsPointer(OpStackTypeGet(m_curStackHt-2).ToCorInfoType()));
4417     BarrierIfVolatile();
4418     unsigned stackInd0 = m_curStackHt-2;
4419     unsigned stackInd1 = m_curStackHt-1;
4420     OBJECTREF val = ObjectToOBJECTREF(OpStackGet<Object*>(stackInd1));
4421     OBJECTREF* ptr = OpStackGet<OBJECTREF*>(stackInd0);
4422     ThrowOnInvalidPointer(ptr);
4423     SetObjectReference(ptr, val);
4427     if (s_TraceInterpreterILFlag.val(CLRConfig::INTERNAL_TraceInterpreterIL) &&
4432 #endif // INTERP_TRACING
// Implements the non-overflow binary arithmetic IL operations (the template
// parameter "op" selects add/sub/mul/div/rem) on the two topmost operand
// stack entries. Dispatches on the shifted stack-normal type of the first
// operand, then on the second, per the ECMA-335 binary numeric operation
// result tables; s_InterpreterLooseRules admits extra combinations (e.g.
// int op long, long op nativeint). The actual computation, result push, and
// result-type update happen in BinaryArithOpWork; disallowed combinations
// raise VerificationError.
4437 void Interpreter::BinaryArithOp()
4445     _ASSERTE(m_curStackHt >= 2);
4446     unsigned op1idx = m_curStackHt - 2;
4447     unsigned op2idx = m_curStackHt - 1;
4448     InterpreterType t1 = OpStackTypeGet(op1idx);
4449     _ASSERTE(IsStackNormalType(t1.ToCorInfoType()));
4450     // Looking at the generated code, it does seem to save some instructions to use the "shifted
4451     // types," though the effect on end-to-end time is variable.  So I'll leave it set.
4452     InterpreterType t2 = OpStackTypeGet(op2idx);
4453     _ASSERTE(IsStackNormalType(t2.ToCorInfoType()));
4455     // In all cases belows, since "op" is compile-time constant, "if" chains on it should fold away.
4456     switch (t1.ToCorInfoTypeShifted())
4458     case CORINFO_TYPE_SHIFTED_INT:
4462             INT32 val1 = OpStackGet<INT32>(op1idx);
4463             INT32 val2 = OpStackGet<INT32>(op2idx);
4464             BinaryArithOpWork<op, INT32, /*IsIntType*/true, CORINFO_TYPE_INT, /*TypeIsUnchanged*/true>(val1, val2);
4468             CorInfoTypeShifted cits2 = t2.ToCorInfoTypeShifted();
4469             if (cits2 == CORINFO_TYPE_SHIFTED_NATIVEINT)
4471                 // Int op NativeInt = NativeInt
4472                 NativeInt val1 = static_cast<NativeInt>(OpStackGet<INT32>(op1idx));
4473                 NativeInt val2 = OpStackGet<NativeInt>(op2idx);
4474                 BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/false>(val1, val2);
4476             else if (s_InterpreterLooseRules && cits2 == CORINFO_TYPE_SHIFTED_LONG)
4478                 // Int op Long = Long
4479                 INT64 val1 = static_cast<INT64>(OpStackGet<INT32>(op1idx));
4480                 INT64 val2 = OpStackGet<INT64>(op2idx);
4481                 BinaryArithOpWork<op, INT64, /*IsIntType*/true, CORINFO_TYPE_LONG, /*TypeIsUnchanged*/false>(val1, val2);
4483             else if (cits2 == CORINFO_TYPE_SHIFTED_BYREF)
4485                 if (op == BA_Add || (s_InterpreterLooseRules && op == BA_Sub))
4487                     // Int + ByRef = ByRef
4488                     NativeInt val1 = static_cast<NativeInt>(OpStackGet<INT32>(op1idx));
4489                     NativeInt val2 = OpStackGet<NativeInt>(op2idx);
4490                     BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_BYREF, /*TypeIsUnchanged*/false>(val1, val2);
4494                     VerificationError("Operation not permitted on int and managed pointer.");
4499                 VerificationError("Binary arithmetic operation type mismatch (int and ?)");
4504     case CORINFO_TYPE_SHIFTED_NATIVEINT:
4506             NativeInt val1 = OpStackGet<NativeInt>(op1idx);
4509                 // NativeInt op NativeInt = NativeInt
4510                 NativeInt val2 = OpStackGet<NativeInt>(op2idx);
4511                 BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
4515                 CorInfoTypeShifted cits2 = t2.ToCorInfoTypeShifted();
4516                 if (cits2 == CORINFO_TYPE_SHIFTED_INT)
4518                     // NativeInt op Int = NativeInt
4519                     NativeInt val2 = static_cast<NativeInt>(OpStackGet<INT32>(op2idx));
4520                     BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
4522                 // CLI spec does not allow adding a native int and an int64. So use loose rules.
4523                 else if (s_InterpreterLooseRules && cits2 == CORINFO_TYPE_SHIFTED_LONG)
4525                     // NativeInt op Int = NativeInt
4526                     NativeInt val2 = static_cast<NativeInt>(OpStackGet<INT64>(op2idx));
4527                     BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
4529                 else if (cits2 == CORINFO_TYPE_SHIFTED_BYREF)
4531                     if (op == BA_Add || (s_InterpreterLooseRules && op == BA_Sub))
4533                         // NativeInt + ByRef = ByRef
4534                         NativeInt val2 = OpStackGet<NativeInt>(op2idx);
4535                         BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_BYREF, /*TypeIsUnchanged*/false>(val1, val2);
4539                         VerificationError("Operation not permitted on native int and managed pointer.");
4544                     VerificationError("Binary arithmetic operation type mismatch (native int and ?)");
4550     case CORINFO_TYPE_SHIFTED_LONG:
4552             bool looseLong = false;
4553 #if defined(HOST_AMD64)
4554             looseLong = (s_InterpreterLooseRules && (t2.ToCorInfoType() == CORINFO_TYPE_NATIVEINT ||
4555                     t2.ToCorInfoType() == CORINFO_TYPE_BYREF));
4557             if (t1 == t2 || looseLong)
4559                 // Long op Long = Long
4560                 INT64 val1 = OpStackGet<INT64>(op1idx);
4561                 INT64 val2 = OpStackGet<INT64>(op2idx);
4562                 BinaryArithOpWork<op, INT64, /*IsIntType*/true, CORINFO_TYPE_LONG, /*TypeIsUnchanged*/true>(val1, val2);
4566                 VerificationError("Binary arithmetic operation type mismatch (long and ?)");
4571     case CORINFO_TYPE_SHIFTED_FLOAT:
4575                 // Float op Float = Float
4576                 float val1 = OpStackGet<float>(op1idx);
4577                 float val2 = OpStackGet<float>(op2idx);
4578                 BinaryArithOpWork<op, float, /*IsIntType*/false, CORINFO_TYPE_FLOAT, /*TypeIsUnchanged*/true>(val1, val2);
4582                 CorInfoTypeShifted cits2 = t2.ToCorInfoTypeShifted();
4583                 if (cits2 == CORINFO_TYPE_SHIFTED_DOUBLE)
4585                     // Float op Double = Double
4586                     double val1 = static_cast<double>(OpStackGet<float>(op1idx));
4587                     double val2 = OpStackGet<double>(op2idx);
4588                     BinaryArithOpWork<op, double, /*IsIntType*/false, CORINFO_TYPE_DOUBLE, /*TypeIsUnchanged*/false>(val1, val2);
4592                     VerificationError("Binary arithmetic operation type mismatch (float and ?)");
4598     case CORINFO_TYPE_SHIFTED_DOUBLE:
4602                 // Double op Double = Double
4603                 double val1 = OpStackGet<double>(op1idx);
4604                 double val2 = OpStackGet<double>(op2idx);
4605                 BinaryArithOpWork<op, double, /*IsIntType*/false, CORINFO_TYPE_DOUBLE, /*TypeIsUnchanged*/true>(val1, val2);
4609                 CorInfoTypeShifted cits2 = t2.ToCorInfoTypeShifted();
4610                 if (cits2 == CORINFO_TYPE_SHIFTED_FLOAT)
4612                     // Double op Float = Double
4613                     double val1 = OpStackGet<double>(op1idx);
4614                     double val2 = static_cast<double>(OpStackGet<float>(op2idx));
4615                     BinaryArithOpWork<op, double, /*IsIntType*/false, CORINFO_TYPE_DOUBLE, /*TypeIsUnchanged*/true>(val1, val2);
4619                     VerificationError("Binary arithmetic operation type mismatch (double and ?)");
4625     case CORINFO_TYPE_SHIFTED_BYREF:
4627             NativeInt val1 = OpStackGet<NativeInt>(op1idx);
4628             CorInfoTypeShifted cits2 = t2.ToCorInfoTypeShifted();
4629             if (cits2 == CORINFO_TYPE_SHIFTED_INT)
4631                 if (op == BA_Add || op == BA_Sub)
4633                     // ByRef +- Int = ByRef
4634                     NativeInt val2 = static_cast<NativeInt>(OpStackGet<INT32>(op2idx));
4635                     BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_BYREF, /*TypeIsUnchanged*/true>(val1, val2);
4639                     VerificationError("May only add/subtract managed pointer and integral value.");
4642             else if (cits2 == CORINFO_TYPE_SHIFTED_NATIVEINT)
4644                 if (op == BA_Add || op == BA_Sub)
4646                     // ByRef +- NativeInt = ByRef
4647                     NativeInt val2 = OpStackGet<NativeInt>(op2idx);
4648                     BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_BYREF, /*TypeIsUnchanged*/true>(val1, val2);
4652                     VerificationError("May only add/subtract managed pointer and integral value.");
4655             else if (cits2 == CORINFO_TYPE_SHIFTED_BYREF)
4659                     // ByRef - ByRef = NativeInt
4660                     NativeInt val2 = OpStackGet<NativeInt>(op2idx);
4661                     BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/false>(val1, val2);
4665                     VerificationError("May only subtract managed pointer values.");
4668             // CLI spec does not allow adding a native int and an int64. So use loose rules.
4669             else if (s_InterpreterLooseRules && cits2 == CORINFO_TYPE_SHIFTED_LONG)
4671                 // NativeInt op Int = NativeInt
// NOTE(review): this is the loose ByRef-op-Long case; the comment above and
// the TypeIsUnchanged=true flag (which would leave the result typed BYREF
// despite the CORINFO_TYPE_NATIVEINT template argument) look copy-pasted
// from the NativeInt case -- confirm the intended result type.
4672                 NativeInt val2 = static_cast<NativeInt>(OpStackGet<INT64>(op2idx));
4673                 BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
4677                 VerificationError("Binary arithmetic operation not permitted on byref");
4682     case CORINFO_TYPE_SHIFTED_CLASS:
4683         VerificationError("Can't do binary arithmetic on object references.");
4687         _ASSERTE_MSG(false, "Non-stack-normal type on stack.");
// Implements the overflow-checked binary arithmetic IL operations: the
// template parameter "op" selects add.ovf/sub.ovf/mul.ovf and "asUnsigned"
// selects the .un forms. Dispatches on both operands' stack-normal
// CorInfoTypes; the overflow detection, result push, and result-type update
// happen in BinaryArithOvfOpWork. Illegal combinations (per the ECMA-335
// overflow-arithmetic table) raise VerificationError.
4694 template<int op, bool asUnsigned>
4695 void Interpreter::BinaryArithOvfOp()
4703     _ASSERTE(m_curStackHt >= 2);
4704     unsigned op1idx = m_curStackHt - 2;
4705     unsigned op2idx = m_curStackHt - 1;
4707     InterpreterType t1 = OpStackTypeGet(op1idx);
4708     CorInfoType cit1 = t1.ToCorInfoType();
4709     _ASSERTE(IsStackNormalType(cit1));
4711     InterpreterType t2 = OpStackTypeGet(op2idx);
4712     CorInfoType cit2 = t2.ToCorInfoType();
4713     _ASSERTE(IsStackNormalType(cit2));
4715     // In all cases belows, since "op" is compile-time constant, "if" chains on it should fold away.
4718     case CORINFO_TYPE_INT:
4719         if (cit2 == CORINFO_TYPE_INT)
4723                 // UnsignedInt op UnsignedInt = UnsignedInt
4724                 UINT32 val1 = OpStackGet<UINT32>(op1idx);
4725                 UINT32 val2 = OpStackGet<UINT32>(op2idx);
4726                 BinaryArithOvfOpWork<op, UINT32, CORINFO_TYPE_INT, /*TypeIsUnchanged*/true>(val1, val2);
4731                 INT32 val1 = OpStackGet<INT32>(op1idx);
4732                 INT32 val2 = OpStackGet<INT32>(op2idx);
4733                 BinaryArithOvfOpWork<op, INT32, CORINFO_TYPE_INT, /*TypeIsUnchanged*/true>(val1, val2);
4736         else if (cit2 == CORINFO_TYPE_NATIVEINT)
4740                 // UnsignedInt op UnsignedNativeInt = UnsignedNativeInt
4741                 NativeUInt val1 = static_cast<NativeUInt>(OpStackGet<UINT32>(op1idx));
4742                 NativeUInt val2 = OpStackGet<NativeUInt>(op2idx);
4743                 BinaryArithOvfOpWork<op, NativeUInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/false>(val1, val2);
4747                 // Int op NativeInt = NativeInt
4748                 NativeInt val1 = static_cast<NativeInt>(OpStackGet<INT32>(op1idx));
4749                 NativeInt val2 = OpStackGet<NativeInt>(op2idx);
4750                 BinaryArithOvfOpWork<op, NativeInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/false>(val1, val2);
4753         else if (cit2 == CORINFO_TYPE_BYREF)
// Only add.ovf.un may combine an integer with a managed pointer.
4755             if (asUnsigned && op == BA_Add)
4757                 // UnsignedInt + ByRef = ByRef
4758                 NativeUInt val1 = static_cast<NativeUInt>(OpStackGet<UINT32>(op1idx));
4759                 NativeUInt val2 = OpStackGet<NativeUInt>(op2idx);
4760                 BinaryArithOvfOpWork<op, NativeUInt, CORINFO_TYPE_BYREF, /*TypeIsUnchanged*/false>(val1, val2);
4764                 VerificationError("Illegal arithmetic overflow operation for int and byref.");
4769             VerificationError("Binary arithmetic overflow operation type mismatch (int and ?)");
4773     case CORINFO_TYPE_NATIVEINT:
4774         if (cit2 == CORINFO_TYPE_INT)
4778                 // UnsignedNativeInt op UnsignedInt = UnsignedNativeInt
4779                 NativeUInt val1 = OpStackGet<NativeUInt>(op1idx);
4780                 NativeUInt val2 = static_cast<NativeUInt>(OpStackGet<UINT32>(op2idx));
4781                 BinaryArithOvfOpWork<op, NativeUInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
4785                 // NativeInt op Int = NativeInt
4786                 NativeInt val1 = OpStackGet<NativeInt>(op1idx);
4787                 NativeInt val2 = static_cast<NativeInt>(OpStackGet<INT32>(op2idx));
4788                 BinaryArithOvfOpWork<op, NativeInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
4791         else if (cit2 == CORINFO_TYPE_NATIVEINT)
4795                 // UnsignedNativeInt op UnsignedNativeInt = UnsignedNativeInt
4796                 NativeUInt val1 = OpStackGet<NativeUInt>(op1idx);
4797                 NativeUInt val2 = OpStackGet<NativeUInt>(op2idx);
4798                 BinaryArithOvfOpWork<op, NativeUInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
4802                 // NativeInt op NativeInt = NativeInt
4803                 NativeInt val1 = OpStackGet<NativeInt>(op1idx);
4804                 NativeInt val2 = OpStackGet<NativeInt>(op2idx);
4805                 BinaryArithOvfOpWork<op, NativeInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
4808         else if (cit2 == CORINFO_TYPE_BYREF)
4810             if (asUnsigned && op == BA_Add)
4812                 // UnsignedNativeInt op ByRef = ByRef
// NOTE(review): val1 is read via OpStackGet<UINT32> here, unlike the other
// native-int paths which use OpStackGet<NativeUInt>; on a 64-bit host this
// would truncate the first operand -- confirm whether this is intended.
4813                 NativeUInt val1 = OpStackGet<UINT32>(op1idx);
4814                 NativeUInt val2 = OpStackGet<NativeUInt>(op2idx);
4815                 BinaryArithOvfOpWork<op, NativeUInt, CORINFO_TYPE_BYREF, /*TypeIsUnchanged*/false>(val1, val2);
4819                 VerificationError("Illegal arithmetic overflow operation for native int and byref.");
4824             VerificationError("Binary arithmetic overflow operation type mismatch (native int and ?)");
4828     case CORINFO_TYPE_LONG:
4829         if (cit2 == CORINFO_TYPE_LONG || (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_NATIVEINT))
4833                 // UnsignedLong op UnsignedLong = UnsignedLong
4834                 UINT64 val1 = OpStackGet<UINT64>(op1idx);
4835                 UINT64 val2 = OpStackGet<UINT64>(op2idx);
4836                 BinaryArithOvfOpWork<op, UINT64, CORINFO_TYPE_LONG, /*TypeIsUnchanged*/true>(val1, val2);
4840                 // Long op Long = Long
4841                 INT64 val1 = OpStackGet<INT64>(op1idx);
4842                 INT64 val2 = OpStackGet<INT64>(op2idx);
4843                 BinaryArithOvfOpWork<op, INT64, CORINFO_TYPE_LONG, /*TypeIsUnchanged*/true>(val1, val2);
4848             VerificationError("Binary arithmetic overflow operation type mismatch (long and ?)");
4852     case CORINFO_TYPE_BYREF:
// Byrefs permit only the unsigned add/sub forms; byref - byref yields a
// native unsigned int, all other combinations are verification errors.
4853         if (asUnsigned && (op == BA_Add || op == BA_Sub))
4855             NativeUInt val1 = OpStackGet<NativeUInt>(op1idx);
4856             if (cit2 == CORINFO_TYPE_INT)
4858                 // ByRef +- UnsignedInt = ByRef
4859                 NativeUInt val2 = static_cast<NativeUInt>(OpStackGet<INT32>(op2idx));
4860                 BinaryArithOvfOpWork<op, NativeUInt, CORINFO_TYPE_BYREF, /*TypeIsUnchanged*/true>(val1, val2);
4862             else if (cit2 == CORINFO_TYPE_NATIVEINT)
4864                 // ByRef +- UnsignedNativeInt = ByRef
4865                 NativeUInt val2 = OpStackGet<NativeUInt>(op2idx);
4866                 BinaryArithOvfOpWork<op, NativeUInt, CORINFO_TYPE_BYREF, /*TypeIsUnchanged*/true>(val1, val2);
4868             else if (cit2 == CORINFO_TYPE_BYREF)
4872                     // ByRef - ByRef = UnsignedNativeInt
4873                     NativeUInt val2 = OpStackGet<NativeUInt>(op2idx);
4874                     BinaryArithOvfOpWork<op, NativeUInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/false>(val1, val2);
4878                     VerificationError("Illegal arithmetic overflow operation for byref and byref: may only subtract managed pointer values.");
4883                 VerificationError("Binary arithmetic overflow operation not permitted on byref");
4890                 VerificationError("Signed binary arithmetic overflow operation not permitted on managed pointer values.");
4894                 _ASSERTE_MSG(op == BA_Mul, "Must be an overflow operation; tested for Add || Sub above.");
4895                 VerificationError("Cannot multiply managed pointer values.");
4901         _ASSERTE_MSG(false, "Non-stack-normal type on stack.");
4908 template<int op, typename T, CorInfoType cit, bool TypeIsUnchanged>
4909 void Interpreter::BinaryArithOvfOpWork(T val1, T val2)
4918 ClrSafeInt<T> safeV1(val1);
4919 ClrSafeInt<T> safeV2(val2);
4922 res = safeV1 + safeV2;
4924 else if (op == BA_Sub)
4926 res = safeV1 - safeV2;
4928 else if (op == BA_Mul)
4930 res = safeV1 * safeV2;
4934 _ASSERTE_MSG(false, "op should be one of the overflow ops...");
4937 if (res.IsOverflow())
4939 ThrowOverflowException();
4942 unsigned residx = m_curStackHt - 2;
4943 OpStackSet<T>(residx, res.Value());
4944 if (!TypeIsUnchanged)
4946 OpStackTypeSet(residx, InterpreterType(cit));
4951 void Interpreter::BinaryIntOp()
4959 _ASSERTE(m_curStackHt >= 2);
4960 unsigned op1idx = m_curStackHt - 2;
4961 unsigned op2idx = m_curStackHt - 1;
4963 InterpreterType t1 = OpStackTypeGet(op1idx);
4964 CorInfoType cit1 = t1.ToCorInfoType();
4965 _ASSERTE(IsStackNormalType(cit1));
4967 InterpreterType t2 = OpStackTypeGet(op2idx);
4968 CorInfoType cit2 = t2.ToCorInfoType();
4969 _ASSERTE(IsStackNormalType(cit2));
4971 // In all cases belows, since "op" is compile-time constant, "if" chains on it should fold away.
4974 case CORINFO_TYPE_INT:
4975 if (cit2 == CORINFO_TYPE_INT)
4978 UINT32 val1 = OpStackGet<UINT32>(op1idx);
4979 UINT32 val2 = OpStackGet<UINT32>(op2idx);
4980 BinaryIntOpWork<op, UINT32, CORINFO_TYPE_INT, /*TypeIsUnchanged*/true>(val1, val2);
4982 else if (cit2 == CORINFO_TYPE_NATIVEINT)
4984 // Int op NativeInt = NativeInt
4985 NativeUInt val1 = static_cast<NativeUInt>(OpStackGet<INT32>(op1idx));
4986 NativeUInt val2 = OpStackGet<NativeUInt>(op2idx);
4987 BinaryIntOpWork<op, NativeUInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/false>(val1, val2);
4989 else if (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_BYREF)
4991 // Int op NativeUInt = NativeUInt
4992 NativeUInt val1 = static_cast<NativeUInt>(OpStackGet<INT32>(op1idx));
4993 NativeUInt val2 = OpStackGet<NativeUInt>(op2idx);
4994 BinaryIntOpWork<op, NativeUInt, CORINFO_TYPE_BYREF, /*TypeIsUnchanged*/false>(val1, val2);
4998 VerificationError("Binary arithmetic operation type mismatch (int and ?)");
5002 case CORINFO_TYPE_NATIVEINT:
5003 if (cit2 == CORINFO_TYPE_NATIVEINT)
5005 // NativeInt op NativeInt = NativeInt
5006 NativeUInt val1 = OpStackGet<NativeUInt>(op1idx);
5007 NativeUInt val2 = OpStackGet<NativeUInt>(op2idx);
5008 BinaryIntOpWork<op, NativeUInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
5010 else if (cit2 == CORINFO_TYPE_INT)
5012 // NativeInt op Int = NativeInt
5013 NativeUInt val1 = OpStackGet<NativeUInt>(op1idx);
5014 NativeUInt val2 = static_cast<NativeUInt>(OpStackGet<INT32>(op2idx));
5015 BinaryIntOpWork<op, NativeUInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
5017 // CLI spec does not allow adding a native int and an int64. So use loose rules.
5018 else if (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_LONG)
5020 // NativeInt op Int = NativeInt
5021 NativeUInt val1 = OpStackGet<NativeUInt>(op1idx);
5022 NativeUInt val2 = static_cast<NativeUInt>(OpStackGet<INT64>(op2idx));
5023 BinaryIntOpWork<op, NativeUInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
5027 VerificationError("Binary arithmetic operation type mismatch (native int and ?)");
5031 case CORINFO_TYPE_LONG:
5032 if (cit2 == CORINFO_TYPE_LONG || (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_NATIVEINT))
5034 // Long op Long = Long
5035 UINT64 val1 = OpStackGet<UINT64>(op1idx);
5036 UINT64 val2 = OpStackGet<UINT64>(op2idx);
5037 BinaryIntOpWork<op, UINT64, CORINFO_TYPE_LONG, /*TypeIsUnchanged*/true>(val1, val2);
5041 VerificationError("Binary arithmetic operation type mismatch (long and ?)");
5046 VerificationError("Illegal operation for non-integral data type.");
5053 template<int op, typename T, CorInfoType cit, bool TypeIsUnchanged>
5054 void Interpreter::BinaryIntOpWork(T val1, T val2)
5061 else if (op == BIO_Or)
5065 else if (op == BIO_Xor)
5071 _ASSERTE(op == BIO_DivUn || op == BIO_RemUn);
5074 ThrowDivideByZero();
5076 else if (val2 == static_cast<T>(-1) && val1 == static_cast<T>(((UINT64)1) << (sizeof(T)*8 - 1))) // min int / -1 is not representable.
5078 ThrowSysArithException();
5081 if (op == BIO_DivUn)
5091 unsigned residx = m_curStackHt - 2;
5092 OpStackSet<T>(residx, res);
5093 if (!TypeIsUnchanged)
5095 OpStackTypeSet(residx, InterpreterType(cit));
5100 void Interpreter::ShiftOp()
5108 _ASSERTE(m_curStackHt >= 2);
5109 unsigned op1idx = m_curStackHt - 2;
5110 unsigned op2idx = m_curStackHt - 1;
5112 InterpreterType t1 = OpStackTypeGet(op1idx);
5113 CorInfoType cit1 = t1.ToCorInfoType();
5114 _ASSERTE(IsStackNormalType(cit1));
5116 InterpreterType t2 = OpStackTypeGet(op2idx);
5117 CorInfoType cit2 = t2.ToCorInfoType();
5118 _ASSERTE(IsStackNormalType(cit2));
5120 // In all cases belows, since "op" is compile-time constant, "if" chains on it should fold away.
5123 case CORINFO_TYPE_INT:
5124 ShiftOpWork<op, INT32, UINT32>(op1idx, cit2);
5127 case CORINFO_TYPE_NATIVEINT:
5128 ShiftOpWork<op, NativeInt, NativeUInt>(op1idx, cit2);
5131 case CORINFO_TYPE_LONG:
5132 ShiftOpWork<op, INT64, UINT64>(op1idx, cit2);
5136 VerificationError("Illegal value type for shift operation.");
5143 template<int op, typename T, typename UT>
5144 void Interpreter::ShiftOpWork(unsigned op1idx, CorInfoType cit2)
5146 T val = OpStackGet<T>(op1idx);
5147 unsigned op2idx = op1idx + 1;
5150 if (cit2 == CORINFO_TYPE_INT)
5152 INT32 shiftAmt = OpStackGet<INT32>(op2idx);
5155 res = val << shiftAmt; // TODO: Check that C++ semantics matches IL.
5157 else if (op == CEE_SHR)
5159 res = val >> shiftAmt;
5163 _ASSERTE(op == CEE_SHR_UN);
5164 res = (static_cast<UT>(val)) >> shiftAmt;
5167 else if (cit2 == CORINFO_TYPE_NATIVEINT)
5169 NativeInt shiftAmt = OpStackGet<NativeInt>(op2idx);
5172 res = val << shiftAmt; // TODO: Check that C++ semantics matches IL.
5174 else if (op == CEE_SHR)
5176 res = val >> shiftAmt;
5180 _ASSERTE(op == CEE_SHR_UN);
5181 res = (static_cast<UT>(val)) >> shiftAmt;
5186 VerificationError("Operand type mismatch for shift operator.");
5188 OpStackSet<T>(op1idx, res);
5192 void Interpreter::Neg()
5200 _ASSERTE(m_curStackHt >= 1);
5201 unsigned opidx = m_curStackHt - 1;
5203 InterpreterType t1 = OpStackTypeGet(opidx);
5204 CorInfoType cit1 = t1.ToCorInfoType();
5205 _ASSERTE(IsStackNormalType(cit1));
5209 case CORINFO_TYPE_INT:
5210 OpStackSet<INT32>(opidx, -OpStackGet<INT32>(opidx));
5213 case CORINFO_TYPE_NATIVEINT:
5214 OpStackSet<NativeInt>(opidx, -OpStackGet<NativeInt>(opidx));
5217 case CORINFO_TYPE_LONG:
5218 OpStackSet<INT64>(opidx, -OpStackGet<INT64>(opidx));
5221 case CORINFO_TYPE_FLOAT:
5222 OpStackSet<float>(opidx, -OpStackGet<float>(opidx));
5225 case CORINFO_TYPE_DOUBLE:
5226 OpStackSet<double>(opidx, -OpStackGet<double>(opidx));
5230 VerificationError("Illegal operand type for Neg operation.");
5234 void Interpreter::Not()
5242 _ASSERTE(m_curStackHt >= 1);
5243 unsigned opidx = m_curStackHt - 1;
5245 InterpreterType t1 = OpStackTypeGet(opidx);
5246 CorInfoType cit1 = t1.ToCorInfoType();
5247 _ASSERTE(IsStackNormalType(cit1));
5251 case CORINFO_TYPE_INT:
5252 OpStackSet<INT32>(opidx, ~OpStackGet<INT32>(opidx));
5255 case CORINFO_TYPE_NATIVEINT:
5256 OpStackSet<NativeInt>(opidx, ~OpStackGet<NativeInt>(opidx));
5259 case CORINFO_TYPE_LONG:
5260 OpStackSet<INT64>(opidx, ~OpStackGet<INT64>(opidx));
5264 VerificationError("Illegal operand type for Not operation.");
5268 template<typename T, bool TIsUnsigned, bool TCanHoldPtr, bool TIsShort, CorInfoType cit>
5269 void Interpreter::Conv()
5277 _ASSERTE(m_curStackHt >= 1);
5278 unsigned opidx = m_curStackHt - 1;
5280 InterpreterType t1 = OpStackTypeGet(opidx);
5281 CorInfoType cit1 = t1.ToCorInfoType();
5282 _ASSERTE(IsStackNormalType(cit1));
5287 case CORINFO_TYPE_INT:
5290 // Must convert the 32 bit value to unsigned first, so that we zero-extend if necessary.
5291 val = static_cast<T>(static_cast<UINT32>(OpStackGet<INT32>(opidx)));
5295 val = static_cast<T>(OpStackGet<INT32>(opidx));
5299 case CORINFO_TYPE_NATIVEINT:
5302 // NativeInt might be 32 bits, so convert to unsigned before possibly widening.
5303 val = static_cast<T>(static_cast<NativeUInt>(OpStackGet<NativeInt>(opidx)));
5307 val = static_cast<T>(OpStackGet<NativeInt>(opidx));
5311 case CORINFO_TYPE_LONG:
5312 val = static_cast<T>(OpStackGet<INT64>(opidx));
5315 // TODO: Make sure that the C++ conversions do the right thing (truncate to zero...)
5316 case CORINFO_TYPE_FLOAT:
5317 val = static_cast<T>(OpStackGet<float>(opidx));
5320 case CORINFO_TYPE_DOUBLE:
5321 val = static_cast<T>(OpStackGet<double>(opidx));
5324 case CORINFO_TYPE_BYREF:
5325 case CORINFO_TYPE_CLASS:
5326 case CORINFO_TYPE_STRING:
5327 if (!TCanHoldPtr && !s_InterpreterLooseRules)
5329 VerificationError("Conversion of pointer value to type that can't hold its value.");
5333 // (Must first convert to NativeInt, because the compiler believes this might be applied for T =
5334 // float or double. It won't, by the test above, and the extra cast shouldn't generate any code...)
5335 val = static_cast<T>(reinterpret_cast<NativeInt>(OpStackGet<void*>(opidx)));
5339 VerificationError("Illegal operand type for conv.* operation.");
5345 OpStackSet<INT32>(opidx, static_cast<INT32>(val));
5349 OpStackSet<T>(opidx, val);
5352 OpStackTypeSet(opidx, InterpreterType(cit));
5356 void Interpreter::ConvRUn()
5364 _ASSERTE(m_curStackHt >= 1);
5365 unsigned opidx = m_curStackHt - 1;
5367 InterpreterType t1 = OpStackTypeGet(opidx);
5368 CorInfoType cit1 = t1.ToCorInfoType();
5369 _ASSERTE(IsStackNormalType(cit1));
5373 case CORINFO_TYPE_INT:
5374 OpStackSet<double>(opidx, static_cast<double>(OpStackGet<UINT32>(opidx)));
5377 case CORINFO_TYPE_NATIVEINT:
5378 OpStackSet<double>(opidx, static_cast<double>(OpStackGet<NativeUInt>(opidx)));
5381 case CORINFO_TYPE_LONG:
5382 OpStackSet<double>(opidx, static_cast<double>(OpStackGet<UINT64>(opidx)));
5385 case CORINFO_TYPE_DOUBLE:
5389 VerificationError("Illegal operand type for conv.r.un operation.");
5392 OpStackTypeSet(opidx, InterpreterType(CORINFO_TYPE_DOUBLE));
5395 template<typename T, INT64 TMin, UINT64 TMax, bool TCanHoldPtr, CorInfoType cit>
5396 void Interpreter::ConvOvf()
5404 _ASSERTE(m_curStackHt >= 1);
5405 unsigned opidx = m_curStackHt - 1;
5407 InterpreterType t1 = OpStackTypeGet(opidx);
5408 CorInfoType cit1 = t1.ToCorInfoType();
5409 _ASSERTE(IsStackNormalType(cit1));
5413 case CORINFO_TYPE_INT:
5415 INT32 i4 = OpStackGet<INT32>(opidx);
5418 ThrowOverflowException();
5420 OpStackSet<T>(opidx, static_cast<T>(i4));
5424 case CORINFO_TYPE_NATIVEINT:
5426 NativeInt i = OpStackGet<NativeInt>(opidx);
5429 ThrowOverflowException();
5431 OpStackSet<T>(opidx, static_cast<T>(i));
5435 case CORINFO_TYPE_LONG:
5437 INT64 i8 = OpStackGet<INT64>(opidx);
5440 ThrowOverflowException();
5442 OpStackSet<T>(opidx, static_cast<T>(i8));
5446 // Make sure that the C++ conversions do the right thing (truncate to zero...)
5447 case CORINFO_TYPE_FLOAT:
5449 float f = OpStackGet<float>(opidx);
5450 if (!FloatFitsInIntType<TMin, TMax>(f))
5452 ThrowOverflowException();
5454 OpStackSet<T>(opidx, static_cast<T>(f));
5458 case CORINFO_TYPE_DOUBLE:
5460 double d = OpStackGet<double>(opidx);
5461 if (!DoubleFitsInIntType<TMin, TMax>(d))
5463 ThrowOverflowException();
5465 OpStackSet<T>(opidx, static_cast<T>(d));
5469 case CORINFO_TYPE_BYREF:
5470 case CORINFO_TYPE_CLASS:
5471 case CORINFO_TYPE_STRING:
5474 VerificationError("Conversion of pointer value to type that can't hold its value.");
5478 // (Must first convert to NativeInt, because the compiler believes this might be applied for T =
5479 // float or double. It won't, by the test above, and the extra cast shouldn't generate any code...
5480 OpStackSet<T>(opidx, static_cast<T>(reinterpret_cast<NativeInt>(OpStackGet<void*>(opidx))));
5484 VerificationError("Illegal operand type for conv.ovf.* operation.");
5487 _ASSERTE_MSG(IsStackNormalType(cit), "Precondition.");
5488 OpStackTypeSet(opidx, InterpreterType(cit));
5491 template<typename T, INT64 TMin, UINT64 TMax, bool TCanHoldPtr, CorInfoType cit>
5492 void Interpreter::ConvOvfUn()
5500 _ASSERTE(m_curStackHt >= 1);
5501 unsigned opidx = m_curStackHt - 1;
5503 InterpreterType t1 = OpStackTypeGet(opidx);
5504 CorInfoType cit1 = t1.ToCorInfoType();
5505 _ASSERTE(IsStackNormalType(cit1));
5509 case CORINFO_TYPE_INT:
5511 UINT32 ui4 = OpStackGet<UINT32>(opidx);
5512 if (!FitsIn<T>(ui4))
5514 ThrowOverflowException();
5516 OpStackSet<T>(opidx, static_cast<T>(ui4));
5520 case CORINFO_TYPE_NATIVEINT:
5522 NativeUInt ui = OpStackGet<NativeUInt>(opidx);
5525 ThrowOverflowException();
5527 OpStackSet<T>(opidx, static_cast<T>(ui));
5531 case CORINFO_TYPE_LONG:
5533 UINT64 ui8 = OpStackGet<UINT64>(opidx);
5534 if (!FitsIn<T>(ui8))
5536 ThrowOverflowException();
5538 OpStackSet<T>(opidx, static_cast<T>(ui8));
5542 // Make sure that the C++ conversions do the right thing (truncate to zero...)
5543 case CORINFO_TYPE_FLOAT:
5545 float f = OpStackGet<float>(opidx);
5546 if (!FloatFitsInIntType<TMin, TMax>(f))
5548 ThrowOverflowException();
5550 OpStackSet<T>(opidx, static_cast<T>(f));
5554 case CORINFO_TYPE_DOUBLE:
5556 double d = OpStackGet<double>(opidx);
5557 if (!DoubleFitsInIntType<TMin, TMax>(d))
5559 ThrowOverflowException();
5561 OpStackSet<T>(opidx, static_cast<T>(d));
5565 case CORINFO_TYPE_BYREF:
5566 case CORINFO_TYPE_CLASS:
5567 case CORINFO_TYPE_STRING:
5570 VerificationError("Conversion of pointer value to type that can't hold its value.");
5574 // (Must first convert to NativeInt, because the compiler believes this might be applied for T =
5575 // float or double. It won't, by the test above, and the extra cast shouldn't generate any code...
5576 OpStackSet<T>(opidx, static_cast<T>(reinterpret_cast<NativeInt>(OpStackGet<void*>(opidx))));
5580 VerificationError("Illegal operand type for conv.ovf.*.un operation.");
5583 _ASSERTE_MSG(IsStackNormalType(cit), "Precondition.");
5584 OpStackTypeSet(opidx, InterpreterType(cit));
5587 void Interpreter::LdObj()
5595 BarrierIfVolatile();
5597 _ASSERTE(m_curStackHt > 0);
5598 unsigned ind = m_curStackHt - 1;
5601 CorInfoType cit = OpStackTypeGet(ind).ToCorInfoType();
5602 _ASSERTE_MSG(IsValidPointerType(cit), "Expect pointer on stack");
5606 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_LdObj]);
5607 #endif // INTERP_TRACING
5609 // TODO: GetTypeFromToken also uses GCX_PREEMP(); can we merge it with the getClassAttribs() block below, and do it just once?
5610 CORINFO_CLASS_HANDLE clsHnd = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_LdObj));
5614 clsAttribs = m_interpCeeInfo.getClassAttribs(clsHnd);
5617 void* src = OpStackGet<void*>(ind);
5618 ThrowOnInvalidPointer(src);
5620 if (clsAttribs & CORINFO_FLG_VALUECLASS)
5622 LdObjValueClassWork(clsHnd, ind, src);
5626 OpStackSet<void*>(ind, *reinterpret_cast<void**>(src));
5627 OpStackTypeSet(ind, InterpreterType(CORINFO_TYPE_CLASS));
5632 void Interpreter::LdObjValueClassWork(CORINFO_CLASS_HANDLE valueClsHnd, unsigned ind, void* src)
5640 // "src" is a byref, which may be into an object. GCPROTECT for the call below.
5641 GCPROTECT_BEGININTERIOR(src);
5643 InterpreterType it = InterpreterType(&m_interpCeeInfo, valueClsHnd);
5644 size_t sz = it.Size(&m_interpCeeInfo);
5645 // Note that the memcpy's below are permissible because the destination is in the operand stack.
5646 if (sz > sizeof(INT64))
5648 void* dest = LargeStructOperandStackPush(sz);
5649 memcpy(dest, src, sz);
5650 OpStackSet<void*>(ind, dest);
5654 OpStackSet<INT64>(ind, GetSmallStructValue(src, sz));
5657 OpStackTypeSet(ind, it.StackNormalize());
5662 CORINFO_CLASS_HANDLE Interpreter::GetTypeFromToken(BYTE* codePtr, CorInfoTokenKind tokKind InterpTracingArg(ResolveTokenKind rtk))
5672 CORINFO_RESOLVED_TOKEN typeTok;
5673 ResolveToken(&typeTok, getU4LittleEndian(codePtr), tokKind InterpTracingArg(rtk));
5674 return typeTok.hClass;
5677 bool Interpreter::IsValidPointerType(CorInfoType cit)
5679 bool isValid = (cit == CORINFO_TYPE_NATIVEINT || cit == CORINFO_TYPE_BYREF);
5680 #if defined(HOST_AMD64)
5681 isValid = isValid || (s_InterpreterLooseRules && cit == CORINFO_TYPE_LONG);
5686 void Interpreter::CpObj()
5694 _ASSERTE(m_curStackHt >= 2);
5695 unsigned destInd = m_curStackHt - 2;
5696 unsigned srcInd = m_curStackHt - 1;
5699 // Check that src and dest are both pointer types.
5700 CorInfoType cit = OpStackTypeGet(destInd).ToCorInfoType();
5701 _ASSERTE_MSG(IsValidPointerType(cit), "Expect pointer on stack for dest of cpobj");
5703 cit = OpStackTypeGet(srcInd).ToCorInfoType();
5704 _ASSERTE_MSG(IsValidPointerType(cit), "Expect pointer on stack for src of cpobj");
5708 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_CpObj]);
5709 #endif // INTERP_TRACING
5711 CORINFO_CLASS_HANDLE clsHnd = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_CpObj));
5715 clsAttribs = m_interpCeeInfo.getClassAttribs(clsHnd);
5718 void* dest = OpStackGet<void*>(destInd);
5719 void* src = OpStackGet<void*>(srcInd);
5721 ThrowOnInvalidPointer(dest);
5722 ThrowOnInvalidPointer(src);
5724 // dest and src are vulnerable byrefs.
5727 if (clsAttribs & CORINFO_FLG_VALUECLASS)
5729 CopyValueClassUnchecked(dest, src, GetMethodTableFromClsHnd(clsHnd));
5733 OBJECTREF val = *reinterpret_cast<OBJECTREF*>(src);
5734 SetObjectReference(reinterpret_cast<OBJECTREF*>(dest), val);
5740 void Interpreter::StObj()
5748 _ASSERTE(m_curStackHt >= 2);
5749 unsigned destInd = m_curStackHt - 2;
5750 unsigned valInd = m_curStackHt - 1;
5753 // Check that dest is a pointer type.
5754 CorInfoType cit = OpStackTypeGet(destInd).ToCorInfoType();
5755 _ASSERTE_MSG(IsValidPointerType(cit), "Expect pointer on stack for dest of stobj");
5759 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_StObj]);
5760 #endif // INTERP_TRACING
5762 CORINFO_CLASS_HANDLE clsHnd = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_StObj));
5766 clsAttribs = m_interpCeeInfo.getClassAttribs(clsHnd);
5769 if (clsAttribs & CORINFO_FLG_VALUECLASS)
5771 MethodTable* clsMT = GetMethodTableFromClsHnd(clsHnd);
5775 sz = getClassSize(clsHnd);
5778 // Note that "dest" might be a pointer into the heap. It is therefore important
5779 // to calculate it *after* any PREEMP transitions at which we might do a GC.
5780 void* dest = OpStackGet<void*>(destInd);
5781 ThrowOnInvalidPointer(dest);
5784 // Try and validate types
5785 InterpreterType vit = OpStackTypeGet(valInd);
5786 CorInfoType vitc = vit.ToCorInfoType();
5788 if (vitc == CORINFO_TYPE_VALUECLASS)
5790 CORINFO_CLASS_HANDLE vClsHnd = vit.ToClassHandle();
5791 const bool isClass = (vClsHnd == clsHnd);
5792 const bool isPrim = (vitc == CorInfoTypeStackNormalize(GetTypeForPrimitiveValueClass(clsHnd)));
5793 bool isShared = false;
5795 // If operand type is shared we need a more complex check;
5796 // the IL type may not be shared
5797 if (!isPrim && !isClass)
5802 vClsAttribs = m_interpCeeInfo.getClassAttribs(vClsHnd);
5805 if ((vClsAttribs & CORINFO_FLG_SHAREDINST) != 0)
5807 MethodTable* clsMT2 = clsMT->GetCanonicalMethodTable();
5808 if (((CORINFO_CLASS_HANDLE) clsMT2) == vClsHnd)
5815 _ASSERTE(isClass || isPrim || isShared);
5819 const bool isSz = s_InterpreterLooseRules && sz <= sizeof(dest);
5827 if (sz > sizeof(INT64))
5829 // Large struct case -- ostack entry is pointer.
5830 void* src = OpStackGet<void*>(valInd);
5831 CopyValueClassUnchecked(dest, src, clsMT);
5832 LargeStructOperandStackPop(sz, src);
5836 // The ostack entry contains the struct value.
5837 CopyValueClassUnchecked(dest, OpStackGetAddr(valInd, sz), clsMT);
5842 // The ostack entry is an object reference.
5843 _ASSERTE(OpStackTypeGet(valInd).ToCorInfoType() == CORINFO_TYPE_CLASS);
5845 // Note that "dest" might be a pointer into the heap. It is therefore important
5846 // to calculate it *after* any PREEMP transitions at which we might do a GC. (Thus,
5847 // we have to duplicate this code with the case above.
5848 void* dest = OpStackGet<void*>(destInd);
5849 ThrowOnInvalidPointer(dest);
5853 OBJECTREF val = ObjectToOBJECTREF(OpStackGet<Object*>(valInd));
5854 SetObjectReference(reinterpret_cast<OBJECTREF*>(dest), val);
5860 BarrierIfVolatile();
5863 void Interpreter::InitObj()
5871 _ASSERTE(m_curStackHt >= 1);
5872 unsigned destInd = m_curStackHt - 1;
5874 // Check that src and dest are both pointer types.
5875 CorInfoType cit = OpStackTypeGet(destInd).ToCorInfoType();
5876 _ASSERTE_MSG(IsValidPointerType(cit), "Expect pointer on stack");
5880 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_InitObj]);
5881 #endif // INTERP_TRACING
5883 CORINFO_CLASS_HANDLE clsHnd = GetTypeFromToken(m_ILCodePtr + 2, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_InitObj));
5884 size_t valueClassSz = 0;
5889 clsAttribs = m_interpCeeInfo.getClassAttribs(clsHnd);
5890 if (clsAttribs & CORINFO_FLG_VALUECLASS)
5892 valueClassSz = getClassSize(clsHnd);
5896 void* dest = OpStackGet<void*>(destInd);
5897 ThrowOnInvalidPointer(dest);
5899 // dest is a vulnerable byref.
5902 if (clsAttribs & CORINFO_FLG_VALUECLASS)
5904 memset(dest, 0, valueClassSz);
5908 // The ostack entry is an object reference.
5909 SetObjectReference(reinterpret_cast<OBJECTREF*>(dest), NULL);
5915 void Interpreter::LdStr()
5923 OBJECTHANDLE res = ConstructStringLiteral(m_methInfo->m_module, getU4LittleEndian(m_ILCodePtr + 1));
5926 OpStackSet<Object*>(m_curStackHt, *reinterpret_cast<Object**>(res));
5927 OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_CLASS)); // Stack-normal type for "string"
5933 void Interpreter::NewObj()
5935 #if INTERP_DYNAMIC_CONTRACTS
5942 // Dynamic contract occupies too much stack.
5943 STATIC_CONTRACT_THROWS;
5944 STATIC_CONTRACT_GC_TRIGGERS;
5945 STATIC_CONTRACT_MODE_COOPERATIVE;
5948 unsigned ctorTok = getU4LittleEndian(m_ILCodePtr + 1);
5951 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_NewObj]);
5952 #endif // INTERP_TRACING
5954 CORINFO_CALL_INFO callInfo;
5955 CORINFO_RESOLVED_TOKEN methTok;
5959 ResolveToken(&methTok, ctorTok, CORINFO_TOKENKIND_Ldtoken InterpTracingArg(RTK_NewObj));
5960 m_interpCeeInfo.getCallInfo(&methTok, NULL,
5961 m_methInfo->m_method,
5962 CORINFO_CALLINFO_FLAGS(0),
5966 unsigned mflags = callInfo.methodFlags;
5968 if ((mflags & (CORINFO_FLG_STATIC|CORINFO_FLG_ABSTRACT)) != 0)
5970 VerificationError("newobj on static or abstract method");
5973 unsigned clsFlags = callInfo.classFlags;
5976 // What class are we allocating?
5977 const char* clsName;
5981 clsName = m_interpCeeInfo.getClassNameFromMetadata(methTok.hClass, NULL);
5985 // There are four cases:
5986 // 1) Value types (ordinary constructor, resulting VALUECLASS pushed)
5987 // 2) String (var-args constructor, result automatically pushed)
5988 // 3) MDArray (var-args constructor, resulting OBJECTREF pushed)
5989 // 4) Reference types (ordinary constructor, resulting OBJECTREF pushed)
5990 if (clsFlags & CORINFO_FLG_VALUECLASS)
5993 INT64 smallTempDest = 0;
5997 sz = getClassSize(methTok.hClass);
5999 if (sz > sizeof(INT64))
6001 // TODO: Make sure this is deleted in the face of exceptions.
6002 tempDest = new BYTE[sz];
6006 tempDest = &smallTempDest;
6008 memset(tempDest, 0, sz);
6009 InterpreterType structValRetIT(&m_interpCeeInfo, methTok.hClass);
6010 m_structRetValITPtr = &structValRetIT;
6011 m_structRetValTempSpace = tempDest;
6013 DoCallWork(/*virtCall*/false, tempDest, &methTok, &callInfo);
6015 if (sz > sizeof(INT64))
6017 void* dest = LargeStructOperandStackPush(sz);
6018 memcpy(dest, tempDest, sz);
6019 delete[] reinterpret_cast<BYTE*>(tempDest);
6020 OpStackSet<void*>(m_curStackHt, dest);
6024 OpStackSet<INT64>(m_curStackHt, GetSmallStructValue(tempDest, sz));
6026 if (m_structRetValITPtr->IsStruct())
6028 OpStackTypeSet(m_curStackHt, *m_structRetValITPtr);
6032 // Must stack-normalize primitive types.
6033 OpStackTypeSet(m_curStackHt, m_structRetValITPtr->StackNormalize());
6035 // "Unregister" the temp space for GC scanning...
6036 m_structRetValITPtr = NULL;
6039 else if ((clsFlags & CORINFO_FLG_VAROBJSIZE) && !(clsFlags & CORINFO_FLG_ARRAY))
6041 // For a VAROBJSIZE class (currently == String), pass NULL as this to "pseudo-constructor."
6042 void* specialFlagArg = reinterpret_cast<void*>(0x1); // Special value for "thisArg" argument of "DoCallWork": push NULL that's not on op stack.
6043 DoCallWork(/*virtCall*/false, specialFlagArg, &methTok, &callInfo); // pushes result automatically
6047 OBJECTREF thisArgObj = NULL;
6048 GCPROTECT_BEGIN(thisArgObj);
6050 if (clsFlags & CORINFO_FLG_ARRAY)
6052 _ASSERTE(clsFlags & CORINFO_FLG_VAROBJSIZE);
6054 MethodDesc* methDesc = GetMethod(methTok.hMethod);
6056 PCCOR_SIGNATURE pSig;
6058 methDesc->GetSig(&pSig, &cbSigSize);
6059 MetaSig msig(pSig, cbSigSize, methDesc->GetModule(), NULL);
6061 unsigned dwNumArgs = msig.NumFixedArgs();
6062 _ASSERTE(m_curStackHt >= dwNumArgs);
6063 m_curStackHt -= dwNumArgs;
6065 INT32* args = (INT32*)_alloca(dwNumArgs * sizeof(INT32));
6068 for (dwArg = 0; dwArg < dwNumArgs; dwArg++)
6070 unsigned stkInd = m_curStackHt + dwArg;
6071 bool loose = s_InterpreterLooseRules && (OpStackTypeGet(stkInd).ToCorInfoType() == CORINFO_TYPE_NATIVEINT);
6072 if (OpStackTypeGet(stkInd).ToCorInfoType() != CORINFO_TYPE_INT && !loose)
6074 VerificationError("MD array dimension bounds and sizes must be int.");
6076 args[dwArg] = loose ? (INT32) OpStackGet<NativeInt>(stkInd) : OpStackGet<INT32>(stkInd);
6079 thisArgObj = AllocateArrayEx(TypeHandle(methTok.hClass), args, dwNumArgs);
6083 CorInfoHelpFunc newHelper;
6087 newHelper = m_interpCeeInfo.getNewHelper(methTok.hClass, &sideEffect);
6090 MethodTable * pNewObjMT = GetMethodTableFromClsHnd(methTok.hClass);
6093 case CORINFO_HELP_NEWFAST:
6095 thisArgObj = AllocateObject(pNewObjMT);
6099 DoCallWork(/*virtCall*/false, OBJECTREFToObject(thisArgObj), &methTok, &callInfo);
6104 OpStackSet<Object*>(m_curStackHt, OBJECTREFToObject(thisArgObj));
6105 OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_CLASS));
6108 GCPROTECT_END(); // For "thisArgObj"
6114 void Interpreter::NewArr()
6122 _ASSERTE(m_curStackHt > 0);
6123 unsigned stkInd = m_curStackHt-1;
6124 CorInfoType cit = OpStackTypeGet(stkInd).ToCorInfoType();
6128 case CORINFO_TYPE_INT:
6129 sz = static_cast<NativeInt>(OpStackGet<INT32>(stkInd));
6131 case CORINFO_TYPE_NATIVEINT:
6132 sz = OpStackGet<NativeInt>(stkInd);
6135 VerificationError("Size operand of 'newarr' must be int or native int.");
6138 unsigned elemTypeTok = getU4LittleEndian(m_ILCodePtr + 1);
6140 CORINFO_CLASS_HANDLE elemClsHnd;
6143 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_NewArr]);
6144 #endif // INTERP_TRACING
6146 CORINFO_RESOLVED_TOKEN elemTypeResolvedTok;
6150 ResolveToken(&elemTypeResolvedTok, elemTypeTok, CORINFO_TOKENKIND_Newarr InterpTracingArg(RTK_NewArr));
6151 elemClsHnd = elemTypeResolvedTok.hClass;
6157 COMPlusThrow(kOverflowException);
6161 // Even though ECMA allows using a native int as the argument to newarr instruction
6162 // (therefore size is INT_PTR), ArrayBase::m_NumComponents is 32-bit, so even on 64-bit
6163 // platforms we can't create an array whose size exceeds 32 bits.
6166 EX_THROW(EEMessageException, (kOverflowException, IDS_EE_ARRAY_DIMENSIONS_EXCEEDED));
6170 TypeHandle th(elemClsHnd);
6171 MethodTable* pArrayMT = th.GetMethodTable();
6172 pArrayMT->CheckRunClassInitThrowing();
6174 INT32 size32 = (INT32)sz;
6175 Object* newarray = OBJECTREFToObject(AllocateSzArray(pArrayMT, size32));
6178 OpStackTypeSet(stkInd, InterpreterType(CORINFO_TYPE_CLASS));
6179 OpStackSet<Object*>(stkInd, newarray);
6185 void Interpreter::IsInst()
6194 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_IsInst]);
6195 #endif // INTERP_TRACING
6197 CORINFO_CLASS_HANDLE cls = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Casting InterpTracingArg(RTK_IsInst));
6199 _ASSERTE(m_curStackHt >= 1);
6200 unsigned idx = m_curStackHt - 1;
6202 CorInfoType cit = OpStackTypeGet(idx).ToCorInfoType();
6203 _ASSERTE(cit == CORINFO_TYPE_CLASS || cit == CORINFO_TYPE_STRING);
6206 Object * pObj = OpStackGet<Object*>(idx);
6209 if (!ObjIsInstanceOf(pObj, TypeHandle(cls)))
6210 OpStackSet<Object*>(idx, NULL);
6213 // Type stack stays unmodified.
6218 void Interpreter::CastClass()
6227 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_CastClass]);
6228 #endif // INTERP_TRACING
6230 CORINFO_CLASS_HANDLE cls = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Casting InterpTracingArg(RTK_CastClass));
6232 _ASSERTE(m_curStackHt >= 1);
6233 unsigned idx = m_curStackHt - 1;
6235 CorInfoType cit = OpStackTypeGet(idx).ToCorInfoType();
6236 _ASSERTE(cit == CORINFO_TYPE_CLASS || cit == CORINFO_TYPE_STRING);
6239 Object * pObj = OpStackGet<Object*>(idx);
6242 if (!ObjIsInstanceOf(pObj, TypeHandle(cls), TRUE))
6244 UNREACHABLE(); //ObjIsInstanceOf will throw if cast can't be done
6249 // Type stack stays unmodified.
6254 void Interpreter::LocAlloc()
6262 _ASSERTE(m_curStackHt >= 1);
6263 unsigned idx = m_curStackHt - 1;
6264 CorInfoType cit = OpStackTypeGet(idx).ToCorInfoType();
6266 if (cit == CORINFO_TYPE_INT || cit == CORINFO_TYPE_UINT)
6268 sz = static_cast<NativeUInt>(OpStackGet<UINT32>(idx));
6270 else if (cit == CORINFO_TYPE_NATIVEINT || cit == CORINFO_TYPE_NATIVEUINT)
6272 sz = OpStackGet<NativeUInt>(idx);
6274 else if (s_InterpreterLooseRules && cit == CORINFO_TYPE_LONG)
6276 sz = (NativeUInt) OpStackGet<INT64>(idx);
6280 VerificationError("localloc requires int or nativeint argument.");
6284 OpStackSet<void*>(idx, NULL);
6288 void* res = GetLocAllocData()->Alloc(sz);
6289 if (res == NULL) ThrowStackOverflow();
6290 OpStackSet<void*>(idx, res);
6292 OpStackTypeSet(idx, InterpreterType(CORINFO_TYPE_NATIVEINT));
// Implements the MKREFANY opcode: pops a byref (or native int pointer) and
// builds a TypedByRef {data, type} value on the operand stack, typed with the
// class named by the metadata token following the opcode.  The representation
// of the TypedByRef on the operand stack is platform-dependent: a large-struct
// slot on AMD64, an inline small-struct slot on x86/ARM, and not yet
// implemented on ARM64/LOONGARCH64/RISCV64.
6295 void Interpreter::MkRefany()
6304 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_MkRefAny]);
6305 #endif // INTERP_TRACING
6307 CORINFO_CLASS_HANDLE cls = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_MkRefAny));
6308 _ASSERTE(m_curStackHt >= 1);
6309 unsigned idx = m_curStackHt - 1;
6311 CorInfoType cit = OpStackTypeGet(idx).ToCorInfoType();
6312 if (!(cit == CORINFO_TYPE_BYREF || cit == CORINFO_TYPE_NATIVEINT))
6313 VerificationError("MkRefany requires byref or native int (pointer) on the stack.");
6315 void* ptr = OpStackGet<void*>(idx);
6317 InterpreterType typedRefIT = GetTypedRefIT(&m_interpCeeInfo);
// Where the TypedByRef lives depends on whether it is a "large struct" on
// this platform (larger than a single 64-bit operand-stack slot).
6319 #if defined(HOST_AMD64)
6320 _ASSERTE(typedRefIT.IsLargeStruct(&m_interpCeeInfo));
6321 tbr = (TypedByRef*) LargeStructOperandStackPush(GetTypedRefSize(&m_interpCeeInfo));
6322 OpStackSet<void*>(idx, tbr);
6323 #elif defined(HOST_X86) || defined(HOST_ARM)
6324 _ASSERTE(!typedRefIT.IsLargeStruct(&m_interpCeeInfo));
6325 tbr = OpStackGetAddr<TypedByRef>(idx);
6326 #elif defined(HOST_ARM64)
6328 NYI_INTERP("Unimplemented code: MkRefAny");
6329 #elif defined(HOST_LOONGARCH64)
6331 NYI_INTERP("Unimplemented code: MkRefAny on LOONGARCH");
6332 #elif defined(HOST_RISCV64)
6334 NYI_INTERP("Unimplemented code: MkRefAny on RISCV64");
6336 #error "unsupported platform"
// Fill in the type half of the TypedByRef and retype the stack slot.
6339 tbr->type = TypeHandle(cls);
6340 OpStackTypeSet(idx, typedRefIT);
// Implements the REFANYTYPE opcode: pops a TypedByRef from the operand stack
// and pushes the managed type object (System.Type) for its embedded
// TypeHandle, as a CORINFO_TYPE_CLASS reference.
// Verification requires the top of stack to actually be a TypedRef.
6345 void Interpreter::RefanyType()
6353 _ASSERTE(m_curStackHt > 0);
6354 unsigned idx = m_curStackHt - 1;
6356 if (OpStackTypeGet(idx) != GetTypedRefIT(&m_interpCeeInfo))
// Fixed copy-paste error: this is the REFANYTYPE handler, but the message
// previously said "RefAnyVal" (copied from Interpreter::RefanyVal).
6357 VerificationError("RefAnyType requires a TypedRef on the stack.");
6359 TypedByRef* ptbr = OpStackGet<TypedByRef*>(idx);
// Pop the TypedByRef off the large-struct operand stack (no-op when the
// TypedByRef was stored inline in the slot).
6360 LargeStructOperandStackPop(sizeof(TypedByRef), ptbr);
6362 TypeHandle* pth = &ptbr->type;
// Translate the TypeHandle into its managed System.Type object and replace
// the TypedByRef on the stack with that object reference.
6365 OBJECTREF classobj = TypeHandleToTypeRef(pth);
6367 OpStackSet<Object*>(idx, OBJECTREFToObject(classobj));
6368 OpStackTypeSet(idx, InterpreterType(CORINFO_TYPE_CLASS));
6373 // This (unfortunately) duplicates code in JIT_GetRuntimeTypeHandle, which
6374 // isn't callable because it sets up a Helper Method Frame.
// Converts a TypeHandle into its managed System.Type object reference,
// allocating the managed class object on first request.
6375 OBJECTREF Interpreter::TypeHandleToTypeRef(TypeHandle* pth)
6377 OBJECTREF typePtr = NULL;
6378 if (!pth->IsTypeDesc())
6380 // Most common... and fastest case
// For plain MethodTable-backed types, try the cached managed object first
// and only fall back to the (slower, possibly allocating) creation path.
6381 typePtr = pth->AsMethodTable()->GetManagedClassObjectIfExists();
6382 if (typePtr == NULL)
6384 typePtr = pth->GetManagedClassObject();
// TypeDesc case: no fast cached path visible here; go straight to creation.
6389 typePtr = pth->GetManagedClassObject();
// Thin wrapper over the JIT-EE interface: returns the primitive CorInfoType
// that a value class "puns" to (CORINFO_TYPE_UNDEF when it is not primitive).
6394 CorInfoType Interpreter::GetTypeForPrimitiveValueClass(CORINFO_CLASS_HANDLE clsHnd)
6404 return m_interpCeeInfo.getTypeForPrimitiveValueClass(clsHnd);
// Implements the REFANYVAL opcode: pops a TypedByRef, checks that its type
// matches the class named by the metadata token following the opcode (throws
// InvalidCastException otherwise), and pushes the TypedByRef's data pointer
// as a managed byref.
6407 void Interpreter::RefanyVal()
6415 _ASSERTE(m_curStackHt > 0);
6416 unsigned idx = m_curStackHt - 1;
6418 if (OpStackTypeGet(idx) != GetTypedRefIT(&m_interpCeeInfo))
6419 VerificationError("RefAnyVal requires a TypedRef on the stack.")
6422 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_RefAnyVal]);
6423 #endif // INTERP_TRACING
6425 CORINFO_CLASS_HANDLE cls = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_RefAnyVal));
6426 TypeHandle expected(cls);
// Pop the TypedByRef value, then validate the embedded type exactly matches
// the expected type before exposing the raw data pointer.
6428 TypedByRef* ptbr = OpStackGet<TypedByRef*>(idx);
6429 LargeStructOperandStackPop(sizeof(TypedByRef), ptbr);
6430 if (expected != ptbr->type) ThrowInvalidCastException();
6432 OpStackSet<void*>(idx, static_cast<void*>(ptbr->data));
6433 OpStackTypeSet(idx, InterpreterType(CORINFO_TYPE_BYREF));
// Implements the CKFINITE opcode: inspects the float/double on top of the
// operand stack and throws an arithmetic exception if it is NaN or infinite.
// The operand is left on the stack unchanged.
6438 void Interpreter::CkFinite()
6446 _ASSERTE(m_curStackHt > 0);
6447 unsigned idx = m_curStackHt - 1;
6449 CorInfoType cit = OpStackTypeGet(idx).ToCorInfoType();
// Widen a float operand to double so a single finiteness check suffices.
6454 case CORINFO_TYPE_FLOAT:
6455 val = (double)OpStackGet<float>(idx);
6457 case CORINFO_TYPE_DOUBLE:
6458 val = OpStackGet<double>(idx);
6461 VerificationError("CkFinite requires a floating-point value on the stack.");
6466 ThrowSysArithException();
// Implements the LDTOKEN opcode: resolves the metadata token following the
// opcode and pushes the corresponding runtime-handle wrapper object
// (RuntimeMethodHandle / RuntimeFieldHandle stub info, or the managed type
// object for a type token) as an object reference.
6469 void Interpreter::LdToken()
6477 unsigned tokVal = getU4LittleEndian(m_ILCodePtr + 1);
6480 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_LdToken]);
6481 #endif // INTERP_TRACING
6484 CORINFO_RESOLVED_TOKEN tok;
6487 ResolveToken(&tok, tokVal, CORINFO_TOKENKIND_Ldtoken InterpTracingArg(RTK_LdToken));
6490 // To save duplication of the factored code at the bottom, I don't do GCX_FORBID for
6491 // these Object* values, but this comment documents the intent.
// Dispatch on what kind of entity the token resolved to; exactly one of
// hMethod / hField / hClass is used, checked in that order.
6492 if (tok.hMethod != NULL)
6494 MethodDesc* pMethod = (MethodDesc*)tok.hMethod;
6495 Object* objPtr = OBJECTREFToObject((OBJECTREF)pMethod->GetStubMethodInfo());
6496 OpStackSet<Object*>(m_curStackHt, objPtr);
6498 else if (tok.hField != NULL)
6500 FieldDesc * pField = (FieldDesc *)tok.hField;
6501 Object* objPtr = OBJECTREFToObject((OBJECTREF)pField->GetStubFieldInfo());
6502 OpStackSet<Object*>(m_curStackHt, objPtr);
6506 TypeHandle th(tok.hClass);
6507 Object* objPtr = OBJECTREFToObject(th.GetManagedClassObject());
6508 OpStackSet<Object*>(m_curStackHt, objPtr);
6513 OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_CLASS));
// Implements the LDFTN opcode: resolves the method token (at m_ILCodePtr + 2,
// past the two-byte prefixed opcode), obtains call info from the JIT-EE
// interface, and pushes the method's entry-point code address as a native int.
// The parallel "function pointer stack" records the method handle for the
// same slot so later CALLI-style consumers can recover the method identity.
6520 void Interpreter::LdFtn()
6528 unsigned tokVal = getU4LittleEndian(m_ILCodePtr + 2);
6531 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_LdFtn]);
6532 #endif // INTERP_TRACING
6534 CORINFO_RESOLVED_TOKEN tok;
6535 CORINFO_CALL_INFO callInfo;
6538 ResolveToken(&tok, tokVal, CORINFO_TOKENKIND_Method InterpTracingArg(RTK_LdFtn));
6539 m_interpCeeInfo.getCallInfo(&tok, NULL, m_methInfo->m_method,
6540 combine(CORINFO_CALLINFO_SECURITYCHECKS,CORINFO_CALLINFO_LDFTN),
6544 switch (callInfo.kind)
6548 PCODE pCode = ((MethodDesc *)callInfo.hMethod)->GetMultiCallableAddrOfCode();
6549 OpStackSet<void*>(m_curStackHt, (void *)pCode);
6550 GetFunctionPointerStack()[m_curStackHt] = callInfo.hMethod;
6553 case CORINFO_CALL_CODE_POINTER:
6554 NYI_INTERP("Indirect code pointer.");
6557 _ASSERTE_MSG(false, "Should not reach here: unknown call kind.");
6560 OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_NATIVEINT));
// Implements the LDVIRTFTN opcode: pops the object reference on top of the
// stack, resolves the method token (at m_ILCodePtr + 2), and pushes the
// address of the virtualized target for that object (or the direct address
// for non-vtable methods) as a native int.  The resolved MethodDesc is also
// recorded in the parallel function-pointer stack for the same slot.
6565 void Interpreter::LdVirtFtn()
6573 _ASSERTE(m_curStackHt >= 1);
6574 unsigned ind = m_curStackHt - 1;
6576 unsigned tokVal = getU4LittleEndian(m_ILCodePtr + 2);
6579 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_LdVirtFtn]);
6580 #endif // INTERP_TRACING
6582 CORINFO_RESOLVED_TOKEN tok;
6583 CORINFO_CALL_INFO callInfo;
6584 CORINFO_CLASS_HANDLE classHnd;
6585 CORINFO_METHOD_HANDLE methodHnd;
6588 ResolveToken(&tok, tokVal, CORINFO_TOKENKIND_Method InterpTracingArg(RTK_LdVirtFtn));
6589 m_interpCeeInfo.getCallInfo(&tok, NULL, m_methInfo->m_method,
6590 combine(CORINFO_CALLINFO_CALLVIRT,
6591 combine(CORINFO_CALLINFO_SECURITYCHECKS,
6592 CORINFO_CALLINFO_LDFTN)),
6596 classHnd = tok.hClass;
6597 methodHnd = tok.hMethod;
6600 MethodDesc * pMD = (MethodDesc *)methodHnd;
// Virtual case: resolve through the object's actual type, GC-protecting the
// receiver across the lookup, then map the code address back to the concrete
// MethodDesc so the function-pointer stack records the devirtualized target.
6602 if (pMD->IsVtableMethod())
6604 Object* obj = OpStackGet<Object*>(ind);
6605 ThrowOnInvalidPointer(obj);
6607 OBJECTREF objRef = ObjectToOBJECTREF(obj);
6608 GCPROTECT_BEGIN(objRef);
6609 pCode = pMD->GetMultiCallableAddrOfVirtualizedCode(&objRef, TypeHandle(classHnd));
6612 pMD = Entry2MethodDesc(pCode, TypeHandle(classHnd).GetMethodTable());
// Non-virtual case: the static target's address is used directly.
6616 pCode = pMD->GetMultiCallableAddrOfCode();
6618 OpStackSet<void*>(ind, (void *)pCode);
6619 GetFunctionPointerStack()[ind] = (CORINFO_METHOD_HANDLE)pMD;
6621 OpStackTypeSet(ind, InterpreterType(CORINFO_TYPE_NATIVEINT));
// Implements the SIZEOF opcode: resolves the type token (at m_ILCodePtr + 2)
// and pushes the type's size in bytes as an int32.
6625 void Interpreter::Sizeof()
6634 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_Sizeof]);
6635 #endif // INTERP_TRACING
6637 CORINFO_CLASS_HANDLE cls = GetTypeFromToken(m_ILCodePtr + 2, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_Sizeof));
6641 CorInfoType cit = ::asCorInfoType(cls);
6642 // For class types, the ECMA spec says to return the size of the object reference, not the referent
6643 // object. Everything else should be a value type, for which we can just return the size as reported
6647 case CORINFO_TYPE_CLASS:
6648 sz = sizeof(Object*);
6651 sz = getClassSize(cls);
6656 OpStackSet<UINT32>(m_curStackHt, sz);
6657 OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_INT));
// Static state definitions for the Interpreter class.
// s_initialized guards Initialize/Terminate; s_compilerStaticsInitialized
// guards the lazily-computed TypedByRef compiler statics below, which are
// filled in by InitializeCompilerStatics.
6664 bool Interpreter::s_initialized = false;
6665 bool Interpreter::s_compilerStaticsInitialized = false;
6666 size_t Interpreter::s_TypedRefSize;
6667 CORINFO_CLASS_HANDLE Interpreter::s_TypedRefClsHnd;
6668 InterpreterType Interpreter::s_TypedRefIT;
6670 // Must call GetTypedRefIT
// Returns the cached size in bytes of the TypedByRef (TypedReference) struct.
// Precondition: InitializeCompilerStatics has run (the "Must call
// GetTypedRefIT" note above appears to mean the statics must already be
// initialized; the `info` parameter is unused here).
6671 size_t Interpreter::GetTypedRefSize(CEEInfo* info)
6673 _ASSERTE_MSG(s_compilerStaticsInitialized, "Precondition");
6674 return s_TypedRefSize;
// Returns the cached InterpreterType for TypedByRef (TypedReference).
// Precondition: InitializeCompilerStatics has run; `info` is unused here.
6677 InterpreterType Interpreter::GetTypedRefIT(CEEInfo* info)
6679 _ASSERTE_MSG(s_compilerStaticsInitialized, "Precondition");
6680 return s_TypedRefIT;
// Returns the cached class handle for TypedByRef (TypedReference).
// Precondition: InitializeCompilerStatics has run; `info` is unused here.
6683 CORINFO_CLASS_HANDLE Interpreter::GetTypedRefClsHnd(CEEInfo* info)
6685 _ASSERTE_MSG(s_compilerStaticsInitialized, "Precondition");
6686 return s_TypedRefClsHnd;
// One-time process-wide initialization for the interpreter: reads the
// CLRConfig knobs controlling which methods are interpreted and how, and
// creates the locks guarding the method cache and the stub-to-MethodDesc map.
// Must be called exactly once (asserted) before any interpretation happens.
6689 void Interpreter::Initialize()
6691 _ASSERTE(!s_initialized);
// Method-selection filters (include/exclude lists) and behavior flags.
6693 s_InterpretMeths.ensureInit(CLRConfig::INTERNAL_Interpret);
6694 s_InterpretMethsExclude.ensureInit(CLRConfig::INTERNAL_InterpretExclude);
6695 s_InterpreterUseCaching = (s_InterpreterUseCachingFlag.val(CLRConfig::INTERNAL_InterpreterUseCaching) != 0);
6696 s_InterpreterLooseRules = (s_InterpreterLooseRulesFlag.val(CLRConfig::INTERNAL_InterpreterLooseRules) != 0);
6697 s_InterpreterDoLoopMethods = (s_InterpreterDoLoopMethodsFlag.val(CLRConfig::INTERNAL_InterpreterDoLoopMethods) != 0);
6699 // Initialize the lock used to protect method locks.
6700 // TODO: it would be better if this were a reader/writer lock.
6701 s_methodCacheLock.Init(CrstLeafLock, CRST_DEFAULT);
6703 // Similarly, initialize the lock used to protect the map from
6704 // interpreter stub addresses to their method descs.
6705 s_interpStubToMDMapLock.Init(CrstLeafLock, CRST_DEFAULT);
6707 s_initialized = true;
6709 #if INTERP_ILINSTR_PROFILE
6710 SetILInstrCategories();
6711 #endif // INTERP_ILINSTR_PROFILE
// Lazily initializes the compiler-dependent statics (the TypedByRef class
// handle, its InterpreterType, and its size).  The writes are idempotent, so
// racing threads each perform the same initialization; see the inline note
// about memory-model caveats on weaker architectures.
6714 void Interpreter::InitializeCompilerStatics(CEEInfo* info)
6716 if (!s_compilerStaticsInitialized)
6718 // TODO: I believe I need no synchronization around this on x86, but I do
6719 // on more permissive memory models. (Why it's OK on x86: each thread executes this
6720 // before any access to the initialized static variables; if several threads do
6721 // so, they perform idempotent initializing writes to the statics.
6723 s_TypedRefClsHnd = info->getBuiltinClass(CLASSID_TYPED_BYREF);
6724 s_TypedRefIT = InterpreterType(info, s_TypedRefClsHnd);
6725 s_TypedRefSize = getClassSize(s_TypedRefClsHnd);
// The flag must be published after the data; see the barrier TODO below.
6726 s_compilerStaticsInitialized = true;
6727 // TODO: Need store-store memory barrier here.
// Tears down process-wide interpreter state created by Initialize():
// destroys the two locks and clears the initialized flag.
6731 void Interpreter::Terminate()
6735 s_methodCacheLock.Destroy();
6736 s_interpStubToMDMapLock.Destroy();
6737 s_initialized = false;
6741 #if INTERP_ILINSTR_PROFILE
// Builds the table mapping each IL opcode to a representative "category"
// opcode, so profiling counts collapse families of related instructions
// (e.g. all LDARG_0..LDARG_3 and LDARG_S count as CEE_LDARG).
6742 void Interpreter::SetILInstrCategories()
6744 // Start with the identity maps
6745 for (unsigned short instr = 0; instr < 512; instr++) s_ILInstrCategories[instr] = instr;
6746 // Now make exceptions.
// Short/immediate forms of argument, local, and constant loads collapse to
// their canonical opcode; all conditional/unconditional branches to CEE_BR;
// all loads/stores-indirect, arithmetic, logical, conversion, and array
// element opcodes to one representative per family.
6747 for (unsigned instr = CEE_LDARG_0; instr <= CEE_LDARG_3; instr++) s_ILInstrCategories[instr] = CEE_LDARG;
6748 s_ILInstrCategories[CEE_LDARG_S] = CEE_LDARG;
6750 for (unsigned instr = CEE_LDLOC_0; instr <= CEE_LDLOC_3; instr++) s_ILInstrCategories[instr] = CEE_LDLOC;
6751 s_ILInstrCategories[CEE_LDLOC_S] = CEE_LDLOC;
6753 for (unsigned instr = CEE_STLOC_0; instr <= CEE_STLOC_3; instr++) s_ILInstrCategories[instr] = CEE_STLOC;
6754 s_ILInstrCategories[CEE_STLOC_S] = CEE_STLOC;
6756 s_ILInstrCategories[CEE_LDLOCA_S] = CEE_LDLOCA;
6758 for (unsigned instr = CEE_LDC_I4_M1; instr <= CEE_LDC_I4_S; instr++) s_ILInstrCategories[instr] = CEE_LDC_I4;
6760 for (unsigned instr = CEE_BR_S; instr <= CEE_BLT_UN; instr++) s_ILInstrCategories[instr] = CEE_BR;
6762 for (unsigned instr = CEE_LDIND_I1; instr <= CEE_LDIND_REF; instr++) s_ILInstrCategories[instr] = CEE_LDIND_I;
6764 for (unsigned instr = CEE_STIND_REF; instr <= CEE_STIND_R8; instr++) s_ILInstrCategories[instr] = CEE_STIND_I;
6766 for (unsigned instr = CEE_ADD; instr <= CEE_REM_UN; instr++) s_ILInstrCategories[instr] = CEE_ADD;
6768 for (unsigned instr = CEE_AND; instr <= CEE_NOT; instr++) s_ILInstrCategories[instr] = CEE_AND;
6770 for (unsigned instr = CEE_CONV_I1; instr <= CEE_CONV_U8; instr++) s_ILInstrCategories[instr] = CEE_CONV_I;
6771 for (unsigned instr = CEE_CONV_OVF_I1_UN; instr <= CEE_CONV_OVF_U_UN; instr++) s_ILInstrCategories[instr] = CEE_CONV_I;
6773 for (unsigned instr = CEE_LDELEM_I1; instr <= CEE_LDELEM_REF; instr++) s_ILInstrCategories[instr] = CEE_LDELEM;
6774 for (unsigned instr = CEE_STELEM_I; instr <= CEE_STELEM_REF; instr++) s_ILInstrCategories[instr] = CEE_STELEM;
6776 for (unsigned instr = CEE_CONV_OVF_I1; instr <= CEE_CONV_OVF_U8; instr++) s_ILInstrCategories[instr] = CEE_CONV_I;
6777 for (unsigned instr = CEE_CONV_U2; instr <= CEE_CONV_U1; instr++) s_ILInstrCategories[instr] = CEE_CONV_I;
6778 for (unsigned instr = CEE_CONV_OVF_I; instr <= CEE_CONV_OVF_U; instr++) s_ILInstrCategories[instr] = CEE_CONV_I;
6780 for (unsigned instr = CEE_ADD_OVF; instr <= CEE_SUB_OVF; instr++) s_ILInstrCategories[instr] = CEE_ADD_OVF;
6782 s_ILInstrCategories[CEE_LEAVE_S] = CEE_LEAVE;
6783 s_ILInstrCategories[CEE_CONV_U] = CEE_CONV_I;
6785 #endif // INTERP_ILINSTR_PROFILE
// Template implementation of the CEQ/CGT/CGT_UN/CLT/CLT_UN opcodes (the
// comparison `op` is a compile-time template parameter): computes the
// comparison of the two topmost operands via CompareOpRes and replaces them
// with a single int32 result (1 or 0).
6789 void Interpreter::CompareOp()
6797 _ASSERTE(m_curStackHt >= 2);
6798 unsigned op1idx = m_curStackHt - 2;
6799 INT32 res = CompareOpRes<op>(op1idx);
6800 OpStackSet<INT32>(op1idx, res);
6801 OpStackTypeSet(op1idx, InterpreterType(CORINFO_TYPE_INT));
// Core comparison worker shared by CompareOp and BrOnComparison.  Compares
// the operands at op1idx and op1idx+1 under the template comparison `op`
// (CO_EQ / CO_GT / CO_GT_UN / CO_LT / CO_LT_UN) and returns 1 if the
// comparison holds, 0 otherwise.  Dispatches on the stack-normal type of the
// first operand, with loose-rules allowances for mixed int/nativeint/long/
// byref operands and for value classes that pun primitive integer types.
// Unordered (xx_UN) float/double comparisons return true when either operand
// is NaN, per ECMA-335.
6806 INT32 Interpreter::CompareOpRes(unsigned op1idx)
6814 _ASSERTE(m_curStackHt >= op1idx + 2);
6815 unsigned op2idx = op1idx + 1;
6816 InterpreterType t1 = OpStackTypeGet(op1idx);
6817 CorInfoType cit1 = t1.ToCorInfoType();
6818 _ASSERTE(IsStackNormalType(cit1));
6819 InterpreterType t2 = OpStackTypeGet(op2idx);
6820 CorInfoType cit2 = t2.ToCorInfoType();
6821 _ASSERTE(IsStackNormalType(cit2));
// --- First operand is int32 ---
6826 case CORINFO_TYPE_INT:
6827 if (cit2 == CORINFO_TYPE_INT)
6829 INT32 val1 = OpStackGet<INT32>(op1idx);
6830 INT32 val2 = OpStackGet<INT32>(op2idx);
6833 if (val1 == val2) res = 1;
6835 else if (op == CO_GT)
6837 if (val1 > val2) res = 1;
6839 else if (op == CO_GT_UN)
6841 if (static_cast<UINT32>(val1) > static_cast<UINT32>(val2)) res = 1;
6843 else if (op == CO_LT)
6845 if (val1 < val2) res = 1;
6849 _ASSERTE(op == CO_LT_UN);
6850 if (static_cast<UINT32>(val1) < static_cast<UINT32>(val2)) res = 1;
// int32 vs native int (or byref under loose rules, or a value class punning
// native int): compare at native-int width.
6853 else if (cit2 == CORINFO_TYPE_NATIVEINT ||
6854 (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_BYREF) ||
6855 (cit2 == CORINFO_TYPE_VALUECLASS
6856 && CorInfoTypeStackNormalize(GetTypeForPrimitiveValueClass(t2.ToClassHandle())) == CORINFO_TYPE_NATIVEINT))
6858 NativeInt val1 = OpStackGet<NativeInt>(op1idx);
6859 NativeInt val2 = OpStackGet<NativeInt>(op2idx);
6862 if (val1 == val2) res = 1;
6864 else if (op == CO_GT)
6866 if (val1 > val2) res = 1;
6868 else if (op == CO_GT_UN)
6870 if (static_cast<NativeUInt>(val1) > static_cast<NativeUInt>(val2)) res = 1;
6872 else if (op == CO_LT)
6874 if (val1 < val2) res = 1;
6878 _ASSERTE(op == CO_LT_UN);
6879 if (static_cast<NativeUInt>(val1) < static_cast<NativeUInt>(val2)) res = 1;
// int32 vs a value class punning a (possibly narrow) integer: read the
// second operand at its true width/signedness, widen, then compare as int32.
6882 else if (cit2 == CORINFO_TYPE_VALUECLASS)
6884 cit2 = GetTypeForPrimitiveValueClass(t2.ToClassHandle());
6885 INT32 val1 = OpStackGet<INT32>(op1idx);
6887 if (CorInfoTypeStackNormalize(cit2) == CORINFO_TYPE_INT)
6890 size_t sz = t2.Size(&m_interpCeeInfo);
6894 if (CorInfoTypeIsUnsigned(cit2))
6896 val2 = OpStackGet<UINT8>(op2idx);
6900 val2 = OpStackGet<INT8>(op2idx);
6904 if (CorInfoTypeIsUnsigned(cit2))
6906 val2 = OpStackGet<UINT16>(op2idx);
6910 val2 = OpStackGet<INT16>(op2idx);
6914 val2 = OpStackGet<INT32>(op2idx);
6922 VerificationError("Can't compare with struct type.");
6926 if (val1 == val2) res = 1;
6928 else if (op == CO_GT)
6930 if (val1 > val2) res = 1;
6932 else if (op == CO_GT_UN)
6934 if (static_cast<UINT32>(val1) > static_cast<UINT32>(val2)) res = 1;
6936 else if (op == CO_LT)
6938 if (val1 < val2) res = 1;
6942 _ASSERTE(op == CO_LT_UN);
6943 if (static_cast<UINT32>(val1) < static_cast<UINT32>(val2)) res = 1;
6948 VerificationError("Binary comparison operation: type mismatch.");
// --- First operand is native int ---
6951 case CORINFO_TYPE_NATIVEINT:
6952 if (cit2 == CORINFO_TYPE_NATIVEINT || cit2 == CORINFO_TYPE_INT
6953 || (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_LONG)
6954 || (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_BYREF)
6955 || (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_CLASS && OpStackGet<void*>(op2idx) == 0))
6957 NativeInt val1 = OpStackGet<NativeInt>(op1idx);
6959 if (cit2 == CORINFO_TYPE_NATIVEINT)
6961 val2 = OpStackGet<NativeInt>(op2idx);
6963 else if (cit2 == CORINFO_TYPE_INT)
6965 val2 = static_cast<NativeInt>(OpStackGet<INT32>(op2idx));
6967 else if (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_LONG)
6969 val2 = static_cast<NativeInt>(OpStackGet<INT64>(op2idx));
// Loose-rules: a null object reference compares as 0.
6971 else if (cit2 == CORINFO_TYPE_CLASS)
6973 _ASSERTE(OpStackGet<void*>(op2idx) == 0);
6978 _ASSERTE(s_InterpreterLooseRules && cit2 == CORINFO_TYPE_BYREF);
6979 val2 = reinterpret_cast<NativeInt>(OpStackGet<void*>(op2idx));
6983 if (val1 == val2) res = 1;
6985 else if (op == CO_GT)
6987 if (val1 > val2) res = 1;
6989 else if (op == CO_GT_UN)
6991 if (static_cast<NativeUInt>(val1) > static_cast<NativeUInt>(val2)) res = 1;
6993 else if (op == CO_LT)
6995 if (val1 < val2) res = 1;
6999 _ASSERTE(op == CO_LT_UN);
7000 if (static_cast<NativeUInt>(val1) < static_cast<NativeUInt>(val2)) res = 1;
7005 VerificationError("Binary comparison operation: type mismatch.");
// --- First operand is int64 ---
7008 case CORINFO_TYPE_LONG:
7010 bool looseLong = false;
7011 #if defined(HOST_AMD64)
// On 64-bit, loose rules allow comparing long against nativeint/byref since
// they share a width.
7012 looseLong = s_InterpreterLooseRules && (cit2 == CORINFO_TYPE_NATIVEINT || cit2 == CORINFO_TYPE_BYREF);
7014 if (cit2 == CORINFO_TYPE_LONG || looseLong)
7016 INT64 val1 = OpStackGet<INT64>(op1idx);
7017 INT64 val2 = OpStackGet<INT64>(op2idx);
7020 if (val1 == val2) res = 1;
7022 else if (op == CO_GT)
7024 if (val1 > val2) res = 1;
7026 else if (op == CO_GT_UN)
7028 if (static_cast<UINT64>(val1) > static_cast<UINT64>(val2)) res = 1;
7030 else if (op == CO_LT)
7032 if (val1 < val2) res = 1;
7036 _ASSERTE(op == CO_LT_UN);
7037 if (static_cast<UINT64>(val1) < static_cast<UINT64>(val2)) res = 1;
7042 VerificationError("Binary comparison operation: type mismatch.");
// --- First operand is an object reference: only (in)equality is defined;
// CGT_UN doubles as "not equal" per common IL usage. ---
7047 case CORINFO_TYPE_CLASS:
7048 case CORINFO_TYPE_STRING:
7049 if (cit2 == CORINFO_TYPE_CLASS || cit2 == CORINFO_TYPE_STRING)
7052 Object* val1 = OpStackGet<Object*>(op1idx);
7053 Object* val2 = OpStackGet<Object*>(op2idx);
7056 if (val1 == val2) res = 1;
7058 else if (op == CO_GT_UN)
7060 if (val1 != val2) res = 1;
7064 VerificationError("Binary comparison operation: type mismatch.");
7069 VerificationError("Binary comparison operation: type mismatch.");
// --- First operand is float ---
7074 case CORINFO_TYPE_FLOAT:
7076 bool isDouble = (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_DOUBLE);
7077 if (cit2 == CORINFO_TYPE_FLOAT || isDouble)
7079 float val1 = OpStackGet<float>(op1idx);
7080 float val2 = (isDouble) ? (float) OpStackGet<double>(op2idx) : OpStackGet<float>(op2idx);
7083 // I'm assuming IEEE math here, so that if at least one is a NAN, the comparison will fail...
7084 if (val1 == val2) res = 1;
7086 else if (op == CO_GT)
7088 // I'm assuming that C++ arithmetic does the right thing here with infinities and NANs.
7089 if (val1 > val2) res = 1;
7091 else if (op == CO_GT_UN)
7093 // Check for NAN's here: if either is a NAN, they're unordered, so this comparison returns true.
7094 if (_isnan(val1) || _isnan(val2)) res = 1;
7095 else if (val1 > val2) res = 1;
7097 else if (op == CO_LT)
7099 if (val1 < val2) res = 1;
7103 _ASSERTE(op == CO_LT_UN);
7104 // Check for NAN's here: if either is a NAN, they're unordered, so this comparison returns true.
7105 if (_isnan(val1) || _isnan(val2)) res = 1;
7106 else if (val1 < val2) res = 1;
7111 VerificationError("Binary comparison operation: type mismatch.");
// --- First operand is double ---
7116 case CORINFO_TYPE_DOUBLE:
7118 bool isFloat = (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_FLOAT);
7119 if (cit2 == CORINFO_TYPE_DOUBLE || isFloat)
7121 double val1 = OpStackGet<double>(op1idx);
7122 double val2 = (isFloat) ? (double) OpStackGet<float>(op2idx) : OpStackGet<double>(op2idx);
7125 // I'm assuming IEEE math here, so that if at least one is a NAN, the comparison will fail...
7126 if (val1 == val2) res = 1;
7128 else if (op == CO_GT)
7130 // I'm assuming that C++ arithmetic does the right thing here with infinities and NANs.
7131 if (val1 > val2) res = 1;
7133 else if (op == CO_GT_UN)
7135 // Check for NAN's here: if either is a NAN, they're unordered, so this comparison returns true.
7136 if (_isnan(val1) || _isnan(val2)) res = 1;
7137 else if (val1 > val2) res = 1;
7139 else if (op == CO_LT)
7141 if (val1 < val2) res = 1;
7145 _ASSERTE(op == CO_LT_UN);
7146 // Check for NAN's here: if either is a NAN, they're unordered, so this comparison returns true.
7147 if (_isnan(val1) || _isnan(val2)) res = 1;
7148 else if (val1 < val2) res = 1;
7153 VerificationError("Binary comparison operation: type mismatch.");
// --- First operand is a byref: compared as unsigned/signed native ints ---
7158 case CORINFO_TYPE_BYREF:
7159 if (cit2 == CORINFO_TYPE_BYREF || (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_NATIVEINT))
7161 NativeInt val1 = reinterpret_cast<NativeInt>(OpStackGet<void*>(op1idx));
7163 if (cit2 == CORINFO_TYPE_BYREF)
7165 val2 = reinterpret_cast<NativeInt>(OpStackGet<void*>(op2idx));
7169 _ASSERTE(s_InterpreterLooseRules && cit2 == CORINFO_TYPE_NATIVEINT);
7170 val2 = OpStackGet<NativeInt>(op2idx);
7174 if (val1 == val2) res = 1;
7176 else if (op == CO_GT)
7178 if (val1 > val2) res = 1;
7180 else if (op == CO_GT_UN)
7182 if (static_cast<NativeUInt>(val1) > static_cast<NativeUInt>(val2)) res = 1;
7184 else if (op == CO_LT)
7186 if (val1 < val2) res = 1;
7190 _ASSERTE(op == CO_LT_UN);
7191 if (static_cast<NativeUInt>(val1) < static_cast<NativeUInt>(val2)) res = 1;
7196 VerificationError("Binary comparison operation: type mismatch.");
// --- First operand is a value class: only allowed when it puns a primitive ---
7200 case CORINFO_TYPE_VALUECLASS:
7202 CorInfoType newCit1 = GetTypeForPrimitiveValueClass(t1.ToClassHandle());
7203 if (newCit1 == CORINFO_TYPE_UNDEF)
7205 VerificationError("Can't compare a value class.");
7209 NYI_INTERP("Must eliminate 'punning' value classes from the ostack.");
7215 _ASSERTE(false); // Should not be here if the type is stack-normal.
// Template implementation of BRTRUE/BRFALSE (and their short forms): pops the
// top operand and branches when its truth value equals the template parameter
// `val`; `targetLen` is the branch-offset width (1 or 4 bytes).  Value
// classes that pun primitive integral types are accepted; floats are rejected
// per verification rules.
template<bool val, int targetLen>
void Interpreter::BrOnValue()
7224 _ASSERTE(targetLen == 1 || targetLen == 4);
7225 _ASSERTE(m_curStackHt > 0);
7226 unsigned stackInd = m_curStackHt - 1;
7227 InterpreterType it = OpStackTypeGet(stackInd);
7229 // It shouldn't be a value class, unless it's a punning name for a primitive integral type.
7230 if (it.ToCorInfoType() == CORINFO_TYPE_VALUECLASS)
7233 CorInfoType cit = m_interpCeeInfo.getTypeForPrimitiveValueClass(it.ToClassHandle());
7234 if (CorInfoTypeIsIntegral(cit))
7236 it = InterpreterType(cit);
7240 VerificationError("Can't branch on the value of a value type that is not a primitive type.");
7245 switch (it.ToCorInfoType())
7247 case CORINFO_TYPE_FLOAT:
7248 case CORINFO_TYPE_DOUBLE:
7249 VerificationError("Can't branch on the value of a float or double.");
// Read the operand at its actual size so narrow values are tested correctly.
7256 switch (it.SizeNotStruct())
7260 INT32 branchVal = OpStackGet<INT32>(stackInd);
7261 BrOnValueTakeBranch((branchVal != 0) == val, targetLen);
7266 INT64 branchVal = OpStackGet<INT64>(stackInd);
7267 BrOnValueTakeBranch((branchVal != 0) == val, targetLen);
7271 // The value-class case handled above makes sizes 1 and 2 possible.
7274 INT8 branchVal = OpStackGet<INT8>(stackInd);
7275 BrOnValueTakeBranch((branchVal != 0) == val, targetLen);
7280 INT16 branchVal = OpStackGet<INT16>(stackInd);
7281 BrOnValueTakeBranch((branchVal != 0) == val, targetLen);
// Pop the tested operand.
7288 m_curStackHt = stackInd;
7291 // compOp is a member of the BranchComparisonOp enumeration.
// Template implementation of the conditional-branch-on-comparison opcodes
// (BEQ/BGT/BLT/... and their short and unsigned forms): compares the two
// topmost operands via CompareOpRes<compOp>, optionally inverts the result
// (`reverse`, used for the negated forms like BNE/BLE), and either takes the
// branch or falls through to the next instruction.  `targetLen` is the
// branch-offset width (1 or 4 bytes).
template<int compOp, bool reverse, int targetLen>
void Interpreter::BrOnComparison()
7301 _ASSERTE(targetLen == 1 || targetLen == 4);
7302 _ASSERTE(m_curStackHt >= 2);
7303 unsigned v1Ind = m_curStackHt - 2;
7305 INT32 res = CompareOpRes<compOp>(v1Ind);
7308 res = (res == 0) ? 1 : 0;
7316 // BYTE is unsigned...
// Branch offsets are signed, hence the explicit signed reads.
7317 offset = getI1(m_ILCodePtr + 1);
7321 offset = getI4LittleEndian(m_ILCodePtr + 1);
7323 // 1 is the size of the current instruction; offset is relative to start of next.
7326 // Backwards branch; enable caching.
7327 BackwardsBranchActions(offset);
7329 ExecuteBranch(m_ILCodePtr + 1 + targetLen + offset);
// Not taken: skip over the opcode byte plus the branch-offset operand.
7333 m_ILCodePtr += targetLen + 1;
// Implements the LDFLD opcode: replaces the object reference / byref /
// value-type instance on top of the operand stack with the value of the named
// instance field.  When `fldIn` is non-NULL the caller has already resolved
// the field (used when LdFld is invoked as a helper from another opcode) and
// the caller owns advancing the instruction pointer; otherwise the field
// token following the opcode is resolved (with optional per-offset caching).
// GC-sensitivity: interior pointers ("vulnerable byrefs") computed below must
// not live across anything that can trigger a GC -- see inline comments.
7338 void Interpreter::LdFld(FieldDesc* fldIn)
7346 BarrierIfVolatile();
7348 FieldDesc* fld = fldIn;
7349 CORINFO_CLASS_HANDLE valClsHnd = NULL;
7353 unsigned ilOffset = CurOffset();
// Resolve the FieldDesc: from the per-IL-offset cache when enabled, else
// from the metadata token (and populate the cache for next time).
7354 if (fld == NULL && s_InterpreterUseCaching)
7357 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_LdFld]);
7358 #endif // INTERP_TRACING
7359 fld = GetCachedInstanceField(ilOffset);
7363 unsigned tok = getU4LittleEndian(m_ILCodePtr + sizeof(BYTE));
7364 fld = FindField(tok InterpTracingArg(RTK_LdFld));
7365 _ASSERTE(fld != NULL);
7367 fldOffset = fld->GetOffset();
7368 if (s_InterpreterUseCaching && fldOffset < FIELD_OFFSET_LAST_REAL_OFFSET)
7369 CacheInstanceField(ilOffset, fld);
7373 fldOffset = fld->GetOffset();
7376 CorInfoType valCit = CEEInfo::asCorInfoType(fld->GetFieldType());
7378 // If "fldIn" is non-NULL, it's not a "real" LdFld -- the caller should handle updating the instruction pointer.
7380 m_ILCodePtr += 5; // Last use above, so update now.
7382 // We need to construct the interpreter type for a struct type before we try to do coordinated
7383 // pushes of the value and type on the opstacks -- these must be atomic wrt GC, and constructing
7384 // a struct InterpreterType transitions to preemptive mode.
7385 InterpreterType structValIT;
7386 if (valCit == CORINFO_TYPE_VALUECLASS)
7389 valCit = m_interpCeeInfo.getFieldType(CORINFO_FIELD_HANDLE(fld), &valClsHnd, nullptr);
7390 structValIT = InterpreterType(&m_interpCeeInfo, valClsHnd);
7393 UINT sz = fld->GetSize();
7395 // Live vars: valCit, structValIt
7396 _ASSERTE(m_curStackHt > 0);
7397 unsigned stackInd = m_curStackHt - 1;
7398 InterpreterType addrIt = OpStackTypeGet(stackInd);
7399 CorInfoType addrCit = addrIt.ToCorInfoType();
// Case 1: the instance is an object reference on the stack.
7402 if (addrCit == CORINFO_TYPE_CLASS)
7404 OBJECTREF obj = OBJECTREF(OpStackGet<Object*>(stackInd));
7405 ThrowOnInvalidPointer(OBJECTREFToObject(obj));
7406 if (valCit == CORINFO_TYPE_VALUECLASS)
7408 void* srcPtr = fld->GetInstanceAddress(obj);
7410 // srcPtr is now vulnerable.
7413 MethodTable* valClsMT = GetMethodTableFromClsHnd(valClsHnd);
7414 if (sz > sizeof(INT64))
7416 // Large struct case: allocate space on the large struct operand stack.
7417 void* destPtr = LargeStructOperandStackPush(sz);
7418 OpStackSet<void*>(stackInd, destPtr);
7419 CopyValueClass(destPtr, srcPtr, valClsMT);
7423 // Small struct case -- is inline in operand stack.
7424 OpStackSet<INT64>(stackInd, GetSmallStructValue(srcPtr, sz));
// Non-struct field of an object: read directly at the field's offset,
// widening narrow integers to int32 with the field's signedness.
7429 BYTE* fldStart = dac_cast<PTR_BYTE>(OBJECTREFToObject(obj)) + sizeof(Object) + fldOffset;
7430 // fldStart is now a vulnerable byref
7436 isUnsigned = CorInfoTypeIsUnsigned(valCit);
7439 OpStackSet<UINT32>(stackInd, *reinterpret_cast<UINT8*>(fldStart));
7443 OpStackSet<INT32>(stackInd, *reinterpret_cast<INT8*>(fldStart));
7447 isUnsigned = CorInfoTypeIsUnsigned(valCit);
7450 OpStackSet<UINT32>(stackInd, *reinterpret_cast<UINT16*>(fldStart));
7454 OpStackSet<INT32>(stackInd, *reinterpret_cast<INT16*>(fldStart));
7458 OpStackSet<INT32>(stackInd, *reinterpret_cast<INT32*>(fldStart));
7461 OpStackSet<INT64>(stackInd, *reinterpret_cast<INT64*>(fldStart));
7464 _ASSERTE_MSG(false, "Should not reach here.");
// Case 2: the instance is a value-type value (or a pointer/byref) on the
// stack; compute the address of the field's containing storage.
7472 if (addrCit == CORINFO_TYPE_VALUECLASS)
7474 size_t addrSize = addrIt.Size(&m_interpCeeInfo);
7475 // The ECMA spec allows ldfld to be applied to "an instance of a value type."
7476 // We will take the address of the ostack entry.
7477 if (addrIt.IsLargeStruct(&m_interpCeeInfo))
7479 ptr = reinterpret_cast<INT8*>(OpStackGet<void*>(stackInd));
7480 // This is delicate. I'm going to pop the large struct off the large-struct stack
7481 // now, even though the field value we push may go back on the large object stack.
7482 // We rely on the fact that this instruction doesn't do any other pushing, and
7483 // we assume that LargeStructOperandStackPop does not actually deallocate any memory,
7484 // and we rely on memcpy properly handling possibly-overlapping regions being copied.
7485 // Finally (wow, this really *is* delicate), we rely on the property that the large-struct
7486 // stack pop operation doesn't deallocate memory (the size of the allocated memory for the
7487 // large-struct stack only grows in a method execution), and that if we push the field value
7488 // on the large struct stack below, the size of the pushed item is at most the size of the
7489 // popped item, so the stack won't grow (which would allow a dealloc/realloc).
7490 // (All in all, maybe it would be better to just copy the value elsewhere then pop...but
7491 // that wouldn't be very aggressive.)
7492 LargeStructOperandStackPop(addrSize, ptr);
7496 ptr = reinterpret_cast<INT8*>(OpStackGetAddr(stackInd, addrSize));
7501 _ASSERTE(CorInfoTypeIsPointer(addrCit));
7502 ptr = OpStackGet<INT8*>(stackInd);
7503 ThrowOnInvalidPointer(ptr);
7506 _ASSERTE(ptr != NULL);
// Read the field value through `ptr`, mirroring the object-reference case.
7509 if (valCit == CORINFO_TYPE_VALUECLASS)
7511 if (sz > sizeof(INT64))
7513 // Large struct case.
7514 void* dstPtr = LargeStructOperandStackPush(sz);
7515 memcpy(dstPtr, ptr, sz);
7516 OpStackSet<void*>(stackInd, dstPtr);
7520 // Small struct case -- is inline in operand stack.
7521 OpStackSet<INT64>(stackInd, GetSmallStructValue(ptr, sz));
7523 OpStackTypeSet(stackInd, structValIT.StackNormalize());
7530 isUnsigned = CorInfoTypeIsUnsigned(valCit);
7533 OpStackSet<UINT32>(stackInd, *reinterpret_cast<UINT8*>(ptr));
7537 OpStackSet<INT32>(stackInd, *reinterpret_cast<INT8*>(ptr));
7541 isUnsigned = CorInfoTypeIsUnsigned(valCit);
7544 OpStackSet<UINT32>(stackInd, *reinterpret_cast<UINT16*>(ptr));
7548 OpStackSet<INT32>(stackInd, *reinterpret_cast<INT16*>(ptr));
7552 OpStackSet<INT32>(stackInd, *reinterpret_cast<INT32*>(ptr));
7555 OpStackSet<INT64>(stackInd, *reinterpret_cast<INT64*>(ptr));
// Finally, retype the stack slot to the (stack-normalized) field value type.
7559 if (valCit == CORINFO_TYPE_VALUECLASS)
7561 OpStackTypeSet(stackInd, structValIT.StackNormalize());
7565 OpStackTypeSet(stackInd, InterpreterType(valCit).StackNormalize());
// Implements the LDFLDA opcode: replaces the object reference / byref /
// native-int pointer on top of the operand stack with the address of the
// named instance field within it.  A native-int receiver yields a native-int
// result; managed receivers yield a byref.
7569 void Interpreter::LdFldA()
7577 unsigned tok = getU4LittleEndian(m_ILCodePtr + sizeof(BYTE));
7580 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_LdFldA]);
7581 #endif // INTERP_TRACING
7583 unsigned offset = CurOffset();
7584 m_ILCodePtr += 5; // Last use above, so update now.
// Resolve the FieldDesc, consulting/populating the per-IL-offset cache.
7586 FieldDesc* fld = NULL;
7587 if (s_InterpreterUseCaching) fld = GetCachedInstanceField(offset);
7591 fld = FindField(tok InterpTracingArg(RTK_LdFldA));
7592 if (s_InterpreterUseCaching) CacheInstanceField(offset, fld);
7594 _ASSERTE(m_curStackHt > 0);
7595 unsigned stackInd = m_curStackHt - 1;
7596 CorInfoType addrCit = OpStackTypeGet(stackInd).ToCorInfoType();
7597 if (addrCit == CORINFO_TYPE_BYREF || addrCit == CORINFO_TYPE_CLASS || addrCit == CORINFO_TYPE_NATIVEINT)
7599 NativeInt ptr = OpStackGet<NativeInt>(stackInd);
7600 ThrowOnInvalidPointer((void*)ptr);
7601 // The "offset" below does not include the Object (i.e., the MethodTable pointer) for object pointers, so add that in first.
7602 if (addrCit == CORINFO_TYPE_CLASS) ptr += sizeof(Object);
7603 // Now add the offset.
7604 ptr += fld->GetOffset();
7605 OpStackSet<NativeInt>(stackInd, ptr);
// The result's type follows the receiver: unmanaged pointer in, unmanaged
// pointer out; otherwise a managed byref.
7606 if (addrCit == CORINFO_TYPE_NATIVEINT)
7608 OpStackTypeSet(stackInd, InterpreterType(CORINFO_TYPE_NATIVEINT));
7612 OpStackTypeSet(stackInd, InterpreterType(CORINFO_TYPE_BYREF));
7617 VerificationError("LdfldA requires object reference, managed or unmanaged pointer type.");
// Implements the 'stfld' IL opcode: stores the value at the top of the operand
// stack into an instance field of the object/pointer one below it, then pops both.
// Handles three addressing cases: object reference, byref/native-int pointer; and
// three value cases: object ref, value class (small or large struct), primitive.
7621 void Interpreter::StFld()
7630 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_StFld]);
7631 #endif // INTERP_TRACING
7633 FieldDesc* fld = NULL;
// Try the per-IL-offset instance-field cache before resolving the token.
7636 unsigned ilOffset = CurOffset();
7637 if (s_InterpreterUseCaching) fld = GetCachedInstanceField(ilOffset);
7640 unsigned tok = getU4LittleEndian(m_ILCodePtr + sizeof(BYTE));
7642 fld = FindField(tok InterpTracingArg(RTK_StFld));
7643 _ASSERTE(fld != NULL);
7644 fldOffset = fld->GetOffset();
// Only cache fields with real (non-sentinel) offsets.
7645 if (s_InterpreterUseCaching && fldOffset < FIELD_OFFSET_LAST_REAL_OFFSET)
7646 CacheInstanceField(ilOffset, fld);
7650 fldOffset = fld->GetOffset();
7653 m_ILCodePtr += 5; // Last use above, so update now.
7655 UINT sz = fld->GetSize();
// Operand stack: [... obj/addr, value]; addrInd is the target, valInd the value.
7656 _ASSERTE(m_curStackHt >= 2);
7657 unsigned addrInd = m_curStackHt - 2;
7658 CorInfoType addrCit = OpStackTypeGet(addrInd).ToCorInfoType();
7659 unsigned valInd = m_curStackHt - 1;
7660 CorInfoType valCit = OpStackTypeGet(valInd).ToCorInfoType();
7661 _ASSERTE(IsStackNormalType(addrCit) && IsStackNormalType(valCit));
// Case 1: target is an object reference.
7665 if (addrCit == CORINFO_TYPE_CLASS)
7667 OBJECTREF obj = OBJECTREF(OpStackGet<Object*>(addrInd));
7668 ThrowOnInvalidPointer(OBJECTREFToObject(obj));
7670 if (valCit == CORINFO_TYPE_CLASS)
// Reference-typed field: SetRefValue performs the GC write barrier.
7672 fld->SetRefValue(obj, ObjectToOBJECTREF(OpStackGet<Object*>(valInd)));
7674 else if (valCit == CORINFO_TYPE_VALUECLASS)
7676 MethodTable* valClsMT = GetMethodTableFromClsHnd(OpStackTypeGet(valInd).ToClassHandle());
7677 void* destPtr = fld->GetInstanceAddress(obj);
7679 // destPtr is now a vulnerable byref, so can't do GC.
7682 // I use GCSafeMemCpy below to ensure that write barriers happen for the case in which
7683 // the value class contains GC pointers. We could do better...
7684 if (sz > sizeof(INT64))
7686 // Large struct case: stack slot contains pointer...
7687 void* srcPtr = OpStackGet<void*>(valInd);
7688 CopyValueClassUnchecked(destPtr, srcPtr, valClsMT);
7689 LargeStructOperandStackPop(sz, srcPtr);
7693 // Small struct case -- is inline in operand stack.
7694 CopyValueClassUnchecked(destPtr, OpStackGetAddr(valInd, sz), valClsMT);
7696 BarrierIfVolatile();
// Primitive value: compute the raw field address from the object header size
// plus the field offset, then store by width.
7701 BYTE* fldStart = dac_cast<PTR_BYTE>(OBJECTREFToObject(obj)) + sizeof(Object) + fldOffset;
7702 // fldStart is now a vulnerable byref
7708 *reinterpret_cast<INT8*>(fldStart) = OpStackGet<INT8>(valInd);
7711 *reinterpret_cast<INT16*>(fldStart) = OpStackGet<INT16>(valInd);
7714 *reinterpret_cast<INT32*>(fldStart) = OpStackGet<INT32>(valInd);
7717 *reinterpret_cast<INT64*>(fldStart) = OpStackGet<INT64>(valInd);
// Case 2: target is an unmanaged pointer or managed byref.
7724 _ASSERTE(addrCit == CORINFO_TYPE_BYREF || addrCit == CORINFO_TYPE_NATIVEINT);
7726 INT8* destPtr = OpStackGet<INT8*>(addrInd);
7727 ThrowOnInvalidPointer(destPtr);
7728 destPtr += fldOffset;
7730 if (valCit == CORINFO_TYPE_VALUECLASS)
7732 MethodTable* valClsMT = GetMethodTableFromClsHnd(OpStackTypeGet(valInd).ToClassHandle());
7733 // I use GCSafeMemCpy below to ensure that write barriers happen for the case in which
7734 // the value class contains GC pointers. We could do better...
7735 if (sz > sizeof(INT64))
7737 // Large struct case: stack slot contains pointer...
7738 void* srcPtr = OpStackGet<void*>(valInd);
7739 CopyValueClassUnchecked(destPtr, srcPtr, valClsMT);
7740 LargeStructOperandStackPop(sz, srcPtr);
7744 // Small struct case -- is inline in operand stack.
7745 CopyValueClassUnchecked(destPtr, OpStackGetAddr(valInd, sz), valClsMT);
7747 BarrierIfVolatile();
7750 else if (valCit == CORINFO_TYPE_CLASS)
// SetObjectReference performs the write barrier for the ref store.
7752 OBJECTREF val = ObjectToOBJECTREF(OpStackGet<Object*>(valInd));
7753 SetObjectReference(reinterpret_cast<OBJECTREF*>(destPtr), val);
// Primitive store by width.
7760 *reinterpret_cast<INT8*>(destPtr) = OpStackGet<INT8>(valInd);
7763 *reinterpret_cast<INT16*>(destPtr) = OpStackGet<INT16>(valInd);
7766 *reinterpret_cast<INT32*>(destPtr) = OpStackGet<INT32>(valInd);
7769 *reinterpret_cast<INT64*>(destPtr) = OpStackGet<INT64>(valInd);
7774 BarrierIfVolatile();
// Resolves the static-field token at the current IL pointer and returns, via the
// out-parameters, the field's address, interpreter type, size, and whether it
// lives in managed memory. Returns whether the result is cacheable (false for
// helper-computed addresses and boxed managed value types).
7777 bool Interpreter::StaticFldAddrWork(CORINFO_ACCESS_FLAGS accessFlgs, /*out (byref)*/void** pStaticFieldAddr, /*out*/InterpreterType* pit, /*out*/UINT* pFldSize, /*out*/bool* pManagedMem)
7785 bool isCacheable = true;
7786 *pManagedMem = true; // Default result.
7788 unsigned tok = getU4LittleEndian(m_ILCodePtr + sizeof(BYTE));
7789 m_ILCodePtr += 5; // Above is last use of m_ILCodePtr in this method, so update now.
7792 CORINFO_FIELD_INFO fldInfo;
7793 CORINFO_RESOLVED_TOKEN fldTok;
7795 void* pFldAddr = NULL;
7800 ResolveToken(&fldTok, tok, CORINFO_TOKENKIND_Field InterpTracingArg(RTK_SFldAddr));
7801 fld = reinterpret_cast<FieldDesc*>(fldTok.hField);
7803 m_interpCeeInfo.getFieldInfo(&fldTok, m_methInfo->m_method, accessFlgs, &fldInfo);
// Run the declaring class's .cctor (if not yet run) before touching its statics.
7806 EnsureClassInit(GetMethodTableFromClsHnd(fldTok.hClass));
7808 if ((fldInfo.fieldAccessor == CORINFO_FIELD_STATIC_TLS) || (fldInfo.fieldAccessor == CORINFO_FIELD_STATIC_TLS_MANAGED))
7810 NYI_INTERP("Thread-local static.");
7812 else if (fldInfo.fieldAccessor == CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER
7813 || fldInfo.fieldAccessor == CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER)
// Helper-based statics: address may vary per context, so don't cache it.
7815 *pStaticFieldAddr = fld->GetCurrentStaticAddress();
7816 isCacheable = false;
7820 *pStaticFieldAddr = fld->GetCurrentStaticAddress();
// Struct-typed statics (other than class/ptr) carry their struct handle.
7823 if (fldInfo.structType != NULL && fldInfo.fieldType != CORINFO_TYPE_CLASS && fldInfo.fieldType != CORINFO_TYPE_PTR)
7825 *pit = InterpreterType(&m_interpCeeInfo, fldInfo.structType);
7827 if ((fldInfo.fieldFlags & CORINFO_FLG_FIELD_UNMANAGED) == 0)
7829 // For valuetypes in managed memory, the address returned contains a pointer into the heap, to a boxed version of the
7830 // static variable; return a pointer to the boxed struct.
7831 isCacheable = false;
7835 *pManagedMem = false;
7840 *pit = InterpreterType(fldInfo.fieldType);
7842 *pFldSize = fld->GetSize();
// Implements the 'ldsfld' IL opcode: loads the value of a static field and pushes
// it on the operand stack (large structs go to the large-struct stack; small
// integral types are widened to INT32/INT64 per stack normalization).
7847 void Interpreter::LdSFld()
7855 InterpreterType fldIt;
7858 void* srcPtr = NULL;
7860 BarrierIfVolatile();
// srcPtr may point into the GC heap (boxed value-type static), so protect it.
7862 GCPROTECT_BEGININTERIOR(srcPtr);
7864 StaticFldAddr(CORINFO_ACCESS_GET, &srcPtr, &fldIt, &sz, &managedMem);
7868 if (fldIt.IsStruct())
7870 // Large struct case.
7871 CORINFO_CLASS_HANDLE sh = fldIt.ToClassHandle();
7872 // This call is GC_TRIGGERS, so do it before we copy the value: no GC after this,
7873 // until the op stacks and ht are consistent.
7874 OpStackTypeSet(m_curStackHt, InterpreterType(&m_interpCeeInfo, sh).StackNormalize());
7875 if (fldIt.IsLargeStruct(&m_interpCeeInfo))
7877 void* dstPtr = LargeStructOperandStackPush(sz);
7878 memcpy(dstPtr, srcPtr, sz);
7879 OpStackSet<void*>(m_curStackHt, dstPtr);
// Small struct fits inline in a 64-bit operand-stack slot.
7883 OpStackSet<INT64>(m_curStackHt, GetSmallStructValue(srcPtr, sz));
7888 CorInfoType valCit = fldIt.ToCorInfoType();
// Narrow integral types are widened to 32 bits, sign- or zero-extended
// according to the field's signedness.
7892 isUnsigned = CorInfoTypeIsUnsigned(valCit);
7895 OpStackSet<UINT32>(m_curStackHt, *reinterpret_cast<UINT8*>(srcPtr));
7899 OpStackSet<INT32>(m_curStackHt, *reinterpret_cast<INT8*>(srcPtr));
7903 isUnsigned = CorInfoTypeIsUnsigned(valCit);
7906 OpStackSet<UINT32>(m_curStackHt, *reinterpret_cast<UINT16*>(srcPtr));
7910 OpStackSet<INT32>(m_curStackHt, *reinterpret_cast<INT16*>(srcPtr));
7914 OpStackSet<INT32>(m_curStackHt, *reinterpret_cast<INT32*>(srcPtr));
7917 OpStackSet<INT64>(m_curStackHt, *reinterpret_cast<INT64*>(srcPtr));
7920 _ASSERTE_MSG(false, "LdSFld: this should have exhausted all the possible sizes.");
7923 OpStackTypeSet(m_curStackHt, fldIt.StackNormalize());
// Runs the class constructor for pMT if it has not run yet (no-op otherwise).
// The .cctor execution is excluded from the interpreter's IL cycle profile,
// since it is effectively a call into other code.
7929 void Interpreter::EnsureClassInit(MethodTable* pMT)
7931 if (!pMT->IsClassInited())
7933 pMT->CheckRestore();
7934 // This is tantamount to a call, so exempt it from the cycle count.
7935 #if INTERP_ILCYCLE_PROFILE
7936 unsigned __int64 startCycles;
7937 bool b = CycleTimer::GetThreadCyclesS(&startCycles); _ASSERTE(b);
7938 #endif // INTERP_ILCYCLE_PROFILE
// May throw (e.g. TypeInitializationException) — hence the "Throwing" name.
7940 pMT->CheckRunClassInitThrowing();
7942 #if INTERP_ILCYCLE_PROFILE
7943 unsigned __int64 endCycles;
7944 b = CycleTimer::GetThreadCyclesS(&endCycles); _ASSERTE(b);
7945 m_exemptCycles += (endCycles - startCycles);
7946 #endif // INTERP_ILCYCLE_PROFILE
// Implements the 'ldsflda' IL opcode: pushes the address of a static field.
// The pushed type is BYREF for fields in managed memory, NATIVEINT for
// unmanaged (e.g. RVA-based) statics.
7950 void Interpreter::LdSFldA()
7958 InterpreterType fldIt;
7961 void* srcPtr = NULL;
7962 GCPROTECT_BEGININTERIOR(srcPtr);
7964 StaticFldAddr(CORINFO_ACCESS_ADDRESS, &srcPtr, &fldIt, &fldSz, &managedMem);
7966 OpStackSet<void*>(m_curStackHt, srcPtr);
7969 // Static variable in managed memory...
7970 OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_BYREF));
7974 // RVA is in unmanaged memory.
7975 OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_NATIVEINT));
// Implements the 'stsfld' IL opcode: pops the top of the operand stack and
// stores it into a static field, handling value classes (small and large
// structs), object references, and primitives by size.
7982 void Interpreter::StSFld()
7989 InterpreterType fldIt;
7992 void* dstPtr = NULL;
// dstPtr may point into the GC heap (boxed value-type static), so protect it.
7993 GCPROTECT_BEGININTERIOR(dstPtr);
7995 StaticFldAddr(CORINFO_ACCESS_SET, &dstPtr, &fldIt, &sz, &managedMem);
7998 InterpreterType valIt = OpStackTypeGet(m_curStackHt);
7999 CorInfoType valCit = valIt.ToCorInfoType();
8001 if (valCit == CORINFO_TYPE_VALUECLASS)
8003 MethodTable* valClsMT = GetMethodTableFromClsHnd(valIt.ToClassHandle());
8004 if (sz > sizeof(INT64))
8006 // Large struct case: value in operand stack is indirect pointer.
8007 void* srcPtr = OpStackGet<void*>(m_curStackHt);
8008 CopyValueClassUnchecked(dstPtr, srcPtr, valClsMT);
8009 LargeStructOperandStackPop(sz, srcPtr);
8013 // Struct value is inline in the operand stack.
8014 CopyValueClassUnchecked(dstPtr, OpStackGetAddr(m_curStackHt, sz), valClsMT);
8017 else if (valCit == CORINFO_TYPE_CLASS)
// SetObjectReference performs the GC write barrier for the ref store.
8019 SetObjectReference(reinterpret_cast<OBJECTREF*>(dstPtr), ObjectToOBJECTREF(OpStackGet<Object*>(m_curStackHt)));
// Primitive store by width.
8026 *reinterpret_cast<UINT8*>(dstPtr) = OpStackGet<UINT8>(m_curStackHt);
8029 *reinterpret_cast<UINT16*>(dstPtr) = OpStackGet<UINT16>(m_curStackHt);
8032 *reinterpret_cast<UINT32*>(dstPtr) = OpStackGet<UINT32>(m_curStackHt);
8035 *reinterpret_cast<UINT64*>(dstPtr) = OpStackGet<UINT64>(m_curStackHt);
8038 _ASSERTE_MSG(false, "This should have exhausted all the possible sizes.");
8044 BarrierIfVolatile();
// Implements typed 'ldelem.*' opcodes for element type T. Pops an array
// reference and an index (INT or NATIVEINT), bounds-checks, and pushes the
// element. IsObjType selects object-reference elements (PtrArray::GetAt);
// for integral cit == CORINFO_TYPE_INT, narrow elements are widened to int
// with sign- or zero-extension chosen by whether T is the signed type.
8047 template<typename T, bool IsObjType, CorInfoType cit>
8048 void Interpreter::LdElemWithType()
8056 _ASSERTE(m_curStackHt >= 2);
8057 unsigned arrInd = m_curStackHt - 2;
8058 unsigned indexInd = m_curStackHt - 1;
8060 _ASSERTE(OpStackTypeGet(arrInd).ToCorInfoType() == CORINFO_TYPE_CLASS);
8062 ArrayBase* a = OpStackGet<ArrayBase*>(arrInd);
8063 ThrowOnInvalidPointer(a);
8064 int len = a->GetNumComponents();
8066 CorInfoType indexCit = OpStackTypeGet(indexInd).ToCorInfoType();
8067 if (indexCit == CORINFO_TYPE_INT)
8069 int index = OpStackGet<INT32>(indexInd);
8070 if (index < 0 || index >= len) ThrowArrayBoundsException();
// Object-reference element: result replaces the array ref on the stack.
8076 OBJECTREF res = reinterpret_cast<PtrArray*>(a)->GetAt(index);
8077 OpStackSet<OBJECTREF>(arrInd, res);
8081 intptr_t res_ptr = reinterpret_cast<intptr_t>(reinterpret_cast<Array<T>*>(a)->GetDirectConstPointerToNonObjectElements());
8082 if (cit == CORINFO_TYPE_INT)
8084 _ASSERTE(std::is_integral<T>::value);
8086 // Widen narrow types.
// The signed/unsigned T distinction picks sign- vs zero-extension.
8091 ires = std::is_same<T, INT8>::value ?
8092 static_cast<int>(reinterpret_cast<INT8*>(res_ptr)[index]) :
8093 static_cast<int>(reinterpret_cast<UINT8*>(res_ptr)[index]);
8096 ires = std::is_same<T, INT16>::value ?
8097 static_cast<int>(reinterpret_cast<INT16*>(res_ptr)[index]) :
8098 static_cast<int>(reinterpret_cast<UINT16*>(res_ptr)[index]);
8101 ires = std::is_same<T, INT32>::value ?
8102 static_cast<int>(reinterpret_cast<INT32*>(res_ptr)[index]) :
8103 static_cast<int>(reinterpret_cast<UINT32*>(res_ptr)[index]);
8106 _ASSERTE_MSG(false, "This should have exhausted all the possible sizes.");
8110 OpStackSet<int>(arrInd, ires);
8114 OpStackSet<T>(arrInd, ((T*) res_ptr)[index]);
// Same logic for a native-int index.
8120 _ASSERTE(indexCit == CORINFO_TYPE_NATIVEINT);
8121 NativeInt index = OpStackGet<NativeInt>(indexInd);
8122 if (index < 0 || index >= NativeInt(len)) ThrowArrayBoundsException();
8128 OBJECTREF res = reinterpret_cast<PtrArray*>(a)->GetAt(index);
8129 OpStackSet<OBJECTREF>(arrInd, res);
8133 T res = reinterpret_cast<Array<T>*>(a)->GetDirectConstPointerToNonObjectElements()[index];
8134 OpStackSet<T>(arrInd, res);
// The result overwrites the array slot; net stack height drops by one.
8138 OpStackTypeSet(arrInd, InterpreterType(cit));
// Implements typed 'stelem.*' opcodes for element type T. Pops array, index
// (INT or NATIVEINT), and value; bounds-checks; for object-reference elements
// performs the array-covariance type check and a barriered store via SetAt.
8142 template<typename T, bool IsObjType>
8143 void Interpreter::StElemWithType()
8152 _ASSERTE(m_curStackHt >= 3);
8153 unsigned arrInd = m_curStackHt - 3;
8154 unsigned indexInd = m_curStackHt - 2;
8155 unsigned valInd = m_curStackHt - 1;
8157 _ASSERTE(OpStackTypeGet(arrInd).ToCorInfoType() == CORINFO_TYPE_CLASS);
8159 ArrayBase* a = OpStackGet<ArrayBase*>(arrInd);
8160 ThrowOnInvalidPointer(a);
8161 int len = a->GetNumComponents();
8163 CorInfoType indexCit = OpStackTypeGet(indexInd).ToCorInfoType();
8164 if (indexCit == CORINFO_TYPE_INT)
8166 int index = OpStackGet<INT32>(indexInd);
8167 if (index < 0 || index >= len) ThrowArrayBoundsException();
// ObjIsInstanceOf may trigger GC, so the value and array are GC-protected.
8174 gc.val = ObjectToOBJECTREF(OpStackGet<Object*>(valInd));
8175 gc.a = ObjectToOBJECTREF(a);
8176 GCPROTECT_BEGIN(gc);
8177 if (gc.val != NULL &&
8178 !ObjIsInstanceOf(OBJECTREFToObject(gc.val), reinterpret_cast<PtrArray*>(a)->GetArrayElementTypeHandle()))
8179 COMPlusThrow(kArrayTypeMismatchException);
8180 reinterpret_cast<PtrArray*>(OBJECTREFToObject(gc.a))->SetAt(index, gc.val);
// Non-object element: direct store into the array's data.
8186 T val = OpStackGet<T>(valInd);
8187 reinterpret_cast<Array<T>*>(a)->GetDirectPointerToNonObjectElements()[index] = val;
// Same logic for a native-int index.
8192 _ASSERTE(indexCit == CORINFO_TYPE_NATIVEINT);
8193 NativeInt index = OpStackGet<NativeInt>(indexInd);
8194 if (index < 0 || index >= NativeInt(len)) ThrowArrayBoundsException();
8201 gc.val = ObjectToOBJECTREF(OpStackGet<Object*>(valInd));
8202 gc.a = ObjectToOBJECTREF(a);
8203 GCPROTECT_BEGIN(gc);
8204 if (gc.val != NULL &&
8205 !ObjIsInstanceOf(OBJECTREFToObject(gc.val), reinterpret_cast<PtrArray*>(a)->GetArrayElementTypeHandle()))
8206 COMPlusThrow(kArrayTypeMismatchException);
8207 reinterpret_cast<PtrArray*>(OBJECTREFToObject(gc.a))->SetAt(index, gc.val);
8213 T val = OpStackGet<T>(valInd);
8214 reinterpret_cast<Array<T>*>(a)->GetDirectPointerToNonObjectElements()[index] = val;
// Implements the token-carrying 'ldelem'/'ldelema' opcodes. takeAddress selects
// 'ldelema' (push a managed pointer to the element) vs 'ldelem' (load the
// element value). Resolves (with caching) the element-type token, pops array
// and index, bounds-checks, and for ldelema of class-typed elements performs
// the exact-type check unless a readonly. prefix was seen (m_readonlyFlag).
8221 template<bool takeAddress>
8222 void Interpreter::LdElem()
8230 _ASSERTE(m_curStackHt >= 2);
8231 unsigned arrInd = m_curStackHt - 2;
8232 unsigned indexInd = m_curStackHt - 1;
8234 unsigned elemTypeTok = getU4LittleEndian(m_ILCodePtr + 1);
8237 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_LdElem]);
8238 #endif // INTERP_TRACING
// Try the per-IL-offset class-handle cache before resolving the token.
8240 unsigned ilOffset = CurOffset();
8241 CORINFO_CLASS_HANDLE clsHnd = NULL;
8242 if (s_InterpreterUseCaching) clsHnd = GetCachedClassHandle(ilOffset);
8247 CORINFO_RESOLVED_TOKEN elemTypeResolvedTok;
8250 ResolveToken(&elemTypeResolvedTok, elemTypeTok, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_LdElem));
8251 clsHnd = elemTypeResolvedTok.hClass;
8253 if (s_InterpreterUseCaching) CacheClassHandle(ilOffset, clsHnd);
8256 CorInfoType elemCit = ::asCorInfoType(clsHnd);
8261 InterpreterType elemIt;
8262 if (elemCit == CORINFO_TYPE_VALUECLASS)
8264 elemIt = InterpreterType(&m_interpCeeInfo, clsHnd);
8268 elemIt = InterpreterType(elemCit);
8271 _ASSERTE(OpStackTypeGet(arrInd).ToCorInfoType() == CORINFO_TYPE_CLASS);
8274 ArrayBase* a = OpStackGet<ArrayBase*>(arrInd);
8275 ThrowOnInvalidPointer(a);
8276 int len = a->GetNumComponents();
// Index may be 32-bit int or native int on the operand stack.
8282 CorInfoType indexCit = OpStackTypeGet(indexInd).ToCorInfoType();
8283 if (indexCit == CORINFO_TYPE_INT)
8285 index = static_cast<NativeInt>(OpStackGet<INT32>(indexInd));
8289 _ASSERTE(indexCit == CORINFO_TYPE_NATIVEINT);
8290 index = OpStackGet<NativeInt>(indexInd);
8293 if (index < 0 || index >= len) ThrowArrayBoundsException();
// Fix: a bool should be initialized from 'false', not the pointer constant NULL.
8295 bool throwTypeMismatch = false;
8297 void* elemPtr = a->GetDataPtr() + a->GetComponentSize() * index;
8298 // elemPtr is now a vulnerable byref.
8303 // If the element type is a class type, may have to do a type check.
8304 if (elemCit == CORINFO_TYPE_CLASS)
8306 // Unless there was a readonly prefix, which removes the need to
8307 // do the (dynamic) type check.
8310 // Consume the readonly prefix, and don't do the type check below.
8311 m_readonlyFlag = false;
8315 PtrArray* pa = reinterpret_cast<PtrArray*>(a);
8316 // The element array type must be exactly the referent type of the managed
8317 // pointer we'll be creating.
8318 if (pa->GetArrayElementTypeHandle() != TypeHandle(clsHnd))
8320 throwTypeMismatch = true;
8324 if (!throwTypeMismatch)
8326 // If we're not going to throw the exception, we can take the address.
8327 OpStackSet<void*>(arrInd, elemPtr);
8328 OpStackTypeSet(arrInd, InterpreterType(CORINFO_TYPE_BYREF));
// ldelem case: load the element value from its address.
8335 LdFromMemAddr(elemPtr, elemIt);
8340 // If we're going to throw, we do the throw outside the GCX_FORBID region above, since it requires GC_TRIGGERS.
8341 if (throwTypeMismatch)
8343 COMPlusThrow(kArrayTypeMismatchException);
// Implements the token-carrying 'stelem' opcode. Resolves the element-type
// token, (in debug-ish verification) checks the value's stack-normalized type,
// pops array/index/value, bounds-checks, and stores: object refs through the
// covariance-checked SetAt path, value classes by struct copy, primitives by size.
8347 void Interpreter::StElem()
8355 _ASSERTE(m_curStackHt >= 3);
8356 unsigned arrInd = m_curStackHt - 3;
8357 unsigned indexInd = m_curStackHt - 2;
8358 unsigned valInd = m_curStackHt - 1;
8360 CorInfoType valCit = OpStackTypeGet(valInd).ToCorInfoType();
8363 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_StElem]);
8364 #endif // INTERP_TRACING
8366 CORINFO_CLASS_HANDLE typeFromTok = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_StElem));
8370 CorInfoType typeFromTokCit;
8373 typeFromTokCit = ::asCorInfoType(typeFromTok);
// Compute element size and interpreter type for the token's type.
8378 InterpreterType typeFromTokIt;
8381 if (typeFromTokCit == CORINFO_TYPE_VALUECLASS)
8384 sz = getClassSize(typeFromTok);
8386 typeFromTokIt = InterpreterType(&m_interpCeeInfo, typeFromTok);
8391 sz = CorInfoTypeSize(typeFromTokCit);
8393 typeFromTokIt = InterpreterType(typeFromTokCit);
8398 // Instead of debug, I need to parameterize the interpreter at the top level over whether
8399 // to do checks corresponding to verification.
8400 if (typeFromTokIt.StackNormalize().ToCorInfoType() != valCit)
8402 // This is obviously only a partial test of the required condition.
8403 VerificationError("Value in stelem does not have the required type.");
8407 _ASSERTE(OpStackTypeGet(arrInd).ToCorInfoType() == CORINFO_TYPE_CLASS);
8409 ArrayBase* a = OpStackGet<ArrayBase*>(arrInd);
8410 ThrowOnInvalidPointer(a);
8411 int len = a->GetNumComponents();
// Index may be 32-bit int or native int on the operand stack.
8413 CorInfoType indexCit = OpStackTypeGet(indexInd).ToCorInfoType();
8414 NativeInt index = 0;
8415 if (indexCit == CORINFO_TYPE_INT)
8417 index = static_cast<NativeInt>(OpStackGet<INT32>(indexInd));
8421 index = OpStackGet<NativeInt>(indexInd);
8424 if (index < 0 || index >= len) ThrowArrayBoundsException();
8426 if (typeFromTokCit == CORINFO_TYPE_CLASS)
// ObjIsInstanceOf may trigger GC, so the value and array are GC-protected.
8432 gc.val = ObjectToOBJECTREF(OpStackGet<Object*>(valInd));
8433 gc.a = ObjectToOBJECTREF(a);
8434 GCPROTECT_BEGIN(gc);
8435 if (gc.val != NULL &&
8436 !ObjIsInstanceOf(OBJECTREFToObject(gc.val), reinterpret_cast<PtrArray*>(a)->GetArrayElementTypeHandle()))
8437 COMPlusThrow(kArrayTypeMismatchException);
8438 reinterpret_cast<PtrArray*>(OBJECTREFToObject(gc.a))->SetAt(index, gc.val);
// Fix: removed stray double semicolon at end of this declaration.
8445 void* destPtr = a->GetDataPtr() + index * sz;
8447 if (typeFromTokCit == CORINFO_TYPE_VALUECLASS)
8449 MethodTable* valClsMT = GetMethodTableFromClsHnd(OpStackTypeGet(valInd).ToClassHandle());
8450 // I use GCSafeMemCpy below to ensure that write barriers happen for the case in which
8451 // the value class contains GC pointers. We could do better...
8452 if (sz > sizeof(UINT64))
8454 // Large struct case: stack slot contains pointer...
8455 void* src = OpStackGet<void*>(valInd);
8456 CopyValueClassUnchecked(destPtr, src, valClsMT);
8457 LargeStructOperandStackPop(sz, src);
8461 // Small struct case -- is inline in operand stack.
8462 CopyValueClassUnchecked(destPtr, OpStackGetAddr(valInd, sz), valClsMT);
// Primitive store by width.
8470 *reinterpret_cast<INT8*>(destPtr) = OpStackGet<INT8>(valInd);
8473 *reinterpret_cast<INT16*>(destPtr) = OpStackGet<INT16>(valInd);
8476 *reinterpret_cast<INT32*>(destPtr) = OpStackGet<INT32>(valInd);
8479 *reinterpret_cast<INT64*>(destPtr) = OpStackGet<INT64>(valInd);
// Implements the 'initblk' IL opcode: pops (addr, value, size) and fills 'size'
// bytes at 'addr' with the low byte of 'value'. Verifies operand types first;
// loose-rules mode additionally admits LONG-typed address/size on AMD64.
8488 void Interpreter::InitBlk()
8496 _ASSERTE(m_curStackHt >= 3);
8497 unsigned addrInd = m_curStackHt - 3;
8498 unsigned valInd = m_curStackHt - 2;
8499 unsigned sizeInd = m_curStackHt - 1;
8502 CorInfoType addrCIT = OpStackTypeGet(addrInd).ToCorInfoType();
8503 bool addrValidType = (addrCIT == CORINFO_TYPE_NATIVEINT || addrCIT == CORINFO_TYPE_BYREF);
8504 #if defined(HOST_AMD64)
8505 if (s_InterpreterLooseRules && addrCIT == CORINFO_TYPE_LONG)
8506 addrValidType = true;
8509 VerificationError("Addr of InitBlk must be native int or &.");
8511 CorInfoType valCIT = OpStackTypeGet(valInd).ToCorInfoType();
8512 if (valCIT != CORINFO_TYPE_INT)
8513 VerificationError("Value of InitBlk must be int");
8517 CorInfoType sizeCIT = OpStackTypeGet(sizeInd).ToCorInfoType();
8518 bool isLong = s_InterpreterLooseRules && (sizeCIT == CORINFO_TYPE_LONG);
8521 if (sizeCIT != CORINFO_TYPE_INT && !isLong)
8522 VerificationError("Size of InitBlk must be int");
8525 void* addr = OpStackGet<void*>(addrInd);
8526 ThrowOnInvalidPointer(addr);
8527 GCX_FORBID(); // addr is a potentially vulnerable byref.
8528 INT8 val = OpStackGet<INT8>(valInd);
8529 size_t size = (size_t) ((isLong) ? OpStackGet<UINT64>(sizeInd) : OpStackGet<UINT32>(sizeInd));
8530 memset(addr, val, size);
// Pop all three operands.
8532 m_curStackHt = addrInd;
8535 BarrierIfVolatile();
// Implements the 'cpblk' IL opcode: pops (dest, src, size) and copies 'size'
// bytes from src to dest. Verifies operand types first; loose-rules mode
// additionally admits LONG-typed addresses/size on AMD64. Uses a non-GC-ref
// copy, so dest must not need write barriers for this to be GC-safe.
8538 void Interpreter::CpBlk()
8546 _ASSERTE(m_curStackHt >= 3);
8547 unsigned destInd = m_curStackHt - 3;
8548 unsigned srcInd = m_curStackHt - 2;
8549 unsigned sizeInd = m_curStackHt - 1;
8552 CorInfoType destCIT = OpStackTypeGet(destInd).ToCorInfoType();
8553 bool destValidType = (destCIT == CORINFO_TYPE_NATIVEINT || destCIT == CORINFO_TYPE_BYREF);
8554 #if defined(HOST_AMD64)
8555 if (s_InterpreterLooseRules && destCIT == CORINFO_TYPE_LONG)
8556 destValidType = true;
8560 VerificationError("Dest addr of CpBlk must be native int or &.");
8562 CorInfoType srcCIT = OpStackTypeGet(srcInd).ToCorInfoType();
8563 bool srcValidType = (srcCIT == CORINFO_TYPE_NATIVEINT || srcCIT == CORINFO_TYPE_BYREF);
8564 #if defined(HOST_AMD64)
8565 if (s_InterpreterLooseRules && srcCIT == CORINFO_TYPE_LONG)
8566 srcValidType = true;
8569 VerificationError("Src addr of CpBlk must be native int or &.");
8572 CorInfoType sizeCIT = OpStackTypeGet(sizeInd).ToCorInfoType();
8573 bool isLong = s_InterpreterLooseRules && (sizeCIT == CORINFO_TYPE_LONG);
8576 if (sizeCIT != CORINFO_TYPE_INT && !isLong)
8577 VerificationError("Size of CpBlk must be int");
8581 void* destAddr = OpStackGet<void*>(destInd);
8582 void* srcAddr = OpStackGet<void*>(srcInd);
8583 ThrowOnInvalidPointer(destAddr);
8584 ThrowOnInvalidPointer(srcAddr);
8585 GCX_FORBID(); // destAddr & srcAddr are potentially vulnerable byrefs.
8586 size_t size = (size_t)((isLong) ? OpStackGet<UINT64>(sizeInd) : OpStackGet<UINT32>(sizeInd));
8587 memcpyNoGCRefs(destAddr, srcAddr, size);
// Pop all three operands.
8589 m_curStackHt = destInd;
8592 BarrierIfVolatile();
// Implements the 'box' IL opcode: if the token's type is a value class, boxes
// the value at the top of the operand stack (allocating via MethodTable::Box)
// and replaces the stack entry with the resulting object reference. Reference
// types fall through unchanged (the stack entry is already an object).
8595 void Interpreter::Box()
8603 _ASSERTE(m_curStackHt >= 1);
8604 unsigned ind = m_curStackHt - 1;
8606 DWORD boxTypeAttribs = 0;
8609 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_Box]);
8610 #endif // INTERP_TRACING
8612 CORINFO_CLASS_HANDLE boxTypeClsHnd = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_Box));
8616 boxTypeAttribs = m_interpCeeInfo.getClassAttribs(boxTypeClsHnd);
8621 if (boxTypeAttribs & CORINFO_FLG_VALUECLASS)
8623 InterpreterType valIt = OpStackTypeGet(ind);
// Locate the value to box: large structs live on the large-struct stack,
// small ones are inline in the operand-stack slot.
8626 if (valIt.IsLargeStruct(&m_interpCeeInfo))
8628 // Operand stack entry is pointer to the data.
8629 valPtr = OpStackGet<void*>(ind);
8633 // Operand stack entry *is* the data.
8634 size_t classSize = getClassSize(boxTypeClsHnd);
8635 valPtr = OpStackGetAddr(ind, classSize);
// TypeDescs (byrefs, pointers, etc.) cannot be boxed.
8638 TypeHandle th(boxTypeClsHnd);
8639 if (th.IsTypeDesc())
8641 COMPlusThrow(kInvalidOperationException, W("InvalidOperation_TypeCannotBeBoxed"));
8644 MethodTable* pMT = th.AsMethodTable();
8647 Object* res = OBJECTREFToObject(pMT->Box(valPtr));
8651 // If we're popping a large struct off the operand stack, make sure we clean up.
8652 if (valIt.IsLargeStruct(&m_interpCeeInfo))
8654 LargeStructOperandStackPop(valIt.Size(&m_interpCeeInfo), valPtr);
8656 OpStackSet<Object*>(ind, res);
8657 OpStackTypeSet(ind, InterpreterType(CORINFO_TYPE_CLASS));
// Boxes the value class pointed to by the BYREF at operand-stack index 'ind'
// (type 'valCls'), replacing that stack entry with the boxed object reference.
// Preconditions (asserted): ind is a live stack slot, valCls is a value class,
// and the slot currently holds a BYREF.
8662 void Interpreter::BoxStructRefAt(unsigned ind, CORINFO_CLASS_HANDLE valCls)
8670 _ASSERTE_MSG(ind < m_curStackHt, "Precondition");
8673 _ASSERTE_MSG(m_interpCeeInfo.getClassAttribs(valCls) & CORINFO_FLG_VALUECLASS, "Precondition");
8675 _ASSERTE_MSG(OpStackTypeGet(ind).ToCorInfoType() == CORINFO_TYPE_BYREF, "Precondition");
8677 InterpreterType valIt = InterpreterType(&m_interpCeeInfo, valCls);
8679 void* valPtr = OpStackGet<void*>(ind);
// TypeDescs (byrefs, pointers, etc.) cannot be boxed.
8681 TypeHandle th(valCls);
8682 if (th.IsTypeDesc())
8683 COMPlusThrow(kInvalidOperationException,W("InvalidOperation_TypeCannotBeBoxed"));
8685 MethodTable* pMT = th.AsMethodTable();
8688 Object* res = OBJECTREFToObject(pMT->Box(valPtr));
8692 OpStackSet<Object*>(ind, res);
8693 OpStackTypeSet(ind, InterpreterType(CORINFO_TYPE_CLASS));
// Implements the 'unbox' IL opcode: pops an object reference and pushes a
// BYREF to the value-type payload inside the box. Exact-type unbox succeeds
// when the method tables are equivalent, or when both types are an enum and/or
// its underlying primitive. Nullable<T> unbox is not yet implemented.
8698 void Interpreter::Unbox()
8706 _ASSERTE(m_curStackHt > 0);
8707 unsigned tos = m_curStackHt - 1;
8710 CorInfoType tosCIT = OpStackTypeGet(tos).ToCorInfoType();
8711 if (tosCIT != CORINFO_TYPE_CLASS)
8712 VerificationError("Unbox requires that TOS is an object pointer.");
8716 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_Unbox]);
8717 #endif // INTERP_TRACING
8719 CORINFO_CLASS_HANDLE boxTypeClsHnd = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_Unbox));
8721 CorInfoHelpFunc unboxHelper;
8725 unboxHelper = m_interpCeeInfo.getUnBoxHelper(boxTypeClsHnd);
8729 Object* obj = OpStackGet<Object*>(tos);
8731 switch (unboxHelper)
8733 case CORINFO_HELP_UNBOX:
8735 ThrowOnInvalidPointer(obj);
8737 MethodTable* pMT1 = (MethodTable*)boxTypeClsHnd;
8738 MethodTable* pMT2 = obj->GetMethodTable();
8740 if (pMT1->IsEquivalentTo(pMT2))
8742 res = OpStackGet<Object*>(tos)->UnBox();
8746 CorElementType type1 = pMT1->GetInternalCorElementType();
8747 CorElementType type2 = pMT2->GetInternalCorElementType();
8749 // we allow enums and their primitive type to be interchangeable
// NOTE(review): type1/type2 appear unused in this view; presumably an
// elided condition also requires type1 == type2 — confirm against the file.
8752 if ((pMT1->IsEnum() || pMT1->IsTruePrimitive()) &&
8753 (pMT2->IsEnum() || pMT2->IsTruePrimitive()))
8755 res = OpStackGet<Object*>(tos)->UnBox();
8762 COMPlusThrow(kInvalidCastException);
8767 case CORINFO_HELP_UNBOX_NULLABLE:
8769 // For "unbox Nullable<T>", we need to create a new object (maybe in some temporary local
8770 // space (that we reuse every time we hit this IL instruction?), that gets reported to the GC,
8771 // maybe in the GC heap itself). That object will contain an embedded Nullable<T>. Then, we need to
8772 // get a byref to the data within the object.
8774 NYI_INTERP("Unhandled 'unbox' of Nullable<T>.");
8779 NYI_INTERP("Unhandled 'unbox' helper.");
// Replace the object reference with a byref to its payload.
8784 OpStackSet<void*>(tos, res);
8785 OpStackTypeSet(tos, InterpreterType(CORINFO_TYPE_BYREF));
// Implements the 'throw' IL opcode: throws the object at the top of the operand
// stack. Non-Exception-derived objects are wrapped in a compliant exception.
8792 void Interpreter::Throw()
8800 _ASSERTE(m_curStackHt >= 1);
8802 // Note that we can't decrement the stack height here, since the operand stack
8803 // protects the thrown object. Nor do we need to, since the ostack will be cleared on
8804 // any catch within this method.
8805 unsigned exInd = m_curStackHt - 1;
8808 CorInfoType exCIT = OpStackTypeGet(exInd).ToCorInfoType();
8809 if (exCIT != CORINFO_TYPE_CLASS)
8811 VerificationError("Can only throw an object.");
8815 Object* obj = OpStackGet<Object*>(exInd);
8816 ThrowOnInvalidPointer(obj);
8818 OBJECTREF oref = ObjectToOBJECTREF(obj);
// CLS compliance: wrap thrown objects that don't derive from System.Exception.
8819 if (!IsException(oref->GetMethodTable()))
8821 GCPROTECT_BEGIN(oref);
8822 WrapNonCompliantException(&oref);
// Implements the 'rethrow' IL opcode: rethrows the current thread's last
// thrown object (valid only inside a catch handler, per IL rules).
8828 void Interpreter::Rethrow()
8836 OBJECTREF throwable = GetThread()->LastThrownObject();
8837 COMPlusThrow(throwable);
// Implements the 'unbox.any' IL opcode. For reference types this is a castclass
// (ObjIsInstanceOf throws on failure); for value types it unboxes and then
// behaves like 'ldobj', leaving the value (not a byref) on the operand stack.
// Nullable<T> is handled via Nullable::UnBox into a stack slot or the
// large-struct stack depending on size.
8840 void Interpreter::UnboxAny()
8848 _ASSERTE(m_curStackHt > 0);
8849 unsigned tos = m_curStackHt - 1;
8851 unsigned boxTypeTok = getU4LittleEndian(m_ILCodePtr + 1);
8855 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_UnboxAny]);
8856 #endif // INTERP_TRACING
8858 CORINFO_RESOLVED_TOKEN boxTypeResolvedTok;
8859 CORINFO_CLASS_HANDLE boxTypeClsHnd;
8860 DWORD boxTypeAttribs = 0;
8864 ResolveToken(&boxTypeResolvedTok, boxTypeTok, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_UnboxAny));
8865 boxTypeClsHnd = boxTypeResolvedTok.hClass;
8866 boxTypeAttribs = m_interpCeeInfo.getClassAttribs(boxTypeClsHnd);
8869 CorInfoType unboxCIT = OpStackTypeGet(tos).ToCorInfoType();
8870 if (unboxCIT != CORINFO_TYPE_CLASS)
8871 VerificationError("Type mismatch in UNBOXANY.");
// Reference-type case: behaves like castclass; stack entry is unchanged on success.
8873 if ((boxTypeAttribs & CORINFO_FLG_VALUECLASS) == 0)
8875 Object* obj = OpStackGet<Object*>(tos);
8876 if (obj != NULL && !ObjIsInstanceOf(obj, TypeHandle(boxTypeClsHnd), TRUE))
8878 UNREACHABLE(); //ObjIsInstanceOf will throw if cast can't be done
8883 CorInfoHelpFunc unboxHelper;
8887 unboxHelper = m_interpCeeInfo.getUnBoxHelper(boxTypeClsHnd);
8890 // Important that this *not* be factored out with the identical statement in the "if" branch:
8891 // delay read from GC-protected operand stack until after COOP-->PREEMP transition above.
8892 Object* obj = OpStackGet<Object*>(tos);
8894 switch (unboxHelper)
8896 case CORINFO_HELP_UNBOX:
8898 ThrowOnInvalidPointer(obj);
8900 MethodTable* pMT1 = (MethodTable*)boxTypeClsHnd;
8901 MethodTable* pMT2 = obj->GetMethodTable();
// Exact type match, or enum/underlying-primitive interchange with matching
// element types, permits the unbox.
8904 if (pMT1->IsEquivalentTo(pMT2))
8906 res = OpStackGet<Object*>(tos)->UnBox();
8910 if (pMT1->GetInternalCorElementType() == pMT2->GetInternalCorElementType() &&
8911 (pMT1->IsEnum() || pMT1->IsTruePrimitive()) &&
8912 (pMT2->IsEnum() || pMT2->IsTruePrimitive()))
8914 res = OpStackGet<Object*>(tos)->UnBox();
8920 COMPlusThrow(kInvalidCastException);
8923 // As the ECMA spec says, the rest is like a "ldobj".
8924 LdObjValueClassWork(boxTypeClsHnd, tos, res);
8928 case CORINFO_HELP_UNBOX_NULLABLE:
8930 InterpreterType it = InterpreterType(&m_interpCeeInfo, boxTypeClsHnd);
8931 size_t sz = it.Size(&m_interpCeeInfo);
8932 if (sz > sizeof(INT64))
8934 void* destPtr = LargeStructOperandStackPush(sz);
8935 if (!Nullable::UnBox(destPtr, ObjectToOBJECTREF(obj), (MethodTable*)boxTypeClsHnd))
8937 COMPlusThrow(kInvalidCastException);
8939 OpStackSet<void*>(tos, destPtr);
// Small Nullable<T> fits inline in a 64-bit operand-stack slot.
8944 if (!Nullable::UnBox(&dest, ObjectToOBJECTREF(obj), (MethodTable*)boxTypeClsHnd))
8946 COMPlusThrow(kInvalidCastException);
8948 OpStackSet<INT64>(tos, dest);
8950 OpStackTypeSet(tos, it.StackNormalize());
8955 NYI_INTERP("Unhandled 'unbox.any' helper.");
// Implements the 'ldlen' IL opcode: replaces the array reference at the top of
// the operand stack with its component count.
8960 void Interpreter::LdLen()
8968 _ASSERTE(m_curStackHt >= 1);
8969 unsigned arrInd = m_curStackHt - 1;
8971 _ASSERTE(OpStackTypeGet(arrInd).ToCorInfoType() == CORINFO_TYPE_CLASS);
8975 ArrayBase* a = OpStackGet<ArrayBase*>(arrInd);
8976 ThrowOnInvalidPointer(a);
8977 int len = a->GetNumComponents();
8979 OpStackSet<NativeUInt>(arrInd, NativeUInt(len));
8980 // The ECMA spec says that the type of the length value is NATIVEUINT, but this
8981 // doesn't make any sense -- unsigned types are not stack-normalized. So I'm
8982 // using NATIVEINT, to get the width right.
8983 OpStackTypeSet(arrInd, InterpreterType(CORINFO_TYPE_NATIVEINT));
// Entry point for the 'call'/'callvirt' IL opcodes: bumps tracing counters and
// delegates to DoCallWork. virtualCall distinguishes callvirt from call.
8987 void Interpreter::DoCall(bool virtualCall)
8989 #if INTERP_DYNAMIC_CONTRACTS
8996 // Dynamic contract occupies too much stack.
8997 STATIC_CONTRACT_THROWS;
8998 STATIC_CONTRACT_GC_TRIGGERS;
8999 STATIC_CONTRACT_MODE_COOPERATIVE;
9003 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_Call]);
9004 #endif // INTERP_TRACING
9006 DoCallWork(virtualCall);
// Computes the exact generics context handle for this method invocation.
// Uses the explicit generics-context argument when the method has one
// (method handle for generic methods, class context otherwise), or derives a
// class context from the 'this' object for instance methods on generic types;
// falls back to the method's own context.
9011 CORINFO_CONTEXT_HANDLE InterpreterMethodInfo::GetPreciseGenericsContext(Object* thisArg, void* genericsCtxtArg)
9013 // If the caller has a generic argument, then we need to get the exact methodContext.
9014 // There are several possibilities that lead to a generic argument:
9015 // 1) Static method of generic class: generic argument is the method table of the class.
9016 // 2) generic method of a class: generic argument is the precise MethodDesc* of the method.
9017 if (GetFlag<InterpreterMethodInfo::Flag_hasGenericsContextArg>())
9019 _ASSERTE(GetFlag<InterpreterMethodInfo::Flag_methHasGenericArgs>() || GetFlag<InterpreterMethodInfo::Flag_typeHasGenericArgs>());
9020 if (GetFlag<InterpreterMethodInfo::Flag_methHasGenericArgs>())
9022 return MAKE_METHODCONTEXT(reinterpret_cast<CORINFO_METHOD_HANDLE>(genericsCtxtArg));
// Type-generic case: the context arg is a MethodTable*; map it to the
// instantiation matching the method's declaring class.
9026 MethodTable* methodClass = reinterpret_cast<MethodDesc*>(m_method)->GetMethodTable();
9027 MethodTable* contextClass = reinterpret_cast<MethodTable*>(genericsCtxtArg)->GetMethodTableMatchingParentClass(methodClass);
9028 return MAKE_CLASSCONTEXT(contextClass);
9031 // TODO: This condition isn't quite right. If the actual class is a subtype of the declaring type of the method,
9032 // then it might be in another module, the scope and context won't agree.
9033 else if (GetFlag<InterpreterMethodInfo::Flag_typeHasGenericArgs>()
9034 && !GetFlag<InterpreterMethodInfo::Flag_methHasGenericArgs>()
9035 && GetFlag<InterpreterMethodInfo::Flag_hasThisArg>()
9036 && GetFlag<InterpreterMethodInfo::Flag_thisArgIsObjPtr>() && thisArg != NULL)
9038 MethodTable* methodClass = reinterpret_cast<MethodDesc*>(m_method)->GetMethodTable();
9039 MethodTable* contextClass = thisArg->GetMethodTable()->GetMethodTableMatchingParentClass(methodClass);
9040 return MAKE_CLASSCONTEXT(contextClass);
// Default: the method's own (possibly shared) context.
9044 return MAKE_METHODCONTEXT(m_method);
9048 void Interpreter::DoCallWork(bool virtualCall, void* thisArg, CORINFO_RESOLVED_TOKEN* methTokPtr, CORINFO_CALL_INFO* callInfoPtr)
9050 #if INTERP_DYNAMIC_CONTRACTS
9057 // Dynamic contract occupies too much stack.
9058 STATIC_CONTRACT_THROWS;
9059 STATIC_CONTRACT_GC_TRIGGERS;
9060 STATIC_CONTRACT_MODE_COOPERATIVE;
9063 #if INTERP_ILCYCLE_PROFILE
9066 unsigned __int64 callStartCycles;
9067 bool b = CycleTimer::GetThreadCyclesS(&callStartCycles); _ASSERTE(b);
9068 unsigned __int64 callStartExemptCycles = m_exemptCycles;
9070 #endif // INTERP_ILCYCLE_PROFILE
9073 InterlockedIncrement(&s_totalInterpCalls);
9074 #endif // INTERP_TRACING
9075 unsigned tok = getU4LittleEndian(m_ILCodePtr + sizeof(BYTE));
9077 // It's possible for an IL method to push a capital-F Frame. If so, we pop it and save it;
9078 // we'll push it back on after our GCPROTECT frame is popped.
9079 Frame* ilPushedFrame = NULL;
9081 // We can't protect "thisArg" with a GCPROTECT, because this pushes a Frame, and there
9082 // exist managed methods that push (and pop) Frames -- so that the Frame chain does not return
9083 // to its original state after a call. Therefore, we can't have a Frame on the stack over the duration
9084 // of a call. (I assume that any method that calls a Frame-pushing IL method performs a matching
9085 // call to pop that Frame before the caller method completes. If this were not true, if one method could push
9086 // a Frame, but defer the pop to its caller, then we could *never* use a Frame in the interpreter, and
9087 // our implementation plan would be doomed.)
9088 _ASSERTE(m_callThisArg == NULL);
9089 m_callThisArg = thisArg;
9091 // Have we already cached a MethodDescCallSite for this call? (We do this only in loops
9092 // in the current execution).
9093 unsigned iloffset = CurOffset();
9094 CallSiteCacheData* pCscd = NULL;
9095 if (s_InterpreterUseCaching) pCscd = GetCachedCallInfo(iloffset);
9097 // If this is true, then we should not cache this call site.
9100 CORINFO_RESOLVED_TOKEN methTok;
9101 CORINFO_CALL_INFO callInfo;
9102 MethodDesc* methToCall = NULL;
9103 CORINFO_CLASS_HANDLE exactClass = NULL;
9104 CORINFO_SIG_INFO_SMALL sigInfo;
9108 methToCall = pCscd->m_pMD;
9109 sigInfo = pCscd->m_sigInfo;
9111 doNotCache = true; // We already have a cache entry.
9115 doNotCache = false; // Until we determine otherwise.
9116 if (callInfoPtr == NULL)
9120 // callInfoPtr and methTokPtr must either both be NULL, or neither.
9121 _ASSERTE(methTokPtr == NULL);
9123 methTokPtr = &methTok;
9124 ResolveToken(methTokPtr, tok, CORINFO_TOKENKIND_Method InterpTracingArg(RTK_Call));
9125 OPCODE opcode = (OPCODE)(*m_ILCodePtr);
9127 m_interpCeeInfo.getCallInfo(methTokPtr,
9128 m_constrainedFlag ? & m_constrainedResolvedToken : NULL,
9129 m_methInfo->m_method,
9130 //this is how impImportCall invokes getCallInfo
9131 combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM,
9132 CORINFO_CALLINFO_SECURITYCHECKS),
9133 (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT
9134 : CORINFO_CALLINFO_NONE),
9136 #if INTERP_ILCYCLE_PROFILE
9140 unsigned __int64 callEndCycles;
9141 b = CycleTimer::GetThreadCyclesS(&callEndCycles); _ASSERTE(b);
9142 unsigned __int64 delta = (callEndCycles - callStartCycles);
9143 delta -= (m_exemptCycles - callStartExemptCycles);
9144 s_callCycles += delta;
9148 #endif // INTERP_ILCYCLE_PROFILE
9150 callInfoPtr = &callInfo;
9152 _ASSERTE(!callInfoPtr->exactContextNeedsRuntimeLookup);
9154 methToCall = reinterpret_cast<MethodDesc*>(methTok.hMethod);
9155 exactClass = methTok.hClass;
9159 // callInfoPtr and methTokPtr must either both be NULL, or neither.
9160 _ASSERTE(methTokPtr != NULL);
9162 _ASSERTE(!callInfoPtr->exactContextNeedsRuntimeLookup);
9164 methToCall = reinterpret_cast<MethodDesc*>(callInfoPtr->hMethod);
9165 exactClass = methTokPtr->hClass;
9168 // We used to take the sigInfo from the callInfo here, but that isn't precise, since
9169 // we may have made "methToCall" more precise wrt generics than the method handle in
9170 // the callinfo. So look up th emore precise signature.
9173 CORINFO_SIG_INFO sigInfoFull;
9174 m_interpCeeInfo.getMethodSig(CORINFO_METHOD_HANDLE(methToCall), &sigInfoFull, nullptr);
9175 sigInfo.retTypeClass = sigInfoFull.retTypeClass;
9176 sigInfo.numArgs = sigInfoFull.numArgs;
9177 sigInfo.callConv = sigInfoFull.callConv;
9178 sigInfo.retType = sigInfoFull.retType;
9181 // Point A in our cycle count.
9184 // TODO: enable when NamedIntrinsic is available to interpreter
9187 // Is the method an intrinsic? If so, and if it's one we've written special-case code for
9188 // handle intrinsically.
9189 NamedIntrinsic intrinsicName;
9192 intrinsicName = getIntrinsicName(CORINFO_METHOD_HANDLE(methToCall), nullptr);
9196 if (intrinsicName == NI_Illegal)
9197 InterlockedIncrement(&s_totalInterpCallsToIntrinsics);
9198 #endif // INTERP_TRACING
9199 bool didIntrinsic = false;
9200 if (!m_constrainedFlag)
9202 switch (intrinsicId)
9205 case NI_System_StubHelpers_GetStubContext:
9206 OpStackSet<void*>(m_curStackHt, GetStubContext());
9207 OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_NATIVEINT));
9208 m_curStackHt++; didIntrinsic = true;
9210 #endif // INTERP_ILSTUBS
9213 InterlockedIncrement(&s_totalInterpCallsToIntrinsicsUnhandled);
9214 #endif // INTERP_TRACING
9218 // Plus some other calls that we're going to treat "like" intrinsics...
9219 if (methToCall == CoreLibBinder::GetMethod(METHOD__STUBHELPERS__SET_LAST_ERROR))
9221 // If we're interpreting a method that calls "SetLastError", it's very likely that the call(i) whose
9222 // error we're trying to capture was performed with MethodDescCallSite machinery that itself trashes
9223 // the last error. We solve this by saving the last error in a special interpreter-specific field of
9224 // "Thread" in that case, and essentially implement SetLastError here, taking that field as the
9225 // source for the last error.
9226 Thread* thrd = GetThread();
9227 thrd->m_dwLastError = thrd->m_dwLastErrorInterp;
9228 didIntrinsic = true;
9231 // TODO: The following check for hardware intrinsics is not a production-level
9232 // solution and may produce incorrect results.
9233 static ConfigDWORD s_InterpreterHWIntrinsicsIsSupportedFalse;
9234 if (s_InterpreterHWIntrinsicsIsSupportedFalse.val(CLRConfig::INTERNAL_InterpreterHWIntrinsicsIsSupportedFalse) != 0)
9238 // Hardware intrinsics are recognized by name.
9239 const char* namespaceName = NULL;
9240 const char* className = NULL;
9241 const char* methodName = m_interpCeeInfo.getMethodNameFromMetadata((CORINFO_METHOD_HANDLE)methToCall, &className, &namespaceName, NULL);
9243 #if defined(TARGET_X86) || defined(TARGET_AMD64)
9244 strcmp(namespaceName, "System.Runtime.Intrinsics.X86") == 0 &&
9245 #elif defined(TARGET_ARM64)
9246 strcmp(namespaceName, "System.Runtime.Intrinsics.Arm") == 0 &&
9247 #endif // defined(TARGET_X86) || defined(TARGET_AMD64)
9248 strcmp(methodName, "get_IsSupported") == 0
9253 didIntrinsic = true;
9258 // Check for the simd class...
9259 _ASSERTE(exactClass != NULL);
9261 bool isIntrinsicType = m_interpCeeInfo.isIntrinsicType(exactClass);
9263 if (isIntrinsicType)
9265 // SIMD intrinsics are recognized by name.
9266 const char* namespaceName = NULL;
9267 const char* className = NULL;
9268 const char* methodName = m_interpCeeInfo.getMethodNameFromMetadata((CORINFO_METHOD_HANDLE)methToCall, &className, &namespaceName, NULL);
9269 if ((strcmp(methodName, "get_IsHardwareAccelerated") == 0) && (strcmp(className, "Vector") == 0) && (strcmp(namespaceName, "System.Numerics") == 0))
9272 DoSIMDHwAccelerated();
9273 didIntrinsic = true;
9279 // Must block caching or we lose easy access to the class
9282 #endif // FEATURE_SIMD
9288 if (s_InterpreterUseCaching && !doNotCache)
9290 // Cache the token resolution result...
9291 pCscd = new CallSiteCacheData(methToCall, sigInfo);
9292 CacheCallInfo(iloffset, pCscd);
9294 // Now we can return.
9299 // Handle other simple special cases:
9301 #if FEATURE_INTERPRETER_DEADSIMPLE_OPT
9302 #ifndef DACCESS_COMPILE
9303 // Dead simple static getters.
9304 InterpreterMethodInfo* calleeInterpMethInfo;
9305 if (GetMethodHandleToInterpMethInfoPtrMap()->Lookup(CORINFO_METHOD_HANDLE(methToCall), &calleeInterpMethInfo))
9307 if (calleeInterpMethInfo->GetFlag<InterpreterMethodInfo::Flag_methIsDeadSimpleGetter>())
9309 if (methToCall->IsStatic())
9315 ILOffsetToItemCache* calleeCache;
9317 Object* thisArg = OpStackGet<Object*>(m_curStackHt-1);
9319 // We pass NULL for the generic context arg, because a dead simple getter takes none, by definition.
9320 calleeCache = calleeInterpMethInfo->GetCacheForCall(thisArg, /*genericsContextArg*/NULL);
9322 // We've interpreted the getter at least once, so the cache for *some* generics context is populated -- but maybe not
9323 // this one. We're hoping that it usually is.
9324 if (calleeCache != NULL)
9326 CachedItem cachedItem;
9327 unsigned offsetOfLd;
9328 if (calleeInterpMethInfo->GetFlag<InterpreterMethodInfo::Flag_methIsDeadSimpleGetterIsDbgForm>())
9329 offsetOfLd = ILOffsetOfLdFldInDeadSimpleInstanceGetterOpt;
9331 offsetOfLd = ILOffsetOfLdFldInDeadSimpleInstanceGetterOpt;
9333 bool b = calleeCache->GetItem(offsetOfLd, cachedItem);
9334 _ASSERTE_MSG(b, "If the cache exists for this generic context, it should an entry for the LdFld.");
9335 _ASSERTE_MSG(cachedItem.m_tag == CIK_InstanceField, "If it's there, it should be an instance field cache.");
9336 LdFld(cachedItem.m_value.m_instanceField);
9338 InterlockedIncrement(&s_totalInterpCallsToDeadSimpleGetters);
9339 InterlockedIncrement(&s_totalInterpCallsToDeadSimpleGettersShortCircuited);
9340 #endif // INTERP_TRACING
9346 #endif // DACCESS_COMPILE
9347 #endif // FEATURE_INTERPRETER_DEADSIMPLE_OPT
9349 unsigned totalSigArgs;
9350 CORINFO_VARARGS_HANDLE vaSigCookie = nullptr;
9351 if ((sigInfo.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
9352 (sigInfo.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
9355 CORINFO_SIG_INFO sig;
9356 m_interpCeeInfo.findCallSiteSig(m_methInfo->m_module, methTokPtr->token, MAKE_METHODCONTEXT(m_methInfo->m_method), &sig);
9357 sigInfo.retTypeClass = sig.retTypeClass;
9358 sigInfo.numArgs = sig.numArgs;
9359 sigInfo.callConv = sig.callConv;
9360 sigInfo.retType = sig.retType;
9361 // Adding 'this' pointer because, numArgs doesn't include the this pointer.
9362 totalSigArgs = sigInfo.numArgs + sigInfo.hasThis();
9364 if ((sigInfo.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
9366 Module* module = GetModule(sig.scope);
9367 vaSigCookie = CORINFO_VARARGS_HANDLE(module->GetVASigCookie(Signature(sig.pSig, sig.cbSig)));
9373 totalSigArgs = sigInfo.totalILArgs();
9376 // Note that "totalNativeArgs()" includes space for ret buff arg.
9377 unsigned nSlots = totalSigArgs + 1;
9378 if (sigInfo.hasTypeArg()) nSlots++;
9379 if (sigInfo.isVarArg()) nSlots++;
9381 DelegateCtorArgs ctorData;
9382 // If any of these are non-null, they will be pushed as extra arguments (see the code below).
9383 ctorData.pArg3 = NULL;
9384 ctorData.pArg4 = NULL;
9385 ctorData.pArg5 = NULL;
9387 // Since we make "doNotCache" true below, well never have a non-null "pCscd" for a delegate
9388 // constructor. But we have to check for a cached method first, since callInfoPtr may be null in the cached case.
9389 if (pCscd == NULL && callInfoPtr->classFlags & CORINFO_FLG_DELEGATE && callInfoPtr->methodFlags & CORINFO_FLG_CONSTRUCTOR)
9391 // We won't cache this case.
9394 _ASSERTE_MSG(!sigInfo.hasTypeArg(), "I assume that this isn't possible.");
9397 ctorData.pMethod = methToCall;
9399 // Second argument to delegate constructor will be code address of the function the delegate wraps.
9400 _ASSERTE(TOSIsPtr() && OpStackTypeGet(m_curStackHt-1).ToCorInfoType() != CORINFO_TYPE_BYREF);
9401 CORINFO_METHOD_HANDLE targetMethodHnd = GetFunctionPointerStack()[m_curStackHt-1];
9402 _ASSERTE(targetMethodHnd != NULL);
9403 CORINFO_METHOD_HANDLE alternateCtorHnd = m_interpCeeInfo.GetDelegateCtor(reinterpret_cast<CORINFO_METHOD_HANDLE>(methToCall), methTokPtr->hClass, targetMethodHnd, &ctorData);
9404 MethodDesc* alternateCtor = reinterpret_cast<MethodDesc*>(alternateCtorHnd);
9405 if (alternateCtor != methToCall)
9407 methToCall = alternateCtor;
9409 // Translate the method address argument from a method handle to the actual callable code address.
9410 void* val = (void *)((MethodDesc *)targetMethodHnd)->GetMultiCallableAddrOfCode();
9411 // Change the method argument to the code pointer.
9412 OpStackSet<void*>(m_curStackHt-1, val);
9414 // Now if there are extra arguments, add them to the number of slots; we'll push them on the
9416 if (ctorData.pArg3) nSlots++;
9417 if (ctorData.pArg4) nSlots++;
9418 if (ctorData.pArg5) nSlots++;
9422 // Make sure that the operand stack has the required number of arguments.
9423 // (Note that this is IL args, not native.)
9426 // The total number of arguments on the IL stack. Initially we assume that all the IL arguments
9427 // the callee expects are on the stack, but may be adjusted downwards if the "this" argument
9428 // is provided by an allocation (the call is to a constructor).
9429 unsigned totalArgsOnILStack = totalSigArgs;
9430 if (m_callThisArg != NULL)
9432 _ASSERTE(totalArgsOnILStack > 0);
9433 totalArgsOnILStack--;
9436 #if defined(FEATURE_HFA)
9437 // Does the callee have an HFA return type?
9438 unsigned HFAReturnArgSlots = 0;
9442 if (sigInfo.retType == CORINFO_TYPE_VALUECLASS
9443 && (m_interpCeeInfo.getHFAType(sigInfo.retTypeClass) != CORINFO_HFA_ELEM_NONE)
9444 && (sigInfo.getCallConv() & CORINFO_CALLCONV_VARARG) == 0)
9446 HFAReturnArgSlots = getClassSize(sigInfo.retTypeClass);
9447 // Round up to a multiple of double size.
9448 HFAReturnArgSlots = (HFAReturnArgSlots + sizeof(ARG_SLOT) - 1) / sizeof(ARG_SLOT);
9451 #elif defined(UNIX_AMD64_ABI)
9452 unsigned HasTwoSlotBuf = sigInfo.retType == CORINFO_TYPE_VALUECLASS &&
9453 getClassSize(sigInfo.retTypeClass) == 16;
9458 const unsigned LOCAL_ARG_SLOTS = 8;
9459 ARG_SLOT localArgs[LOCAL_ARG_SLOTS];
9460 InterpreterType localArgTypes[LOCAL_ARG_SLOTS];
9463 InterpreterType* argTypes;
9464 #if defined(HOST_X86)
9465 unsigned totalArgSlots = nSlots;
9466 #elif defined(HOST_ARM) || defined(HOST_ARM64)
9467 // ARM64TODO: Verify that the following statement is correct for ARM64.
9468 unsigned totalArgSlots = nSlots + HFAReturnArgSlots;
9469 #elif defined(HOST_AMD64)
9470 unsigned totalArgSlots = nSlots;
9471 #elif defined(HOST_LOONGARCH64)
9472 unsigned totalArgSlots = nSlots;
9473 #elif defined(HOST_RISCV64)
9474 unsigned totalArgSlots = nSlots;
9476 #error "unsupported platform"
9479 if (totalArgSlots <= LOCAL_ARG_SLOTS)
9481 args = &localArgs[0];
9482 argTypes = &localArgTypes[0];
9486 args = (ARG_SLOT*)_alloca(totalArgSlots * sizeof(ARG_SLOT));
9487 #if defined(HOST_ARM)
9488 // The HFA return buffer, if any, is assumed to be at a negative
9489 // offset from the IL arg pointer, so adjust that pointer upward.
9490 args = args + HFAReturnArgSlots;
9491 #endif // defined(HOST_ARM)
9492 argTypes = (InterpreterType*)_alloca(nSlots * sizeof(InterpreterType));
9494 // Make sure that we don't scan any of these until we overwrite them with
9495 // the real types of the arguments.
9496 InterpreterType undefIt(CORINFO_TYPE_UNDEF);
9497 for (unsigned i = 0; i < nSlots; i++) argTypes[i] = undefIt;
9499 // GC-protect the argument array (as byrefs).
9500 m_args = args; m_argsSize = nSlots; m_argTypes = argTypes;
9502 // This is the index into the "args" array (where we copy the value to).
9505 // The operand stack index of the first IL argument.
9506 _ASSERTE(m_curStackHt >= totalArgsOnILStack);
9507 int argsBase = m_curStackHt - totalArgsOnILStack;
9509 // Current on-stack argument index.
9512 // We do "this" -- in the case of a constructor, we "shuffle" the "m_callThisArg" argument in as the first
9513 // argument -- it isn't on the IL operand stack.
9515 if (m_constrainedFlag)
9517 _ASSERTE(m_callThisArg == NULL); // "m_callThisArg" non-null only for .ctor, which are not callvirts.
9519 CorInfoType argCIT = OpStackTypeGet(argsBase + arg).ToCorInfoType();
9520 if (argCIT != CORINFO_TYPE_BYREF)
9521 VerificationError("This arg of constrained call must be managed pointer.");
9523 // We only cache for the CORINFO_NO_THIS_TRANSFORM case, so we may assume that if we have a cached call site,
9524 // there's no thisTransform to perform.
9527 switch (callInfoPtr->thisTransform)
9529 case CORINFO_NO_THIS_TRANSFORM:
9530 // It is a constrained call on a method implemented by a value type; this is already the proper managed pointer.
9533 case CORINFO_DEREF_THIS:
9537 DWORD clsAttribs = m_interpCeeInfo.getClassAttribs(m_constrainedResolvedToken.hClass);
9538 _ASSERTE((clsAttribs & CORINFO_FLG_VALUECLASS) == 0);
9542 // As per the spec, dereference the byref to the "this" pointer, and substitute it as the new "this" pointer.
9544 Object** objPtrPtr = OpStackGet<Object**>(argsBase + arg);
9545 OpStackSet<Object*>(argsBase + arg, *objPtrPtr);
9546 OpStackTypeSet(argsBase + arg, InterpreterType(CORINFO_TYPE_CLASS));
9551 case CORINFO_BOX_THIS:
9552 // This is the case where the call is to a virtual method of Object the given
9553 // struct class does not override -- the struct must be boxed, so that the
9554 // method can be invoked as a virtual.
9555 BoxStructRefAt(argsBase + arg, m_constrainedResolvedToken.hClass);
9560 exactClass = m_constrainedResolvedToken.hClass;
9563 DWORD exactClassAttribs = m_interpCeeInfo.getClassAttribs(exactClass);
9564 // If the constraint type is a value class, then it is the exact class (which will be the
9565 // "owner type" in the MDCS below.) If it is not, leave it as the (precise) interface method.
9566 if (exactClassAttribs & CORINFO_FLG_VALUECLASS)
9568 MethodTable* exactClassMT = GetMethodTableFromClsHnd(exactClass);
9569 // Find the method on exactClass corresponding to methToCall.
9570 methToCall = MethodDesc::FindOrCreateAssociatedMethodDesc(
9571 reinterpret_cast<MethodDesc*>(callInfoPtr->hMethod), // pPrimaryMD
9572 exactClassMT, // pExactMT
9573 FALSE, // forceBoxedEntryPoint
9574 methToCall->GetMethodInstantiation(), // methodInst
9575 FALSE); // allowInstParam
9579 exactClass = methTokPtr->hClass;
9584 // We've consumed the constraint, so reset the flag.
9585 m_constrainedFlag = false;
9590 if (callInfoPtr->methodFlags & CORINFO_FLG_STATIC)
9592 MethodDesc* pMD = reinterpret_cast<MethodDesc*>(callInfoPtr->hMethod);
9593 EnsureClassInit(pMD->GetMethodTable());
9599 // We must do anything that might make a COOP->PREEMP transition before copying arguments out of the
9600 // operand stack (where they are GC-protected) into the args array (where they are not).
9602 const char* clsOfMethToCallName;;
9603 const char* methToCallName = NULL;
9606 methToCallName = m_interpCeeInfo.getMethodNameFromMetadata(CORINFO_METHOD_HANDLE(methToCall), &clsOfMethToCallName, NULL, NULL);
9609 if (strncmp(methToCallName, "get_", 4) == 0)
9611 InterlockedIncrement(&s_totalInterpCallsToGetters);
9613 if (IsDeadSimpleGetter(&m_interpCeeInfo, methToCall, &offsetOfLd))
9615 InterlockedIncrement(&s_totalInterpCallsToDeadSimpleGetters);
9618 else if (strncmp(methToCallName, "set_", 4) == 0)
9620 InterlockedIncrement(&s_totalInterpCallsToSetters);
9622 #endif // INTERP_TRACING
9624 // Only do this check on the first call, since it should be the same each time.
9627 // Ensure that any value types used as argument types are loaded. This property is checked
9628 // by the MethodDescCall site mechanisms. Since enums are freely convertible with their underlying
9629 // integer type, this is at least one case where a caller may push a value convertible to a value type
9630 // without any code having caused the value type to be loaded. This is DEBUG-only because if the callee
9631 // the integer-type value as the enum value type, it will have loaded the value type.
9632 MetaSig ms(methToCall);
9633 CorElementType argType;
9634 while ((argType = ms.NextArg()) != ELEMENT_TYPE_END)
9636 if (argType == ELEMENT_TYPE_VALUETYPE)
9638 TypeHandle th = ms.GetLastTypeHandleThrowing(ClassLoader::LoadTypes);
9639 CONSISTENCY_CHECK(th.CheckFullyLoaded());
9640 CONSISTENCY_CHECK(th.IsRestored());
9646 // CYCLE PROFILE: BEFORE ARG PROCESSING.
9648 if (sigInfo.hasThis())
9650 if (m_callThisArg != NULL)
9652 if (size_t(m_callThisArg) == 0x1)
9654 args[curArgSlot] = NULL;
9658 args[curArgSlot] = PtrToArgSlot(m_callThisArg);
9660 argTypes[curArgSlot] = InterpreterType(CORINFO_TYPE_BYREF);
9664 args[curArgSlot] = PtrToArgSlot(OpStackGet<void*>(argsBase + arg));
9665 argTypes[curArgSlot] = OpStackTypeGet(argsBase + arg);
9668 // AV -> NullRef translation is NYI for the interpreter,
9669 // so we should manually check and throw the correct exception.
9670 if (args[curArgSlot] == NULL)
9672 // If we're calling a constructor, we bypass this check since the runtime
9673 // should have thrown OOM if it was unable to allocate an instance.
9674 if (m_callThisArg == NULL)
9676 _ASSERTE(!methToCall->IsStatic());
9677 ThrowNullPointerException();
9679 // ...except in the case of strings, which are both
9680 // allocated and initialized by their special constructor.
9683 _ASSERTE(methToCall->IsCtor() && methToCall->GetMethodTable()->IsString());
9689 // This is the argument slot that will be used to hold the return value.
9690 // In UNIX_AMD64_ABI, return type may have need tow ARG_SLOTs.
9691 ARG_SLOT retVals[2] = {0, 0};
9692 #if !defined(HOST_ARM) && !defined(UNIX_AMD64_ABI)
9693 _ASSERTE (NUMBER_RETURNVALUE_SLOTS == 1);
9696 // If the return type is a structure, then these will be initialized.
9697 CORINFO_CLASS_HANDLE retTypeClsHnd = NULL;
9698 InterpreterType retTypeIt;
9699 size_t retTypeSz = 0;
9701 // If non-null, space allocated to hold a large struct return value. Should be deleted later.
9702 // (I could probably optimize this pop all the arguments first, then allocate space for the return value
9703 // on the large structure operand stack, and pass a pointer directly to that space, avoiding the extra
9704 // copy we have below. But this seemed more expedient, and this should be a pretty rare case.)
9705 BYTE* pLargeStructRetVal = NULL;
9707 // If there's a "GetFlag<Flag_hasRetBuffArg>()" struct return value, it will be stored in this variable if it fits,
9708 // otherwise, we'll dynamically allocate memory for it.
9709 ARG_SLOT smallStructRetVal = 0;
9711 // We should have no return buffer temp space registered here...unless this is a constructor, in which
9712 // case it will return void. In particular, if the return type VALUE_CLASS, then this should be NULL.
9713 _ASSERTE_MSG((pCscd != NULL) || sigInfo.retType == CORINFO_TYPE_VOID || m_structRetValITPtr == NULL, "Invariant.");
9715 // Is it the return value a struct with a ret buff?
9716 _ASSERTE_MSG(methToCall != NULL, "assumption");
9717 bool hasRetBuffArg = false;
9718 if (sigInfo.retType == CORINFO_TYPE_VALUECLASS || sigInfo.retType == CORINFO_TYPE_REFANY)
9720 hasRetBuffArg = !!methToCall->HasRetBuffArg();
9721 retTypeClsHnd = sigInfo.retTypeClass;
9723 MetaSig ms(methToCall);
9726 // On ARM, if there's an HFA return type, we must also allocate a return buffer, since the
9727 // MDCS calling convention requires it.
9729 #if defined(HOST_ARM)
9730 || HFAReturnArgSlots > 0
9731 #endif // defined(HOST_ARM)
9734 _ASSERTE(retTypeClsHnd != NULL);
9735 retTypeIt = InterpreterType(&m_interpCeeInfo, retTypeClsHnd);
9736 retTypeSz = retTypeIt.Size(&m_interpCeeInfo);
9738 #if defined(HOST_ARM)
9739 if (HFAReturnArgSlots > 0)
9741 args[curArgSlot] = PtrToArgSlot(args - HFAReturnArgSlots);
9744 #endif // defined(HOST_ARM)
9746 if (retTypeIt.IsLargeStruct(&m_interpCeeInfo))
9748 size_t retBuffSize = retTypeSz;
9749 // If the target architecture can sometimes return a struct in several registers,
9750 // MethodDescCallSite will reserve a return value array big enough to hold the maximum.
9751 // It will then copy *all* of this into the return buffer area we allocate. So make sure
9752 // we allocate at least that much.
9753 #ifdef ENREGISTERED_RETURNTYPE_MAXSIZE
9754 retBuffSize = max(retTypeSz, ENREGISTERED_RETURNTYPE_MAXSIZE);
9755 #endif // ENREGISTERED_RETURNTYPE_MAXSIZE
9756 pLargeStructRetVal = (BYTE*)_alloca(retBuffSize);
9757 // Clear this in case a GC happens.
9758 for (unsigned i = 0; i < retTypeSz; i++) pLargeStructRetVal[i] = 0;
9759 // Register this as location needing GC.
9760 m_structRetValTempSpace = pLargeStructRetVal;
9761 // Set it as the return buffer.
9762 args[curArgSlot] = PtrToArgSlot(pLargeStructRetVal);
9766 // Clear this in case a GC happens.
9767 smallStructRetVal = 0;
9768 // Register this as location needing GC.
9769 m_structRetValTempSpace = &smallStructRetVal;
9770 // Set it as the return buffer.
9771 args[curArgSlot] = PtrToArgSlot(&smallStructRetVal);
9773 m_structRetValITPtr = &retTypeIt;
9774 argTypes[curArgSlot] = InterpreterType(CORINFO_TYPE_NATIVEINT);
9779 // The struct type might "normalize" to a primitive type.
9780 if (retTypeClsHnd == NULL)
9782 retTypeIt = InterpreterType(CEEInfo::asCorInfoType(ms.GetReturnTypeNormalized()));
9786 retTypeIt = InterpreterType(&m_interpCeeInfo, retTypeClsHnd);
9791 if (((sigInfo.callConv & CORINFO_CALLCONV_VARARG) != 0) && sigInfo.isVarArg())
9793 _ASSERTE(vaSigCookie != nullptr);
9794 args[curArgSlot] = PtrToArgSlot(vaSigCookie);
9795 argTypes[curArgSlot] = InterpreterType(CORINFO_TYPE_NATIVEINT);
9801 if (sigInfo.hasTypeArg())
9804 // We will find the instantiating stub for the method, and call that instead.
9805 CORINFO_SIG_INFO sigInfoFull;
9806 Instantiation methodInst = methToCall->GetMethodInstantiation();
9807 BOOL fNeedUnboxingStub = virtualCall && TypeHandle(exactClass).IsValueType() && methToCall->IsVirtual();
9808 methToCall = MethodDesc::FindOrCreateAssociatedMethodDesc(methToCall,
9809 TypeHandle(exactClass).GetMethodTable(), fNeedUnboxingStub, methodInst, FALSE, TRUE);
9810 m_interpCeeInfo.getMethodSig(CORINFO_METHOD_HANDLE(methToCall), &sigInfoFull, nullptr);
9811 sigInfo.retTypeClass = sigInfoFull.retTypeClass;
9812 sigInfo.numArgs = sigInfoFull.numArgs;
9813 sigInfo.callConv = sigInfoFull.callConv;
9814 sigInfo.retType = sigInfoFull.retType;
9817 if (sigInfo.hasTypeArg())
9819 // If we still have a type argument, we're calling an ArrayOpStub and need to pass the array TypeHandle.
9820 _ASSERTE(methToCall->IsArray());
9822 args[curArgSlot] = PtrToArgSlot(exactClass);
9823 argTypes[curArgSlot] = InterpreterType(CORINFO_TYPE_NATIVEINT);
9828 // Now we do the non-this arguments.
9829 size_t largeStructSpaceToPop = 0;
9830 for (; arg < totalArgsOnILStack; arg++)
9832 InterpreterType argIt = OpStackTypeGet(argsBase + arg);
9833 size_t sz = OpStackTypeGet(argsBase + arg).Size(&m_interpCeeInfo);
9837 args[curArgSlot] = OpStackGet<INT8>(argsBase + arg);
9840 args[curArgSlot] = OpStackGet<INT16>(argsBase + arg);
9843 args[curArgSlot] = OpStackGet<INT32>(argsBase + arg);
9849 void* srcPtr = OpStackGet<void*>(argsBase + arg);
9850 args[curArgSlot] = PtrToArgSlot(srcPtr);
9851 if (!IsInLargeStructLocalArea(srcPtr))
9852 largeStructSpaceToPop += sz;
9856 args[curArgSlot] = OpStackGet<INT64>(argsBase + arg);
9860 argTypes[curArgSlot] = argIt;
9866 args[curArgSlot] = PtrToArgSlot(ctorData.pArg3);
9867 argTypes[curArgSlot] = InterpreterType(CORINFO_TYPE_NATIVEINT);
9872 args[curArgSlot] = PtrToArgSlot(ctorData.pArg4);
9873 argTypes[curArgSlot] = InterpreterType(CORINFO_TYPE_NATIVEINT);
9878 args[curArgSlot] = PtrToArgSlot(ctorData.pArg5);
9879 argTypes[curArgSlot] = InterpreterType(CORINFO_TYPE_NATIVEINT);
9883 // CYCLE PROFILE: AFTER ARG PROCESSING.
9885 Thread* thr = GetThread();
9887 Object** thisArgHnd = NULL;
9888 ARG_SLOT nullThisArg = NULL;
9889 if (sigInfo.hasThis())
9891 if (m_callThisArg != NULL)
9893 if (size_t(m_callThisArg) == 0x1)
9895 thisArgHnd = reinterpret_cast<Object**>(&nullThisArg);
9899 thisArgHnd = reinterpret_cast<Object**>(&m_callThisArg);
9904 thisArgHnd = OpStackGetAddr<Object*>(argsBase);
9908 Frame* topFrameBefore = thr->GetFrame();
9910 #if INTERP_ILCYCLE_PROFILE
9911 unsigned __int64 startCycles;
9912 #endif // INTERP_ILCYCLE_PROFILE
9914 // CYCLE PROFILE: BEFORE MDCS CREATION.
9916 PCODE target = NULL;
9917 MethodDesc *exactMethToCall = methToCall;
9919 // Determine the target of virtual calls.
9920 if (virtualCall && methToCall->IsVtableMethod())
9924 _ASSERTE(thisArgHnd != NULL);
9925 OBJECTREF objRef = ObjectToOBJECTREF(*thisArgHnd);
9926 GCPROTECT_BEGIN(objRef);
9927 pCode = methToCall->GetMultiCallableAddrOfVirtualizedCode(&objRef, methToCall->GetMethodTable());
9930 exactMethToCall = Entry2MethodDesc(pCode, objRef->GetMethodTable());
9933 // Compile the target in advance of calling.
9934 if (exactMethToCall->IsPointingToPrestub())
9936 MethodTable* dispatchingMT = NULL;
9937 if (exactMethToCall->IsVtableMethod())
9939 _ASSERTE(thisArgHnd != NULL);
9940 dispatchingMT = (*thisArgHnd)->GetMethodTable();
9943 target = exactMethToCall->DoPrestub(dispatchingMT);
9947 target = exactMethToCall->GetMethodEntryPoint();
9950 // If we're interpreting the method, simply call it directly.
9951 if (InterpretationStubToMethodInfo(target) == exactMethToCall)
9953 _ASSERTE(!exactMethToCall->IsILStub());
9954 InterpreterMethodInfo* methInfo = MethodHandleToInterpreterMethInfoPtr(CORINFO_METHOD_HANDLE(exactMethToCall));
9955 _ASSERTE(methInfo != NULL);
9956 #if INTERP_ILCYCLE_PROFILE
9957 bool b = CycleTimer::GetThreadCyclesS(&startCycles); _ASSERTE(b);
9958 #endif // INTERP_ILCYCLE_PROFILE
9959 retVals[0] = InterpretMethodBody(methInfo, true, reinterpret_cast<BYTE*>(args), NULL);
9960 pCscd = NULL; // Nothing to cache.
9964 MetaSig msig(exactMethToCall);
9965 // We've already resolved the virtual call target above, so there is no need to do it again.
9966 MethodDescCallSite mdcs(exactMethToCall, &msig, target);
9967 #if INTERP_ILCYCLE_PROFILE
9968 bool b = CycleTimer::GetThreadCyclesS(&startCycles); _ASSERTE(b);
9969 #endif // INTERP_ILCYCLE_PROFILE
9971 #if defined(UNIX_AMD64_ABI)
9972 mdcs.CallTargetWorker(args, retVals, HasTwoSlotBuf ? 16: 8);
9974 mdcs.CallTargetWorker(args, retVals, 8);
9979 // We will do a check at the end to determine whether to cache pCscd, to set
9980 // to NULL here to make sure we don't.
9985 // For now, we won't cache virtual calls to virtual methods.
9986 // TODO: fix this somehow.
9987 if (virtualCall && (callInfoPtr->methodFlags & CORINFO_FLG_VIRTUAL)) doNotCache = true;
9989 if (s_InterpreterUseCaching && !doNotCache)
9991 // We will add this to the cache later; the locking provokes a GC,
9992 // and "retVal" is vulnerable.
9993 pCscd = new CallSiteCacheData(exactMethToCall, sigInfo);
9997 #if INTERP_ILCYCLE_PROFILE
9998 unsigned __int64 endCycles;
9999 bool b = CycleTimer::GetThreadCyclesS(&endCycles); _ASSERTE(b);
10000 m_exemptCycles += (endCycles - startCycles);
10001 #endif // INTERP_ILCYCLE_PROFILE
10003 // retVal is now vulnerable.
10006 // Some managed methods, believe it or not, can push capital-F Frames on the Frame chain.
10007 // If this happens, executing the EX_CATCH below will pop it, which is bad.
10008 // So detect that case, pop the explicitly-pushed frame, and push it again after the EX_CATCH.
10009 // (Asserting that there is only 1 such frame!)
10010 if (thr->GetFrame() != topFrameBefore)
10012 ilPushedFrame = thr->GetFrame();
10013 if (ilPushedFrame != NULL)
10015 ilPushedFrame->Pop(thr);
10016 if (thr->GetFrame() != topFrameBefore)
10018 // This wasn't an IL-pushed frame, so restore.
10019 ilPushedFrame->Push(thr);
10020 ilPushedFrame = NULL;
10026 // retVal is still vulnerable.
10031 // At this point, the call has happened successfully. We can delete the arguments from the operand stack.
10032 m_curStackHt -= totalArgsOnILStack;
10033 // We've already checked that "largeStructSpaceToPop
10034 LargeStructOperandStackPop(largeStructSpaceToPop, NULL);
10036 if (size_t(m_callThisArg) == 0x1)
10038 _ASSERTE_MSG(sigInfo.retType == CORINFO_TYPE_VOID, "Constructor for var-sized object becomes factory method that returns result.");
10039 OpStackSet<Object*>(m_curStackHt, reinterpret_cast<Object*>(retVals[0]));
10040 OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_CLASS));
10043 else if (sigInfo.retType != CORINFO_TYPE_VOID)
10045 switch (sigInfo.retType)
10047 case CORINFO_TYPE_BOOL:
10048 case CORINFO_TYPE_BYTE:
10049 OpStackSet<INT32>(m_curStackHt, static_cast<INT8>(retVals[0]));
10051 case CORINFO_TYPE_UBYTE:
10052 OpStackSet<UINT32>(m_curStackHt, static_cast<UINT8>(retVals[0]));
10054 case CORINFO_TYPE_SHORT:
10055 OpStackSet<INT32>(m_curStackHt, static_cast<INT16>(retVals[0]));
10057 case CORINFO_TYPE_USHORT:
10058 case CORINFO_TYPE_CHAR:
10059 OpStackSet<UINT32>(m_curStackHt, static_cast<UINT16>(retVals[0]));
10061 case CORINFO_TYPE_INT:
10062 case CORINFO_TYPE_UINT:
10063 case CORINFO_TYPE_FLOAT:
10064 OpStackSet<INT32>(m_curStackHt, static_cast<INT32>(retVals[0]));
10066 case CORINFO_TYPE_LONG:
10067 case CORINFO_TYPE_ULONG:
10068 case CORINFO_TYPE_DOUBLE:
10069 OpStackSet<INT64>(m_curStackHt, static_cast<INT64>(retVals[0]));
10071 case CORINFO_TYPE_NATIVEINT:
10072 case CORINFO_TYPE_NATIVEUINT:
10073 case CORINFO_TYPE_PTR:
10074 OpStackSet<NativeInt>(m_curStackHt, static_cast<NativeInt>(retVals[0]));
10076 case CORINFO_TYPE_CLASS:
10077 OpStackSet<Object*>(m_curStackHt, reinterpret_cast<Object*>(retVals[0]));
10079 case CORINFO_TYPE_BYREF:
10080 OpStackSet<void*>(m_curStackHt, reinterpret_cast<void*>(retVals[0]));
10082 case CORINFO_TYPE_VALUECLASS:
10083 case CORINFO_TYPE_REFANY:
10085 // We must be careful here to write the value, the type, and update the stack height in one
10086 // sequence that has no COOP->PREEMP transitions in it, so no GC's happen until the value
10087 // is protected by being fully "on" the operandStack.
10088 #if defined(HOST_ARM)
10089 // Is the return type an HFA?
10090 if (HFAReturnArgSlots > 0)
10092 ARG_SLOT* hfaRetBuff = args - HFAReturnArgSlots;
10093 if (retTypeIt.IsLargeStruct(&m_interpCeeInfo))
10095 void* dst = LargeStructOperandStackPush(retTypeSz);
10096 memcpy(dst, hfaRetBuff, retTypeSz);
10097 OpStackSet<void*>(m_curStackHt, dst);
10101 memcpy(OpStackGetAddr<UINT64>(m_curStackHt), hfaRetBuff, retTypeSz);
10105 #endif // defined(HOST_ARM)
10106 if (pLargeStructRetVal != NULL)
10108 _ASSERTE(hasRetBuffArg);
10109 void* dst = LargeStructOperandStackPush(retTypeSz);
10110 CopyValueClassUnchecked(dst, pLargeStructRetVal, GetMethodTableFromClsHnd(retTypeClsHnd));
10111 OpStackSet<void*>(m_curStackHt, dst);
10113 else if (hasRetBuffArg)
10115 OpStackSet<INT64>(m_curStackHt, GetSmallStructValue(&smallStructRetVal, retTypeSz));
10117 #if defined(UNIX_AMD64_ABI)
10118 else if (HasTwoSlotBuf)
10120 void* dst = LargeStructOperandStackPush(16);
10121 CopyValueClassUnchecked(dst, retVals, GetMethodTableFromClsHnd(retTypeClsHnd));
10122 OpStackSet<void*>(m_curStackHt, dst);
10127 OpStackSet<UINT64>(m_curStackHt, retVals[0]);
10129 // We already created this interpreter type, so use it.
10130 OpStackTypeSet(m_curStackHt, retTypeIt.StackNormalize());
10134 // In the value-class case, the call might have used a ret buff, which we would have registered for GC scanning.
10135 // Make sure it's unregistered.
10136 m_structRetValITPtr = NULL;
10140 NYI_INTERP("Unhandled return type");
10143 _ASSERTE_MSG(m_structRetValITPtr == NULL, "Invariant.");
10145 // The valueclass case is handled fully in the switch above.
10146 if (sigInfo.retType != CORINFO_TYPE_VALUECLASS &&
10147 sigInfo.retType != CORINFO_TYPE_REFANY)
10149 OpStackTypeSet(m_curStackHt, InterpreterType(sigInfo.retType).StackNormalize());
10155 // Originally, this assertion was in the ValueClass case above, but it does a COOP->PREEMP
10156 // transition, and therefore causes a GC, and we're GCX_FORBIDden from doing a GC while retVal
10157 // is vulnerable. So, for completeness, do it here.
10158 _ASSERTE(sigInfo.retType != CORINFO_TYPE_VALUECLASS || retTypeIt == InterpreterType(&m_interpCeeInfo, retTypeClsHnd));
10160 // If we created a cached call site, cache it now (when it's safe to take a GC).
10161 if (pCscd != NULL && !doNotCache)
10163 CacheCallInfo(iloffset, pCscd);
10166 m_callThisArg = NULL;
10168 // If the call we just made pushed a Frame, we popped it above, so re-push it.
10169 if (ilPushedFrame != NULL) ilPushedFrame->Push();
10172 #include "metadata.h"
// CallI: interpreter implementation of the IL "calli" opcode.
// Resolves the standalone-signature token that follows the opcode, marshals
// the operand-stack arguments (plus optional this / ret-buff / vararg-cookie
// slots) into an ARG_SLOT array, then invokes the target: directly through
// InterpretMethodBody when the function pointer is one of our interpretation
// stubs, otherwise via a MethodDescCallSite built around a borrowed
// MethodDesc of matching staticness. Afterwards the arguments are popped and
// the (stack-normalized) return value is pushed.
// NOTE(review): this listing is elided (the leading numbers are original file
// line numbers); braces, breaks and some statements between visible lines are
// not shown.
10174 void Interpreter::CallI()
10176 #if INTERP_DYNAMIC_CONTRACTS
10183 // Dynamic contract occupies too much stack.
10184 STATIC_CONTRACT_THROWS;
10185 STATIC_CONTRACT_GC_TRIGGERS;
10186 STATIC_CONTRACT_MODE_COOPERATIVE;
10190 InterlockedIncrement(&s_totalInterpCalls);
10191 #endif // INTERP_TRACING
// The calli operand is a 4-byte standalone-sig token following the opcode byte.
10193 unsigned tok = getU4LittleEndian(m_ILCodePtr + sizeof(BYTE));
10195 CORINFO_SIG_INFO sigInfo;
10199 m_interpCeeInfo.findSig(m_methInfo->m_module, tok, GetPreciseGenericsContext(), &sigInfo);
10202 // I'm assuming that a calli can't depend on the generics context, so the simple form of type
10203 // context should suffice?
10204 MethodDesc* pMD = reinterpret_cast<MethodDesc*>(m_methInfo->m_method);
10205 SigTypeContext sigTypeCtxt(pMD);
10206 MetaSig mSig(sigInfo.pSig, sigInfo.cbSig, GetModule(sigInfo.scope), &sigTypeCtxt);
10208 unsigned totalSigArgs = sigInfo.totalILArgs();
10210 // Note that "totalNativeArgs()" includes space for ret buff arg.
// One slot beyond the IL args, used below for the ret buff or vararg cookie as needed.
10211 unsigned nSlots = totalSigArgs + 1;
10212 if ((sigInfo.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
10217 // Make sure that the operand stack has the required number of arguments.
10218 // (Note that this is IL args, not native.)
10221 // The total number of arguments on the IL stack. Initially we assume that all the IL arguments
10222 // the callee expects are on the stack, but may be adjusted downwards if the "this" argument
10223 // is provided by an allocation (the call is to a constructor).
10224 unsigned totalArgsOnILStack = totalSigArgs;
// Small fixed buffers cover the common case; fall back to _alloca for big arg counts.
10226 const unsigned LOCAL_ARG_SLOTS = 8;
10227 ARG_SLOT localArgs[LOCAL_ARG_SLOTS];
10228 InterpreterType localArgTypes[LOCAL_ARG_SLOTS];
10231 InterpreterType* argTypes;
10232 if (nSlots <= LOCAL_ARG_SLOTS)
10234 args = &localArgs[0];
10235 argTypes = &localArgTypes[0];
10239 args = (ARG_SLOT*)_alloca(nSlots * sizeof(ARG_SLOT));
10240 argTypes = (InterpreterType*)_alloca(nSlots * sizeof(InterpreterType));
10242 // Make sure that we don't scan any of these until we overwrite them with
10243 // the real types of the arguments.
10244 InterpreterType undefIt(CORINFO_TYPE_UNDEF);
10245 for (unsigned i = 0; i < nSlots; i++)
10247 argTypes[i] = undefIt;
10250 // GC-protect the argument array (as byrefs).
10252 m_argsSize = nSlots;
10253 m_argTypes = argTypes;
10255 // This is the index into the "args" array (where we copy the value to).
10256 int curArgSlot = 0;
10258 // The operand stack index of the first IL argument.
10259 unsigned totalArgPositions = totalArgsOnILStack + 1; // + 1 for the ftn argument.
10260 _ASSERTE(m_curStackHt >= totalArgPositions);
10261 int argsBase = m_curStackHt - totalArgPositions;
10263 // Current on-stack argument index.
10266 if (sigInfo.hasThis())
10268 args[curArgSlot] = PtrToArgSlot(OpStackGet<void*>(argsBase + arg));
10269 argTypes[curArgSlot] = OpStackTypeGet(argsBase + arg);
10270 // AV -> NullRef translation is NYI for the interpreter,
10271 // so we should manually check and throw the correct exception.
10272 ThrowOnInvalidPointer((void*)args[curArgSlot]);
10277 // This is the argument slot that will be used to hold the return value.
10278 ARG_SLOT retVal = 0;
10280 // If the return type is a structure, then these will be initialized.
10281 CORINFO_CLASS_HANDLE retTypeClsHnd = NULL;
10282 InterpreterType retTypeIt;
10283 size_t retTypeSz = 0;
10285 // If non-null, space allocated to hold a large struct return value. Should be deleted later.
10286 // (I could probably optimize this pop all the arguments first, then allocate space for the return value
10287 // on the large structure operand stack, and pass a pointer directly to that space, avoiding the extra
10288 // copy we have below. But this seemed more expedient, and this should be a pretty rare case.)
10289 BYTE* pLargeStructRetVal = NULL;
10291 // If there's a "GetFlag<Flag_hasRetBuffArg>()" struct return value, it will be stored in this variable if it fits,
10292 // otherwise, we'll dynamically allocate memory for it.
10293 ARG_SLOT smallStructRetVal = 0;
10295 // We should have no return buffer temp space registered here...unless this is a constructor, in which
10296 // case it will return void. In particular, if the return type VALUE_CLASS, then this should be NULL.
10297 _ASSERTE_MSG(sigInfo.retType == CORINFO_TYPE_VOID || m_structRetValITPtr == NULL, "Invariant.");
10299 // Is the return value a struct with a ret buff?
10300 bool hasRetBuffArg = false;
10301 if (sigInfo.retType == CORINFO_TYPE_VALUECLASS)
10303 retTypeClsHnd = sigInfo.retTypeClass;
10304 retTypeIt = InterpreterType(&m_interpCeeInfo, retTypeClsHnd);
10305 retTypeSz = retTypeIt.Size(&m_interpCeeInfo);
10307 #if defined(UNIX_AMD64_ABI)
10309 #elif defined(HOST_AMD64)
10310 // TODO: Investigate why HasRetBuffArg can't be used. pMD is a hacked up MD for the
10311 // calli because it belongs to the current method. Doing what the JIT does.
10312 hasRetBuffArg = (retTypeSz > sizeof(void*)) || ((retTypeSz & (retTypeSz - 1)) != 0);
10314 hasRetBuffArg = !!pMD->HasRetBuffArg();
10318 if (retTypeIt.IsLargeStruct(&m_interpCeeInfo))
10320 size_t retBuffSize = retTypeSz;
10321 // If the target architecture can sometimes return a struct in several registers,
10322 // MethodDescCallSite will reserve a return value array big enough to hold the maximum.
10323 // It will then copy *all* of this into the return buffer area we allocate. So make sure
10324 // we allocate at least that much.
10325 #ifdef ENREGISTERED_RETURNTYPE_MAXSIZE
10326 retBuffSize = max(retTypeSz, ENREGISTERED_RETURNTYPE_MAXSIZE);
10327 #endif // ENREGISTERED_RETURNTYPE_MAXSIZE
10328 pLargeStructRetVal = (BYTE*)_alloca(retBuffSize);
10330 // Clear this in case a GC happens.
10331 for (unsigned i = 0; i < retTypeSz; i++)
10333 pLargeStructRetVal[i] = 0;
10336 // Register this as location needing GC.
10337 m_structRetValTempSpace = pLargeStructRetVal;
10339 // Set it as the return buffer.
10340 args[curArgSlot] = PtrToArgSlot(pLargeStructRetVal);
10344 // Clear this in case a GC happens.
10345 smallStructRetVal = 0;
10347 // Register this as location needing GC.
10348 m_structRetValTempSpace = &smallStructRetVal;
10350 // Set it as the return buffer.
10351 args[curArgSlot] = PtrToArgSlot(&smallStructRetVal);
10353 m_structRetValITPtr = &retTypeIt;
10354 argTypes[curArgSlot] = InterpreterType(CORINFO_TYPE_NATIVEINT);
// Vararg calli: pass the VASig cookie for this signature as a hidden argument.
10359 if ((sigInfo.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
10361 Module* module = GetModule(sigInfo.scope);
10362 CORINFO_VARARGS_HANDLE handle = CORINFO_VARARGS_HANDLE(module->GetVASigCookie(Signature(sigInfo.pSig, sigInfo.cbSig)));
10363 args[curArgSlot] = PtrToArgSlot(handle);
10364 argTypes[curArgSlot] = InterpreterType(CORINFO_TYPE_NATIVEINT);
10368 // Now we do the non-this arguments.
10369 size_t largeStructSpaceToPop = 0;
10370 for (; arg < totalArgsOnILStack; arg++)
10372 InterpreterType argIt = OpStackTypeGet(argsBase + arg);
10373 size_t sz = OpStackTypeGet(argsBase + arg).Size(&m_interpCeeInfo);
10377 args[curArgSlot] = OpStackGet<INT8>(argsBase + arg);
10380 args[curArgSlot] = OpStackGet<INT16>(argsBase + arg);
10383 args[curArgSlot] = OpStackGet<INT32>(argsBase + arg);
// Large-struct arguments are passed by pointer; track how much large-struct
// stack space must be popped after the call (unless the value lives in the
// large-struct *local* area, which is not popped).
10389 void* srcPtr = OpStackGet<void*>(argsBase + arg);
10390 args[curArgSlot] = PtrToArgSlot(srcPtr);
10391 if (!IsInLargeStructLocalArea(srcPtr))
10393 largeStructSpaceToPop += sz;
10398 args[curArgSlot] = OpStackGet<INT64>(argsBase + arg);
10402 argTypes[curArgSlot] = argIt;
10406 // Finally, we get the code pointer.
10407 unsigned ftnInd = m_curStackHt - 1;
10409 CorInfoType ftnType = OpStackTypeGet(ftnInd).ToCorInfoType();
10410 _ASSERTE(ftnType == CORINFO_TYPE_NATIVEINT
10411 || ftnType == CORINFO_TYPE_INT
10412 || ftnType == CORINFO_TYPE_LONG);
10415 PCODE ftnPtr = OpStackGet<PCODE>(ftnInd);
10418 MethodDesc* methToCall;
10419 // If we're interpreting the target, simply call it directly.
10420 if ((methToCall = InterpretationStubToMethodInfo((PCODE)ftnPtr)) != NULL)
10422 InterpreterMethodInfo* methInfo = MethodHandleToInterpreterMethInfoPtr(CORINFO_METHOD_HANDLE(methToCall));
10423 _ASSERTE(methInfo != NULL);
10424 #if INTERP_ILCYCLE_PROFILE
10425 bool b = CycleTimer::GetThreadCyclesS(&startCycles); _ASSERTE(b);
10426 #endif // INTERP_ILCYCLE_PROFILE
10427 retVal = InterpretMethodBody(methInfo, true, reinterpret_cast<BYTE*>(args), NULL);
10431 // This is not a great workaround. For the most part, we really don't care what method desc we're using, since
10432 // we're providing the signature and function pointer -- other than that it's well-formed and "activated."
10433 // And also, one more thing: whether it is static or not. Which is actually determined by the signature.
10434 // So we query the signature we have to determine whether we need a static or instance MethodDesc, and then
10435 // use one of the appropriate staticness that happens to be sitting around in global variables. For static
10436 // we use "RuntimeHelpers.PrepareConstrainedRegions", for instance we use the default constructor of "Object."
10437 // TODO: make this cleaner -- maybe invent a couple of empty methods with instructive names, just for this purpose.
10439 if (mSig.HasThis())
10441 pMD = g_pObjectFinalizerMD;
10445 pMD = CoreLibBinder::GetMethod(METHOD__INTERLOCKED__COMPARE_EXCHANGE_OBJECT); // A random static method.
10447 MethodDescCallSite mdcs(pMD, &mSig, ftnPtr);
10449 // If the current method being interpreted is an IL stub, we're calling native code, so
10450 // change the GC mode. (We'll only do this at the call if the calling convention turns out
10451 // to be a managed calling convention.)
10452 MethodDesc* pStubContextMD = reinterpret_cast<MethodDesc*>(m_stubContext);
10453 bool transitionToPreemptive = (pStubContextMD != NULL && !pStubContextMD->IsIL());
10454 mdcs.CallTargetWorker(args, &retVal, sizeof(retVal), transitionToPreemptive);
10456 // TODO The code above triggers assertion at threads.cpp:6861:
10457 // _ASSERTE(thread->PreemptiveGCDisabled()); // Should have been in managed code
10458 // The workaround will likely break more things than what it is fixing:
10459 // just do not make transition to preemptive GC for now.
10460 mdcs.CallTargetWorker(args, &retVal, sizeof(retVal));
10463 // retVal is now vulnerable.
10467 // retVal is still vulnerable.
10472 // At this point, the call has happened successfully. We can delete the arguments from the operand stack.
10473 m_curStackHt -= totalArgPositions;
10475 // We've already checked that "largeStructSpaceToPop" matches what was pushed for the args, so pop it now.
10476 LargeStructOperandStackPop(largeStructSpaceToPop, NULL);
// m_callThisArg == 0x1 is the sentinel for a var-sized-object constructor call,
// which behaves as a factory method: push the result as an object reference.
10478 if (size_t(m_callThisArg) == 0x1)
10480 _ASSERTE_MSG(sigInfo.retType == CORINFO_TYPE_VOID, "Constructor for var-sized object becomes factory method that returns result.");
10481 OpStackSet<Object*>(m_curStackHt, reinterpret_cast<Object*>(retVal));
10482 OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_CLASS));
// Push the return value, widening/reinterpreting the raw ARG_SLOT per the
// declared return type. (break statements between cases are elided here.)
10485 else if (sigInfo.retType != CORINFO_TYPE_VOID)
10487 switch (sigInfo.retType)
10489 case CORINFO_TYPE_BOOL:
10490 case CORINFO_TYPE_BYTE:
10491 OpStackSet<INT32>(m_curStackHt, static_cast<INT8>(retVal));
10493 case CORINFO_TYPE_UBYTE:
10494 OpStackSet<UINT32>(m_curStackHt, static_cast<UINT8>(retVal));
10496 case CORINFO_TYPE_SHORT:
10497 OpStackSet<INT32>(m_curStackHt, static_cast<INT16>(retVal));
10499 case CORINFO_TYPE_USHORT:
10500 case CORINFO_TYPE_CHAR:
10501 OpStackSet<UINT32>(m_curStackHt, static_cast<UINT16>(retVal));
10503 case CORINFO_TYPE_INT:
10504 case CORINFO_TYPE_UINT:
10505 case CORINFO_TYPE_FLOAT:
10506 OpStackSet<INT32>(m_curStackHt, static_cast<INT32>(retVal));
10508 case CORINFO_TYPE_LONG:
10509 case CORINFO_TYPE_ULONG:
10510 case CORINFO_TYPE_DOUBLE:
10511 OpStackSet<INT64>(m_curStackHt, static_cast<INT64>(retVal));
10513 case CORINFO_TYPE_NATIVEINT:
10514 case CORINFO_TYPE_NATIVEUINT:
10515 case CORINFO_TYPE_PTR:
10516 OpStackSet<NativeInt>(m_curStackHt, static_cast<NativeInt>(retVal));
10518 case CORINFO_TYPE_CLASS:
10519 OpStackSet<Object*>(m_curStackHt, reinterpret_cast<Object*>(retVal));
10521 case CORINFO_TYPE_VALUECLASS:
10523 // We must be careful here to write the value, the type, and update the stack height in one
10524 // sequence that has no COOP->PREEMP transitions in it, so no GC's happen until the value
10525 // is protected by being fully "on" the operandStack.
10526 if (pLargeStructRetVal != NULL)
10528 _ASSERTE(hasRetBuffArg);
10529 void* dst = LargeStructOperandStackPush(retTypeSz);
10530 CopyValueClassUnchecked(dst, pLargeStructRetVal, GetMethodTableFromClsHnd(retTypeClsHnd));
10531 OpStackSet<void*>(m_curStackHt, dst);
10533 else if (hasRetBuffArg)
10535 OpStackSet<INT64>(m_curStackHt, GetSmallStructValue(&smallStructRetVal, retTypeSz));
10539 OpStackSet<UINT64>(m_curStackHt, retVal);
10541 // We already created this interpreter type, so use it.
10542 OpStackTypeSet(m_curStackHt, retTypeIt.StackNormalize());
10545 // In the value-class case, the call might have used a ret buff, which we would have registered for GC scanning.
10546 // Make sure it's unregistered.
10547 m_structRetValITPtr = NULL;
10551 NYI_INTERP("Unhandled return type");
10554 _ASSERTE_MSG(m_structRetValITPtr == NULL, "Invariant.");
10556 // The valueclass case is handled fully in the switch above.
10557 if (sigInfo.retType != CORINFO_TYPE_VALUECLASS)
10559 OpStackTypeSet(m_curStackHt, InterpreterType(sigInfo.retType).StackNormalize());
10565 // Originally, this assertion was in the ValueClass case above, but it does a COOP->PREEMP
10566 // transition, and therefore causes a GC, and we're GCX_FORBIDden from doing a GC while retVal
10567 // is vulnerable. So, for completeness, do it here.
10568 _ASSERTE(sigInfo.retType != CORINFO_TYPE_VALUECLASS || retTypeIt == InterpreterType(&m_interpCeeInfo, retTypeClsHnd));
// Recognizes "dead simple" property getters whose IL exactly matches one of a
// few shapes, so callers can shortcut them to a raw field load:
//   static:   ldsfld <fld>; ret                     (6 bytes of IL)
//   instance: ldarg.0; ldfld <fld>; ret             (7 bytes, OPT IL)
//   instance: nop; ldarg.0; ldfld; stloc.0; br 0; ldloc.0; ret (0xc bytes, DBG IL)
// On a successful instance match, *offsetOfLd receives the IL offset of the
// ldfld. Returns false for anything else, including methods that take a
// generic-context parameter.
// NOTE(review): this listing is elided — the "return false" arms of the
// opcode checks and the codePtr advances between checks are not shown.
10574 bool Interpreter::IsDeadSimpleGetter(CEEInfo* info, MethodDesc* pMD, size_t* offsetOfLd)
10582 DWORD flags = pMD->GetAttrs();
10583 CORINFO_METHOD_INFO methInfo;
10586 bool b = info->getMethodInfo(CORINFO_METHOD_HANDLE(pMD), &methInfo, NULL);
10587 if (!b) return false;
10590 // If the method takes a generic type argument, it's not dead simple...
10591 if (methInfo.args.callConv & CORINFO_CALLCONV_PARAMTYPE) return false;
10593 BYTE* codePtr = methInfo.ILCode;
10595 if (flags & CORINFO_FLG_STATIC)
10597 if (methInfo.ILCodeSize != 6)
10599 if (*codePtr != CEE_LDSFLD)
10601 _ASSERTE(ILOffsetOfLdSFldInDeadSimpleStaticGetter == 0);
10604 return (*codePtr == CEE_RET);
10608 // We handle two forms, one for DBG IL, and one for OPT IL.
10610 if (methInfo.ILCodeSize == 0xc)
10612 else if (methInfo.ILCodeSize != 7)
10617 if (*codePtr != CEE_NOP)
10621 if (*codePtr != CEE_LDARG_0)
10624 if (*codePtr != CEE_LDFLD)
10626 *offsetOfLd = codePtr - methInfo.ILCode;
10627 _ASSERTE((dbg && ILOffsetOfLdFldInDeadSimpleInstanceGetterDbg == *offsetOfLd)
10628 || (!dbg && ILOffsetOfLdFldInDeadSimpleInstanceGetterOpt == *offsetOfLd));
// The remaining checks apply only to the DBG-IL form (stloc.0/br/ldloc.0).
10632 if (*codePtr != CEE_STLOC_0)
10635 if (*codePtr != CEE_BR)
10637 if (getU4LittleEndian(codePtr + 1) != 0)
10640 if (*codePtr != CEE_LDLOC_0)
10643 return (*codePtr == CEE_RET);
// Intrinsic for String.Length: replaces the string reference on top of the
// operand stack with its INT32 length. Throws NullReferenceException for a
// null reference and reports a verification error if the operand is not a
// System.String. (Elided listing: braces and returns after the error paths
// are not shown.)
10647 void Interpreter::DoStringLength()
10655 _ASSERTE(m_curStackHt > 0);
10656 unsigned ind = m_curStackHt - 1;
10659 CorInfoType stringCIT = OpStackTypeGet(ind).ToCorInfoType();
10660 if (stringCIT != CORINFO_TYPE_CLASS)
10662 VerificationError("StringLength called on non-string.");
10666 Object* obj = OpStackGet<Object*>(ind);
10670 ThrowNullPointerException();
10674 if (obj->GetMethodTable() != g_pStringClass)
10676 VerificationError("StringLength called on non-string.");
// Replace the reference in place; stack height is unchanged.
10680 StringObject* str = reinterpret_cast<StringObject*>(obj);
10681 INT32 len = str->GetStringLength();
10682 OpStackSet<INT32>(ind, len);
10683 OpStackTypeSet(ind, InterpreterType(CORINFO_TYPE_INT));
// Intrinsic for String.get_Chars(int): pops a string reference and an INT32
// index, and pushes the UINT16 character at that index (widened to INT on the
// operand stack). Throws NullReferenceException for a null string and
// IndexOutOfRangeException for a negative or too-large index; reports a
// verification error for wrong operand types.
10686 void Interpreter::DoStringGetChar()
10694 _ASSERTE(m_curStackHt >= 2);
10695 unsigned strInd = m_curStackHt - 2;
10696 unsigned indexInd = strInd + 1;
10699 CorInfoType stringCIT = OpStackTypeGet(strInd).ToCorInfoType();
10700 if (stringCIT != CORINFO_TYPE_CLASS)
10702 VerificationError("StringGetChar called on non-string.");
10706 Object* obj = OpStackGet<Object*>(strInd);
10710 ThrowNullPointerException();
10714 if (obj->GetMethodTable() != g_pStringClass)
10716 VerificationError("StringGetChar called on non-string.");
10720 StringObject* str = reinterpret_cast<StringObject*>(obj);
10723 CorInfoType indexCIT = OpStackTypeGet(indexInd).ToCorInfoType();
10724 if (indexCIT != CORINFO_TYPE_INT)
10726 VerificationError("StringGetChar needs integer index.");
// Bounds check: reject negative indices and indices >= length.
10730 INT32 ind = OpStackGet<INT32>(indexInd);
10732 ThrowArrayBoundsException();
10733 UINT32 uind = static_cast<UINT32>(ind);
10734 if (uind >= str->GetStringLength())
10735 ThrowArrayBoundsException();
// Read the character directly from the string buffer; GC is forbidden while
// we hold the raw interior pointer.
10738 GCX_FORBID(); // str is vulnerable.
10739 UINT16* dataPtr = reinterpret_cast<UINT16*>(reinterpret_cast<INT8*>(str) + StringObject::GetBufferOffset());
10740 UINT32 filledChar = dataPtr[ind];
10741 OpStackSet<UINT32>(strInd, filledChar);
10742 OpStackTypeSet(strInd, InterpreterType(CORINFO_TYPE_INT));
// Net effect: two operands popped, one pushed.
10743 m_curStackHt = indexInd;
// Intrinsic for Type.GetTypeFromHandle: checks that the top-of-stack value is
// a RuntimeType object and retypes the stack slot as CORINFO_TYPE_CLASS; the
// value itself is left unchanged. Reports a verification error otherwise.
10746 void Interpreter::DoGetTypeFromHandle()
10754 _ASSERTE(m_curStackHt > 0);
10755 unsigned ind = m_curStackHt - 1;
10758 CorInfoType handleCIT = OpStackTypeGet(ind).ToCorInfoType();
10759 if (handleCIT != CORINFO_TYPE_VALUECLASS && handleCIT != CORINFO_TYPE_CLASS)
10761 VerificationError("HandleGetTypeFromHandle called on non-RuntimeTypeHandle/non-RuntimeType.");
10763 Object* obj = OpStackGet<Object*>(ind);
10764 if (obj->GetMethodTable() != g_pRuntimeTypeClass)
10766 VerificationError("HandleGetTypeFromHandle called on non-RuntimeTypeHandle/non-RuntimeType.");
10770 OpStackTypeSet(ind, InterpreterType(CORINFO_TYPE_CLASS));
// Intrinsic for System.Numerics.Vector.IsHardwareAccelerated. Only the
// tracing is visible in this elided listing; the code that pushes the result
// onto the operand stack is not shown — confirm against the full source.
10773 void Interpreter::DoSIMDHwAccelerated()
10782 if (s_TraceInterpreterILFlag.val(CLRConfig::INTERNAL_TraceInterpreterIL))
10784 fprintf(GetLogFile(), "    System.Numerics.Vector.IsHardwareAccelerated -- intrinsic\n");
10786 #endif // INTERP_TRACING
// Intrinsic for the hardware-intrinsic "*.IsSupported" properties: pushes
// false (as an INT) — hardware intrinsics are not enabled under the
// interpreter. (Stack-height increment is elided from this listing.)
10792 void Interpreter::DoGetIsSupported()
10800 OpStackSet<BOOL>(m_curStackHt, false);
10801 OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_INT));
// Handles the "constrained." IL prefix: resolves the type token that follows
// the prefix into m_constrainedResolvedToken and sets m_constrainedFlag so the
// next call instruction applies the constraint.
10805 void Interpreter::RecordConstrainedCall()
10814 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_Constrained]);
10815 #endif // INTERP_TRACING
10819 ResolveToken(&m_constrainedResolvedToken, getU4LittleEndian(m_ILCodePtr + 2), CORINFO_TOKENKIND_Constrained InterpTracingArg(RTK_Constrained));
10822 m_constrainedFlag = true;
// Ensures at least "sz" free bytes on the large-struct operand stack, growing
// the backing buffer if necessary. Growth is at least sz*4 beyond the current
// allocation and at least a doubling, to amortize reallocations; live bytes
// are copied over and the old buffer is freed.
10827 void Interpreter::LargeStructOperandStackEnsureCanPush(size_t sz)
10829 size_t remaining = m_largeStructOperandStackAllocSize - m_largeStructOperandStackHt;
10830 if (remaining < sz)
10832 size_t newAllocSize = max(m_largeStructOperandStackAllocSize + sz * 4, m_largeStructOperandStackAllocSize * 2);
10833 BYTE* newStack = new BYTE[newAllocSize];
10834 m_largeStructOperandStackAllocSize = newAllocSize;
10835 if (m_largeStructOperandStack != NULL)
10837 memcpy(newStack, m_largeStructOperandStack, m_largeStructOperandStackHt);
10838 delete[] m_largeStructOperandStack;
10840 m_largeStructOperandStack = newStack;
// Reserves "sz" bytes on the large-struct operand stack and returns a pointer
// to the reserved region (the pre-bump stack top). The "return res;" line is
// elided from this listing.
10844 void* Interpreter::LargeStructOperandStackPush(size_t sz)
10846 LargeStructOperandStackEnsureCanPush(sz);
10847 _ASSERTE(m_largeStructOperandStackAllocSize >= m_largeStructOperandStackHt + sz);
10848 void* res = &m_largeStructOperandStack[m_largeStructOperandStackHt];
10849 m_largeStructOperandStackHt += sz;
// Pops "sz" bytes from the large-struct operand stack — unless "fromAddr"
// lies in the large-struct *local* area, in which case the value was never
// pushed onto this stack and nothing is popped.
10853 void Interpreter::LargeStructOperandStackPop(size_t sz, void* fromAddr)
10855 if (!IsInLargeStructLocalArea(fromAddr))
10857 _ASSERTE(m_largeStructOperandStackHt >= sz);
10858 m_largeStructOperandStackHt -= sz;
// Debug consistency check: sums the sizes of all large-struct operands on the
// operand stack (excluding those living in the large-struct local area) and
// verifies the total equals the current large-struct stack height.
10863 bool Interpreter::LargeStructStackHeightIsValid()
10866 for (unsigned k = 0; k < m_curStackHt; k++)
10868 if (OpStackTypeGet(k).IsLargeStruct(&m_interpCeeInfo) && !IsInLargeStructLocalArea(OpStackGet<void*>(k)))
10870 sz2 += OpStackTypeGet(k).Size(&m_interpCeeInfo);
10873 _ASSERTE(sz2 == m_largeStructOperandStackHt);
10874 return sz2 == m_largeStructOperandStackHt;
// Reports an IL verification failure. Currently debug-only: builds the
// message "Verification Error: <msg>" in stack-allocated storage and fires an
// assert; the TODO notes it should eventually raise a real exception.
10878 void Interpreter::VerificationError(const char* msg)
10880 // TODO: Should raise an exception eventually; for now:
10881 const char* const msgPrefix = "Verification Error: ";
// +1 for the terminating NUL.
10882 size_t len = strlen(msgPrefix) + strlen(msg) + 1;
10883 char* msgFinal = (char*)_alloca(len);
10884 strcpy_s(msgFinal, len, msgPrefix);
10885 strcat_s(msgFinal, len, msg);
10886 _ASSERTE_MSG(false, msgFinal);
// Throws a managed DivideByZeroException (IL div/rem by zero).
10889 void Interpreter::ThrowDivideByZero()
10897 COMPlusThrow(kDivideByZeroException);
// Throws for arithmetic faults (e.g. INT_MIN / -1), deliberately matching the
// JITs' choice of OverflowException over the spec's ArithmeticException.
10900 void Interpreter::ThrowSysArithException()
10908 // According to the ECMA spec, this should be an ArithmeticException; however,
10909 // the JITs throw an OverflowException and consistency is top priority...
10910 COMPlusThrow(kOverflowException);
// Throws a managed NullReferenceException.
10913 void Interpreter::ThrowNullPointerException()
10921 COMPlusThrow(kNullReferenceException);
// Throws a managed OverflowException (checked-arithmetic overflow).
10924 void Interpreter::ThrowOverflowException()
10932 COMPlusThrow(kOverflowException);
// Throws a managed IndexOutOfRangeException (array/string bounds failure).
10935 void Interpreter::ThrowArrayBoundsException()
10943 COMPlusThrow(kIndexOutOfRangeException);
// Throws a managed InvalidCastException (failed castclass/unbox).
10946 void Interpreter::ThrowInvalidCastException()
10954 COMPlusThrow(kInvalidCastException);
// Throws a managed StackOverflowException.
10957 void Interpreter::ThrowStackOverflow()
10965 COMPlusThrow(kStackOverflowException);
// Floating-point remainder helper for the IL "rem" opcode on float operands;
// delegates to C fmodf (truncated-division remainder, sign of the dividend).
10968 float Interpreter::RemFunc(float v1, float v2)
10970 return fmodf(v1, v2);
// Double-precision counterpart of RemFunc(float); delegates to C fmod.
10973 double Interpreter::RemFunc(double v1, double v2)
10975 return fmod(v1, v2);
10978 // Static members and methods.
// Lazily-created map from interpretation-stub address to MethodDesc handle;
// NULL until GetAddrToMdMap() first allocates it.
10979 Interpreter::AddrToMDMap* Interpreter::s_addrToMDMap = NULL;
// Monotonic counter used to number generated interpreter stubs.
10981 unsigned Interpreter::s_interpreterStubNum = 0;
10983 // TODO: contracts and synchronization for the AddrToMDMap methods.
10984 // Requires caller to hold "s_interpStubToMDMapLock".
// Lazily allocates and returns the stub-address -> MethodDesc map.
10985 Interpreter::AddrToMDMap* Interpreter::GetAddrToMdMap()
10994 if (s_addrToMDMap == NULL)
10996 s_addrToMDMap = new AddrToMDMap();
10998 return s_addrToMDMap;
// Records the mapping from a freshly generated interpretation stub address to
// its method handle, under s_interpStubToMDMapLock. Asserts (debug) that the
// address was not already registered.
11001 void Interpreter::RecordInterpreterStubForMethodDesc(CORINFO_METHOD_HANDLE md, void* addr)
11010 CrstHolder ch(&s_interpStubToMDMapLock);
11012 AddrToMDMap* map = Interpreter::GetAddrToMdMap();
11014 CORINFO_METHOD_HANDLE dummy;
11015 _ASSERTE(!map->Lookup(addr, &dummy));
11017 map->AddOrReplace(KeyValuePair<void*,CORINFO_METHOD_HANDLE>(addr, md));
// Reverse lookup: returns the MethodDesc whose interpretation stub is at
// "addr", or NULL if the map was never created or has no entry. Read-only
// query — note the lock acquisition is commented out (see comment below).
11020 MethodDesc* Interpreter::InterpretationStubToMethodInfo(PCODE addr)
11028 // This query function will never allocate the table...
11029 if (s_addrToMDMap == NULL)
11032 // Otherwise...if we observe s_addrToMdMap non-null, the lock below must be initialized.
11033 // CrstHolder ch(&s_interpStubToMDMapLock);
11035 AddrToMDMap* map = Interpreter::GetAddrToMdMap();
11036 CORINFO_METHOD_HANDLE result = NULL;
// Lookup failure leaves result NULL, which maps to a NULL MethodDesc*.
11037 (void)map->Lookup((void*)addr, &result);
11038 return (MethodDesc*)result;
// Lazily-created map from method handle to its InterpreterMethodInfo;
// NULL until first use.
11041 Interpreter::MethodHandleToInterpMethInfoPtrMap* Interpreter::s_methodHandleToInterpMethInfoPtrMap = NULL;
11043 // Requires caller to hold "s_interpStubToMDMapLock".
// Lazily allocates and returns the method-handle -> InterpreterMethodInfo map.
11044 Interpreter::MethodHandleToInterpMethInfoPtrMap* Interpreter::GetMethodHandleToInterpMethInfoPtrMap()
11053 if (s_methodHandleToInterpMethInfoPtrMap == NULL)
11055 s_methodHandleToInterpMethInfoPtrMap = new MethodHandleToInterpMethInfoPtrMap();
11057 return s_methodHandleToInterpMethInfoPtrMap;
// Registers "methInfo" for method "md" under s_interpStubToMDMapLock. If a
// racing thread already inserted an entry, the existing one wins (this
// listing elides the branch that discards the new methInfo and the returns).
// Returns the InterpreterMethodInfo that ends up registered.
11060 InterpreterMethodInfo* Interpreter::RecordInterpreterMethodInfoForMethodHandle(CORINFO_METHOD_HANDLE md, InterpreterMethodInfo* methInfo)
11069 CrstHolder ch(&s_interpStubToMDMapLock);
11071 MethodHandleToInterpMethInfoPtrMap* map = Interpreter::GetMethodHandleToInterpMethInfoPtrMap();
11074 if (map->Lookup(md, &mi))
11076 // If there's already an entry, make sure it was created by another thread -- the same thread shouldn't create two
11078 _ASSERTE_MSG(mi.m_thread != GetThread(), "Two InterpMethInfo's for same meth by same thread.");
11079 // If we were creating an interpreter stub at the same time as another thread, and we lost the race to
11080 // insert it, use the already-existing one, and delete this one.
// No existing entry: record this methInfo together with the creating thread
// (the thread is tracked for the duplicate-insert assert above).
11085 mi.m_info = methInfo;
11087 mi.m_thread = GetThread();
11090 _ASSERTE_MSG(map->LookupPtr(md) == NULL, "Multiple InterpMethInfos for method desc.");
// Looks up the InterpreterMethodInfo registered for "md"; returns NULL (early
// return elided from this listing) if the map was never created or has no
// entry. Takes s_interpStubToMDMapLock for the lookup.
11095 InterpreterMethodInfo* Interpreter::MethodHandleToInterpreterMethInfoPtr(CORINFO_METHOD_HANDLE md)
11102 // This query function will never allocate the table...
11103 if (s_methodHandleToInterpMethInfoPtrMap == NULL)
11106 // Otherwise...if we observe s_addrToMdMap non-null, the lock below must be initialized.
11107 CrstHolder ch(&s_interpStubToMDMapLock);
11109 MethodHandleToInterpMethInfoPtrMap* map = Interpreter::GetMethodHandleToInterpMethInfoPtrMap();
11113 (void)map->Lookup(md, &mi);
11118 #ifndef DACCESS_COMPILE
11120 // Requires that the current thread holds "s_methodCacheLock."
// Returns the ILOffsetToItemCache for this invocation's generics context,
// creating caches on demand when "alloc" is true. For methods with no dynamic
// generics context, m_methodCache IS the ILOffsetToItemCache; otherwise
// m_methodCache is a GenericContextToInnerCache mapping each precise context
// to its own inner ILOffsetToItemCache.
11121 ILOffsetToItemCache* InterpreterMethodInfo::GetCacheForCall(Object* thisArg, void* genericsCtxtArg, bool alloc)
11123 // First, does the current method have dynamic generic information, and, if so,
11125 CORINFO_CONTEXT_HANDLE context = GetPreciseGenericsContext(thisArg, genericsCtxtArg);
11126 if (context == MAKE_METHODCONTEXT(m_method))
11128 // No dynamic generics context information. The caching field in "m_methInfo" is the
11129 // ILoffset->Item cache directly.
11130 // First, ensure that it's allocated.
11131 if (m_methodCache == NULL && alloc)
11133 // Lazy init via compare-exchange.
11134 ILOffsetToItemCache* cache = new ILOffsetToItemCache();
11135 void* prev = InterlockedCompareExchangeT<void*>(&m_methodCache, cache, NULL);
// A concurrent thread won the CAS: its cache is installed, ours is discarded.
11136 if (prev != NULL) delete cache;
11138 return reinterpret_cast<ILOffsetToItemCache*>(m_methodCache);
11142 // Otherwise, it does have generic info, so find the right cache.
11143 // First ensure that the top-level generics-context --> cache cache exists.
11144 GenericContextToInnerCache* outerCache = reinterpret_cast<GenericContextToInnerCache*>(m_methodCache);
11145 if (outerCache == NULL)
11149 // Lazy init via compare-exchange.
11150 outerCache = new GenericContextToInnerCache();
11151 void* prev = InterlockedCompareExchangeT<void*>(&m_methodCache, outerCache, NULL);
// Lost the race: adopt the winner's outer cache (delete of ours is elided here).
11155 outerCache = reinterpret_cast<GenericContextToInnerCache*>(prev);
11163 // Does the outerCache already have an entry for this instantiation?
11164 ILOffsetToItemCache* innerCache = NULL;
11165 if (!outerCache->GetItem(size_t(context), innerCache) && alloc)
11167 innerCache = new ILOffsetToItemCache();
11168 outerCache->AddItem(size_t(context), innerCache);
// Caches resolved call-site information for the given IL offset in the
// per-execution cache, under s_methodCacheLock. If the slot is already
// occupied (possible with recursion — see comment below), the new entry is
// discarded rather than replacing the existing one.
11174 void Interpreter::CacheCallInfo(unsigned iloffset, CallSiteCacheData* callInfo)
11176 CrstHolder ch(&s_methodCacheLock);
11178 ILOffsetToItemCache* cache = GetThisExecCache(true);
11179 // Insert, but if the item is already there, delete "mdcs" (which would have been owned
11181 // (Duplicate entries can happen because of recursive calls -- F makes a recursive call to F, and when it
11182 // returns wants to cache it, but the recursive call makes a furher recursive call, and caches that, so the
11183 // first call finds the iloffset already occupied.)
11184 if (!cache->AddItem(iloffset, CachedItem(callInfo)))
// Returns the cached call-site data for the given IL offset, or NULL if no
// cache exists or it holds no entry for that offset. Asserts (debug) that any
// hit carries the CIK_CallSite tag.
11190 CallSiteCacheData* Interpreter::GetCachedCallInfo(unsigned iloffset)
11192 CrstHolder ch(&s_methodCacheLock);
11194 ILOffsetToItemCache* cache = GetThisExecCache(false);
11195 if (cache == NULL) return NULL;
11198 if (cache->GetItem(iloffset, item))
11200 _ASSERTE_MSG(item.m_tag == CIK_CallSite, "Wrong cached item tag.");
11201 return item.m_value.m_callSiteInfo;
// Caches the resolved FieldDesc for an instance-field access at "iloffset".
11209 void Interpreter::CacheInstanceField(unsigned iloffset, FieldDesc* fld)
11211 CrstHolder ch(&s_methodCacheLock);
11213 ILOffsetToItemCache* cache = GetThisExecCache(true);
11214 cache->AddItem(iloffset, CachedItem(fld));
// Returns the cached FieldDesc for an instance-field access at "iloffset",
// or NULL if nothing has been cached yet.
11217 FieldDesc* Interpreter::GetCachedInstanceField(unsigned iloffset)
11219 CrstHolder ch(&s_methodCacheLock);
11221 ILOffsetToItemCache* cache = GetThisExecCache(false);
11222 if (cache == NULL) return NULL;
// NOTE(review): the declaration of "item" (a CachedItem) is outside this partial view.
11225 if (cache->GetItem(iloffset, item))
11227 _ASSERTE_MSG(item.m_tag == CIK_InstanceField, "Wrong cached item tag.");
11228 return item.m_value.m_instanceField;
// Caches a static-field cache entry for "iloffset". If another thread already
// populated this offset, the duplicate entry is disposed (cleanup elided here).
11236 void Interpreter::CacheStaticField(unsigned iloffset, StaticFieldCacheEntry* pEntry)
11238 CrstHolder ch(&s_methodCacheLock);
11240 ILOffsetToItemCache* cache = GetThisExecCache(true);
11241 // If (say) a concurrent thread has beaten us to this, delete the entry (which otherwise would have
11242 // been owned by the cache).
11243 if (!cache->AddItem(iloffset, CachedItem(pEntry)))
// Returns the cached static-field entry for "iloffset", or NULL on a miss.
11249 StaticFieldCacheEntry* Interpreter::GetCachedStaticField(unsigned iloffset)
11251 CrstHolder ch(&s_methodCacheLock);
11253 ILOffsetToItemCache* cache = GetThisExecCache(false);
// NOTE(review): the NULL-cache early return and "item" declaration are outside this partial view.
11259 if (cache->GetItem(iloffset, item))
11261 _ASSERTE_MSG(item.m_tag == CIK_StaticField, "Wrong cached item tag.");
11262 return item.m_value.m_staticFieldAddr;
// Caches the resolved class handle for the token at "iloffset".
11271 void Interpreter::CacheClassHandle(unsigned iloffset, CORINFO_CLASS_HANDLE clsHnd)
11273 CrstHolder ch(&s_methodCacheLock);
11275 ILOffsetToItemCache* cache = GetThisExecCache(true);
11276 cache->AddItem(iloffset, CachedItem(clsHnd));
// Returns the cached class handle for the token at "iloffset", or NULL on a miss.
11279 CORINFO_CLASS_HANDLE Interpreter::GetCachedClassHandle(unsigned iloffset)
11281 CrstHolder ch(&s_methodCacheLock);
11283 ILOffsetToItemCache* cache = GetThisExecCache(false);
// NOTE(review): the NULL-cache early return and "item" declaration are outside this partial view.
11289 if (cache->GetItem(iloffset, item))
11291 _ASSERTE_MSG(item.m_tag == CIK_ClassHandle, "Wrong cached item tag.");
11292 return item.m_value.m_clsHnd;
11299 #endif // DACCESS_COMPILE
// Static member definitions: configuration knobs, locks, and statistics counters.
11303 // These are not debug-only.
11304 ConfigMethodSet Interpreter::s_InterpretMeths;
11305 ConfigMethodSet Interpreter::s_InterpretMethsExclude;
11306 ConfigDWORD Interpreter::s_InterpretMethHashMin;
11307 ConfigDWORD Interpreter::s_InterpretMethHashMax;
11308 ConfigDWORD Interpreter::s_InterpreterJITThreshold;
11309 ConfigDWORD Interpreter::s_InterpreterDoLoopMethodsFlag;
11310 ConfigDWORD Interpreter::s_InterpreterUseCachingFlag;
11311 ConfigDWORD Interpreter::s_InterpreterLooseRulesFlag;
// Cached boolean values of the corresponding Config flags above.
11313 bool Interpreter::s_InterpreterDoLoopMethods;
11314 bool Interpreter::s_InterpreterUseCaching;
11315 bool Interpreter::s_InterpreterLooseRules;
// Locks guarding the method cache and the interpreter-stub-to-MethodDesc map.
11317 CrstExplicitInit Interpreter::s_methodCacheLock;
11318 CrstExplicitInit Interpreter::s_interpStubToMDMapLock;
11320 // The static variables below are debug-only.
// Global invocation/call-pattern counters.
11322 LONG Interpreter::s_totalInvocations = 0;
11323 LONG Interpreter::s_totalInterpCalls = 0;
11324 LONG Interpreter::s_totalInterpCallsToGetters = 0;
11325 LONG Interpreter::s_totalInterpCallsToDeadSimpleGetters = 0;
11326 LONG Interpreter::s_totalInterpCallsToDeadSimpleGettersShortCircuited = 0;
11327 LONG Interpreter::s_totalInterpCallsToSetters = 0;
11328 LONG Interpreter::s_totalInterpCallsToIntrinsics = 0;
11329 LONG Interpreter::s_totalInterpCallsToIntrinsicsUnhandled = 0;
// Per-token-kind resolution statistics (indexed by ResolveTokenKind).
11331 LONG Interpreter::s_tokenResolutionOpportunities[RTK_Count] = {0, };
11332 LONG Interpreter::s_tokenResolutionCalls[RTK_Count] = {0, };
// NOTE(review): the initializer list of this name table is outside this partial view.
11333 const char* Interpreter::s_tokenResolutionKindNames[RTK_Count] =
// Tracing configuration (INTERP_TRACING builds).
11365 FILE* Interpreter::s_InterpreterLogFile = NULL;
11366 ConfigDWORD Interpreter::s_DumpInterpreterStubsFlag;
11367 ConfigDWORD Interpreter::s_TraceInterpreterEntriesFlag;
11368 ConfigDWORD Interpreter::s_TraceInterpreterILFlag;
11369 ConfigDWORD Interpreter::s_TraceInterpreterOstackFlag;
11370 ConfigDWORD Interpreter::s_TraceInterpreterVerboseFlag;
11371 ConfigDWORD Interpreter::s_TraceInterpreterJITTransitionFlag;
11372 ConfigDWORD Interpreter::s_InterpreterStubMin;
11373 ConfigDWORD Interpreter::s_InterpreterStubMax;
11374 #endif // INTERP_TRACING
11376 #if INTERP_ILINSTR_PROFILE
// Per-IL-instruction profiling tables; index space is 256 one-byte opcodes
// followed by the two-byte (0xFE-prefixed) opcodes at 0x100+.
11377 unsigned short Interpreter::s_ILInstrCategories[512];
11379 int Interpreter::s_ILInstrExecs[256] = {0, };
11380 int Interpreter::s_ILInstrExecsByCategory[512] = {0, };
11381 int Interpreter::s_ILInstr2ByteExecs[Interpreter::CountIlInstr2Byte] = {0, };
11382 #if INTERP_ILCYCLE_PROFILE
11383 unsigned __int64 Interpreter::s_ILInstrCycles[512] = { 0, };
11384 unsigned __int64 Interpreter::s_ILInstrCyclesByCategory[512] = { 0, };
11386 unsigned __int64 Interpreter::s_callCycles = 0;
11387 unsigned Interpreter::s_calls = 0;
// Per-instruction cycle accounting (INTERP_ILCYCLE_PROFILE builds): charge the
// cycles elapsed since the last sample to the previously-executing IL
// instruction, then begin timing the instruction at m_ILCodePtr.
11389 void Interpreter::UpdateCycleCount()
11391 unsigned __int64 endCycles;
11392 bool b = CycleTimer::GetThreadCyclesS(&endCycles); _ASSERTE(b);
// CEE_COUNT acts as a sentinel meaning "no previous instruction to charge".
11393 if (m_instr != CEE_COUNT)
11395 unsigned __int64 delta = (endCycles - m_startCycles);
// Subtract cycles explicitly exempted from this instruction's cost.
11396 if (m_exemptCycles > 0)
11398 delta = delta - m_exemptCycles;
11399 m_exemptCycles = 0;
11401 CycleTimer::InterlockedAddU64(&s_ILInstrCycles[m_instr], delta);
11403 // In any case, set the instruction to the current one, and record its start time.
11404 m_instr = (*m_ILCodePtr);
// Two-byte opcodes (CEE_PREFIX1, 0xFE) are indexed at 0x100 + second byte.
11405 if (m_instr == CEE_PREFIX1) {
11406 m_instr = *(m_ILCodePtr + 1) + 0x100;
11408 b = CycleTimer::GetThreadCyclesS(&m_startCycles); _ASSERTE(b);
11411 #endif // INTERP_ILCYCLE_PROFILE
11412 #endif // INTERP_ILINSTR_PROFILE
// Growable global registry of all InterpreterMethodInfos (see AddInterpMethInfo).
11415 InterpreterMethodInfo** Interpreter::s_interpMethInfos = NULL;
11416 unsigned Interpreter::s_interpMethInfosAllocSize = 0;
11417 unsigned Interpreter::s_interpMethInfosCount = 0;
// Returns whether the value on top of the operand stack is pointer-typed.
11419 bool Interpreter::TOSIsPtr()
// NOTE(review): the empty-stack early-return statement is outside this partial view.
11421 if (m_curStackHt == 0)
11424 return CorInfoTypeIsPointer(OpStackTypeGet(m_curStackHt - 1).ToCorInfoType());
11428 ConfigDWORD Interpreter::s_PrintPostMortemFlag;
11430 // InterpreterCache.
11431 template<typename Key, typename Val>
// Starts empty; pair storage is allocated lazily on first insert (EnsureCanInsert).
11432 InterpreterCache<Key,Val>::InterpreterCache() : m_pairs(NULL), m_allocSize(0), m_count(0)
11435 AddAllocBytes(sizeof(*this));
// File-scope bookkeeping for total bytes allocated by all InterpreterCache
// instances; a progress line is printed per 16KB increment (see AddAllocBytes).
11441 static unsigned InterpreterCacheAllocBytes = 0;
11442 const unsigned KBYTE = 1024;
11443 const unsigned MBYTE = KBYTE*KBYTE;
11444 const unsigned InterpreterCacheAllocBytesIncrement = 16*KBYTE;
11445 static unsigned InterpreterCacheAllocBytesNextTarget = InterpreterCacheAllocBytesIncrement;
11447 template<typename Key, typename Val>
// Accounts "bytes" against the global cache-allocation total, printing a line
// each time another 16KB increment is crossed.
11448 void InterpreterCache<Key,Val>::AddAllocBytes(unsigned bytes)
11450 // Reinstate this code if you want to track bytes attributable to caching.
11452 InterpreterCacheAllocBytes += bytes;
11453 if (InterpreterCacheAllocBytes > InterpreterCacheAllocBytesNextTarget)
// NOTE(review): %d paired with an unsigned argument — %u would be strictly correct.
11455 printf("Total cache alloc = %d bytes.\n", InterpreterCacheAllocBytes);
11457 InterpreterCacheAllocBytesNextTarget += InterpreterCacheAllocBytesIncrement;
11463 template<typename Key, typename Val>
// Ensures m_pairs has room for one more entry: allocates InitSize on first use,
// otherwise doubles capacity, saturating at USHRT_MAX entries.
11464 void InterpreterCache<Key,Val>::EnsureCanInsert()
11466 if (m_count < m_allocSize)
11469 // Otherwise, must make room.
11470 if (m_allocSize == 0)
11472 _ASSERTE(m_count == 0);
11473 m_pairs = new KeyValPair[InitSize];
11474 m_allocSize = InitSize;
11476 AddAllocBytes(m_allocSize * sizeof(KeyValPair));
11481 unsigned short newSize = min(m_allocSize * 2, USHRT_MAX);
11483 KeyValPair* newPairs = new KeyValPair[newSize];
11484 memcpy(newPairs, m_pairs, m_count * sizeof(KeyValPair));
// NOTE(review): deallocation of the old m_pairs array is not visible in this
// partial view — confirm the elided line delete[]s it.
11486 m_pairs = newPairs;
// Only the newly-added capacity is charged to the allocation total.
11488 AddAllocBytes((newSize - m_allocSize) * sizeof(KeyValPair));
11490 m_allocSize = newSize;
11494 template<typename Key, typename Val>
// Inserts (key, val), keeping m_pairs sorted by key. A pre-existing key is left
// alone (it must already map to the same value, per the assert below).
11495 bool InterpreterCache<Key,Val>::AddItem(Key key, Val val)
11498 // Find the index to insert before.
// Linear scan: caches are expected to stay small, so O(n) insert is acceptable.
11499 unsigned firstGreaterOrEqual = 0;
11500 for (; firstGreaterOrEqual < m_count; firstGreaterOrEqual++)
11502 if (m_pairs[firstGreaterOrEqual].m_key >= key)
11505 if (firstGreaterOrEqual < m_count && m_pairs[firstGreaterOrEqual].m_key == key)
11507 _ASSERTE(m_pairs[firstGreaterOrEqual].m_val == val);
11510 // Move everything starting at firstGreater up one index (if necessary)
// NOTE(review): "k" is unsigned, so "k >= firstGreaterOrEqual" can never go false
// when firstGreaterOrEqual is 0; this must rely on an explicit k == 0 break
// (not visible in this partial view) to avoid wraparound — confirm.
11513 for (unsigned k = m_count-1; k >= firstGreaterOrEqual; k--)
11515 m_pairs[k + 1] = m_pairs[k];
11520 // Now we can insert the new element.
11521 m_pairs[firstGreaterOrEqual].m_key = key;
11522 m_pairs[firstGreaterOrEqual].m_val = val;
11527 template<typename Key, typename Val>
// Binary search for "key"; on a hit, copies the value into "v" and returns true.
11528 bool InterpreterCache<Key,Val>::GetItem(Key key, Val& v)
// NOTE(review): the initialization of "lo" is outside this partial view.
11531 unsigned hi = m_count;
11532 // Invariant: we've determined that the pair for "iloffset", if present,
11533 // is in the index interval [lo, hi).
11536 unsigned mid = (hi + lo)/2;
11537 Key midKey = m_pairs[mid].m_key;
11540 v = m_pairs[mid].m_val;
11543 else if (key < midKey)
11549 _ASSERTE(key > midKey);
11553 // If we reach here without returning, it's not here.
11557 // TODO: add a header comment here describing this function.
// Normalizes the operand stack: every large-struct value that still points into
// the local-variable area is copied up onto the large-struct operand stack, so
// that afterwards the operand stack holds no pointers into locals.
11558 void Interpreter::OpStackNormalize()
11560 size_t largeStructStackOffset = 0;
11561 // Yes, I've written a quadratic algorithm here. I don't think it will matter in practice.
11562 for (unsigned i = 0; i < m_curStackHt; i++)
11564 InterpreterType tp = OpStackTypeGet(i);
11565 if (tp.IsLargeStruct(&m_interpCeeInfo))
11567 size_t sz = tp.Size(&m_interpCeeInfo);
11569 void* addr = OpStackGet<void*>(i);
11570 if (IsInLargeStructLocalArea(addr))
11572 // We're going to allocate space at the top for the new value, then copy everything above the current slot
11573 // up into that new space, then copy the value into the vacated space.
11574 // How much will we have to copy?
11575 size_t toCopy = m_largeStructOperandStackHt - largeStructStackOffset;
11577 // Allocate space for the new value.
11578 void* dummy = LargeStructOperandStackPush(sz);
11580 // Remember where we're going to write to.
11581 BYTE* fromAddr = m_largeStructOperandStack + largeStructStackOffset;
11582 BYTE* toAddr = fromAddr + sz;
// NOTE(review): source and destination overlap whenever toCopy > sz, which is
// undefined for memcpy (memmove semantics would be required) — confirm callers
// guarantee non-overlap or that the elided context makes this safe.
11583 memcpy(toAddr, fromAddr, toCopy);
11585 // Now copy the local variable value.
11586 memcpy(fromAddr, addr, sz);
11587 OpStackSet<void*>(i, fromAddr);
11589 largeStructStackOffset += sz;
11592 // When we've normalized the stack, it contains no pointers to locals.
11593 m_orOfPushedInterpreterTypes = 0;
11598 // Code copied from eeinterface.cpp in "compiler". Should be common...
// Maps CorInfoType values to printable names, indexed by the enum value.
// NOTE(review): the array's initializer entries are outside this partial view.
11600 static const char* CorInfoTypeNames[] = {
// Formats "Class:Method(argTypes...):retType" for the given method handle into a
// freshly new[]-allocated buffer and returns it. Optionally returns the class
// name through "clsName". No delete[] of the buffer is visible here — the caller
// (or leak-tolerant debug tracing) owns it.
11626 const char* eeGetMethodFullName(CEEInfo* info, CORINFO_METHOD_HANDLE hnd, const char** clsName)
11636 const char* returnType = NULL;
11638 const char* className;
11639 const char* methodName = info->getMethodNameFromMetadata(hnd, &className, NULL, NULL);
11640 if (clsName != NULL)
11642 *clsName = className;
11648 /* Generating the full signature is a two-pass process. First we have to walk
11649 the components in order to assess the total size, then we allocate the buffer
11650 and copy the elements into it.
11653 /* Right now there is a race-condition in the EE, className can be NULL */
11655 /* initialize length with length of className and '.' */
// NOTE(review): the declarations of "length" and "i" are outside this partial view.
11659 length = strlen(className) + 1;
11663 _ASSERTE(strlen("<NULL>.") == 7);
11667 /* add length of methodName and opening bracket */
11668 length += strlen(methodName) + 1;
// First pass: walk the signature to accumulate the required buffer length.
11670 CORINFO_SIG_INFO sig;
11671 info->getMethodSig(hnd, &sig, nullptr);
11672 CORINFO_ARG_LIST_HANDLE argLst = sig.args;
11674 CORINFO_CLASS_HANDLE dummyCls;
11675 for (i = 0; i < sig.numArgs; i++)
11677 CorInfoType type = strip(info->getArgType(&sig, argLst, &dummyCls));
11679 length += strlen(CorInfoTypeNames[type]);
11680 argLst = info->getArgNext(argLst);
11683 /* add ',' if there is more than one argument */
11685 if (sig.numArgs > 1)
11687 length += (sig.numArgs - 1);
11690 if (sig.retType != CORINFO_TYPE_VOID)
11692 returnType = CorInfoTypeNames[sig.retType];
11693 length += strlen(returnType) + 1; // don't forget the delimiter ':'
11696 /* add closing bracket and null terminator */
11700 char* retName = new char[length];
11702 /* Now generate the full signature string in the allocated buffer */
// Second pass: emit "Class:" (note the delimiter is ':', not '.') or "<NULL>.".
11706 strcpy_s(retName, length, className);
11707 strcat_s(retName, length, ":");
11711 strcpy_s(retName, length, "<NULL>.");
11714 strcat_s(retName, length, methodName);
11716 // append the signature
11717 strcat_s(retName, length, "(");
// argLst is presumably reset to sig.args on an elided line before this loop — confirm.
11721 for (i = 0; i < sig.numArgs; i++)
11723 CorInfoType type = strip(info->getArgType(&sig, argLst, &dummyCls));
11724 strcat_s(retName, length, CorInfoTypeNames[type]);
11726 argLst = info->getArgNext(argLst);
11727 if (i + 1 < sig.numArgs)
11729 strcat_s(retName, length, ",");
11733 strcat_s(retName, length, ")");
11737 strcat_s(retName, length, ":");
11738 strcat_s(retName, length, returnType);
11741 _ASSERTE(strlen(retName) == length - 1);
// Convenience wrapper: formats the full name of "hnd" using this interpreter's
// CEEInfo (see the file-scope ::eeGetMethodFullName for the format and ownership).
11746 const char* Interpreter::eeGetMethodFullName(CORINFO_METHOD_HANDLE hnd)
11748 return ::eeGetMethodFullName(&m_interpCeeInfo, hnd);
// Table of IL opcode names; the indexing scheme used by the OPDEF expansion
// below subtracts 0xfe00 from (prefix << 8) + second byte, so 0xFF-prefixed
// (one-byte) opcodes land at 0x100 + b and 0xFE-prefixed (two-byte) at b.
11751 const char* ILOpNames[256*2];
11752 bool ILOpNamesInited = false;
// One-time lazy initialization of ILOpNames from opcode.def.
11754 void InitILOpNames()
11756 if (!ILOpNamesInited)
11758 // Initialize the array.
11759 #define OPDEF(c,s,pop,push,args,type,l,s1,s2,ctrl) if (s1 == 0xfe || s1 == 0xff) { int ind ((unsigned(s1) << 8) + unsigned(s2)); ind -= 0xfe00; ILOpNames[ind] = s; }
11760 #include "opcode.def"
11762 ILOpNamesInited = true;
// Returns the printable name of the IL instruction at the given code pointer,
// handling the 0xFE two-byte-opcode prefix.
11765 const char* Interpreter::ILOp(BYTE* m_ILCodePtr)
11768 BYTE b = *m_ILCodePtr;
// Two-byte (0xFE-prefixed) opcode: name is indexed by the second byte.
11771 return ILOpNames[*(m_ILCodePtr + 1)];
// One-byte opcode: names live at offset 0x100.
11775 return ILOpNames[(0x1 << 8) + b];
// Returns the printable name of the one-byte IL instruction "ilInstrVal".
11778 const char* Interpreter::ILOp1Byte(unsigned short ilInstrVal)
11781 return ILOpNames[(0x1 << 8) + ilInstrVal];
// Returns the printable name of the two-byte (0xFE-prefixed) IL instruction.
11783 const char* Interpreter::ILOp2Byte(unsigned short ilInstrVal)
11786 return ILOpNames[ilInstrVal];
// Dumps the operand stack (slot index, stack-normal type, and value) to the log.
11789 void Interpreter::PrintOStack()
11791 if (m_curStackHt == 0)
11793 fprintf(GetLogFile(), "     <empty>\n");
11797 for (unsigned k = 0; k < m_curStackHt; k++)
11799 CorInfoType cit = OpStackTypeGet(k).ToCorInfoType();
11800 _ASSERTE(IsStackNormalType(cit));
11801 fprintf(GetLogFile(), "      %4d: %10s: ", k, CorInfoTypeNames[cit]);
11802 PrintOStackValue(k);
11803 fprintf(GetLogFile(), "\n");
11806 fflush(GetLogFile());
// Prints the value in operand-stack slot "index"; large structs are stored by
// pointer, small values inline in the slot.
11809 void Interpreter::PrintOStackValue(unsigned index)
11811 _ASSERTE_MSG(index < m_curStackHt, "precondition");
11812 InterpreterType it = OpStackTypeGet(index);
11813 if (it.IsLargeStruct(&m_interpCeeInfo))
11815 PrintValue(it, OpStackGet<BYTE*>(index));
11819 PrintValue(it, reinterpret_cast<BYTE*>(OpStackGetAddr(index, it.Size(&m_interpCeeInfo))));
// Dumps all local variables (index, type, and value) to the log file.
11823 void Interpreter::PrintLocals()
11825 if (m_methInfo->m_numLocals == 0)
11827 fprintf(GetLogFile(), "      <no locals>\n");
11831 for (unsigned i = 0; i < m_methInfo->m_numLocals; i++)
11833 InterpreterType it = m_methInfo->m_localDescs[i].m_type;
11834 CorInfoType cit = it.ToCorInfoType();
11835 void* localPtr = NULL;
11836 if (it.IsLargeStruct(&m_interpCeeInfo))
// Large structs: the fixed-size slot holds a pointer to the struct's storage.
11838 void* structPtr = ArgSlotEndiannessFixup(reinterpret_cast<ARG_SLOT*>(FixedSizeLocalSlot(i)), sizeof(void**));
11839 localPtr = *reinterpret_cast<void**>(structPtr);
// Small values: the value itself lives in the fixed-size slot.
11843 localPtr = ArgSlotEndiannessFixup(reinterpret_cast<ARG_SLOT*>(FixedSizeLocalSlot(i)), it.Size(&m_interpCeeInfo));
11845 fprintf(GetLogFile(), "      loc%-4d: %10s: ", i, CorInfoTypeNames[cit]);
11846 PrintValue(it, reinterpret_cast<BYTE*>(localPtr));
11847 fprintf(GetLogFile(), "\n");
11850 fflush(GetLogFile());
// Dumps all arguments (index and type) to the log file.
// NOTE(review): the per-argument value print (presumably PrintArgValue(k)) is
// outside this partial view.
11853 void Interpreter::PrintArgs()
11855 for (unsigned k = 0; k < m_methInfo->m_numArgs; k++)
11857 CorInfoType cit = GetArgType(k).ToCorInfoType();
11858 fprintf(GetLogFile(), "      %4d: %10s: ", k, CorInfoTypeNames[cit]);
11860 fprintf(GetLogFile(), "\n");
11862 fprintf(GetLogFile(), "\n");
11863 fflush(GetLogFile());
// Prints the value of argument "argNum" using its declared interpreter type.
11866 void Interpreter::PrintArgValue(unsigned argNum)
11868 _ASSERTE_MSG(argNum < m_methInfo->m_numArgs, "precondition");
11869 InterpreterType it = GetArgType(argNum);
11870 PrintValue(it, GetArgAddr(argNum));
11873 // Note that this is used to print non-stack-normal values, so
11874 // it must handle all cases.
// Pretty-prints the value at "valAddr" according to its CorInfoType.
11875 void Interpreter::PrintValue(InterpreterType it, BYTE* valAddr)
11877 switch (it.ToCorInfoType())
11879 case CORINFO_TYPE_BOOL:
11880 fprintf(GetLogFile(), "%s", ((*reinterpret_cast<INT8*>(valAddr)) ? "true" : "false"));
11882 case CORINFO_TYPE_BYTE:
11883 fprintf(GetLogFile(), "%d", *reinterpret_cast<INT8*>(valAddr));
11885 case CORINFO_TYPE_UBYTE:
11886 fprintf(GetLogFile(), "%u", *reinterpret_cast<UINT8*>(valAddr));
11889 case CORINFO_TYPE_SHORT:
11890 fprintf(GetLogFile(), "%d", *reinterpret_cast<INT16*>(valAddr));
11892 case CORINFO_TYPE_USHORT: case CORINFO_TYPE_CHAR:
11893 fprintf(GetLogFile(), "%u", *reinterpret_cast<UINT16*>(valAddr));
11896 case CORINFO_TYPE_INT:
11897 fprintf(GetLogFile(), "%d", *reinterpret_cast<INT32*>(valAddr));
11899 case CORINFO_TYPE_UINT:
11900 fprintf(GetLogFile(), "%u", *reinterpret_cast<UINT32*>(valAddr));
11903 case CORINFO_TYPE_NATIVEINT:
11905 INT64 val = static_cast<INT64>(*reinterpret_cast<NativeInt*>(valAddr));
11906 fprintf(GetLogFile(), "%lld (= 0x%llx)", val, val);
11909 case CORINFO_TYPE_NATIVEUINT:
11911 UINT64 val = static_cast<UINT64>(*reinterpret_cast<NativeUInt*>(valAddr));
// NOTE(review): %lld used for an unsigned 64-bit value — %llu would be correct.
11912 fprintf(GetLogFile(), "%lld (= 0x%llx)", val, val);
11916 case CORINFO_TYPE_BYREF:
11917 fprintf(GetLogFile(), "0x%p", *reinterpret_cast<void**>(valAddr));
11920 case CORINFO_TYPE_LONG:
11922 INT64 val = *reinterpret_cast<INT64*>(valAddr);
11923 fprintf(GetLogFile(), "%lld (= 0x%llx)", val, val);
11926 case CORINFO_TYPE_ULONG:
// NOTE(review): %lld used for a UINT64 — %llu would be correct.
11927 fprintf(GetLogFile(), "%lld", *reinterpret_cast<UINT64*>(valAddr));
11930 case CORINFO_TYPE_CLASS:
11932 Object* obj = *reinterpret_cast<Object**>(valAddr);
11935 fprintf(GetLogFile(), "null");
// Print identity plus a raw byte dump of the object's base-size payload.
11940 fprintf(GetLogFile(), "0x%p (%s) [", obj, obj->GetMethodTable()->GetDebugClassName());
11942 fprintf(GetLogFile(), "0x%p (MT=0x%p) [", obj, obj->GetMethodTable());
11944 unsigned sz = obj->GetMethodTable()->GetBaseSize();
11945 BYTE* objBytes = reinterpret_cast<BYTE*>(obj);
11946 for (unsigned i = 0; i < sz; i++)
11950 fprintf(GetLogFile(), " ");
11952 fprintf(GetLogFile(), "0x%x", objBytes[i]);
11954 fprintf(GetLogFile(), "]");
11958 case CORINFO_TYPE_VALUECLASS:
11961 fprintf(GetLogFile(), "<%s>: [", m_interpCeeInfo.getClassNameFromMetadata(it.ToClassHandle(), NULL));
11962 unsigned sz = getClassSize(it.ToClassHandle());
11963 for (unsigned i = 0; i < sz; i++)
11967 fprintf(GetLogFile(), " ");
11969 fprintf(GetLogFile(), "0x%02x", valAddr[i]);
11971 fprintf(GetLogFile(), "]");
11974 case CORINFO_TYPE_REFANY:
11975 fprintf(GetLogFile(), "<refany>");
11977 case CORINFO_TYPE_FLOAT:
11978 fprintf(GetLogFile(), "%f", *reinterpret_cast<float*>(valAddr));
11980 case CORINFO_TYPE_DOUBLE:
11981 fprintf(GetLogFile(), "%g", *reinterpret_cast<double*>(valAddr));
11983 case CORINFO_TYPE_PTR:
11984 fprintf(GetLogFile(), "0x%p", *reinterpret_cast<void**>(valAddr));
11987 _ASSERTE_MSG(false, "Unknown type in PrintValue.");
11991 #endif // INTERP_TRACING
// Appends "methInfo" to the global s_interpMethInfos registry, growing the
// array (doubling) when full. Note the existing TODO: this is not thread-safe.
11994 void Interpreter::AddInterpMethInfo(InterpreterMethodInfo* methInfo)
11996 typedef InterpreterMethodInfo* InterpreterMethodInfoPtr;
11997 // TODO: this requires synchronization.
11998 const unsigned InitSize = 128;
11999 if (s_interpMethInfos == NULL)
12001 s_interpMethInfos = new InterpreterMethodInfoPtr[InitSize];
12002 s_interpMethInfosAllocSize = InitSize;
12004 if (s_interpMethInfosAllocSize == s_interpMethInfosCount)
// Full: double the capacity and copy the existing pointers across.
12006 unsigned newSize = s_interpMethInfosAllocSize * 2;
12007 InterpreterMethodInfoPtr* tmp = new InterpreterMethodInfoPtr[newSize];
12008 memcpy(tmp, s_interpMethInfos, s_interpMethInfosCount * sizeof(InterpreterMethodInfoPtr));
12009 delete[] s_interpMethInfos;
12010 s_interpMethInfos = tmp;
12011 s_interpMethInfosAllocSize = newSize;
12013 s_interpMethInfos[s_interpMethInfosCount] = methInfo;
12014 s_interpMethInfosCount++;
// qsort comparator: orders InterpreterMethodInfo* ascending by m_invocations.
// (Return statements for the <, ==, and > branches are elided in this view.)
12017 int _cdecl Interpreter::CompareMethInfosByInvocations(const void* mi0in, const void* mi1in)
12019 const InterpreterMethodInfo* mi0 = *((const InterpreterMethodInfo**)mi0in);
12020 const InterpreterMethodInfo* mi1 = *((const InterpreterMethodInfo**)mi1in);
12021 if (mi0->m_invocations < mi1->m_invocations)
12025 else if (mi0->m_invocations == mi1->m_invocations)
12031 _ASSERTE(mi0->m_invocations > mi1->m_invocations);
// qsort comparator: orders InterpreterMethodInfo* DESCENDING by total IL
// instructions executed ("<" returns 1, so heavier methods sort first).
12037 int _cdecl Interpreter::CompareMethInfosByILInstrs(const void* mi0in, const void* mi1in)
12039 const InterpreterMethodInfo* mi0 = *((const InterpreterMethodInfo**)mi0in);
12040 const InterpreterMethodInfo* mi1 = *((const InterpreterMethodInfo**)mi1in);
12041 if (mi0->m_totIlInstructionsExeced < mi1->m_totIlInstructionsExeced) return 1;
12042 else if (mi0->m_totIlInstructionsExeced == mi1->m_totIlInstructionsExeced) return 0;
12045 _ASSERTE(mi0->m_totIlInstructionsExeced > mi1->m_totIlInstructionsExeced);
12049 #endif // INTERP_PROFILE
// Scale factor used when reporting cycle counts in millions ("MCycles").
12052 const int MIL = 1000000;
12054 // Leaving this disabled for now.
12056 unsigned __int64 ForceSigWalkCycles = 0;
// Prints post-mortem interpreter statistics to the log: an invocation-count
// histogram, cumulative-execution distributions, call/token-resolution counters,
// and (when profiling is enabled) per-IL-instruction execution/cycle profiles.
// Gated on the InterpreterPrintPostMortem config flag.
12059 void Interpreter::PrintPostMortemData()
12061 if (s_PrintPostMortemFlag.val(CLRConfig::INTERNAL_InterpreterPrintPostMortem) == 0)
12067 // Let's print two things: the number of methods that are 0-10, or more, and
12068 // For each 10% of methods, cumulative % of invocations they represent. By 1% for last 10%.
12070 // First one doesn't require any sorting.
12071 const unsigned HistoMax = 11;
12072 unsigned histo[HistoMax];
12073 unsigned numExecs[HistoMax];
12074 for (unsigned k = 0; k < HistoMax; k++)
12076 histo[k] = 0; numExecs[k] = 0;
// Bucket methods by invocation count; the last bucket aggregates ">= 10".
12078 for (unsigned k = 0; k < s_interpMethInfosCount; k++)
12080 unsigned invokes = s_interpMethInfos[k]->m_invocations;
12081 if (invokes > HistoMax - 1)
12083 invokes = HistoMax - 1;
12086 numExecs[invokes] += s_interpMethInfos[k]->m_invocations;
12089 fprintf(GetLogFile(), "Histogram of method executions:\n");
12090 fprintf(GetLogFile(), "   # of execs | # meths (%%) | cum %% | %% cum execs\n");
12091 fprintf(GetLogFile(), "   -------------------------------------------------------\n");
12092 float fTotMeths = float(s_interpMethInfosCount);
12093 float fTotExecs = float(s_totalInvocations);
12094 float numPct = 0.0f;
12095 float numExecPct = 0.0f;
12096 for (unsigned k = 0; k < HistoMax; k++)
12098 fprintf(GetLogFile(), "   %10d", k);
12101 fprintf(GetLogFile(), "+ ");
12105 fprintf(GetLogFile(), "  ");
12107 float pct = float(histo[k])*100.0f/fTotMeths;
12109 float execPct = float(numExecs[k])*100.0f/fTotExecs;
12110 numExecPct += execPct;
12111 fprintf(GetLogFile(), "| %7d (%5.2f%%) | %6.2f%% | %6.2f%%\n", histo[k], pct, numPct, numExecPct);
12114 // This sorts them in ascending order of number of invocations.
12115 qsort(&s_interpMethInfos[0], s_interpMethInfosCount, sizeof(InterpreterMethodInfo*), &CompareMethInfosByInvocations);
12117 fprintf(GetLogFile(), "\nFor methods sorted in ascending # of executions order, cumulative %% of executions:\n");
12118 if (s_totalInvocations > 0)
12120 fprintf(GetLogFile(), "   %% of methods  | max execs | cum %% of execs\n");
12121 fprintf(GetLogFile(), "   ------------------------------------------\n");
12122 unsigned methNum = 0;
12123 unsigned nNumExecs = 0;
12124 float totExecsF = float(s_totalInvocations);
// Report cumulative execution share at each 10% slice of methods...
12125 for (unsigned k = 10; k < 100; k += 10)
12127 unsigned targ = unsigned((float(k)/100.0f)*float(s_interpMethInfosCount));
12128 unsigned targLess1 = (targ > 0 ? targ - 1 : 0);
12129 while (methNum < targ)
12131 nNumExecs += s_interpMethInfos[methNum]->m_invocations;
12134 float pctExecs = float(nNumExecs) * 100.0f / totExecsF;
12136 fprintf(GetLogFile(), "   %8d%%    | %9d | %8.2f%%\n", k, s_interpMethInfos[targLess1]->m_invocations, pctExecs);
// ...then by 1% steps over the final decile (note: "k", "pctExecs", and "targ"
// declarations for this section are elided from this view).
12141 for (; k < 100; k++)
12143 unsigned targ = unsigned((float(k)/100.0f)*float(s_interpMethInfosCount));
12144 while (methNum < targ)
12146 nNumExecs += s_interpMethInfos[methNum]->m_invocations;
12149 pctExecs = float(nNumExecs) * 100.0f / totExecsF;
12151 fprintf(GetLogFile(), "   %8d%%    | %9d | %8.2f%%\n", k, s_interpMethInfos[targLess1]->m_invocations, pctExecs);
12155 targ = s_interpMethInfosCount;
12156 while (methNum < targ)
12158 nNumExecs += s_interpMethInfos[methNum]->m_invocations;
12161 pctExecs = float(nNumExecs) * 100.0f / totExecsF;
12162 fprintf(GetLogFile(), "   %8d%%    | %9d | %8.2f%%\n", k, s_interpMethInfos[targLess1]->m_invocations, pctExecs);
12167 fprintf(GetLogFile(), "\nTotal number of calls from interpreted code: %d.\n", s_totalInterpCalls);
12168 fprintf(GetLogFile(), "    Also, %d are intrinsics; %d of these are not currently handled intrinsically.\n",
12169 s_totalInterpCallsToIntrinsics, s_totalInterpCallsToIntrinsicsUnhandled);
12170 fprintf(GetLogFile(), "    Of these, %d to potential property getters (%d of these dead simple), %d to setters.\n",
12171 s_totalInterpCallsToGetters, s_totalInterpCallsToDeadSimpleGetters, s_totalInterpCallsToSetters);
12172 fprintf(GetLogFile(), "    Of the dead simple getter calls, %d have been short-circuited.\n",
12173 s_totalInterpCallsToDeadSimpleGettersShortCircuited);
12175 fprintf(GetLogFile(), "\nToken resolutions by category:\n");
12176 fprintf(GetLogFile(), "Category     |  opportunities  |   calls   |      %%\n");
12177 fprintf(GetLogFile(), "---------------------------------------------------\n");
12178 for (unsigned i = RTK_Undefined; i < RTK_Count; i++)
// "pct" declaration for this loop is elided from this view.
12181 if (s_tokenResolutionOpportunities[i] > 0)
12182 pct = 100.0f * float(s_tokenResolutionCalls[i]) / float(s_tokenResolutionOpportunities[i]);
12183 fprintf(GetLogFile(), "%12s | %15d | %9d | %6.2f%%\n",
12184 s_tokenResolutionKindNames[i], s_tokenResolutionOpportunities[i], s_tokenResolutionCalls[i], pct);
12188 fprintf(GetLogFile(), "Information on num of execs:\n");
12190 UINT64 totILInstrs = 0;
12191 for (unsigned i = 0; i < s_interpMethInfosCount; i++) totILInstrs += s_interpMethInfos[i]->m_totIlInstructionsExeced;
12193 float totILInstrsF = float(totILInstrs);
12195 fprintf(GetLogFile(), "\nTotal instructions = %lld.\n", totILInstrs);
12196 fprintf(GetLogFile(), "\nTop <=10 methods by # of IL instructions executed.\n");
12197 fprintf(GetLogFile(), "%10s | %9s | %10s | %10s | %8s | %s\n", "tot execs", "# invokes", "code size", "ratio", "% of tot", "Method");
12198 fprintf(GetLogFile(), "----------------------------------------------------------------------------\n");
12200 qsort(&s_interpMethInfos[0], s_interpMethInfosCount, sizeof(InterpreterMethodInfo*), &CompareMethInfosByILInstrs);
12202 for (unsigned i = 0; i < min(10, s_interpMethInfosCount); i++)
12204 unsigned ilCodeSize = unsigned(s_interpMethInfos[i]->m_ILCodeEnd - s_interpMethInfos[i]->m_ILCode);
12205 fprintf(GetLogFile(), "%10lld | %9d | %10d | %10.2f | %8.2f%% | %s:%s\n",
12206 s_interpMethInfos[i]->m_totIlInstructionsExeced,
12207 s_interpMethInfos[i]->m_invocations,
12209 float(s_interpMethInfos[i]->m_totIlInstructionsExeced) / float(ilCodeSize),
12210 float(s_interpMethInfos[i]->m_totIlInstructionsExeced) * 100.0f / totILInstrsF,
12211 s_interpMethInfos[i]->m_clsName,
12212 s_interpMethInfos[i]->m_methName);
12214 #endif // INTERP_PROFILE
12217 #if INTERP_ILINSTR_PROFILE
12218 fprintf(GetLogFile(), "\nIL instruction profiling:\n");
12219 // First, classify by categories.
12220 unsigned totInstrs = 0;
12221 #if INTERP_ILCYCLE_PROFILE
12222 unsigned __int64 totCycles = 0;
// Each cycle sample includes fixed measurement overhead; subtract it per exec.
12223 unsigned __int64 perMeasurementOverhead = CycleTimer::QueryOverhead();
12224 #endif // INTERP_ILCYCLE_PROFILE
12225 for (unsigned i = 0; i < 256; i++)
12227 s_ILInstrExecsByCategory[s_ILInstrCategories[i]] += s_ILInstrExecs[i];
12228 totInstrs += s_ILInstrExecs[i];
12229 #if INTERP_ILCYCLE_PROFILE
12230 unsigned __int64 cycles = s_ILInstrCycles[i];
12231 if (cycles > s_ILInstrExecs[i] * perMeasurementOverhead) cycles -= s_ILInstrExecs[i] * perMeasurementOverhead;
12233 s_ILInstrCycles[i] = cycles;
12234 s_ILInstrCyclesByCategory[s_ILInstrCategories[i]] += cycles;
12235 totCycles += cycles;
12236 #endif // INTERP_ILCYCLE_PROFILE
12238 unsigned totInstrs2Byte = 0;
12239 #if INTERP_ILCYCLE_PROFILE
12240 unsigned __int64 totCycles2Byte = 0;
12241 #endif // INTERP_ILCYCLE_PROFILE
12242 for (unsigned i = 0; i < CountIlInstr2Byte; i++)
12244 unsigned ind = 0x100 + i;
12245 s_ILInstrExecsByCategory[s_ILInstrCategories[ind]] += s_ILInstr2ByteExecs[i];
12246 totInstrs += s_ILInstr2ByteExecs[i];
12247 totInstrs2Byte += s_ILInstr2ByteExecs[i];
12248 #if INTERP_ILCYCLE_PROFILE
12249 unsigned __int64 cycles = s_ILInstrCycles[ind];
12250 if (cycles > s_ILInstrExecs[ind] * perMeasurementOverhead) cycles -= s_ILInstrExecs[ind] * perMeasurementOverhead;
// NOTE(review): writes back to index "i" while the cycles were read from
// "ind" (0x100 + i) — looks like it should be s_ILInstrCycles[ind]; confirm.
12252 s_ILInstrCycles[i] = cycles;
12253 s_ILInstrCyclesByCategory[s_ILInstrCategories[ind]] += cycles;
12254 totCycles += cycles;
12255 totCycles2Byte += cycles;
12256 #endif // INTERP_ILCYCLE_PROFILE
12259 // Now sort the categories by # of occurrences.
12261 InstrExecRecord ieps[256 + CountIlInstr2Byte];
12262 for (unsigned short i = 0; i < 256; i++)
12264 ieps[i].m_instr = i; ieps[i].m_is2byte = false; ieps[i].m_execs = s_ILInstrExecs[i];
12265 #if INTERP_ILCYCLE_PROFILE
12266 if (i == CEE_BREAK)
12268 ieps[i].m_cycles = 0;
12269 continue; // Don't count these if they occur...
12271 ieps[i].m_cycles = s_ILInstrCycles[i];
12272 _ASSERTE((ieps[i].m_execs != 0) || (ieps[i].m_cycles == 0)); // Cycles can be zero for non-zero execs because of measurement correction.
12273 #endif // INTERP_ILCYCLE_PROFILE
12275 for (unsigned short i = 0; i < CountIlInstr2Byte; i++)
// The declaration/initialization of "ind" here is elided from this view.
12278 ieps[ind].m_instr = i; ieps[ind].m_is2byte = true; ieps[ind].m_execs = s_ILInstr2ByteExecs[i];
12279 #if INTERP_ILCYCLE_PROFILE
12280 ieps[ind].m_cycles = s_ILInstrCycles[ind];
12281 _ASSERTE((ieps[i].m_execs != 0) || (ieps[i].m_cycles == 0)); // Cycles can be zero for non-zero execs because of measurement correction.
12282 #endif // INTERP_ILCYCLE_PROFILE
12285 qsort(&ieps[0], 256 + CountIlInstr2Byte, sizeof(InstrExecRecord), &InstrExecRecord::Compare);
12287 fprintf(GetLogFile(), "\nInstructions (%d total, %d 1-byte):\n", totInstrs, totInstrs - totInstrs2Byte);
12288 #if INTERP_ILCYCLE_PROFILE
12289 if (s_callCycles > s_calls * perMeasurementOverhead) s_callCycles -= s_calls * perMeasurementOverhead;
12290 else s_callCycles = 0;
12291 fprintf(GetLogFile(), "  MCycles (%lld total, %lld 1-byte, %lld calls (%d calls, %10.2f cyc/call):\n",
12292 totCycles/MIL, (totCycles - totCycles2Byte)/MIL, s_callCycles/MIL, s_calls, float(s_callCycles)/float(s_calls));
12294 extern unsigned __int64 MetaSigCtor1Cycles;
12295 fprintf(GetLogFile(), "  MetaSig(MethodDesc, TypeHandle) ctor: %lld MCycles.\n",
12296 MetaSigCtor1Cycles/MIL);
12297 fprintf(GetLogFile(), "  ForceSigWalk: %lld MCycles.\n",
12298 ForceSigWalkCycles/MIL);
12300 #endif // INTERP_ILCYCLE_PROFILE
12302 PrintILProfile(&ieps[0], totInstrs
12303 #if INTERP_ILCYCLE_PROFILE
12305 #endif // INTERP_ILCYCLE_PROFILE
12308 fprintf(GetLogFile(), "\nInstructions grouped by category: (%d total, %d 1-byte):\n", totInstrs, totInstrs - totInstrs2Byte);
12309 #if INTERP_ILCYCLE_PROFILE
12310 fprintf(GetLogFile(), "  MCycles (%lld total, %lld 1-byte):\n",
12311 totCycles/MIL, (totCycles - totCycles2Byte)/MIL);
12312 #endif // INTERP_ILCYCLE_PROFILE
// Reuse "ieps" to aggregate by category and print a second profile.
12313 for (unsigned short i = 0; i < 256 + CountIlInstr2Byte; i++)
12317 ieps[i].m_instr = i; ieps[i].m_is2byte = false;
12321 ieps[i].m_instr = i - 256; ieps[i].m_is2byte = true;
12323 ieps[i].m_execs = s_ILInstrExecsByCategory[i];
12324 #if INTERP_ILCYCLE_PROFILE
12325 ieps[i].m_cycles = s_ILInstrCyclesByCategory[i];
12326 #endif // INTERP_ILCYCLE_PROFILE
12328 qsort(&ieps[0], 256 + CountIlInstr2Byte, sizeof(InstrExecRecord), &InstrExecRecord::Compare);
12329 PrintILProfile(&ieps[0], totInstrs
12330 #if INTERP_ILCYCLE_PROFILE
12332 #endif // INTERP_ILCYCLE_PROFILE
12336 // Early debugging code.
// NOTE(review): format string below has no conversion specifiers yet extra
// arguments are passed — harmless but should be cleaned up.
12337 fprintf(GetLogFile(), "\nInstructions grouped category mapping:\n", totInstrs, totInstrs - totInstrs2Byte);
12338 for (unsigned short i = 0; i < 256; i++)
12340 unsigned short cat = s_ILInstrCategories[i];
12342 fprintf(GetLogFile(), "Instr: %12s ==> %12s.\n", ILOp1Byte(i), ILOp1Byte(cat));
12344 fprintf(GetLogFile(), "Instr: %12s ==> %12s.\n", ILOp1Byte(i), ILOp2Byte(cat - 256));
12347 for (unsigned short i = 0; i < CountIlInstr2Byte; i++)
12349 unsigned ind = 256 + i;
12350 unsigned short cat = s_ILInstrCategories[ind];
12352 fprintf(GetLogFile(), "Instr: %12s ==> %12s.\n", ILOp2Byte(i), ILOp1Byte(cat));
12354 fprintf(GetLogFile(), "Instr: %12s ==> %12s.\n", ILOp2Byte(i), ILOp2Byte(cat - 256));
12358 #endif // INTERP_ILINSTR_PROFILE
12361 #if INTERP_ILINSTR_PROFILE
// Scale factor used when reporting cycle counts in thousands ("KCycles").
12363 const int K = 1000;
// Prints one table row per IL instruction with a non-zero execution count:
// name, exec count, percentage, cumulative percentage, and (when cycle
// profiling is on) cycle statistics. "recs" must already be sorted.
12366 void Interpreter::PrintILProfile(Interpreter::InstrExecRecord *recs, unsigned int totInstrs
12367 #if INTERP_ILCYCLE_PROFILE
12368 , unsigned __int64 totCycles
12369 #endif // INTERP_ILCYCLE_PROFILE
12372 float fTotInstrs = float(totInstrs);
12373 fprintf(GetLogFile(), "Instruction  |   execs   |       %% |   cum %%");
12374 #if INTERP_ILCYCLE_PROFILE
12375 float fTotCycles = float(totCycles);
12376 fprintf(GetLogFile(), "|          KCycles |       %% |   cum %% | cyc/inst\n");
12377 fprintf(GetLogFile(), "--------------------------------------------------"
12378 "-----------------------------------------\n");
12380 fprintf(GetLogFile(), "\n-------------------------------------------\n");
12382 float numPct = 0.0f;
12383 #if INTERP_ILCYCLE_PROFILE
12384 float numCyclePct = 0.0f;
12385 #endif // INTERP_ILCYCLE_PROFILE
12386 for (unsigned i = 0; i < 256 + CountIlInstr2Byte; i++)
// The declaration of "pct" (and the numPct accumulation) is elided from this view.
12389 if (totInstrs > 0) pct = float(recs[i].m_execs) * 100.0f / fTotInstrs;
12391 if (recs[i].m_execs > 0)
12393 fprintf(GetLogFile(), "%12s | %9d | %6.2f%% | %6.2f%%",
12394 (recs[i].m_is2byte ? ILOp2Byte(recs[i].m_instr) : ILOp1Byte(recs[i].m_instr)), recs[i].m_execs,
12396 #if INTERP_ILCYCLE_PROFILE
12398 if (totCycles > 0) pct = float(recs[i].m_cycles) * 100.0f / fTotCycles;
12399 numCyclePct += pct;
12400 float cyclesPerInst = float(recs[i].m_cycles) / float(recs[i].m_execs);
12401 fprintf(GetLogFile(), "| %12llu     | %6.2f%% | %6.2f%% | %11.2f",
12402 recs[i].m_cycles/K, pct, numCyclePct, cyclesPerInst);
12403 #endif // INTERP_ILCYCLE_PROFILE
12404 fprintf(GetLogFile(), "\n");
12408 #endif // INTERP_ILINSTR_PROFILE
12410 #endif // FEATURE_INTERPRETER