1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
7 #ifdef FEATURE_INTERPRETER
9 #include "interpreter.h"
10 #include "interpreter.hpp"
16 #include "gcheaputilities.h"
18 #include "jitinterface.h"
20 #include "exceptmacros.h"
21 #include "runtimeexceptionkind.h"
22 #include "runtimehandles.h"
24 #include "cycletimer.h"
26 inline CORINFO_CALLINFO_FLAGS combine(CORINFO_CALLINFO_FLAGS flag1, CORINFO_CALLINFO_FLAGS flag2)
28 return (CORINFO_CALLINFO_FLAGS) (flag1 | flag2);
31 static CorInfoType asCorInfoType(CORINFO_CLASS_HANDLE clsHnd)
33 TypeHandle typeHnd(clsHnd);
34 return CEEInfo::asCorInfoType(typeHnd.GetInternalCorElementType(), typeHnd, NULL);
// Caches the JIT-EE view of a method (IL range, max stack, EH clause count,
// arg/local counts, calling-convention-derived flags, and per-local type
// descriptors) in a form the interpreter can consume quickly at execution time.
//   comp     - JIT/EE interface used to query method, class, and type info.
//   methInfo - the CORINFO_METHOD_INFO handed to the code generator.
InterpreterMethodInfo::InterpreterMethodInfo(CEEInfo* comp, CORINFO_METHOD_INFO* methInfo)
    : m_method(methInfo->ftn),
      m_module(methInfo->scope),
      m_ILCode(methInfo->ILCode),
      m_ILCodeEnd(methInfo->ILCode + methInfo->ILCodeSize),
      m_maxStack(methInfo->maxStack),
      m_totIlInstructionsExeced(0),
      m_maxIlInstructionsExeced(0),
      m_ehClauseCount(methInfo->EHcount),
      m_varArgHandleArgNum(NO_VA_ARGNUM),
      m_numArgs(methInfo->args.numArgs),
      m_numLocals(methInfo->locals.numArgs),
      m_returnType(methInfo->args.retType),
    // Overflow sanity check. (Can ILCodeSize ever be zero?)
    _ASSERTE(m_ILCode <= m_ILCodeEnd);

    // Does the calling convention indicate an implicit "this" (first arg) or generic type context arg (last arg)?
    SetFlag<Flag_hasThisArg>((methInfo->args.callConv & CORINFO_CALLCONV_HASTHIS) != 0);
    if (GetFlag<Flag_hasThisArg>())
        // "this" is an object pointer unless the declaring class is a value class
        // (in which case it is a byref to the unboxed value).
        CORINFO_CLASS_HANDLE methClass = comp->getMethodClass(methInfo->ftn);
        DWORD attribs = comp->getClassAttribs(methClass);
        SetFlag<Flag_thisArgIsObjPtr>((attribs & CORINFO_FLG_VALUECLASS) == 0);
#if INTERP_PROFILE || defined(_DEBUG)
    // NOTE(review): the two m_methName assignments below appear to be alternate
    // branches of an elided conditional (full-name vs. metadata-name lookup) —
    // confirm against the surrounding preprocessor/branch structure.
    m_methName = ::eeGetMethodFullName(comp, methInfo->ftn, &clsName);
    m_methName = comp->getMethodNameFromMetadata(methInfo->ftn, &clsName, NULL, NULL);
    // Take a private copy of the class name: the string returned by the EE is
    // not owned by us.
    char* myClsName = new char[strlen(clsName) + 1];
    strcpy(myClsName, clsName);
    m_clsName = myClsName;
#endif // INTERP_PROFILE

    // Do we have a ret buff?  If its a struct or refany, then *maybe*, depending on architecture...
    bool hasRetBuff = (methInfo->args.retType == CORINFO_TYPE_VALUECLASS || methInfo->args.retType == CORINFO_TYPE_REFANY);
#if defined(FEATURE_HFA)
    // ... unless its an HFA type (and not varargs)...
    if (hasRetBuff && (comp->getHFAType(methInfo->args.retTypeClass) != CORINFO_HFA_ELEM_NONE) && methInfo->args.getCallConv() != CORINFO_CALLCONV_VARARG)
#if defined(UNIX_AMD64_ABI) || defined(HOST_LOONGARCH64) || defined(HOST_RISCV64)
    // ...or it fits into two registers.
    if (hasRetBuff && getClassSize(methInfo->args.retTypeClass) <= 2 * sizeof(void*))
#elif defined(HOST_ARM) || defined(HOST_AMD64)|| defined(HOST_ARM64)
    // ...or it fits into one register.
    if (hasRetBuff && getClassSize(methInfo->args.retTypeClass) <= sizeof(void*))
    SetFlag<Flag_hasRetBuffArg>(hasRetBuff);

    MetaSig sig(reinterpret_cast<MethodDesc*>(methInfo->ftn));
    SetFlag<Flag_hasGenericsContextArg>((methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0);
    SetFlag<Flag_isVarArg>((methInfo->args.callConv & CORINFO_CALLCONV_VARARG) != 0);
    SetFlag<Flag_typeHasGenericArgs>(methInfo->args.sigInst.classInstCount > 0);
    SetFlag<Flag_methHasGenericArgs>(methInfo->args.sigInst.methInstCount > 0);
    _ASSERTE_MSG(!GetFlag<Flag_hasGenericsContextArg>()
                 || ((GetFlag<Flag_typeHasGenericArgs>() && !(GetFlag<Flag_hasThisArg>() && GetFlag<Flag_thisArgIsObjPtr>())) || GetFlag<Flag_methHasGenericArgs>()),
                 "If the method takes a generic parameter, is a static method of generic class (or meth of a value class), and/or itself takes generic parameters");

    // Each of the optional implicit arguments bumps the total arg count
    // (bodies of these conditionals are elided from this view — presumably
    // increments of the arg total; confirm against the full source).
    if (GetFlag<Flag_hasThisArg>())
    if (GetFlag<Flag_hasRetBuffArg>())
    if (GetFlag<Flag_isVarArg>())
    if (GetFlag<Flag_hasGenericsContextArg>())
    m_argDescs = new ArgDesc[m_numArgs];

    // Now we'll do the locals.
    m_localDescs = new LocalDesc[m_numLocals];
    // Allocate space for the pinning reference bits (lazily).
    m_localIsPinningRefBits = NULL;

    // Now look at each local.
    CORINFO_ARG_LIST_HANDLE localsPtr = methInfo->locals.args;
    CORINFO_CLASS_HANDLE vcTypeRet;
    unsigned curLargeStructOffset = 0;
    for (unsigned k = 0; k < methInfo->locals.numArgs; k++)
        // TODO: if this optimization succeeds, the switch below on localType
        // can become much simpler.
        m_localDescs[k].m_offset = 0;
        CorInfoTypeWithMod localTypWithMod = comp->getArgType(&methInfo->locals, localsPtr, &vcTypeRet);
        // If the local vars is a pinning reference, set the bit to indicate this.
        if ((localTypWithMod & CORINFO_TYPE_MOD_PINNED) != 0)
        CorInfoType localType = strip(localTypWithMod);
        case CORINFO_TYPE_VALUECLASS:
        case CORINFO_TYPE_REFANY: // Just a special case: vcTypeRet is handle for TypedReference in this case...
            InterpreterType tp = InterpreterType(comp, vcTypeRet);
            unsigned size = static_cast<unsigned>(tp.Size(comp));
            // Every local occupies at least one pointer-sized slot.
            size = max(size, sizeof(void*));
            m_localDescs[k].m_type = tp;
            // Large structs live in a separate contiguous area; record this
            // local's offset in that area and grow it.
            if (tp.IsLargeStruct(comp))
                m_localDescs[k].m_offset = curLargeStructOffset;
                curLargeStructOffset += size;
        case CORINFO_TYPE_VAR:
            NYI_INTERP("argument of generic parameter type");  // Should not happen;
            // Everything else is encoded directly from the CorInfoType.
            m_localDescs[k].m_type = InterpreterType(localType);
        m_localDescs[k].m_typeStackNormal = m_localDescs[k].m_type.StackNormalize();
        localsPtr = comp->getArgNext(localsPtr);
    // Total size of the large-struct area for this method's locals.
    m_largeStructLocalSize = curLargeStructOffset;
// Fills in m_argDescs for every argument (explicit and implicit) of the method:
// its interpreter type, its stack-normalized type, its offset when invoked via
// the native calling convention (argOffsets_), and its offset when the
// interpreter is invoked directly (computed here via ArgSlotEndiannessFixup).
//   comp        - JIT/EE interface for type queries.
//   methInfo    - method description; supplies the signature walked here.
//   argOffsets_ - per-argument native-convention offsets, in canonical order.
void InterpreterMethodInfo::InitArgInfo(CEEInfo* comp, CORINFO_METHOD_INFO* methInfo, short* argOffsets_)
    unsigned numSigArgsPlusThis = methInfo->args.numArgs;
    if (GetFlag<Flag_hasThisArg>())
        numSigArgsPlusThis++;

    // The m_argDescs array is constructed in the following "canonical" order:
    // 2. signature arguments
    // 4. type parameter -or- vararg cookie
    // argOffsets_ is passed in this order, and serves to establish the offsets to arguments
    // when the interpreter is invoked using the native calling convention (i.e., not directly).
    // When the interpreter is invoked directly, the arguments will appear in the same order
    // and form as arguments passed to MethodDesc::CallDescr().  This ordering is as follows:
    // 3. signature arguments
    // MethodDesc::CallDescr() does not support generic parameters or varargs functions.
    _ASSERTE_MSG((methInfo->args.callConv & (CORINFO_CALLCONV_EXPLICITTHIS)) == 0,
                 "Don't yet handle EXPLICITTHIS calling convention modifier.");
    switch (methInfo->args.callConv & CORINFO_CALLCONV_MASK)
    case CORINFO_CALLCONV_DEFAULT:
    case CORINFO_CALLCONV_VARARG:
        // Running cursor over the direct-call argument slots.
        ARG_SLOT* directOffset = NULL;
        short directRetBuffOffset = 0;
        short directVarArgOffset = 0;
        short directTypeParamOffset = 0;

        // If there's a "this" argument, handle it.
        if (GetFlag<Flag_hasThisArg>())
            m_argDescs[k].m_type = InterpreterType(CORINFO_TYPE_UNDEF);
#ifdef FEATURE_INSTANTIATINGSTUB_AS_IL
            MethodDesc *pMD = reinterpret_cast<MethodDesc*>(methInfo->ftn);
            // The signature of the ILStubs may be misleading.
            // If a StubTarget is ever set, we'll find the correct type by inspecting the
            // target, rather than the stub.
            if (pMD->AsDynamicMethodDesc()->IsUnboxingILStub())
                // This is an unboxing stub where the thisptr is passed as a boxed VT.
                m_argDescs[k].m_type = InterpreterType(CORINFO_TYPE_CLASS);
                MethodDesc *pTargetMD = pMD->AsDynamicMethodDesc()->GetILStubResolver()->GetStubTargetMethodDesc();
                if (pTargetMD != NULL)
                    // Value-type target => "this" is a byref to the unboxed
                    // value; reference-type target => object pointer.
                    if (pTargetMD->GetMethodTable()->IsValueType())
                        m_argDescs[k].m_type = InterpreterType(CORINFO_TYPE_BYREF);
                        m_argDescs[k].m_type = InterpreterType(CORINFO_TYPE_CLASS);
#endif // FEATURE_INSTANTIATINGSTUB_AS_IL
            // Not resolved via a stub target: classify from the declaring class.
            if (m_argDescs[k].m_type == InterpreterType(CORINFO_TYPE_UNDEF))
                CORINFO_CLASS_HANDLE cls = comp->getMethodClass(methInfo->ftn);
                DWORD attribs = comp->getClassAttribs(cls);
                if (attribs & CORINFO_FLG_VALUECLASS)
                    m_argDescs[k].m_type = InterpreterType(CORINFO_TYPE_BYREF);
                    m_argDescs[k].m_type = InterpreterType(CORINFO_TYPE_CLASS);
            m_argDescs[k].m_typeStackNormal = m_argDescs[k].m_type;
            m_argDescs[k].m_nativeOffset = argOffsets_[k];
            m_argDescs[k].m_directOffset = static_cast<short>(reinterpret_cast<intptr_t>(ArgSlotEndiannessFixup(directOffset, sizeof(void*))));

        // If there is a return buffer, it will appear next in the arguments list for a direct call.
        // Reserve its offset now, for use after the explicit arguments.
#if defined(HOST_ARM)
        // On ARM, for direct calls we always treat HFA return types as having ret buffs.
        // So figure out if we have an HFA return type.
            methInfo->args.retType == CORINFO_TYPE_VALUECLASS
            && (comp->getHFAType(methInfo->args.retTypeClass) != CORINFO_HFA_ELEM_NONE)
            && methInfo->args.getCallConv() != CORINFO_CALLCONV_VARARG;
#endif // defined(HOST_ARM)
        if (GetFlag<Flag_hasRetBuffArg>()
#if defined(HOST_ARM)
            // On ARM, for direct calls we always treat HFA return types as having ret buffs.
#endif // defined(HOST_ARM)
            directRetBuffOffset = static_cast<short>(reinterpret_cast<intptr_t>(ArgSlotEndiannessFixup(directOffset, sizeof(void*))));
#if defined(HOST_AMD64)
        if (GetFlag<Flag_isVarArg>())
            directVarArgOffset = static_cast<short>(reinterpret_cast<intptr_t>(ArgSlotEndiannessFixup(directOffset, sizeof(void*))));
        if (GetFlag<Flag_hasGenericsContextArg>())
            directTypeParamOffset = static_cast<short>(reinterpret_cast<intptr_t>(ArgSlotEndiannessFixup(directOffset, sizeof(void*))));

        // Now record the argument types for the rest of the arguments.
        CORINFO_CLASS_HANDLE vcTypeRet;
        CORINFO_ARG_LIST_HANDLE argPtr = methInfo->args.args;
        for (; k < numSigArgsPlusThis; k++)
            CorInfoTypeWithMod argTypWithMod = comp->getArgType(&methInfo->args, argPtr, &vcTypeRet);
            CorInfoType argType = strip(argTypWithMod);
            case CORINFO_TYPE_VALUECLASS:
            case CORINFO_TYPE_REFANY: // Just a special case: vcTypeRet is handle for TypedReference in this case...
                it = InterpreterType(comp, vcTypeRet);
                // Everything else is just encoded as a shifted CorInfoType.
                it = InterpreterType(argType);
            m_argDescs[k].m_type = it;
            m_argDescs[k].m_typeStackNormal = it.StackNormalize();
            m_argDescs[k].m_nativeOffset = argOffsets_[k];
            // When invoking the interpreter directly, large value types are always passed by reference.
            if (it.IsLargeStruct(comp))
                m_argDescs[k].m_directOffset = static_cast<short>(reinterpret_cast<intptr_t>(ArgSlotEndiannessFixup(directOffset, sizeof(void*))));
                m_argDescs[k].m_directOffset = static_cast<short>(reinterpret_cast<intptr_t>(ArgSlotEndiannessFixup(directOffset, it.Size(comp))));
            argPtr = comp->getArgNext(argPtr);

        if (GetFlag<Flag_hasRetBuffArg>())
            // The return buffer is a managed pointer (byref) to the caller's buffer.
            m_argDescs[k].m_type = InterpreterType(CORINFO_TYPE_BYREF);
            m_argDescs[k].m_typeStackNormal = m_argDescs[k].m_type;
            m_argDescs[k].m_nativeOffset = argOffsets_[k];
            m_argDescs[k].m_directOffset = directRetBuffOffset;
        if (GetFlag<Flag_hasGenericsContextArg>())
            // The generic type context is an unmanaged pointer (native int).
            m_argDescs[k].m_type = InterpreterType(CORINFO_TYPE_NATIVEINT);
            m_argDescs[k].m_typeStackNormal = m_argDescs[k].m_type;
            m_argDescs[k].m_nativeOffset = argOffsets_[k];
            m_argDescs[k].m_directOffset = directTypeParamOffset;
        if (GetFlag<Flag_isVarArg>())
            // The vararg cookie is an unmanaged pointer (native int).
            m_argDescs[k].m_type = InterpreterType(CORINFO_TYPE_NATIVEINT);
            m_argDescs[k].m_typeStackNormal = m_argDescs[k].m_type;
            m_argDescs[k].m_nativeOffset = argOffsets_[k];
            m_argDescs[k].m_directOffset = directVarArgOffset;

    // Unmanaged / unsupported calling conventions: not implemented.
    case IMAGE_CEE_CS_CALLCONV_C:
        NYI_INTERP("InterpreterMethodInfo::InitArgInfo -- IMAGE_CEE_CS_CALLCONV_C");
    case IMAGE_CEE_CS_CALLCONV_STDCALL:
        NYI_INTERP("InterpreterMethodInfo::InitArgInfo -- IMAGE_CEE_CS_CALLCONV_STDCALL");
    case IMAGE_CEE_CS_CALLCONV_THISCALL:
        NYI_INTERP("InterpreterMethodInfo::InitArgInfo -- IMAGE_CEE_CS_CALLCONV_THISCALL");
    case IMAGE_CEE_CS_CALLCONV_FASTCALL:
        NYI_INTERP("InterpreterMethodInfo::InitArgInfo -- IMAGE_CEE_CS_CALLCONV_FASTCALL");
    case CORINFO_CALLCONV_FIELD:
        NYI_INTERP("InterpreterMethodInfo::InitArgInfo -- CORINFO_CALLCONV_FIELD");
    case CORINFO_CALLCONV_LOCAL_SIG:
        NYI_INTERP("InterpreterMethodInfo::InitArgInfo -- CORINFO_CALLCONV_LOCAL_SIG");
    case CORINFO_CALLCONV_PROPERTY:
        NYI_INTERP("InterpreterMethodInfo::InitArgInfo -- CORINFO_CALLCONV_PROPERTY");
    case CORINFO_CALLCONV_UNMANAGED:
        NYI_INTERP("InterpreterMethodInfo::InitArgInfo -- CORINFO_CALLCONV_UNMANAGED");
    case CORINFO_CALLCONV_NATIVEVARARG:
        NYI_INTERP("InterpreterMethodInfo::InitArgInfo -- CORINFO_CALLCONV_NATIVEVARARG");
        _ASSERTE_ALL_BUILDS(false); // shouldn't get here
435 InterpreterMethodInfo::~InterpreterMethodInfo()
437 if (m_methodCache != NULL)
439 delete reinterpret_cast<ILOffsetToItemCache*>(m_methodCache);
443 void InterpreterMethodInfo::AllocPinningBitsIfNeeded()
445 if (m_localIsPinningRefBits != NULL)
448 unsigned numChars = (m_numLocals + 7) / 8;
449 m_localIsPinningRefBits = new char[numChars];
450 for (unsigned i = 0; i < numChars; i++)
452 m_localIsPinningRefBits[i] = char(0);
457 void InterpreterMethodInfo::SetPinningBit(unsigned locNum)
459 _ASSERTE_MSG(locNum < m_numLocals, "Precondition");
460 AllocPinningBitsIfNeeded();
462 unsigned ind = locNum / 8;
463 unsigned bitNum = locNum - (ind * 8);
464 m_localIsPinningRefBits[ind] |= (1 << bitNum);
467 bool InterpreterMethodInfo::GetPinningBit(unsigned locNum)
469 _ASSERTE_MSG(locNum < m_numLocals, "Precondition");
470 if (m_localIsPinningRefBits == NULL)
473 unsigned ind = locNum / 8;
474 unsigned bitNum = locNum - (ind * 8);
475 return (m_localIsPinningRefBits[ind] & (1 << bitNum)) != 0;
// Records placement (register vs. caller stack) and offset for one integer
// argument in the per-architecture ArgState bookkeeping.
//   canonIndex   - argument index in canonical order (indexes argIsReg/argOffsets).
//   numSlots     - number of pointer-sized slots the argument occupies.
//   noReg        - force stack placement even if registers remain.
//   twoSlotAlign - require 8-byte (two-slot) alignment (ARM model only).
void Interpreter::ArgState::AddArg(unsigned canonIndex, short numSlots, bool noReg, bool twoSlotAlign)
#if defined(HOST_AMD64)
    // AMD64 has its own placement routine (register/stack split differs per ABI).
    _ASSERTE(!twoSlotAlign);
    AddArgAmd64(canonIndex, numSlots, /*isFloatingType*/false);
#if defined(HOST_X86) || defined(HOST_ARM64)
    _ASSERTE(!twoSlotAlign); // Shouldn't use this flag on x86 (it wouldn't work right in the stack, at least).
    // If the argument requires two-slot alignment, make sure we have it. This is the
    // ARM model: both in regs and on the stack.
    if (!noReg && numRegArgs < NumberOfIntegerRegArgs())
        // Odd register position: skip one to realign (body elided from this view).
        if ((numRegArgs % 2) != 0)
        // Odd stack position: pad one slot to realign.
        if ((callerArgStackSlots % 2) != 0)
            callerArgStackSlots++;
#if defined(HOST_ARM64)
    // On ARM64 we're not going to place an argument 'partially' on the stack
    // if all slots fits into registers, they go into registers, otherwise they go into stack.
    if (!noReg && numRegArgs+numSlots <= NumberOfIntegerRegArgs())
    if (!noReg && numRegArgs < NumberOfIntegerRegArgs())
        // Register placement: offset is the slot position within the pushed regs.
        argIsReg[canonIndex] = ARS_IntReg;
        argOffsets[canonIndex] = numRegArgs * sizeof(void*);
        numRegArgs += numSlots;
        // If we overflowed the regs, we consume some stack arg space.
        if (numRegArgs > NumberOfIntegerRegArgs())
            callerArgStackSlots += (numRegArgs - NumberOfIntegerRegArgs());
#if defined(HOST_X86)
        // On X86, stack args are pushed in order. We will add the total size of the arguments to this offset,
        // so we set this to a negative number relative to the SP before the first arg push.
        callerArgStackSlots += numSlots;
        ClrSafeInt<short> offset(-callerArgStackSlots);
#elif defined(HOST_ARM) || defined(HOST_ARM64)
        // On ARM, args are pushed in *reverse* order. So we will create an offset relative to the address
        // of the first stack arg; later, we will add the size of the non-stack arguments.
        ClrSafeInt<short> offset(callerArgStackSlots);
#elif defined(HOST_LOONGARCH64)
        callerArgStackSlots += numSlots;
        ClrSafeInt<short> offset(-callerArgStackSlots);
#elif defined(HOST_RISCV64)
        callerArgStackSlots += numSlots;
        ClrSafeInt<short> offset(-callerArgStackSlots);
        // Scale the slot count to bytes, guarding against short overflow.
        offset *= static_cast<short>(sizeof(void*));
        _ASSERTE(!offset.IsOverflow());
        argOffsets[canonIndex] = offset.Value();
#if defined(HOST_ARM) || defined(HOST_ARM64)
        // ARM variants account for the consumed stack slots after recording the offset.
        callerArgStackSlots += numSlots;
#endif // !HOST_AMD64
#if defined(HOST_AMD64)

#if defined(UNIX_AMD64_ABI)
// System V AMD64 placement: FP args use dedicated XMM slots, integer args use
// up to NumberOfIntegerRegArgs() GP registers (structs of <= 2 slots may be
// split across them); everything else spills to caller stack slots.
//   canonIndex - canonical argument index; numSlots - pointer-sized slots;
//   isFloatingType - place in an FP register slot if one is free.
void Interpreter::ArgState::AddArgAmd64(unsigned canonIndex, unsigned short numSlots, bool isFloatingType)
    // Combined register-slot position (FP + int) used as the recorded offset.
    int regSlots = numFPRegArgSlots + numRegArgs;
    // NOTE(review): the guard uses 'numFPRegArgSlots + 1 < MaxNumFPRegArgSlots';
    // verify this is not off by one vs. 'numFPRegArgSlots < MaxNumFPRegArgSlots'.
    if (isFloatingType && numFPRegArgSlots + 1 < MaxNumFPRegArgSlots)
        _ASSERTE(numSlots == 1);
        argIsReg[canonIndex] = ARS_FloatReg;
        argOffsets[canonIndex] = regSlots * sizeof(void*);
        fpArgsUsed |= (0x1 << regSlots);
        numFPRegArgSlots += 1;
    // Small (1- or 2-slot) aggregates/ints that still fit in the remaining GP regs.
    else if (numSlots < 3 && (numRegArgs + numSlots <= NumberOfIntegerRegArgs()))
        argIsReg[canonIndex] = ARS_IntReg;
        argOffsets[canonIndex] = regSlots * sizeof(void*);
        numRegArgs += numSlots;
        // Spill to caller stack: offset is the current stack-slot cursor in bytes.
        argIsReg[canonIndex] = ARS_NotReg;
        ClrSafeInt<short> offset(callerArgStackSlots * sizeof(void*));
        _ASSERTE(!offset.IsOverflow());
        argOffsets[canonIndex] = offset.Value();
        callerArgStackSlots += numSlots;

// Windows AMD64 calling convention allows any type that can be contained in 64 bits to be passed in registers,
// if not contained or they are of a size not a power of 2, then they are passed by reference on the stack.
// RCX, RDX, R8, R9 are the int arg registers. XMM0-3 overlap with the integer registers and are used
// for floating point arguments.
void Interpreter::ArgState::AddArgAmd64(unsigned canonIndex, unsigned short numSlots, bool isFloatingType)
    // If floating type and there are slots use a float reg slot.
    if (isFloatingType && (numFPRegArgSlots < MaxNumFPRegArgSlots))
        _ASSERTE(numSlots == 1);
        argIsReg[canonIndex] = ARS_FloatReg;
        argOffsets[canonIndex] = numFPRegArgSlots * sizeof(void*);
        // NOTE(review): shift is '(numFPRegArgSlots + 1)' here but the UNIX
        // variant shifts by the slot index itself — confirm which is intended.
        fpArgsUsed |= (0x1 << (numFPRegArgSlots + 1));
        numFPRegArgSlots += 1;
        numRegArgs += 1; // Increment int reg count due to shadowing.
    // If we have an integer/aligned-struct arg or a reference of a struct that got copied on
    // to the stack, it would go into a register or a stack slot.
    if (numRegArgs != NumberOfIntegerRegArgs())
        argIsReg[canonIndex] = ARS_IntReg;
        argOffsets[canonIndex] = numRegArgs * sizeof(void*);
        numFPRegArgSlots += 1; // Increment FP reg count due to shadowing.
        // No integer registers remain: spill to caller stack.
        argIsReg[canonIndex] = ARS_NotReg;
        ClrSafeInt<short> offset(callerArgStackSlots * sizeof(void*));
        _ASSERTE(!offset.IsOverflow());
        argOffsets[canonIndex] = offset.Value();
        callerArgStackSlots += 1;
#endif //UNIX_AMD64_ABI
// Records placement for one floating-point argument in the per-architecture
// FP register-slot bookkeeping (fpArgsUsed bit mask, numFPRegArgSlots count).
//   canonIndex   - canonical argument index (indexes argIsReg/argOffsets).
//   numSlots     - number of FP register slots the argument occupies.
//   twoSlotAlign - require double (two-slot) alignment; ARM only.
void Interpreter::ArgState::AddFPArg(unsigned canonIndex, unsigned short numSlots, bool twoSlotAlign)
#if defined(HOST_AMD64)
    // AMD64 delegates to the shared int/FP placement routine.
    _ASSERTE(!twoSlotAlign);
    _ASSERTE(numSlots == 1);
    AddArgAmd64(canonIndex, numSlots, /*isFloatingType*/ true);
#elif defined(HOST_X86)
    _ASSERTE(false);  // Don't call this on x86; we pass all FP on the stack.
#elif defined(HOST_ARM)
    // We require "numSlots" alignment.
    _ASSERTE(numFPRegArgSlots + numSlots <= MaxNumFPRegArgSlots);
    argIsReg[canonIndex] = ARS_FloatReg;
    // Two-slot-aligned (double-sized) case: align, then claim slot pairs.
    // If we require two slot alignment, the number of slots must be a multiple of two.
    _ASSERTE((numSlots % 2) == 0);
    // Skip a slot if necessary.
    if ((numFPRegArgSlots % 2) != 0)
    // We always use new slots for two slot aligned args precision...
    argOffsets[canonIndex] = numFPRegArgSlots * sizeof(void*);
    for (unsigned short i = 0; i < numSlots/2; i++)
        fpArgsUsed |= (0x3 << (numFPRegArgSlots + i));
    numFPRegArgSlots += numSlots;

    // A single-precision (float) argument. We must do "back-filling" where possible, searching
    // for previous unused registers.
    while (slot < 32 && (fpArgsUsed & (1 << slot))) slot++;
    _ASSERTE(slot < 32); // Search succeeded.
    _ASSERTE(slot <= numFPRegArgSlots);  // No bits at or above numFPRegArgSlots are set (regs used).
    argOffsets[canonIndex] = slot * sizeof(void*);
    fpArgsUsed |= (0x1 << slot);
    if (slot == numFPRegArgSlots)
        numFPRegArgSlots += numSlots;

    // We can always allocate at after the last used slot.
    // NOTE(review): indexes argOffsets by numFPRegArgSlots rather than
    // canonIndex, unlike every other branch — confirm this is intentional.
    argOffsets[numFPRegArgSlots] = numFPRegArgSlots * sizeof(void*);
    for (unsigned i = 0; i < numSlots; i++)
        fpArgsUsed |= (0x1 << (numFPRegArgSlots + i));
    numFPRegArgSlots += numSlots;
#elif defined(HOST_ARM64)
    // ARM64: no alignment games; claim the next numSlots FP slots.
    _ASSERTE(numFPRegArgSlots + numSlots <= MaxNumFPRegArgSlots);
    _ASSERTE(!twoSlotAlign);
    argIsReg[canonIndex] = ARS_FloatReg;
    argOffsets[canonIndex] = numFPRegArgSlots * sizeof(void*);
    for (unsigned i = 0; i < numSlots; i++)
        fpArgsUsed |= (0x1 << (numFPRegArgSlots + i));
    numFPRegArgSlots += numSlots;
#elif defined(HOST_LOONGARCH64)
    assert(numFPRegArgSlots + numSlots <= MaxNumFPRegArgSlots);
    assert(!twoSlotAlign);
    argIsReg[canonIndex] = ARS_FloatReg;
    argOffsets[canonIndex] = numFPRegArgSlots * sizeof(void*);
    for (unsigned i = 0; i < numSlots; i++)
        fpArgsUsed |= (0x1 << (numFPRegArgSlots + i));
    numFPRegArgSlots += numSlots;
#elif defined(HOST_RISCV64)
    assert(numFPRegArgSlots + numSlots <= MaxNumFPRegArgSlots);
    assert(!twoSlotAlign);
    argIsReg[canonIndex] = ARS_FloatReg;
    argOffsets[canonIndex] = numFPRegArgSlots * sizeof(void*);
    for (unsigned i = 0; i < numSlots; i++)
        fpArgsUsed |= (0x1 << (numFPRegArgSlots + i));
    numFPRegArgSlots += numSlots;
#error "Unsupported architecture"
724 CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp,
725 CORINFO_METHOD_INFO* info,
726 /*OUT*/ BYTE **nativeEntry,
727 /*OUT*/ ULONG *nativeSizeOfCode,
728 InterpreterMethodInfo** ppInterpMethodInfo,
732 // First, ensure that the compiler-specific statics are initialized.
735 InitializeCompilerStatics(comp);
738 // Next, use switches and IL scanning to determine whether to interpret this method.
742 #define TRACE_SKIPPED(cls, meth, reason) \
743 if (s_DumpInterpreterStubsFlag.val(CLRConfig::INTERNAL_DumpInterpreterStubs)) { \
744 fprintf(GetLogFile(), "Skipping %s:%s (%s).\n", cls, meth, reason); \
747 #define TRACE_SKIPPED(cls, meth, reason)
751 // If jmpCall, we only need to do computations involving method info.
755 const char* methName = comp->getMethodNameFromMetadata(info->ftn, &clsName, NULL, NULL);
756 if ( !s_InterpretMeths.contains(methName, clsName, info->args.pSig)
757 || s_InterpretMethsExclude.contains(methName, clsName, info->args.pSig))
759 TRACE_SKIPPED(clsName, methName, "not in set of methods to interpret");
760 return CORJIT_SKIPPED;
763 unsigned methHash = comp->getMethodHash(info->ftn);
764 if ( methHash < s_InterpretMethHashMin.val(CLRConfig::INTERNAL_InterpreterMethHashMin)
765 || methHash > s_InterpretMethHashMax.val(CLRConfig::INTERNAL_InterpreterMethHashMax))
767 TRACE_SKIPPED(clsName, methName, "hash not within range to interpret");
768 return CORJIT_SKIPPED;
771 MethodDesc* pMD = reinterpret_cast<MethodDesc*>(info->ftn);
776 TRACE_SKIPPED(clsName, methName, "interop stubs not supported");
777 return CORJIT_SKIPPED;
780 #endif // !INTERP_ILSTUBS
782 if (!s_InterpreterDoLoopMethods && MethodMayHaveLoop(info->ILCode, info->ILCodeSize))
784 TRACE_SKIPPED(clsName, methName, "has loop, not interpreting loop methods.");
785 return CORJIT_SKIPPED;
788 s_interpreterStubNum++;
791 if (s_interpreterStubNum < s_InterpreterStubMin.val(CLRConfig::INTERNAL_InterpreterStubMin)
792 || s_interpreterStubNum > s_InterpreterStubMax.val(CLRConfig::INTERNAL_InterpreterStubMax))
794 TRACE_SKIPPED(clsName, methName, "stub num not in range, not interpreting.");
795 return CORJIT_SKIPPED;
798 if (s_DumpInterpreterStubsFlag.val(CLRConfig::INTERNAL_DumpInterpreterStubs))
800 unsigned hash = comp->getMethodHash(info->ftn);
801 fprintf(GetLogFile(), "Generating interpretation stub (# %d = 0x%x, hash = 0x%x) for %s:%s.\n",
802 s_interpreterStubNum, s_interpreterStubNum, hash, clsName, methName);
803 fflush(GetLogFile());
809 // Finally, generate an interpreter entry-point stub.
812 // @TODO: this structure clearly needs some sort of lifetime management. It is the moral equivalent
813 // of compiled code, and should be associated with an app domain. In addition, when I get to it, we should
814 // delete it when/if we actually compile the method. (Actually, that's complicated, since there may be
815 // VSD stubs still bound to the interpreter stub. The check there will get to the jitted code, but we want
816 // to eventually clean those up at some safe point...)
817 InterpreterMethodInfo* interpMethInfo = new InterpreterMethodInfo(comp, info);
818 if (ppInterpMethodInfo != nullptr)
820 *ppInterpMethodInfo = interpMethInfo;
822 interpMethInfo->m_stubNum = s_interpreterStubNum;
823 MethodDesc* methodDesc = reinterpret_cast<MethodDesc*>(info->ftn);
826 interpMethInfo = RecordInterpreterMethodInfoForMethodHandle(info->ftn, interpMethInfo);
829 #if FEATURE_INTERPRETER_DEADSIMPLE_OPT
831 if (IsDeadSimpleGetter(comp, methodDesc, &offsetOfLd))
833 interpMethInfo->SetFlag<InterpreterMethodInfo::Flag_methIsDeadSimpleGetter>(true);
834 if (offsetOfLd == ILOffsetOfLdFldInDeadSimpleInstanceGetterDbg)
836 interpMethInfo->SetFlag<InterpreterMethodInfo::Flag_methIsDeadSimpleGetterIsDbgForm>(true);
840 _ASSERTE(offsetOfLd == ILOffsetOfLdFldInDeadSimpleInstanceGetterOpt);
843 #endif // FEATURE_INTERPRETER_DEADSIMPLE_OPT
845 // Used to initialize the arg offset information.
848 // We assume that the stack contains (with addresses growing upwards, assuming a downwards-growing stack):
852 // [Non-reg arg <# of reg args>]
855 // Then push the register args to get:
859 // [Non-reg arg <# of reg args>]
861 // [reg arg <# of reg args>-1]
865 // Pass the address of this argument array, and the MethodDesc pointer for the method, as arguments to
868 // So the structure of the code will look like this (in the non-ILstub case):
870 #if defined(HOST_X86) || defined(HOST_AMD64)
873 // [if there are register arguments in ecx or edx, push them]
874 // ecx := addr of InterpretMethodInfo for the method to be interpreted.
875 // edx = esp /*pointer to argument structure*/
876 // call to Interpreter::InterpretMethod
877 // [if we pushed register arguments, increment esp by the right amount.]
879 // ret <n> ; where <n> is the number of argument stack slots in the call to the stub.
880 #elif defined (HOST_ARM)
884 // TODO: much of the interpreter stub code should be is shareable. In the non-IL stub case,
885 // at least, we could have a small per-method stub that puts the address of the method-specific
886 // InterpreterMethodInfo into eax, and then branches to a shared part. Probably we would want to
887 // always push all integer args on x86, as we do already on ARM. On ARM, we'd need several versions
888 // of the shared stub, for different numbers of floating point register args, cross different kinds of
889 // HFA return values. But these could still be shared, and the per-method stub would decide which of
892 // In the IL stub case, which uses eax, it would be problematic to do this sharing.
895 MethodDesc* pMD = reinterpret_cast<MethodDesc*>(info->ftn);
899 #if defined(HOST_X86) || defined(HOST_AMD64)
900 #if defined(HOST_X86)
901 sl.X86EmitPushReg(kEBP);
902 sl.X86EmitMovRegReg(kEBP, static_cast<X86Reg>(kESP_Unsafe));
904 #elif defined(HOST_ARM)
905 // On ARM we use R12 as a "scratch" register -- callee-trashed, not used
907 ThumbReg r11 = ThumbReg(11);
908 ThumbReg r12 = ThumbReg(12);
910 #elif defined(HOST_ARM64)
911 // x8 through x15 are scratch registers on ARM64.
912 IntReg x8 = IntReg(8);
913 IntReg x9 = IntReg(9);
915 #elif defined(HOST_RISCV64)
917 #error unsupported platform
921 MetaSig sig(methodDesc);
923 unsigned totalArgs = info->args.numArgs;
924 unsigned sigArgsPlusThis = totalArgs;
925 bool hasThis = false;
926 bool hasRetBuff = false;
927 bool isVarArg = false;
928 bool hasGenericsContextArg = false;
930 // Below, we will increment "totalArgs" for any of the "this" argument,
931 // a ret buff argument, and/or a generics context argument.
933 // There will be four arrays allocated below, each with this increased "totalArgs" elements:
934 // argOffsets, argIsReg, argPerm, and, later, m_argDescs.
936 // They will be indexed in the order (0-based, [] indicating optional)
938 // [this] sigArgs [retBuff] [VASigCookie] [genCtxt]
940 // We will call this "canonical order". It is architecture-independent, and
941 // does not necessarily correspond to the architecture-dependent physical order
942 // in which the registers are actually passed. (That's actually the purpose of
943 // "argPerm": to record the correspondence between canonical order and physical
944 // order.) We could have chosen any order for the first three of these, but it's
945 // simplest to let m_argDescs have all the passed IL arguments passed contiguously
946 // at the beginning, allowing it to be indexed by IL argument number.
948 int genericsContextArgIndex = 0;
949 int retBuffArgIndex = 0;
950 int vaSigCookieIndex = 0;
954 _ASSERTE(info->args.callConv & CORINFO_CALLCONV_HASTHIS);
956 totalArgs++; sigArgsPlusThis++;
959 if (methodDesc->HasRetBuffArg())
962 retBuffArgIndex = totalArgs;
966 if (sig.GetCallingConventionInfo() & CORINFO_CALLCONV_VARARG)
969 vaSigCookieIndex = totalArgs;
973 if (sig.GetCallingConventionInfo() & CORINFO_CALLCONV_PARAMTYPE)
975 _ASSERTE(info->args.callConv & CORINFO_CALLCONV_PARAMTYPE);
976 hasGenericsContextArg = true;
977 genericsContextArgIndex = totalArgs;
981 // The non-this sig args have indices starting after these.
983 // We will first encode the arg offsets as *negative* offsets from the address above the first
984 // stack arg, and later add in the total size of the stack args to get a positive offset.
985 // The first sigArgsPlusThis elements are the offsets of the IL-addressable arguments. After that,
986 // there may be up to two more: generics context arg, if present, and return buff pointer, if present.
987 // (Note that the latter is actually passed after the "this" pointer, or else first if no "this" pointer
988 // is present. We re-arrange to preserve the easy IL-addressability.)
989 ArgState argState(totalArgs);
991 // This is the permutation that translates from an index in the argOffsets/argIsReg arrays to
992 // the platform-specific order in which the arguments are passed.
993 unsigned* argPerm = new unsigned[totalArgs];
995 // The number of register argument slots we end up pushing.
996 unsigned short regArgsFound = 0;
998 unsigned physArgIndex = 0;
1000 #if defined(HOST_ARM)
1001 // The stub linker has a weird little limitation: all stubs it's used
1002 // for on ARM push some callee-saved register, so the unwind info
1003 // code was written assuming at least one would be pushed. I don't know how to
1004 // fix it, so I'm meeting this requirement, by pushing one callee-save.
1005 #define STUB_LINK_EMIT_PROLOG_REQUIRES_CALLEE_SAVE_PUSH 1
1007 #if STUB_LINK_EMIT_PROLOG_REQUIRES_CALLEE_SAVE_PUSH
1008 const int NumberOfCalleeSaveRegsToPush = 1;
1010 const int NumberOfCalleeSaveRegsToPush = 0;
1012 // The "1" here is for the return address.
1013 const int NumberOfFixedPushes = 1 + NumberOfCalleeSaveRegsToPush;
1014 #elif defined(HOST_ARM64)
1016 const int NumberOfFixedPushes = 2;
1019 #if defined(FEATURE_HFA)
1020 #if defined(HOST_ARM) || defined(HOST_ARM64)
1021 // On ARM, a non-retBuffArg method that returns a struct type might be an HFA return. Figure
1023 unsigned HFARetTypeSize = 0;
1025 #if defined(HOST_ARM64)
1026 unsigned cHFAVars = 0;
1028 if (info->args.retType == CORINFO_TYPE_VALUECLASS
1029 && (comp->getHFAType(info->args.retTypeClass) != CORINFO_HFA_ELEM_NONE)
1030 && info->args.getCallConv() != CORINFO_CALLCONV_VARARG)
1032 HFARetTypeSize = getClassSize(info->args.retTypeClass);
1033 #if defined(HOST_ARM)
1034         // Round up to a double boundary.
1035 HFARetTypeSize = ((HFARetTypeSize+ sizeof(double) - 1) / sizeof(double)) * sizeof(double);
1036 #elif defined(HOST_ARM64)
1037 // We don't need to round it up to double. Unlike ARM, whether it's a float or a double each field will
1038 // occupy one slot. We'll handle the stack alignment in the prolog where we have all the information about
1039 // what is going to be pushed on the stack.
1040 // Instead on ARM64 we'll need to know how many slots we'll need.
1041 // for instance a VT with two float fields will have the same size as a VT with 1 double field. (ARM64TODO: Verify it)
1042 // It works on ARM because the overlapping layout of the floating point registers
1043 // but it won't work on ARM64.
1044 cHFAVars = (comp->getHFAType(info->args.retTypeClass) == CORINFO_HFA_ELEM_FLOAT) ? HFARetTypeSize/sizeof(float) : HFARetTypeSize/sizeof(double);
1048 #endif // defined(FEATURE_HFA)
1050 _ASSERTE_MSG((info->args.callConv & (CORINFO_CALLCONV_EXPLICITTHIS)) == 0,
1051 "Don't yet handle EXPLICITTHIS calling convention modifier.");
1053 switch (info->args.callConv & CORINFO_CALLCONV_MASK)
1055 case CORINFO_CALLCONV_DEFAULT:
1056 case CORINFO_CALLCONV_VARARG:
1058 unsigned firstSigArgIndex = 0;
1061 argPerm[0] = physArgIndex; physArgIndex++;
1068 argPerm[retBuffArgIndex] = physArgIndex; physArgIndex++;
1069 argState.AddArg(retBuffArgIndex);
1074 argPerm[vaSigCookieIndex] = physArgIndex; physArgIndex++;
1075 interpMethInfo->m_varArgHandleArgNum = vaSigCookieIndex;
1076 argState.AddArg(vaSigCookieIndex);
1079 #if defined(HOST_ARM) || defined(HOST_AMD64) || defined(HOST_ARM64) || defined(HOST_RISCV64)
1080 // Generics context comes before args on ARM. Would be better if I factored this out as a call,
1081 // to avoid large swatches of duplicate code.
1082 if (hasGenericsContextArg)
1084 argPerm[genericsContextArgIndex] = physArgIndex; physArgIndex++;
1085 argState.AddArg(genericsContextArgIndex);
1087 #endif // HOST_ARM || HOST_AMD64 || HOST_ARM64 || HOST_RISCV64
1089 CORINFO_ARG_LIST_HANDLE argPtr = info->args.args;
1090         // Some arguments have been passed in registers, some in memory. We must generate code that
1091 // moves the register arguments to memory, and determines a pointer into the stack from which all
1092 // the arguments can be accessed, according to the offsets in "argOffsets."
1094 // In the first pass over the arguments, we will label and count the register arguments, and
1095 // initialize entries in "argOffsets" for the non-register arguments -- relative to the SP at the
1096 // time of the call. Then when we have counted the number of register arguments, we will adjust
1097 // the offsets for the non-register arguments to account for those. Then, in the second pass, we
1098 // will push the register arguments on the stack, and capture the final stack pointer value as
1099 // the argument vector pointer.
1100 CORINFO_CLASS_HANDLE vcTypeRet;
1101 // This iteration starts at the first signature argument, and iterates over all the
1102 // canonical indices for the signature arguments.
1103 for (unsigned k = firstSigArgIndex; k < sigArgsPlusThis; k++)
1105 argPerm[k] = physArgIndex; physArgIndex++;
1107 CorInfoTypeWithMod argTypWithMod = comp->getArgType(&info->args, argPtr, &vcTypeRet);
1108 CorInfoType argType = strip(argTypWithMod);
1111 case CORINFO_TYPE_UNDEF:
1112 case CORINFO_TYPE_VOID:
1113 case CORINFO_TYPE_VAR:
1114 _ASSERTE_ALL_BUILDS(false); // Should not happen;
1117 // One integer slot arguments:
1118 case CORINFO_TYPE_BOOL:
1119 case CORINFO_TYPE_CHAR:
1120 case CORINFO_TYPE_BYTE:
1121 case CORINFO_TYPE_UBYTE:
1122 case CORINFO_TYPE_SHORT:
1123 case CORINFO_TYPE_USHORT:
1124 case CORINFO_TYPE_INT:
1125 case CORINFO_TYPE_UINT:
1126 case CORINFO_TYPE_NATIVEINT:
1127 case CORINFO_TYPE_NATIVEUINT:
1128 case CORINFO_TYPE_BYREF:
1129 case CORINFO_TYPE_CLASS:
1130 case CORINFO_TYPE_STRING:
1131 case CORINFO_TYPE_PTR:
1135 // Two integer slot arguments.
1136 case CORINFO_TYPE_LONG:
1137 case CORINFO_TYPE_ULONG:
1138 #if defined(HOST_X86)
1139 // Longs are always passed on the stack -- with no obvious alignment.
1140 argState.AddArg(k, 2, /*noReg*/true);
1141 #elif defined(HOST_ARM)
1142 // LONGS have 2-reg alignment; inc reg if necessary.
1143 argState.AddArg(k, 2, /*noReg*/false, /*twoSlotAlign*/true);
1144 #elif defined(HOST_AMD64) || defined(HOST_ARM64) || defined(HOST_LOONGARCH64) || defined(HOST_RISCV64)
1147 #error unknown platform
1151 // One float slot args:
1152 case CORINFO_TYPE_FLOAT:
1153 #if defined(HOST_X86)
1154 argState.AddArg(k, 1, /*noReg*/true);
1155 #elif defined(HOST_ARM)
1156 argState.AddFPArg(k, 1, /*twoSlotAlign*/false);
1157 #elif defined(HOST_AMD64) || defined(HOST_ARM64) || defined(HOST_LOONGARCH64) || defined(HOST_RISCV64)
1158 argState.AddFPArg(k, 1, false);
1160 #error unknown platform
1164 // Two float slot args
1165 case CORINFO_TYPE_DOUBLE:
1166 #if defined(HOST_X86)
1167 argState.AddArg(k, 2, /*noReg*/true);
1168 #elif defined(HOST_ARM)
1169 argState.AddFPArg(k, 2, /*twoSlotAlign*/true);
1170 #elif defined(HOST_AMD64) || defined(HOST_ARM64) || defined(HOST_LOONGARCH64) || defined(HOST_RISCV64)
1171 argState.AddFPArg(k, 1, false);
1173 #error unknown platform
1177 // Value class args:
1178 case CORINFO_TYPE_VALUECLASS:
1179 case CORINFO_TYPE_REFANY:
1181 unsigned sz = getClassSize(vcTypeRet);
1182 unsigned szSlots = max(1, sz / sizeof(void*));
1183 #if defined(HOST_X86)
1184 argState.AddArg(k, static_cast<short>(szSlots), /*noReg*/true);
1185 #elif defined(HOST_AMD64)
1186 argState.AddArg(k, static_cast<short>(szSlots));
1187 #elif defined(HOST_ARM) || defined(HOST_ARM64)
1188 // TODO: handle Vector64, Vector128 types
1189 CorInfoHFAElemType hfaType = comp->getHFAType(vcTypeRet);
1190 if (CorInfoTypeIsFloatingPoint(hfaType))
1192 argState.AddFPArg(k, szSlots,
1193 #if defined(HOST_ARM)
1194 /*twoSlotAlign*/ (hfaType == CORINFO_HFA_ELEM_DOUBLE)
1195 #elif defined(HOST_ARM64)
1196 /*twoSlotAlign*/ false // unlike ARM32 FP args always consume 1 slot on ARM64
1202 unsigned align = comp->getClassAlignmentRequirement(vcTypeRet, FALSE);
1203 argState.AddArg(k, static_cast<short>(szSlots), /*noReg*/false,
1204 #if defined(HOST_ARM)
1205 /*twoSlotAlign*/ (align == 8)
1206 #elif defined(HOST_ARM64)
1207 /*twoSlotAlign*/ false
1211 #elif defined(HOST_LOONGARCH64)
1212 argState.AddArg(k, static_cast<short>(szSlots));
1213 #elif defined(HOST_RISCV64)
1214 argState.AddArg(k, static_cast<short>(szSlots));
1216 #error unknown platform
1223 _ASSERTE_MSG(false, "should not reach here, unknown arg type");
1225 argPtr = comp->getArgNext(argPtr);
1228 #if defined(HOST_X86)
1229 // Generics context comes last on HOST_X86. Would be better if I factored this out as a call,
1230 // to avoid large swatches of duplicate code.
1231 if (hasGenericsContextArg)
1233 argPerm[genericsContextArgIndex] = physArgIndex; physArgIndex++;
1234 argState.AddArg(genericsContextArgIndex);
1237 // Now we have counted the number of register arguments, so we can update the offsets for the
1238 // non-register arguments. "+ 2" below is to account for the return address from the call, and
1240 unsigned short stackArgBaseOffset = (argState.numRegArgs + 2 + argState.callerArgStackSlots) * sizeof(void*);
1241 unsigned intRegArgBaseOffset = 0;
1243 #elif defined(HOST_ARM)
1245 // We're choosing to always push all arg regs on ARM -- this is the only option
1246 // that ThumbEmitProlog currently gives.
1247 argState.numRegArgs = 4;
1249 // On ARM, we push the (integer) arg regs before we push the return address, so we don't add an
1250 // extra constant. And the offset is the address of the last pushed argument, which is the first
1251 // stack argument in signature order.
1253 // Round up to a double boundary...
1254 unsigned fpStackSlots = ((argState.numFPRegArgSlots + 1) / 2) * 2;
1255 unsigned intRegArgBaseOffset = (fpStackSlots + NumberOfFixedPushes) * sizeof(void*);
1256 unsigned short stackArgBaseOffset = intRegArgBaseOffset + (argState.numRegArgs) * sizeof(void*);
1257 #elif defined(HOST_ARM64)
1259 // See StubLinkerCPU::EmitProlog for the layout of the stack
1260 unsigned intRegArgBaseOffset = (argState.numFPRegArgSlots) * sizeof(void*);
1261 unsigned short stackArgBaseOffset = (unsigned short) ((argState.numRegArgs + argState.numFPRegArgSlots) * sizeof(void*));
1262 #elif defined(UNIX_AMD64_ABI)
1263 unsigned intRegArgBaseOffset = 0;
1264 unsigned short stackArgBaseOffset = (2 + argState.numRegArgs + argState.numFPRegArgSlots) * sizeof(void*);
1265 #elif defined(HOST_AMD64)
1266 unsigned short stackArgBaseOffset = (argState.numRegArgs) * sizeof(void*);
1267 #elif defined(HOST_LOONGARCH64)
1268 // See StubLinkerCPU::EmitProlog for the layout of the stack
1269 unsigned intRegArgBaseOffset = (argState.numFPRegArgSlots) * sizeof(void*);
1270 unsigned short stackArgBaseOffset = (unsigned short) ((argState.numRegArgs + argState.numFPRegArgSlots) * sizeof(void*));
1271 #elif defined(HOST_RISCV64)
1272 unsigned intRegArgBaseOffset = (argState.numFPRegArgSlots) * sizeof(void*);
1273 unsigned short stackArgBaseOffset = (unsigned short) ((argState.numRegArgs + argState.numFPRegArgSlots) * sizeof(void*));
1275 #error unsupported platform
1278 #if defined(HOST_ARM)
1279 WORD regArgMask = 0;
1280 #endif // defined(HOST_ARM)
1281 // argPerm maps from an index into the argOffsets/argIsReg arrays to
1282 // the order that the arguments are passed.
1283 unsigned* argPermInverse = new unsigned[totalArgs];
1284 for (unsigned t = 0; t < totalArgs; t++)
1286 argPermInverse[argPerm[t]] = t;
1289 for (unsigned kk = 0; kk < totalArgs; kk++)
1291 // Let "k" be the index of the kk'th input in the argOffsets and argIsReg arrays.
1292 // To compute "k" we need to invert argPerm permutation -- determine the "k" such
1293 // that argPerm[k] == kk.
1294 unsigned k = argPermInverse[kk];
1296 _ASSERTE(k < totalArgs);
1298 if (argState.argIsReg[k] == ArgState::ARS_IntReg)
1301 // If any int reg args are used on ARM, we push them all (in ThumbEmitProlog)
1302 #if defined(HOST_X86)
1303 if (regArgsFound == 1)
1305 if (!jmpCall) { sl.X86EmitPushReg(kECX); }
1306 argState.argOffsets[k] = (argState.numRegArgs - regArgsFound)*sizeof(void*); // General form, good for general # of reg args.
1310 _ASSERTE(regArgsFound == 2);
1311 if (!jmpCall) { sl.X86EmitPushReg(kEDX); }
1312 argState.argOffsets[k] = (argState.numRegArgs - regArgsFound)*sizeof(void*);
1314 #elif defined(HOST_ARM) || defined(HOST_ARM64) || defined(UNIX_AMD64_ABI)
1315 argState.argOffsets[k] += intRegArgBaseOffset;
1316 #elif defined(HOST_AMD64)
1317 // First home the register arguments in the stack space allocated by the caller.
1318 // Refer to Stack Allocation on x64 [http://msdn.microsoft.com/en-US/library/ew5tede7(v=vs.80).aspx]
1319 X86Reg argRegs[] = { kECX, kEDX, kR8, kR9 };
1320 if (!jmpCall) { sl.X86EmitIndexRegStoreRSP(regArgsFound * sizeof(void*), argRegs[regArgsFound - 1]); }
1321 argState.argOffsets[k] = (regArgsFound - 1) * sizeof(void*);
1322 #elif defined(HOST_LOONGARCH64)
1323 argState.argOffsets[k] += intRegArgBaseOffset;
1324 #elif defined(HOST_RISCV64)
1325 argState.argOffsets[k] += intRegArgBaseOffset;
1327 #error unsupported platform
1330 #if defined(HOST_AMD64) && !defined(UNIX_AMD64_ABI)
1331 else if (argState.argIsReg[k] == ArgState::ARS_FloatReg)
1333 // Increment regArgsFound since float/int arguments have overlapping registers.
1335 // Home the float arguments.
1336 X86Reg argRegs[] = { kXMM0, kXMM1, kXMM2, kXMM3 };
1337 if (!jmpCall) { sl.X64EmitMovSDToMem(argRegs[regArgsFound - 1], static_cast<X86Reg>(kESP_Unsafe), regArgsFound * sizeof(void*)); }
1338 argState.argOffsets[k] = (regArgsFound - 1) * sizeof(void*);
1341 else if (argState.argIsReg[k] == ArgState::ARS_NotReg)
1343 argState.argOffsets[k] += stackArgBaseOffset;
1345 // So far, x86 doesn't have any FP reg args, and ARM and ARM64 puts them at offset 0, so no
1346 // adjustment is necessary (yet) for arguments passed in those registers.
1348 delete[] argPermInverse;
1352 case IMAGE_CEE_CS_CALLCONV_C:
1353 NYI_INTERP("GenerateInterpreterStub -- IMAGE_CEE_CS_CALLCONV_C");
1356 case IMAGE_CEE_CS_CALLCONV_STDCALL:
1357 NYI_INTERP("GenerateInterpreterStub -- IMAGE_CEE_CS_CALLCONV_STDCALL");
1360 case IMAGE_CEE_CS_CALLCONV_THISCALL:
1361 NYI_INTERP("GenerateInterpreterStub -- IMAGE_CEE_CS_CALLCONV_THISCALL");
1364 case IMAGE_CEE_CS_CALLCONV_FASTCALL:
1365 NYI_INTERP("GenerateInterpreterStub -- IMAGE_CEE_CS_CALLCONV_FASTCALL");
1368 case CORINFO_CALLCONV_FIELD:
1369 NYI_INTERP("GenerateInterpreterStub -- CORINFO_CALLCONV_FIELD");
1372 case CORINFO_CALLCONV_LOCAL_SIG:
1373 NYI_INTERP("GenerateInterpreterStub -- CORINFO_CALLCONV_LOCAL_SIG");
1376 case CORINFO_CALLCONV_PROPERTY:
1377 NYI_INTERP("GenerateInterpreterStub -- CORINFO_CALLCONV_PROPERTY");
1380 case CORINFO_CALLCONV_UNMANAGED:
1381 NYI_INTERP("GenerateInterpreterStub -- CORINFO_CALLCONV_UNMANAGED");
1384 case CORINFO_CALLCONV_NATIVEVARARG:
1385 NYI_INTERP("GenerateInterpreterStub -- CORINFO_CALLCONV_NATIVEVARARG");
1389 _ASSERTE_ALL_BUILDS(false); // shouldn't get here
1394 PCODE interpretMethodFunc;
1397 switch (info->args.retType)
1399 case CORINFO_TYPE_FLOAT:
1400 interpretMethodFunc = reinterpret_cast<PCODE>(&InterpretMethodFloat);
1402 case CORINFO_TYPE_DOUBLE:
1403 interpretMethodFunc = reinterpret_cast<PCODE>(&InterpretMethodDouble);
1406 interpretMethodFunc = reinterpret_cast<PCODE>(&InterpretMethod);
1409 // The argument registers have been pushed by now, so we can use them.
1410 #if defined(HOST_X86)
1411 // First arg is pointer to the base of the ILargs arr -- i.e., the current stack value.
1412 sl.X86EmitMovRegReg(kEDX, static_cast<X86Reg>(kESP_Unsafe));
1413 // InterpretMethod uses F_CALL_CONV == __fastcall; pass 2 args in regs.
1415 if (pMD->IsILStub())
1417 // Third argument is stubcontext, in eax.
1418 sl.X86EmitPushReg(kEAX);
1423 // For a non-ILStub method, push NULL as the StubContext argument.
1424 sl.X86EmitZeroOutReg(kECX);
1425 sl.X86EmitPushReg(kECX);
1427 // sl.X86EmitAddReg(kECX, reinterpret_cast<UINT>(interpMethInfo));
1428 sl.X86EmitRegLoad(kECX, reinterpret_cast<UINT>(interpMethInfo));
1429 sl.X86EmitCall(sl.NewExternalCodeLabel(interpretMethodFunc), 0);
1430 // Now we will deallocate the stack slots we pushed to hold register arguments.
1431 if (argState.numRegArgs > 0)
1433 sl.X86EmitAddEsp(argState.numRegArgs * sizeof(void*));
1435 sl.X86EmitPopReg(kEBP);
1436 sl.X86EmitReturn(static_cast<WORD>(argState.callerArgStackSlots * sizeof(void*)));
1437 #elif defined(UNIX_AMD64_ABI)
1438 bool hasTwoRetSlots = info->args.retType == CORINFO_TYPE_VALUECLASS &&
1439 getClassSize(info->args.retTypeClass) == 16;
1441 int fixedTwoSlotSize = 16;
1443 int argSize = (argState.numFPRegArgSlots + argState.numRegArgs) * sizeof(void*);
1445 int stackSize = argSize + fixedTwoSlotSize; // Fixed two slot for possible "retbuf", access address by "m_ilArgs-16"
1447 if (stackSize % 16 == 0) { // for $rsp align requirement
1451 sl.X86EmitSubEsp(stackSize);
1453 X86Reg intArgsRegs[] = {ARGUMENT_kREG1, ARGUMENT_kREG2, kRDX, kRCX, kR8, kR9};
1456 for (int i = 0; i < argState.numRegArgs + argState.numFPRegArgSlots; i++)
1458 int offs = i * sizeof(void*) + 16;
1459 if (argState.fpArgsUsed & (1 << i))
1461 sl.X64EmitMovSDToMem(static_cast<X86Reg>(indexFP), static_cast<X86Reg>(kESP_Unsafe), offs);
1466 sl.X86EmitIndexRegStoreRSP(offs, intArgsRegs[indexGP]);
1471 // Pass "ilArgs", i.e. just the point where registers have been homed, as 2nd arg.
1472 sl.X86EmitIndexLeaRSP(ARGUMENT_kREG2, static_cast<X86Reg>(kESP_Unsafe), fixedTwoSlotSize);
1474 // If we have IL stubs pass the stub context in R10 or else pass NULL.
1476 if (pMD->IsILStub())
1478 sl.X86EmitMovRegReg(kRDX, kR10);
1483 // For a non-ILStub method, push NULL as the StubContext argument.
1484 sl.X86EmitZeroOutReg(ARGUMENT_kREG1);
1485 sl.X86EmitMovRegReg(kRDX, ARGUMENT_kREG1);
1487 sl.X86EmitRegLoad(ARGUMENT_kREG1, reinterpret_cast<UINT_PTR>(interpMethInfo));
1489 sl.X86EmitCall(sl.NewExternalCodeLabel(interpretMethodFunc), 0);
1490 if (hasTwoRetSlots) {
1491 sl.X86EmitEspOffset(0x8b, kRAX, 0);
1492 sl.X86EmitEspOffset(0x8b, kRDX, 8);
1494 sl.X86EmitAddEsp(stackSize);
1495 sl.X86EmitReturn(0);
1496 #elif defined(HOST_AMD64)
1497 // Pass "ilArgs", i.e. just the point where registers have been homed, as 2nd arg
1498 sl.X86EmitIndexLeaRSP(ARGUMENT_kREG2, static_cast<X86Reg>(kESP_Unsafe), 8);
1500 // Allocate space for homing callee's (InterpretMethod's) arguments.
1501 // Calling convention requires a default allocation space of 4,
1502 // but to double align the stack frame, we'd allocate 5.
1503 int interpMethodArgSize = 5 * sizeof(void*);
1504 sl.X86EmitSubEsp(interpMethodArgSize);
1506 // If we have IL stubs pass the stub context in R10 or else pass NULL.
1508 if (pMD->IsILStub())
1510 sl.X86EmitMovRegReg(kR8, kR10);
1515 // For a non-ILStub method, push NULL as the StubContext argument.
1516 sl.X86EmitZeroOutReg(ARGUMENT_kREG1);
1517 sl.X86EmitMovRegReg(kR8, ARGUMENT_kREG1);
1519 sl.X86EmitRegLoad(ARGUMENT_kREG1, reinterpret_cast<UINT_PTR>(interpMethInfo));
1520 sl.X86EmitCall(sl.NewExternalCodeLabel(interpretMethodFunc), 0);
1521 sl.X86EmitAddEsp(interpMethodArgSize);
1522 sl.X86EmitReturn(0);
1523 #elif defined(HOST_ARM)
1525 // We have to maintain 8-byte stack alignment. So if the number of
1526 // slots we would normally push is not a multiple of two, add a random
1527 // register. (We will not pop this register, but rather, increment
1528 // sp by an amount that includes it.)
1529 bool oddPushes = (((argState.numRegArgs + NumberOfFixedPushes) % 2) != 0);
1531 UINT stackFrameSize = 0;
1532 if (oddPushes) stackFrameSize = sizeof(void*);
1533 // Now, if any FP regs are used as arguments, we will copy those to the stack; reserve space for that here.
1534 // (We push doubles to keep the stack aligned...)
1535 unsigned short doublesToPush = (argState.numFPRegArgSlots + 1)/2;
1536 stackFrameSize += (doublesToPush*2*sizeof(void*));
1538 // The last argument here causes this to generate code to push all int arg regs.
1539 sl.ThumbEmitProlog(/*cCalleeSavedRegs*/NumberOfCalleeSaveRegsToPush, /*cbStackFrame*/stackFrameSize, /*fPushArgRegs*/TRUE);
1541 // Now we will generate code to copy the floating point registers to the stack frame.
1542 if (doublesToPush > 0)
1544 sl.ThumbEmitStoreMultipleVFPDoubleReg(ThumbVFPDoubleReg(0), thumbRegSp, doublesToPush*2);
1548 if (pMD->IsILStub())
1550 // Third argument is stubcontext, in r12.
1551 sl.ThumbEmitMovRegReg(ThumbReg(2), ThumbReg(12));
1556 // For a non-ILStub method, push NULL as the third StubContext argument.
1557 sl.ThumbEmitMovConstant(ThumbReg(2), 0);
1559 // Second arg is pointer to the base of the ILargs arr -- i.e., the current stack value.
1560 sl.ThumbEmitMovRegReg(ThumbReg(1), thumbRegSp);
1562 // First arg is the pointer to the interpMethInfo structure.
1563 sl.ThumbEmitMovConstant(ThumbReg(0), reinterpret_cast<int>(interpMethInfo));
1565 // If there's an HFA return, add space for that.
1566 if (HFARetTypeSize > 0)
1568 sl.ThumbEmitSubSp(HFARetTypeSize);
1571 // Now we can call the right method.
1572 // No "direct call" instruction, so load into register first. Can use R3.
1573 sl.ThumbEmitMovConstant(ThumbReg(3), static_cast<int>(interpretMethodFunc));
1574 sl.ThumbEmitCallRegister(ThumbReg(3));
1576 // If there's an HFA return, copy to FP regs, and deallocate the stack space.
1577 if (HFARetTypeSize > 0)
1579 sl.ThumbEmitLoadMultipleVFPDoubleReg(ThumbVFPDoubleReg(0), thumbRegSp, HFARetTypeSize/sizeof(void*));
1580 sl.ThumbEmitAddSp(HFARetTypeSize);
1583 sl.ThumbEmitEpilog();
1585 #elif defined(HOST_ARM64)
1587 UINT stackFrameSize = argState.numFPRegArgSlots;
1589 sl.EmitProlog(argState.numRegArgs, argState.numFPRegArgSlots, 0 /*cCalleeSavedRegs*/, static_cast<unsigned short>(cHFAVars*sizeof(void*)));
1592 if (pMD->IsILStub())
1594 // Third argument is stubcontext, in x12 (METHODDESC_REGISTER)
1595 sl.EmitMovReg(IntReg(2), IntReg(12));
1600 // For a non-ILStub method, push NULL as the third stubContext argument
1601 sl.EmitMovConstant(IntReg(2), 0);
1604     // Second arg is pointer to the base of the ILArgs -- i.e., the current stack value
1605 sl.EmitAddImm(IntReg(1), RegSp, sl.GetSavedRegArgsOffset());
1607 // First arg is the pointer to the interpMethodInfo structure
1609 if (!pMD->IsILStub())
1612 // interpMethodInfo is already in x8, so copy it from x8
1613 sl.EmitMovReg(IntReg(0), IntReg(8));
1618 // We didn't do the short-circuiting, therefore interpMethInfo is
1619 // not stored in a register (x8) before. so do it now.
1620 sl.EmitMovConstant(IntReg(0), reinterpret_cast<UINT64>(interpMethInfo));
1624 sl.EmitCallLabel(sl.NewExternalCodeLabel((LPVOID)interpretMethodFunc), FALSE, FALSE);
1626 // If there's an HFA return, copy to FP regs
1629 for (unsigned i=0; i<=(cHFAVars/2)*2;i+=2)
1630 sl.EmitLoadStoreRegPairImm(StubLinkerCPU::eLOAD, VecReg(i), VecReg(i+1), RegSp, i*sizeof(void*));
1631 if ((cHFAVars % 2) == 1)
1632 sl.EmitLoadStoreRegImm(StubLinkerCPU::eLOAD,VecReg(cHFAVars-1), RegSp, cHFAVars*sizeof(void*));
1638 #elif defined(HOST_LOONGARCH64)
1639 assert(!"unimplemented on LOONGARCH yet");
1640 #elif defined(HOST_RISCV64)
1641 bool hasTwoRetSlots = info->args.retType == CORINFO_TYPE_VALUECLASS &&
1642 getClassSize(info->args.retTypeClass) == 16;
1644 UINT stackFrameSize = argState.numFPRegArgSlots;
1646 sl.EmitProlog(argState.numRegArgs, argState.numFPRegArgSlots, hasTwoRetSlots ? 2 * sizeof(void*) : 0);
1649 if (pMD->IsILStub())
1651 // Third argument is stubcontext, in t2 (METHODDESC_REGISTER).
1652 sl.EmitMovReg(IntReg(12), IntReg(7));
1657 // For a non-ILStub method, push NULL as the third StubContext argument.
1658 sl.EmitMovConstant(IntReg(12), 0);
1660 // Second arg is pointer to the base of the ILargs arr -- i.e., the current stack value.
1661 sl.EmitAddImm(IntReg(11), RegSp, sl.GetSavedRegArgsOffset());
1663 // First arg is the pointer to the interpMethodInfo structure
1664 sl.EmitMovConstant(IntReg(10), reinterpret_cast<UINT64>(interpMethInfo));
1666 sl.EmitCallLabel(sl.NewExternalCodeLabel((LPVOID)interpretMethodFunc), FALSE, FALSE);
1669 // TODO: handle return registers to use int or float registers
1670 sl.EmitLoad(IntReg(10), RegSp, 0);
1671 sl.EmitLoad(IntReg(11), RegSp, sizeof(void*));
1676 #error unsupported platform
1678 stub = sl.Link(SystemDomain::GetGlobalLoaderAllocator()->GetStubHeap());
1680 *nativeSizeOfCode = static_cast<ULONG>(stub->GetNumCodeBytes());
1681 // TODO: manage reference count of interpreter stubs. Look for examples...
1682 *nativeEntry = dac_cast<BYTE*>(stub->GetEntryPoint());
1685 // Initialize the arg offset information.
1686 interpMethInfo->InitArgInfo(comp, info, argState.argOffsets);
1689 AddInterpMethInfo(interpMethInfo);
1693 // Remember the mapping between code address and MethodDesc*.
1694 RecordInterpreterStubForMethodDesc(info->ftn, *nativeEntry);
1698 #undef TRACE_SKIPPED
1701 size_t Interpreter::GetFrameSize(InterpreterMethodInfo* interpMethInfo)
1703 size_t sz = interpMethInfo->LocalMemSize();
1704 #if COMBINE_OPSTACK_VAL_TYPE
1705 sz += (interpMethInfo->m_maxStack * sizeof(OpStackValAndType));
1707 sz += (interpMethInfo->m_maxStack * (sizeof(INT64) + sizeof(InterpreterType*)));
1713 ARG_SLOT Interpreter::ExecuteMethodWrapper(struct InterpreterMethodInfo* interpMethInfo, bool directCall, BYTE* ilArgs, void* stubContext, _Out_ bool* pDoJmpCall, CORINFO_RESOLVED_TOKEN* pResolvedToken)
1715 #define INTERP_DYNAMIC_CONTRACTS 1
1716 #if INTERP_DYNAMIC_CONTRACTS
1723 // Dynamic contract occupies too much stack.
1724 STATIC_CONTRACT_THROWS;
1725 STATIC_CONTRACT_GC_TRIGGERS;
1726 STATIC_CONTRACT_MODE_COOPERATIVE;
1729 size_t sizeWithGS = GetFrameSize(interpMethInfo) + sizeof(GSCookie);
1730 BYTE* frameMemoryGS = static_cast<BYTE*>(_alloca(sizeWithGS));
1732 ARG_SLOT retVal = 0;
1733 unsigned jmpCallToken = 0;
1735 Interpreter interp(interpMethInfo, directCall, ilArgs, stubContext, frameMemoryGS);
1737 // Make sure we can do a GC Scan properly.
1738 FrameWithCookie<InterpreterFrame> interpFrame(&interp);
1740 // Update the interpretation count.
1741 InterlockedIncrement(reinterpret_cast<LONG *>(&interpMethInfo->m_invocations));
1743 // Need to wait until this point to do this JITting, since it may trigger a GC.
1744 JitMethodIfAppropriate(interpMethInfo);
1746 // Pass buffers to get jmpCall flag and the token, if necessary.
1747 interp.ExecuteMethod(&retVal, pDoJmpCall, &jmpCallToken);
1752 interp.ResolveToken(pResolvedToken, jmpCallToken, CORINFO_TOKENKIND_Method InterpTracingArg(RTK_Call));
1759 // TODO: Add GSCookie checks
1762 inline ARG_SLOT Interpreter::InterpretMethodBody(struct InterpreterMethodInfo* interpMethInfo, bool directCall, BYTE* ilArgs, void* stubContext)
1764 #if INTERP_DYNAMIC_CONTRACTS
1771 // Dynamic contract occupies too much stack.
1772 STATIC_CONTRACT_THROWS;
1773 STATIC_CONTRACT_GC_TRIGGERS;
1774 STATIC_CONTRACT_MODE_COOPERATIVE;
1777 CEEInfo* jitInfo = NULL;
1778 for (bool doJmpCall = true; doJmpCall; )
1780 unsigned jmpCallToken = 0;
1781 CORINFO_RESOLVED_TOKEN methTokPtr;
1782 ARG_SLOT retVal = ExecuteMethodWrapper(interpMethInfo, directCall, ilArgs, stubContext, &doJmpCall, &methTokPtr);
1783 // Clear any allocated jitInfo.
1786 // Nothing to do if the recent method asks not to do a jmpCall.
1792 // The recently executed method wants us to perform a jmpCall.
1793 MethodDesc* pMD = GetMethod(methTokPtr.hMethod);
1794 interpMethInfo = MethodHandleToInterpreterMethInfoPtr(CORINFO_METHOD_HANDLE(pMD));
1796 // Allocate a new jitInfo and also a new interpMethInfo.
1797 if (interpMethInfo == NULL)
1799 _ASSERTE(doJmpCall);
1800 jitInfo = new CEEInfo(pMD, true);
1802 CORINFO_METHOD_INFO methInfo;
1805 jitInfo->getMethodInfo(CORINFO_METHOD_HANDLE(pMD), &methInfo, NULL);
1806 GenerateInterpreterStub(jitInfo, &methInfo, NULL, 0, &interpMethInfo, true);
1812 void Interpreter::JitMethodIfAppropriate(InterpreterMethodInfo* interpMethInfo, bool force)
1820 unsigned int MaxInterpretCount = s_InterpreterJITThreshold.val(CLRConfig::INTERNAL_InterpreterJITThreshold);
1821 bool scheduleTieringBackgroundWork = false;
1822 TieredCompilationManager *tieredCompilationManager = GetAppDomain()->GetTieredCompilationManager();
1824 if (force || interpMethInfo->m_invocations > MaxInterpretCount)
1827 MethodDesc *md = reinterpret_cast<MethodDesc *>(interpMethInfo->m_method);
1828 PCODE stub = md->GetNativeCode();
1830 if (InterpretationStubToMethodInfo(stub) == md)
1833 if (s_TraceInterpreterJITTransitionFlag.val(CLRConfig::INTERNAL_TraceInterpreterJITTransition))
1835 fprintf(GetLogFile(), "JITting method %s:%s.\n", md->m_pszDebugClassName, md->m_pszDebugMethodName);
1837 #endif // INTERP_TRACING
1838 CORJIT_FLAGS jitFlags(CORJIT_FLAGS::CORJIT_FLAG_MAKEFINALCODE);
1839 NewHolder<COR_ILMETHOD_DECODER> pDecoder(NULL);
1840 // Dynamic methods (e.g., IL stubs) do not have an IL decoder but may
1841 // require additional flags. Ordinary methods require the opposite.
1842 if (md->IsDynamicMethod())
1844 jitFlags.Add(md->AsDynamicMethodDesc()->GetILStubResolver()->GetJitFlags());
1848 COR_ILMETHOD_DECODER::DecoderStatus status;
1849 pDecoder = new COR_ILMETHOD_DECODER(md->GetILHeader(TRUE),
1853 // This used to be a synchronous jit and could be made so again if desired,
1854 // but using ASP .NET MusicStore as an example scenario the performance is
1855 // better doing the JIT asynchronously. Given the not-on-by-default nature of the
1856 // interpreter I didn't wring my hands too much trying to determine the ideal
1858 #ifdef FEATURE_TIERED_COMPILATION
1859 CodeVersionManager::LockHolder _lockHolder;
1860 NativeCodeVersion activeCodeVersion = md->GetCodeVersionManager()->GetActiveILCodeVersion(md).GetActiveNativeCodeVersion(md);
1861 ILCodeVersion ilCodeVersion = activeCodeVersion.GetILCodeVersion();
1862 if (!activeCodeVersion.IsFinalTier() &&
1863 !ilCodeVersion.HasAnyOptimizedNativeCodeVersion(activeCodeVersion))
1865 tieredCompilationManager->AsyncPromoteToTier1(activeCodeVersion, &scheduleTieringBackgroundWork);
1868 #error FEATURE_INTERPRETER depends on FEATURE_TIERED_COMPILATION now
1873 if (scheduleTieringBackgroundWork)
1875 tieredCompilationManager->TryScheduleBackgroundWorkerWithoutGCTrigger_Locked();
1880 HCIMPL3(float, InterpretMethodFloat, struct InterpreterMethodInfo* interpMethInfo, BYTE* ilArgs, void* stubContext)
1884 ARG_SLOT retVal = 0;
1886 HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2);
1887 retVal = (ARG_SLOT)Interpreter::InterpretMethodBody(interpMethInfo, false, ilArgs, stubContext);
1888 HELPER_METHOD_FRAME_END();
1890 return *reinterpret_cast<float*>(ArgSlotEndiannessFixup(&retVal, sizeof(float)));
1895 HCIMPL3(double, InterpretMethodDouble, struct InterpreterMethodInfo* interpMethInfo, BYTE* ilArgs, void* stubContext)
1899 ARG_SLOT retVal = 0;
1901 HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2);
1902 retVal = Interpreter::InterpretMethodBody(interpMethInfo, false, ilArgs, stubContext);
1903 HELPER_METHOD_FRAME_END();
1905 return *reinterpret_cast<double*>(ArgSlotEndiannessFixup(&retVal, sizeof(double)));
1910 HCIMPL3(INT64, InterpretMethod, struct InterpreterMethodInfo* interpMethInfo, BYTE* ilArgs, void* stubContext)
1914 ARG_SLOT retVal = 0;
1916 HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2);
1917 retVal = Interpreter::InterpretMethodBody(interpMethInfo, false, ilArgs, stubContext);
1918 HELPER_METHOD_FRAME_END();
1920 return static_cast<INT64>(retVal);
1924 bool Interpreter::IsInCalleesFrames(void* stackPtr)
1926 // We assume a downwards_growing stack.
1927 return stackPtr < (m_localVarMemory - sizeof(GSCookie));
1930 // I want an enumeration with values for the second byte of 2-byte opcodes.
1932 #define OPDEF(c,s,pop,push,args,type,l,s1,s2,ctrl) TWOBYTE_##c = unsigned(s2),
1933 #include "opcode.def"
1937 // Optimize the interpreter loop for speed.
1939 #pragma optimize("t", on)
1942 // Duplicating code from JitHelpers for MonEnter,MonExit,MonEnter_Static,
1943 // MonExit_Static because it sets up helper frame for the JIT.
1944 static void MonitorEnter(Object* obj, BYTE* pbLockTaken)
1947 OBJECTREF objRef = ObjectToOBJECTREF(obj);
1951 COMPlusThrow(kArgumentNullException);
1953 GCPROTECT_BEGININTERIOR(pbLockTaken);
1955 if (GET_THREAD()->CatchAtSafePointOpportunistic())
1957 GET_THREAD()->PulseGCMode();
1959 objRef->EnterObjMonitor();
1961 if (pbLockTaken != 0) *pbLockTaken = 1;
1966 static void MonitorExit(Object* obj, BYTE* pbLockTaken)
1968 OBJECTREF objRef = ObjectToOBJECTREF(obj);
1971 COMPlusThrow(kArgumentNullException);
1973 if (!objRef->LeaveObjMonitor())
1974 COMPlusThrow(kSynchronizationLockException);
1976 if (pbLockTaken != 0) *pbLockTaken = 0;
1978 if (GET_THREAD()->IsAbortRequested()) {
1979 GET_THREAD()->HandleThreadAbort();
1983 static void MonitorEnterStatic(AwareLock *lock, BYTE* pbLockTaken)
1986 MONHELPER_STATE(*pbLockTaken = 1;)
1989 static void MonitorExitStatic(AwareLock *lock, BYTE* pbLockTaken)
1991 // Error, yield or contention
1993 COMPlusThrow(kSynchronizationLockException);
1995 if (GET_THREAD()->IsAbortRequested()) {
1996 GET_THREAD()->HandleThreadAbort();
// Return the AwareLock used to synchronize the current *static* method:
// the monitor of the declaring type's managed System.Type object
// (obtained via the class object's sync block).
// For shared-generics code the exact instantiated type is not implied by the
// MethodDesc alone, so the precise generics context is consulted.
2001 AwareLock* Interpreter::GetMonitorForStaticMethod()
2003 MethodDesc* pMD = reinterpret_cast<MethodDesc*>(m_methInfo->m_method);
2004 CORINFO_LOOKUP_KIND kind;
2007 m_interpCeeInfo.getLocationOfThisType(m_methInfo->m_method, &kind);
// Fast path: no runtime lookup needed — the MethodDesc's own MethodTable
// identifies the exact type.
2009 if (!kind.needsRuntimeLookup)
2011 OBJECTREF ref = pMD->GetMethodTable()->GetManagedClassObject();
2012 return (AwareLock*) ref->GetSyncBlock()->GetMonitor();
// Shared generics: recover the exact class handle from the precise
// generics context, whose low bits tag whether it is a class or method
// context (CORINFO_CONTEXTFLAGS_*).
2016 CORINFO_CLASS_HANDLE classHnd = nullptr;
2017 switch (kind.runtimeLookupKind)
2019 case CORINFO_LOOKUP_CLASSPARAM:
// Context *is* the exact class handle, tagged with the class flag.
2021 CORINFO_CONTEXT_HANDLE ctxHnd = GetPreciseGenericsContext();
2022 _ASSERTE_MSG((((size_t)ctxHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS), "Precise context not class context");
2023 classHnd = (CORINFO_CLASS_HANDLE) ((size_t)ctxHnd & ~CORINFO_CONTEXTFLAGS_CLASS);
2026 case CORINFO_LOOKUP_METHODPARAM:
// Context is an exact MethodDesc; its MethodTable gives the class.
2028 CORINFO_CONTEXT_HANDLE ctxHnd = GetPreciseGenericsContext();
2029 _ASSERTE_MSG((((size_t)ctxHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD), "Precise context not method context");
2030 MethodDesc* pMD = (MethodDesc*) (CORINFO_METHOD_HANDLE) ((size_t)ctxHnd & ~CORINFO_CONTEXTFLAGS_METHOD);
2031 classHnd = (CORINFO_CLASS_HANDLE) pMD->GetMethodTable();
2035 NYI_INTERP("Unknown lookup for synchronized methods");
// Finally, get the monitor off the resolved type's class object.
2038 MethodTable* pMT = GetMethodTableFromClsHnd(classHnd);
2039 OBJECTREF ref = pMT->GetManagedClassObject();
2041 return (AwareLock*) ref->GetSyncBlock()->GetMonitor();
// On entry to a [MethodImpl(Synchronized)] method, take the appropriate
// monitor: the declaring type's class-object monitor for static methods,
// or the "this" object's monitor otherwise. m_monAcquired records whether
// the lock was taken, so DoMonitorExitWork can release it symmetrically.
// No-op for non-synchronized methods.
2045 void Interpreter::DoMonitorEnterWork()
2047 MethodDesc* pMD = reinterpret_cast<MethodDesc*>(m_methInfo->m_method);
2048 if (pMD->IsSynchronized())
2050 if (pMD->IsStatic())
2052 AwareLock* lock = GetMonitorForStaticMethod();
2053 MonitorEnterStatic(lock, &m_monAcquired);
2057 MonitorEnter((Object*) m_thisArg, &m_monAcquired);
// Mirror of DoMonitorEnterWork: on leaving a synchronized method, release
// whichever monitor was taken on entry (class-object monitor for statics,
// "this" monitor for instance methods), using m_monAcquired for bookkeeping.
// No-op for non-synchronized methods.
2062 void Interpreter::DoMonitorExitWork()
2064 MethodDesc* pMD = reinterpret_cast<MethodDesc*>(m_methInfo->m_method);
2065 if (pMD->IsSynchronized())
2067 if (pMD->IsStatic())
2069 AwareLock* lock = GetMonitorForStaticMethod();
2070 MonitorExitStatic(lock, &m_monAcquired);
2074 MonitorExit((Object*) m_thisArg, &m_monAcquired);
// The interpreter's main loop: fetch/decode/execute IL for the current
// method until RET (normal return, result written through retVal) or an
// unhandled exception. Out-params: *pDoJmpCall is set when a CEE_JMP is
// encountered, with *pJmpCallToken receiving the method token, so the caller
// can restart with the jump target's arguments.
// NOTE(review): this excerpt is heavily elided — braces, most case labels,
// #else arms, and many statements between the visible lines are not shown.
// Comments below annotate the visible structure only.
2080 void Interpreter::ExecuteMethod(ARG_SLOT* retVal, _Out_ bool* pDoJmpCall, _Out_ unsigned* pJmpCallToken)
2082 #if INTERP_DYNAMIC_CONTRACTS
2089 // Dynamic contract occupies too much stack.
2090 STATIC_CONTRACT_THROWS;
2091 STATIC_CONTRACT_GC_TRIGGERS;
2092 STATIC_CONTRACT_MODE_COOPERATIVE;
2095 *pDoJmpCall = false;
2097 // Normally I'd prefer to declare these in small case-block scopes, but most C++ compilers
2098 // do not realize that their lifetimes do not overlap, so that makes for a large stack frame.
2099 // So I avoid that by outside declarations (sigh).
2101 unsigned char argNumc;
2102 unsigned short argNums;
2110 // Make sure that the .cctor for the current method's class has been run.
2111 MethodDesc* pMD = reinterpret_cast<MethodDesc*>(m_methInfo->m_method);
2112 EnsureClassInit(pMD->GetMethodTable());
// --- Optional tracing / profiling preamble (INTERP_TRACING etc.) ---
2115 const char* methName = eeGetMethodFullName(m_methInfo->m_method);
2116 unsigned ilOffset = 0;
2118 unsigned curInvocation = InterlockedIncrement(&s_totalInvocations);
2119 if (s_TraceInterpreterEntriesFlag.val(CLRConfig::INTERNAL_TraceInterpreterEntries))
2121 fprintf(GetLogFile(), "Entering method #%d (= 0x%x): %s.\n", curInvocation, curInvocation, methName);
2122 fprintf(GetLogFile(), " arguments:\n");
2125 #endif // INTERP_TRACING
2127 #if LOOPS_VIA_INSTRS
2128 unsigned instrs = 0;
2131 unsigned instrs = 0;
2137 // Catch any exceptions raised.
2139 // Optional features...
2140 #define INTERPRETER_CHECK_LARGE_STRUCT_STACK_HEIGHT 1
2142 #if INTERP_ILCYCLE_PROFILE
2143 m_instr = CEE_COUNT; // Flag to indicate first instruction.
2145 #endif // INTERP_ILCYCLE_PROFILE
// Take the monitor first if this is a synchronized method.
2147 DoMonitorEnterWork();
2149 INTERPLOG("START %d, %s\n", m_methInfo->m_stubNum, methName);
// --- Top of the fetch/decode/execute loop ---
2152 // TODO: verify that m_ILCodePtr is legal, and we haven't walked off the end of the IL array? (i.e., bad IL).
2153 // Note that ExecuteBranch() should be called for every branch. That checks that we aren't either before or
2154 // after the IL range. Here, we would only need to check that we haven't gone past the end (not before the beginning)
2155 // because everything that doesn't call ExecuteBranch() should only add to m_ILCodePtr.
2158 ilOffset = CurOffset();
2161 if (s_TraceInterpreterOstackFlag.val(CLRConfig::INTERNAL_TraceInterpreterOstack))
2165 #if INTERPRETER_CHECK_LARGE_STRUCT_STACK_HEIGHT
2166 _ASSERTE_MSG(LargeStructStackHeightIsValid(), "Large structure stack height invariant violated."); // Check the large struct stack invariant.
2168 if (s_TraceInterpreterILFlag.val(CLRConfig::INTERNAL_TraceInterpreterIL))
2170 fprintf(GetLogFile(), " %#4x: %s\n", ilOffset, ILOp(m_ILCodePtr));
2171 fflush(GetLogFile());
2173 #endif // INTERP_TRACING
2174 #if LOOPS_VIA_INSTRS
2182 #if INTERP_ILINSTR_PROFILE
2183 #if INTERP_ILCYCLE_PROFILE
2185 #endif // INTERP_ILCYCLE_PROFILE
2187 InterlockedIncrement(&s_ILInstrExecs[*m_ILCodePtr]);
2188 #endif // INTERP_ILINSTR_PROFILE
// Dispatch on the current one-byte opcode.
2190 switch (*m_ILCodePtr)
2195 case CEE_BREAK: // TODO: interact with the debugger?
// LDARG / STARG / LDLOC / STLOC short forms: the arg/local index is
// encoded in the opcode itself or in the following byte.
2237 argNumc = *m_ILCodePtr;
2242 argNumc = *m_ILCodePtr;
2247 argNumc = *m_ILCodePtr;
2251 argNumc = *(m_ILCodePtr + 1);
2257 argNumc = *m_ILCodePtr;
2261 argNumc = *(m_ILCodePtr + 1);
// LDC.* constants: read the immediate that follows the opcode.
2300 valc = getI1(m_ILCodePtr + 1);
2305 vali = getI4LittleEndian(m_ILCodePtr + 1);
2310 vall = getI8LittleEndian(m_ILCodePtr + 1);
2315 // We use I4 here because we just care about the bit pattern.
2316 // LdR4Con will push the right InterpreterType.
2317 vali = getI4LittleEndian(m_ILCodePtr + 1);
2322 // We use I4 here because we just care about the bit pattern.
2323 // LdR8Con will push the right InterpreterType.
2324 vall = getI8LittleEndian(m_ILCodePtr + 1);
// DUP: large structs are duplicated via the side large-struct stack;
// everything else fits in a 64-bit operand slot.
2329 _ASSERTE(m_curStackHt > 0);
2330 it = OpStackTypeGet(m_curStackHt - 1);
2331 OpStackTypeSet(m_curStackHt, it);
2332 if (it.IsLargeStruct(&m_interpCeeInfo))
2334 sz = it.Size(&m_interpCeeInfo);
2335 void* dest = LargeStructOperandStackPush(sz);
2336 memcpy(dest, OpStackGet<void*>(m_curStackHt - 1), sz);
2337 OpStackSet<void*>(m_curStackHt, dest);
2341 OpStackSet<INT64>(m_curStackHt, OpStackGet<INT64>(m_curStackHt - 1));
// POP: also unwind the large-struct stack when the popped slot lives there.
2346 _ASSERTE(m_curStackHt > 0);
2348 it = OpStackTypeGet(m_curStackHt);
2349 if (it.IsLargeStruct(&m_interpCeeInfo))
2351 LargeStructOperandStackPop(it.Size(&m_interpCeeInfo), OpStackGet<void*>(m_curStackHt));
// CEE_JMP: hand the token back to the caller via the out-params.
2356 *pJmpCallToken = getU4LittleEndian(m_ILCodePtr + sizeof(BYTE));
// CALL / CALLVIRT.
2361 DoCall(/*virtualCall*/false);
2363 if (s_TraceInterpreterILFlag.val(CLRConfig::INTERNAL_TraceInterpreterIL))
2365 fprintf(GetLogFile(), " Returning to method %s, stub num %d.\n", methName, m_methInfo->m_stubNum);
2367 #endif // INTERP_TRACING
2371 DoCall(/*virtualCall*/true);
2373 if (s_TraceInterpreterILFlag.val(CLRConfig::INTERNAL_TraceInterpreterIL))
2375 fprintf(GetLogFile(), " Returning to method %s, stub num %d.\n", methName, m_methInfo->m_stubNum);
2377 #endif // INTERP_TRACING
// --- CEE_RET: marshal the top-of-stack value out through retVal, the
// return buffer, or the platform's struct-return convention. ---
2386 if (m_methInfo->m_returnType == CORINFO_TYPE_VOID)
2388 _ASSERTE(m_curStackHt == 0);
2392 _ASSERTE(m_curStackHt == 1);
2393 InterpreterType retValIt = OpStackTypeGet(0);
// "Loose rules" tolerate an int/float stack type that differs from the
// declared return type (debug-only relaxation of the match assert below).
2394 bool looseInt = s_InterpreterLooseRules &&
2395 CorInfoTypeIsIntegral(m_methInfo->m_returnType) &&
2396 (CorInfoTypeIsIntegral(retValIt.ToCorInfoType()) || CorInfoTypeIsPointer(retValIt.ToCorInfoType())) &&
2397 (m_methInfo->m_returnType != retValIt.ToCorInfoType());
2399 bool looseFloat = s_InterpreterLooseRules &&
2400 CorInfoTypeIsFloatingPoint(m_methInfo->m_returnType) &&
2401 CorInfoTypeIsFloatingPoint(retValIt.ToCorInfoType()) &&
2402 (m_methInfo->m_returnType != retValIt.ToCorInfoType());
2404 // Make sure that the return value "matches" (which allows certain relaxations) the declared return type.
2405 _ASSERTE((m_methInfo->m_returnType == CORINFO_TYPE_VALUECLASS && retValIt.ToCorInfoType() == CORINFO_TYPE_VALUECLASS) ||
2406 (m_methInfo->m_returnType == CORINFO_TYPE_REFANY && retValIt.ToCorInfoType() == CORINFO_TYPE_VALUECLASS) ||
2407 (m_methInfo->m_returnType == CORINFO_TYPE_REFANY && retValIt.ToCorInfoType() == CORINFO_TYPE_REFANY) ||
2408 (looseInt || looseFloat) ||
2409 InterpreterType(m_methInfo->m_returnType).StackNormalize().Matches(retValIt, &m_interpCeeInfo));
2411 size_t sz = retValIt.Size(&m_interpCeeInfo);
2412 #if defined(FEATURE_HFA)
2413 CorInfoHFAElemType cit = CORINFO_HFA_ELEM_NONE;
2416 if(m_methInfo->m_returnType == CORINFO_TYPE_VALUECLASS)
2417 cit = m_interpCeeInfo.getHFAType(retValIt.ToClassHandle());
// Case 1: caller supplied a return buffer — copy the value class/refany
// into m_retBufArg (GC-safe copy for value classes).
2420 if (m_methInfo->GetFlag<InterpreterMethodInfo::Flag_hasRetBuffArg>())
2422 _ASSERTE((m_methInfo->m_returnType == CORINFO_TYPE_VALUECLASS && retValIt.ToCorInfoType() == CORINFO_TYPE_VALUECLASS) ||
2423 (m_methInfo->m_returnType == CORINFO_TYPE_REFANY && retValIt.ToCorInfoType() == CORINFO_TYPE_VALUECLASS) ||
2424 (m_methInfo->m_returnType == CORINFO_TYPE_REFANY && retValIt.ToCorInfoType() == CORINFO_TYPE_REFANY));
2425 if (retValIt.ToCorInfoType() == CORINFO_TYPE_REFANY)
2427 InterpreterType typedRefIT = GetTypedRefIT(&m_interpCeeInfo);
2428 TypedByRef* ptr = OpStackGet<TypedByRef*>(0);
2429 *((TypedByRef*) m_retBufArg) = *ptr;
2431 else if (retValIt.IsLargeStruct(&m_interpCeeInfo))
2433 MethodTable* clsMt = GetMethodTableFromClsHnd(retValIt.ToClassHandle());
2434 // The ostack value is a pointer to the struct value.
2435 CopyValueClassUnchecked(m_retBufArg, OpStackGet<void*>(0), clsMt);
2439 MethodTable* clsMt = GetMethodTableFromClsHnd(retValIt.ToClassHandle());
2440 // The ostack value *is* the struct value.
2441 CopyValueClassUnchecked(m_retBufArg, OpStackGetAddr(0, sz), clsMt);
2444 #if defined(FEATURE_HFA)
// Case 2 (HFA platforms): homogeneous float aggregates return in FP regs.
2446 else if (m_methInfo->m_returnType == CORINFO_TYPE_VALUECLASS
2447 && (cit != CORINFO_HFA_ELEM_NONE)
2448 && (MetaSig(reinterpret_cast<MethodDesc*>(m_methInfo->m_method)).GetCallingConventionInfo() & CORINFO_CALLCONV_VARARG) == 0)
2450 if (retValIt.IsLargeStruct(&m_interpCeeInfo))
2452 // The ostack value is a pointer to the struct value.
2453 memcpy(GetHFARetBuffAddr(static_cast<unsigned>(sz)), OpStackGet<void*>(0), sz);
2457 // The ostack value *is* the struct value.
2458 memcpy(GetHFARetBuffAddr(static_cast<unsigned>(sz)), OpStackGetAddr(0, sz), sz);
2461 #elif defined(UNIX_AMD64_ABI)
2462 // Is it an struct contained in $rax and $rdx
2463 else if (m_methInfo->m_returnType == CORINFO_TYPE_VALUECLASS
2466 //The Fixed Two slot return buffer address
2467 memcpy(m_ilArgs-16, OpStackGet<void*>(0), sz);
2469 #elif defined(TARGET_RISCV64)
2470 // Is it an struct contained in two slots
2471 else if (m_methInfo->m_returnType == CORINFO_TYPE_VALUECLASS
2474 //The Fixed Two slot return buffer address
2475 memcpy(m_ilArgs-32, OpStackGet<void*>(0), sz);
// Case 3: scalar floating-point return, widened/narrowed as declared.
2478 else if (CorInfoTypeIsFloatingPoint(m_methInfo->m_returnType) &&
2479 CorInfoTypeIsFloatingPoint(retValIt.ToCorInfoType()))
2481 double val = (sz <= sizeof(INT32)) ? OpStackGet<float>(0) : OpStackGet<double>(0);
2482 if (m_methInfo->m_returnType == CORINFO_TYPE_DOUBLE)
2484 memcpy(retVal, &val, sizeof(double));
2488 float val2 = (float) val;
2489 memcpy(retVal, &val2, sizeof(float));
// Case 4: everything else fits in the integer ARG_SLOT.
2494 if (sz <= sizeof(INT32))
2496 *retVal = OpStackGet<INT32>(0);
2500 // If looseInt is true, we are relying on auto-downcast in case *retVal
2501 // is small (but this is guaranteed not to happen by def'n of ARG_SLOT.)
2503 // Note structs of size 5, 6, 7 may be returned as 8 byte ints.
2504 _ASSERTE(sz <= sizeof(INT64));
2505 *retVal = OpStackGet<INT64>(0);
2512 // We're not capturing instructions executed in a method that terminates via exception,
2514 m_methInfo->RecordExecInstrs(instrs);
2517 // We keep this live until we leave.
2519 #endif // INTERP_TRACING
2521 #if INTERP_ILCYCLE_PROFILE
2522 // Finish off accounting for the "RET" before we return
2524 #endif // INTERP_ILCYCLE_PROFILE
// --- Short-form branches (1-byte offset) ---
2530 offsetc = *m_ILCodePtr;
2531 // The offset is wrt the beginning of the following instruction, so the +1 is to get to that
2532 // m_ILCodePtr value before adding the offset.
2533 ExecuteBranch(m_ILCodePtr + offsetc + 1);
2534 continue; // Skip the default m_ILCodePtr++ at bottom of loop.
2537 // LEAVE empties the operand stack.
2539 m_largeStructOperandStackHt = 0;
2540 offsetc = getI1(m_ILCodePtr + 1);
2543 // The offset is wrt the beginning of the following instruction, so the +2 is to get to that
2544 // m_ILCodePtr value before adding the offset.
2545 BYTE* leaveTarget = m_ILCodePtr + offsetc + 2;
2546 unsigned leaveOffset = CurOffset();
// Run any covering finally blocks before transferring to the leave target.
2547 m_leaveInfoStack.Push(LeaveInfo(leaveOffset, leaveTarget));
2548 if (!SearchForCoveringFinally())
2550 m_leaveInfoStack.Pop();
2551 ExecuteBranch(leaveTarget);
2554 continue; // Skip the default m_ILCodePtr++ at bottom of loop.
2556 // Abstract the next pair out to something common with templates.
2558 BrOnValue<false, 1>();
2562 BrOnValue<true, 1>();
2566 BrOnComparison<CO_EQ, false, 1>();
// For BGE/BGT/BLE/BLT the float/double cases invert to the *_UN comparison
// so that NaN comparisons branch per the ECMA-335 rules.
2569 _ASSERTE(m_curStackHt >= 2);
2570 // ECMA spec gives different semantics for different operand types:
2571 switch (OpStackTypeGet(m_curStackHt-1).ToCorInfoType())
2573 case CORINFO_TYPE_FLOAT:
2574 case CORINFO_TYPE_DOUBLE:
2575 BrOnComparison<CO_LT_UN, true, 1>();
2578 BrOnComparison<CO_LT, true, 1>();
2583 BrOnComparison<CO_GT, false, 1>();
2586 _ASSERTE(m_curStackHt >= 2);
2587 // ECMA spec gives different semantics for different operand types:
2588 switch (OpStackTypeGet(m_curStackHt-1).ToCorInfoType())
2590 case CORINFO_TYPE_FLOAT:
2591 case CORINFO_TYPE_DOUBLE:
2592 BrOnComparison<CO_GT_UN, true, 1>();
2595 BrOnComparison<CO_GT, true, 1>();
2600 BrOnComparison<CO_LT, false, 1>();
2603 BrOnComparison<CO_EQ, true, 1>();
2606 _ASSERTE(m_curStackHt >= 2);
2607 // ECMA spec gives different semantics for different operand types:
2608 switch (OpStackTypeGet(m_curStackHt-1).ToCorInfoType())
2610 case CORINFO_TYPE_FLOAT:
2611 case CORINFO_TYPE_DOUBLE:
2612 BrOnComparison<CO_LT, true, 1>();
2615 BrOnComparison<CO_LT_UN, true, 1>();
2620 BrOnComparison<CO_GT_UN, false, 1>();
2623 _ASSERTE(m_curStackHt >= 2);
2624 // ECMA spec gives different semantics for different operand types:
2625 switch (OpStackTypeGet(m_curStackHt-1).ToCorInfoType())
2627 case CORINFO_TYPE_FLOAT:
2628 case CORINFO_TYPE_DOUBLE:
2629 BrOnComparison<CO_GT, true, 1>();
2632 BrOnComparison<CO_GT_UN, true, 1>();
2637 BrOnComparison<CO_LT_UN, false, 1>();
// --- Long-form branches (4-byte offset) ---
2642 vali = getI4LittleEndian(m_ILCodePtr);
2643 vali += 4; // +4 for the length of the offset.
2644 ExecuteBranch(m_ILCodePtr + vali);
2647 // Backwards branch -- enable caching.
2648 BackwardsBranchActions(vali);
2654 // LEAVE empties the operand stack.
2656 m_largeStructOperandStackHt = 0;
2657 vali = getI4LittleEndian(m_ILCodePtr + 1);
2660 // The offset is wrt the beginning of the following instruction, so the +5 is to get to that
2661 // m_ILCodePtr value before adding the offset.
2662 BYTE* leaveTarget = m_ILCodePtr + (vali + 5);
2663 unsigned leaveOffset = CurOffset();
2664 m_leaveInfoStack.Push(LeaveInfo(leaveOffset, leaveTarget));
2665 if (!SearchForCoveringFinally())
2667 (void)m_leaveInfoStack.Pop();
2670 // Backwards branch -- enable caching.
2671 BackwardsBranchActions(vali);
2673 ExecuteBranch(leaveTarget);
2676 continue; // Skip the default m_ILCodePtr++ at bottom of loop.
2679 BrOnValue<false, 4>();
2682 BrOnValue<true, 4>();
2686 BrOnComparison<CO_EQ, false, 4>();
2689 _ASSERTE(m_curStackHt >= 2);
2690 // ECMA spec gives different semantics for different operand types:
2691 switch (OpStackTypeGet(m_curStackHt-1).ToCorInfoType())
2693 case CORINFO_TYPE_FLOAT:
2694 case CORINFO_TYPE_DOUBLE:
2695 BrOnComparison<CO_LT_UN, true, 4>();
2698 BrOnComparison<CO_LT, true, 4>();
2703 BrOnComparison<CO_GT, false, 4>();
2706 _ASSERTE(m_curStackHt >= 2);
2707 // ECMA spec gives different semantics for different operand types:
2708 switch (OpStackTypeGet(m_curStackHt-1).ToCorInfoType())
2710 case CORINFO_TYPE_FLOAT:
2711 case CORINFO_TYPE_DOUBLE:
2712 BrOnComparison<CO_GT_UN, true, 4>();
2715 BrOnComparison<CO_GT, true, 4>();
2720 BrOnComparison<CO_LT, false, 4>();
2723 BrOnComparison<CO_EQ, true, 4>();
2726 _ASSERTE(m_curStackHt >= 2);
2727 // ECMA spec gives different semantics for different operand types:
2728 switch (OpStackTypeGet(m_curStackHt-1).ToCorInfoType())
2730 case CORINFO_TYPE_FLOAT:
2731 case CORINFO_TYPE_DOUBLE:
2732 BrOnComparison<CO_LT, true, 4>();
2735 BrOnComparison<CO_LT_UN, true, 4>();
2740 BrOnComparison<CO_GT_UN, false, 4>();
2743 _ASSERTE(m_curStackHt >= 2);
2744 // ECMA spec gives different semantics for different operand types:
2745 switch (OpStackTypeGet(m_curStackHt-1).ToCorInfoType())
2747 case CORINFO_TYPE_FLOAT:
2748 case CORINFO_TYPE_DOUBLE:
2749 BrOnComparison<CO_GT, true, 4>();
2752 BrOnComparison<CO_GT_UN, true, 4>();
2757 BrOnComparison<CO_LT_UN, false, 4>();
// --- CEE_SWITCH: index into the jump table that follows the opcode. ---
2762 _ASSERTE(m_curStackHt > 0);
2764 #if defined(_DEBUG) || defined(HOST_AMD64)
2765 CorInfoType cit = OpStackTypeGet(m_curStackHt).ToCorInfoType();
2766 #endif // _DEBUG || HOST_AMD64
2768 _ASSERTE(cit == CORINFO_TYPE_INT || cit == CORINFO_TYPE_UINT || cit == CORINFO_TYPE_NATIVEINT);
2770 #if defined(HOST_AMD64)
2771 UINT32 val = (cit == CORINFO_TYPE_NATIVEINT) ? (INT32) OpStackGet<NativeInt>(m_curStackHt)
2772 : OpStackGet<INT32>(m_curStackHt);
2774 UINT32 val = OpStackGet<INT32>(m_curStackHt);
2776 UINT32 n = getU4LittleEndian(m_ILCodePtr + 1);
2777 UINT32 instrSize = 1 + (n + 1)*4;
2780 vali = getI4LittleEndian(m_ILCodePtr + (5 + val * 4));
2781 ExecuteBranch(m_ILCodePtr + instrSize + vali);
// Out-of-range switch value: fall through past the whole instruction.
2785 m_ILCodePtr += instrSize;
// --- LDIND.* family: templated indirect loads. ---
2791 LdIndShort<INT8, /*isUnsigned*/false>();
2794 LdIndShort<UINT8, /*isUnsigned*/true>();
2797 LdIndShort<INT16, /*isUnsigned*/false>();
2800 LdIndShort<UINT16, /*isUnsigned*/true>();
2803 LdInd<INT32, CORINFO_TYPE_INT>();
2806 LdInd<UINT32, CORINFO_TYPE_INT>();
2809 LdInd<INT64, CORINFO_TYPE_LONG>();
2812 LdInd<NativeInt, CORINFO_TYPE_NATIVEINT>();
2815 LdInd<float, CORINFO_TYPE_FLOAT>();
2818 LdInd<double, CORINFO_TYPE_DOUBLE>();
2821 LdInd<Object*, CORINFO_TYPE_CLASS>();
// --- Arithmetic / logic, all templated on the operation. ---
2845 BinaryArithOp<BA_Add>();
2849 BinaryArithOp<BA_Sub>();
2852 BinaryArithOp<BA_Mul>();
2855 BinaryArithOp<BA_Div>();
2858 BinaryIntOp<BIO_DivUn>();
2861 BinaryArithOp<BA_Rem>();
2864 BinaryIntOp<BIO_RemUn>();
2867 BinaryIntOp<BIO_And>();
2870 BinaryIntOp<BIO_Or>();
2873 BinaryIntOp<BIO_Xor>();
2882 ShiftOp<CEE_SHR_UN>();
// --- CONV.* family: templated numeric conversions. ---
2891 Conv<INT8, /*TIsUnsigned*/false, /*TCanHoldPtr*/false, /*TIsShort*/true, CORINFO_TYPE_INT>();
2894 Conv<INT16, /*TIsUnsigned*/false, /*TCanHoldPtr*/false, /*TIsShort*/true, CORINFO_TYPE_INT>();
2897 Conv<INT32, /*TIsUnsigned*/false, /*TCanHoldPtr*/false, /*TIsShort*/false, CORINFO_TYPE_INT>();
2900 Conv<INT64, /*TIsUnsigned*/false, /*TCanHoldPtr*/true, /*TIsShort*/false, CORINFO_TYPE_LONG>();
2903 Conv<float, /*TIsUnsigned*/false, /*TCanHoldPtr*/false, /*TIsShort*/false, CORINFO_TYPE_FLOAT>();
2906 Conv<double, /*TIsUnsigned*/false, /*TCanHoldPtr*/false, /*TIsShort*/false, CORINFO_TYPE_DOUBLE>();
2909 Conv<UINT32, /*TIsUnsigned*/true, /*TCanHoldPtr*/false, /*TIsShort*/false, CORINFO_TYPE_INT>();
2912 Conv<UINT64, /*TIsUnsigned*/true, /*TCanHoldPtr*/true, /*TIsShort*/false, CORINFO_TYPE_LONG>();
2927 if (s_TraceInterpreterILFlag.val(CLRConfig::INTERNAL_TraceInterpreterIL))
2929 fprintf(GetLogFile(), " Returning to method %s, stub num %d.\n", methName, m_methInfo->m_stubNum);
2931 #endif // INTERP_TRACING
// --- CONV.OVF.*.UN family: overflow-checked conversions from unsigned. ---
2969 case CEE_CONV_OVF_I1_UN:
2970 ConvOvfUn<INT8, SCHAR_MIN, SCHAR_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
2972 case CEE_CONV_OVF_I2_UN:
2973 ConvOvfUn<INT16, SHRT_MIN, SHRT_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
2975 case CEE_CONV_OVF_I4_UN:
2976 ConvOvfUn<INT32, INT_MIN, INT_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
2978 case CEE_CONV_OVF_I8_UN:
2979 ConvOvfUn<INT64, _I64_MIN, _I64_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_LONG>();
2981 case CEE_CONV_OVF_U1_UN:
2982 ConvOvfUn<UINT8, 0, UCHAR_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
2984 case CEE_CONV_OVF_U2_UN:
2985 ConvOvfUn<UINT16, 0, USHRT_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
2987 case CEE_CONV_OVF_U4_UN:
2988 ConvOvfUn<UINT32, 0, UINT_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
2990 case CEE_CONV_OVF_U8_UN:
2991 ConvOvfUn<UINT64, 0, _UI64_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_LONG>();
// Native-int variants pick bounds by the platform pointer size.
2993 case CEE_CONV_OVF_I_UN:
2994 if (sizeof(NativeInt) == 4)
2996 ConvOvfUn<NativeInt, INT_MIN, INT_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_NATIVEINT>();
3000 _ASSERTE(sizeof(NativeInt) == 8);
3001 ConvOvfUn<NativeInt, _I64_MIN, _I64_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_NATIVEINT>();
3004 case CEE_CONV_OVF_U_UN:
3005 if (sizeof(NativeUInt) == 4)
3007 ConvOvfUn<NativeUInt, 0, UINT_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_NATIVEINT>();
3011 _ASSERTE(sizeof(NativeUInt) == 8);
3012 ConvOvfUn<NativeUInt, 0, _UI64_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_NATIVEINT>();
// --- LDELEM / STELEM families: templated array element access. ---
3025 LdElem</*takeAddr*/true>();
3028 LdElemWithType<INT8, false, CORINFO_TYPE_INT>();
3031 LdElemWithType<UINT8, false, CORINFO_TYPE_INT>();
3034 LdElemWithType<INT16, false, CORINFO_TYPE_INT>();
3037 LdElemWithType<UINT16, false, CORINFO_TYPE_INT>();
3040 LdElemWithType<INT32, false, CORINFO_TYPE_INT>();
3043 LdElemWithType<UINT32, false, CORINFO_TYPE_INT>();
3046 LdElemWithType<INT64, false, CORINFO_TYPE_LONG>();
3048 // Note that the ECMA spec defines a "LDELEM_U8", but it is the same instruction number as LDELEM_I8 (since
3049 // when loading to the widest width, signed/unsigned doesn't matter).
3051 LdElemWithType<NativeInt, false, CORINFO_TYPE_NATIVEINT>();
3054 LdElemWithType<float, false, CORINFO_TYPE_FLOAT>();
3057 LdElemWithType<double, false, CORINFO_TYPE_DOUBLE>();
3059 case CEE_LDELEM_REF:
3060 LdElemWithType<Object*, true, CORINFO_TYPE_CLASS>();
3063 StElemWithType<NativeInt, false>();
3066 StElemWithType<INT8, false>();
3069 StElemWithType<INT16, false>();
3072 StElemWithType<INT32, false>();
3075 StElemWithType<INT64, false>();
3078 StElemWithType<float, false>();
3081 StElemWithType<double, false>();
3083 case CEE_STELEM_REF:
3084 StElemWithType<Object*, true>();
3087 LdElem</*takeAddr*/false>();
// --- CONV.OVF.* family: overflow-checked conversions from signed. ---
3095 case CEE_CONV_OVF_I1:
3096 ConvOvf<INT8, SCHAR_MIN, SCHAR_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
3098 case CEE_CONV_OVF_U1:
3099 ConvOvf<UINT8, 0, UCHAR_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
3101 case CEE_CONV_OVF_I2:
3102 ConvOvf<INT16, SHRT_MIN, SHRT_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
3104 case CEE_CONV_OVF_U2:
3105 ConvOvf<UINT16, 0, USHRT_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
3107 case CEE_CONV_OVF_I4:
3108 ConvOvf<INT32, INT_MIN, INT_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
3110 case CEE_CONV_OVF_U4:
3111 ConvOvf<UINT32, 0, UINT_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
3113 case CEE_CONV_OVF_I8:
3114 ConvOvf<INT64, _I64_MIN, _I64_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_LONG>();
3116 case CEE_CONV_OVF_U8:
3117 ConvOvf<UINT64, 0, _UI64_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_LONG>();
3132 Conv<UINT16, /*TIsUnsigned*/true, /*TCanHoldPtr*/false, /*TIsShort*/true, CORINFO_TYPE_INT>();
3135 Conv<UINT8, /*TIsUnsigned*/true, /*TCanHoldPtr*/false, /*TIsShort*/true, CORINFO_TYPE_INT>();
3138 Conv<NativeInt, /*TIsUnsigned*/false, /*TCanHoldPtr*/true, /*TIsShort*/false, CORINFO_TYPE_NATIVEINT>();
3140 case CEE_CONV_OVF_I:
3141 if (sizeof(NativeInt) == 4)
3143 ConvOvf<NativeInt, INT_MIN, INT_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_NATIVEINT>();
3147 _ASSERTE(sizeof(NativeInt) == 8);
3148 ConvOvf<NativeInt, _I64_MIN, _I64_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_NATIVEINT>();
3151 case CEE_CONV_OVF_U:
3152 if (sizeof(NativeUInt) == 4)
3154 ConvOvf<NativeUInt, 0, UINT_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_NATIVEINT>();
3158 _ASSERTE(sizeof(NativeUInt) == 8);
3159 ConvOvf<NativeUInt, 0, _UI64_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_NATIVEINT>();
// --- Overflow-checked arithmetic (ADD/MUL/SUB.OVF and .OVF.UN). ---
3163 BinaryArithOvfOp<BA_Add, /*asUnsigned*/false>();
3165 case CEE_ADD_OVF_UN:
3166 BinaryArithOvfOp<BA_Add, /*asUnsigned*/true>();
3169 BinaryArithOvfOp<BA_Mul, /*asUnsigned*/false>();
3171 case CEE_MUL_OVF_UN:
3172 BinaryArithOvfOp<BA_Mul, /*asUnsigned*/true>();
3175 BinaryArithOvfOp<BA_Sub, /*asUnsigned*/false>();
3177 case CEE_SUB_OVF_UN:
3178 BinaryArithOvfOp<BA_Sub, /*asUnsigned*/true>();
3180 case CEE_ENDFINALLY:
3181 // We have just ended a finally.
3182 // If we were called during exception dispatch,
3183 // rethrow the exception on our way out.
3184 if (m_leaveInfoStack.IsEmpty())
3186 Object* finallyException = NULL;
3190 _ASSERTE(m_inFlightException != NULL);
3191 finallyException = m_inFlightException;
3192 INTERPLOG("endfinally handling for %s, %p, %p\n", methName, m_methInfo, finallyException);
3193 m_inFlightException = NULL;
3196 COMPlusThrow(ObjectToOBJECTREF(finallyException));
3199 // Otherwise, see if there's another finally block to
3200 // execute as part of processing the current LEAVE...
3201 else if (!SearchForCoveringFinally())
3203 // No, there isn't -- go to the leave target.
3204 _ASSERTE(!m_leaveInfoStack.IsEmpty());
3205 LeaveInfo li = m_leaveInfoStack.Pop();
3206 ExecuteBranch(li.m_target);
3208 // Yes, there, is, and SearchForCoveringFinally set us up to start executing it.
3209 continue; // Skip the default m_ILCodePtr++ at bottom of loop.
3215 Conv<NativeUInt, /*TIsUnsigned*/true, /*TCanHoldPtr*/true, /*TIsShort*/false, CORINFO_TYPE_NATIVEINT>();
// Reserved prefixes 2..7 are not implemented.
3218 NYI_INTERP("Unimplemented opcode: CEE_PREFIX7");
3221 NYI_INTERP("Unimplemented opcode: CEE_PREFIX6");
3224 NYI_INTERP("Unimplemented opcode: CEE_PREFIX5");
3227 NYI_INTERP("Unimplemented opcode: CEE_PREFIX4");
3230 NYI_INTERP("Unimplemented opcode: CEE_PREFIX3");
3233 NYI_INTERP("Unimplemented opcode: CEE_PREFIX2");
// --- CEE_PREFIX1 (0xFE): dispatch on the TWOBYTE_* second byte. ---
3236 // This is the prefix for all the 2-byte opcodes.
3237 // Figure out the second byte of the 2-byte opcode.
3238 ops = *(m_ILCodePtr + 1);
3239 #if INTERP_ILINSTR_PROFILE
3240 // Take one away from PREFIX1, which we won't count.
3241 InterlockedDecrement(&s_ILInstrExecs[CEE_PREFIX1]);
3242 // Credit instead to the 2-byte instruction index.
3243 InterlockedIncrement(&s_ILInstr2ByteExecs[ops]);
3244 #endif // INTERP_ILINSTR_PROFILE
3247 case TWOBYTE_CEE_ARGLIST:
3248 // NYI_INTERP("Unimplemented opcode: TWOBYTE_CEE_ARGLIST");
3249 _ASSERTE(m_methInfo->m_varArgHandleArgNum != NO_VA_ARGNUM);
3250 LdArgA(m_methInfo->m_varArgHandleArgNum);
3254 case TWOBYTE_CEE_CEQ:
3258 case TWOBYTE_CEE_CGT:
3262 case TWOBYTE_CEE_CGT_UN:
3263 CompareOp<CO_GT_UN>();
3266 case TWOBYTE_CEE_CLT:
3270 case TWOBYTE_CEE_CLT_UN:
3271 CompareOp<CO_LT_UN>();
// Long-form arg/local access: 2-byte index follows the opcode.
3275 case TWOBYTE_CEE_LDARG:
3277 argNums = getU2LittleEndian(m_ILCodePtr);
3281 case TWOBYTE_CEE_LDARGA:
3283 argNums = getU2LittleEndian(m_ILCodePtr);
3287 case TWOBYTE_CEE_STARG:
3289 argNums = getU2LittleEndian(m_ILCodePtr);
3294 case TWOBYTE_CEE_LDLOC:
3296 argNums = getU2LittleEndian(m_ILCodePtr);
3300 case TWOBYTE_CEE_LDLOCA:
3302 argNums = getU2LittleEndian(m_ILCodePtr);
3306 case TWOBYTE_CEE_STLOC:
3308 argNums = getU2LittleEndian(m_ILCodePtr);
3313 case TWOBYTE_CEE_CONSTRAINED:
3314 RecordConstrainedCall();
3317 case TWOBYTE_CEE_VOLATILE:
3318 // Set a flag that causes a memory barrier to be associated with the next load or store.
3319 m_volatileFlag = true;
3323 case TWOBYTE_CEE_LDFTN:
3327 case TWOBYTE_CEE_INITOBJ:
3331 case TWOBYTE_CEE_LOCALLOC:
3336 case TWOBYTE_CEE_LDVIRTFTN:
3340 case TWOBYTE_CEE_SIZEOF:
3344 case TWOBYTE_CEE_RETHROW:
3348 case TWOBYTE_CEE_READONLY:
3349 m_readonlyFlag = true;
3351 // A comment in importer.cpp indicates that READONLY may also apply to calls. We'll see.
3352 _ASSERTE_MSG(*m_ILCodePtr == CEE_LDELEMA, "According to the ECMA spec, READONLY may only precede LDELEMA");
3355 case TWOBYTE_CEE_INITBLK:
3359 case TWOBYTE_CEE_CPBLK:
3363 case TWOBYTE_CEE_ENDFILTER:
3367 case TWOBYTE_CEE_UNALIGNED:
3368 // Nothing to do here.
3372 case TWOBYTE_CEE_TAILCALL:
3373 // TODO: Needs revisiting when implementing tail call.
3374 // NYI_INTERP("Unimplemented opcode: TWOBYTE_CEE_TAILCALL");
3378 case TWOBYTE_CEE_REFANYTYPE:
3389 NYI_INTERP("Unimplemented opcode: CEE_PREFIXREF");
// --- Normal loop exit / exception handling tail. ---
3400 INTERPLOG("DONE %d, %s\n", m_methInfo->m_stubNum, m_methInfo->m_methName);
// Catch path: decide whether this method has an EH clause covering the
// faulting offset; if so resume in the handler, otherwise rethrow.
3404 INTERPLOG("EXCEPTION %d (throw), %s\n", m_methInfo->m_stubNum, m_methInfo->m_methName);
3406 bool handleException = false;
3407 OBJECTREF orThrowable = NULL;
3410 orThrowable = GET_THROWABLE();
3412 if (m_filterNextScan != 0)
3414 // We are in the middle of a filter scan and an exception is thrown inside
3415 // a filter. We are supposed to swallow it and assume the filter did not
3416 // handle the exception.
3418 m_largeStructOperandStackHt = 0;
3421 handleException = true;
3425 // orThrowable must be protected. MethodHandlesException() will place orThrowable
3426 // into the operand stack (a permanently protected area) if it returns true.
3427 GCPROTECT_BEGIN(orThrowable);
3428 handleException = MethodHandlesException(orThrowable);
3432 if (handleException)
3434 GetThread()->SafeSetThrowables(orThrowable
3435 DEBUG_ARG(ThreadExceptionState::STEC_CurrentTrackerEqualNullOkForInterpreter));
3440 INTERPLOG("EXCEPTION %d (rethrow), %s\n", m_methInfo->m_stubNum, m_methInfo->m_methName);
3444 EX_END_CATCH(RethrowTransientExceptions)
3448 #pragma optimize("", on)
// Called when an IL endfilter is executed. The filter's result (nonzero =
// EXCEPTION_EXECUTE_HANDLER) is on top of the operand stack. If the filter
// claims the exception, branch to the associated handler; otherwise keep
// scanning the remaining EH clauses, and rethrow if nobody handles it.
// NOTE(review): elided excerpt — braces and some connecting statements
// between the visible lines are not shown.
3451 void Interpreter::EndFilter()
3453 unsigned handles = OpStackGet<unsigned>(0);
3454 // If the filter decides to handle the exception, then go to the handler offset.
3457 // We decided to handle the exception, so give all EH entries a chance to
3458 // handle future exceptions. Clear scan.
3459 m_filterNextScan = 0;
3460 ExecuteBranch(m_methInfo->m_ILCode + m_filterHandlerOffset);
3462 // The filter decided not to handle the exception, ask if there is some other filter
3463 // lined up to try to handle it or some other catch/finally handlers will handle it.
3464 // If no one handles the exception, rethrow and be done with it.
3467 bool handlesEx = false;
// m_inFlightException holds the exception being dispatched; protect it
// while re-running the EH-clause search.
3469 OBJECTREF orThrowable = ObjectToOBJECTREF(m_inFlightException);
3470 GCPROTECT_BEGIN(orThrowable);
3471 handlesEx = MethodHandlesException(orThrowable);
3476 // Just clear scan before rethrowing to give any EH entry a chance to handle
3478 m_filterNextScan = 0;
3479 Object* filterException = NULL;
3482 _ASSERTE(m_inFlightException != NULL);
3483 filterException = m_inFlightException;
3484 INTERPLOG("endfilter handling for %s, %p, %p\n", m_methInfo->m_methName, m_methInfo, filterException);
3485 m_inFlightException = NULL;
// Nobody in this method handles it: propagate to the caller.
3488 COMPlusThrow(ObjectToOBJECTREF(filterException));
3493 // Let it do another round of filter:end-filter or handler block.
3494 // During the next end filter, we will reuse m_filterNextScan and
3495 // continue searching where we left off. Note however, while searching,
3496 // any of the filters could throw an exception. But this is supposed to
3497 // be swallowed and endfilter should be called with a value of 0 on the
// Determines whether the current method has an EH clause covering the current
// IL offset that handles the thrown object "orThrowable". If a handler (catch,
// filter, fault, or finally) is found, sets up the operand stack / in-flight
// exception state and branches the interpreter to the handler, returning true.
// NOTE(review): interior lines of this function are elided in this view;
// comments below describe only the visible code.
3503 bool Interpreter::MethodHandlesException(OBJECTREF orThrowable)
3511 bool handlesEx = false;
3513 if (orThrowable != NULL)
3515 // Don't catch ThreadAbort and other uncatchable exceptions
3516 if (!IsUncatchable(&orThrowable))
3518 // Does the current method catch this? The clauses are defined by offsets, so get that.
3519 // However, if we are in the middle of a filter scan, make sure we get the offset of the
3520 // excepting code, rather than the offset of the filter body.
3521 DWORD curOffset = (m_filterNextScan != 0) ? m_filterExcILOffset : CurOffset();
3522 TypeHandle orThrowableTH = TypeHandle(orThrowable->GetMethodTable());
// Protect the throwable across any GC triggered while cracking EH info.
3524 GCPROTECT_BEGIN(orThrowable);
3527 // Perform a filter scan or regular walk of the EH Table. Filter scan is performed when
3528 // we are evaluating a series of filters to handle the exception until the first handler
3529 // (filter's or otherwise) that will handle the exception.
3530 for (unsigned XTnum = m_filterNextScan; XTnum < m_methInfo->m_ehClauseCount; XTnum++)
3532 CORINFO_EH_CLAUSE clause;
3533 m_interpCeeInfo.getEHinfo(m_methInfo->m_method, XTnum, &clause);
3534 _ASSERTE(clause.HandlerLength != (unsigned)-1); // @DEPRECATED
3536 // First, is the current offset in the try block?
3537 if (clause.TryOffset <= curOffset && curOffset < clause.TryOffset + clause.TryLength)
3539 unsigned handlerOffset = 0;
3540 // CORINFO_EH_CLAUSE_NONE represents 'catch' blocks
3541 if (clause.Flags == CORINFO_EH_CLAUSE_NONE)
3543 // Now, does the catch block handle the thrown exception type?
3544 CORINFO_CLASS_HANDLE excType = FindClass(clause.ClassToken InterpTracingArg(RTK_CheckHandlesException));
3545 if (ExceptionIsOfRightType(TypeHandle::FromPtr(excType), orThrowableTH))
3548 // Push the exception object onto the operand stack.
3549 OpStackSet<OBJECTREF>(0, orThrowable);
3550 OpStackTypeSet(0, InterpreterType(CORINFO_TYPE_CLASS));
// Catch entry: large-struct stack is emptied along with the operand stack.
3552 m_largeStructOperandStackHt = 0;
3553 handlerOffset = clause.HandlerOffset;
3555 m_filterNextScan = 0;
3560 // Handle a wrapped exception.
3561 OBJECTREF orUnwrapped = PossiblyUnwrapThrowable(orThrowable, GetMethodDesc()->GetAssembly());
3562 if (ExceptionIsOfRightType(TypeHandle::FromPtr(excType), orUnwrapped->GetTypeHandle()))
3564 // Push the exception object onto the operand stack.
3565 OpStackSet<OBJECTREF>(0, orUnwrapped);
3566 OpStackTypeSet(0, InterpreterType(CORINFO_TYPE_CLASS));
3568 m_largeStructOperandStackHt = 0;
3569 handlerOffset = clause.HandlerOffset;
3571 m_filterNextScan = 0;
3575 else if (clause.Flags == CORINFO_EH_CLAUSE_FILTER)
3578 // Push the exception object onto the operand stack.
3579 OpStackSet<OBJECTREF>(0, orThrowable);
3580 OpStackTypeSet(0, InterpreterType(CORINFO_TYPE_CLASS));
3582 m_largeStructOperandStackHt = 0;
// Filters execute the filter body first; record where the real handler is
// and where to resume the EH-table scan if this filter rejects the exception.
3583 handlerOffset = clause.FilterOffset;
3584 m_inFlightException = OBJECTREFToObject(orThrowable);
3586 m_filterHandlerOffset = clause.HandlerOffset;
3587 m_filterNextScan = XTnum + 1;
3588 m_filterExcILOffset = curOffset;
3590 else if (clause.Flags == CORINFO_EH_CLAUSE_FAULT ||
3591 clause.Flags == CORINFO_EH_CLAUSE_FINALLY)
3594 // Save the exception object to rethrow.
3595 m_inFlightException = OBJECTREFToObject(orThrowable);
3596 // Empty the operand stack.
3598 m_largeStructOperandStackHt = 0;
3599 handlerOffset = clause.HandlerOffset;
3601 m_filterNextScan = 0;
3604 // Reset the interpreter loop in preparation of calling the handler.
3607 // Set the IL offset of the handler.
3608 ExecuteBranch(m_methInfo->m_ILCode + handlerOffset);
3610 // If an exception occurs while attempting to leave a protected scope,
3611 // we empty the 'leave' info stack upon entering the handler.
3612 while (!m_leaveInfoStack.IsEmpty())
3614 m_leaveInfoStack.Pop();
3617 // Some things are set up before a call, and must be cleared on an exception caught be the caller.
3618 // A method that returns a struct allocates local space for the return value, and "registers" that
3619 // space and the type so that it's scanned if a GC happens. "Unregister" it if we throw an exception
3620 // in the call, and handle it in the caller. (If it's not handled by the caller, the Interpreter is
3621 // deallocated, so it's value doesn't matter.)
3622 m_structRetValITPtr = NULL;
3623 m_callThisArg = NULL;
// Release any monitor held for a synchronized method before unwinding out.
3634 DoMonitorExitWork();
// Returns the number of operand bytes an IL opcode carries beyond its opcode
// byte(s), given its operand-format descriptor. Used to build the per-opcode
// size table below. NOTE(review): most case labels are elided in this view.
3640 static unsigned OpFormatExtraSize(opcode_format_t format) {
3648 case InlineBrTarget:
// Switch operands are variable-length, so they are sized by the caller.
3664 return 0; // We'll handle this specially.
3666 case ShortInlineVar:
3668 case ShortInlineBrTarget:
// Total encoded size (opcode + operands) of each one-byte IL opcode, indexed
// by opcode value; lazily filled in by OpSizes1ByteInit, guarded by the flag.
3679 static unsigned opSizes1Byte[CEE_COUNT];
3680 static bool opSizes1ByteInit = false;
// One-time initialization of opSizes1Byte: expands opcode.def so each entry is
// the opcode's base length plus its operand size (from OpFormatExtraSize).
// Idempotent; benign if raced, since every writer stores the same values.
3682 static void OpSizes1ByteInit()
3684 if (opSizes1ByteInit) return;
3685 #define OPDEF(name, stringname, stackpop, stackpush, params, kind, len, byte1, byte2, ctrl) \
3686 opSizes1Byte[name] = len + OpFormatExtraSize(params);
3687 #include "opcode.def"
3689 opSizes1ByteInit = true;
// Conservatively scans the IL stream for any backward branch (negative branch
// delta), which indicates the method may contain a loop. Returns true on the
// first backward target found; false if the scan completes without one.
3693 bool Interpreter::MethodMayHaveLoop(BYTE* ilCode, unsigned codeSize)
3697 BYTE* ilCodeLim = ilCode + codeSize;
3698 while (ilCode < ilCodeLim)
3700 unsigned op = *ilCode;
// Short-form branches: 1-byte signed displacement.
3703 case CEE_BR_S: case CEE_BRFALSE_S: case CEE_BRTRUE_S:
3704 case CEE_BEQ_S: case CEE_BGE_S: case CEE_BGT_S: case CEE_BLE_S: case CEE_BLT_S:
3705 case CEE_BNE_UN_S: case CEE_BGE_UN_S: case CEE_BGT_UN_S: case CEE_BLE_UN_S: case CEE_BLT_UN_S:
3707 delta = getI1(ilCode + 1);
3708 if (delta < 0) return true;
// Long-form branches: 4-byte little-endian signed displacement.
3712 case CEE_BR: case CEE_BRFALSE: case CEE_BRTRUE:
3713 case CEE_BEQ: case CEE_BGE: case CEE_BGT: case CEE_BLE: case CEE_BLT:
3714 case CEE_BNE_UN: case CEE_BGE_UN: case CEE_BGT_UN: case CEE_BLE_UN: case CEE_BLT_UN:
3716 delta = getI4LittleEndian(ilCode + 1);
3717 if (delta < 0) return true;
// switch: a 4-byte count followed by that many 4-byte jump deltas.
3723 UINT32 n = getU4LittleEndian(ilCode + 1);
3724 UINT32 instrSize = 1 + (n + 1)*4;
3725 for (unsigned i = 0; i < n; i++) {
3726 delta = getI4LittleEndian(ilCode + (5 + i * 4));
3727 if (delta < 0) return true;
3729 ilCode += instrSize;
// Two-byte (0xFE-prefixed) opcodes are remapped into the 0x100+ range.
3734 op = *(ilCode + 1) + 0x100;
3735 _ASSERTE(op < CEE_COUNT); // Bounds check for below.
3736 // deliberate fall-through here.
3739 // For the rest of the 1-byte instructions, we'll use a table-driven approach.
3740 ilCode += opSizes1Byte[op];
// Hook invoked on every backward branch. Currently a no-op placeholder; this
// is where a GC poll would be inserted so long-running loops remain
// GC-suspendable.
3748 void Interpreter::BackwardsBranchActions(int offset)
3750 // TODO: Figure out how to do a GC poll.
// While executing a "leave", searches the EH table (resuming at the saved
// index in the top LeaveInfo) for a finally clause that covers the leave
// instruction but not its target. If found, branches to the finally handler
// and records where to resume the search; otherwise the caller pops the
// leave-info stack and continues at the leave target.
3753 bool Interpreter::SearchForCoveringFinally()
3761 _ASSERTE_MSG(!m_leaveInfoStack.IsEmpty(), "precondition");
// PeekRef: we update the entry in place rather than pop/push.
3763 LeaveInfo& li = m_leaveInfoStack.PeekRef();
3767 for (unsigned XTnum = li.m_nextEHIndex; XTnum < m_methInfo->m_ehClauseCount; XTnum++)
3769 CORINFO_EH_CLAUSE clause;
3770 m_interpCeeInfo.getEHinfo(m_methInfo->m_method, XTnum, &clause);
3771 _ASSERTE(clause.HandlerLength != (unsigned)-1); // @DEPRECATED
3773 // First, is the offset of the leave instruction in the try block?
3774 unsigned tryEndOffset = clause.TryOffset + clause.TryLength;
3775 if (clause.TryOffset <= li.m_offset && li.m_offset < tryEndOffset)
3777 // Yes: is it a finally, and is its target outside the try block?
3778 size_t targOffset = (li.m_target - m_methInfo->m_ILCode);
3779 if (clause.Flags == CORINFO_EH_CLAUSE_FINALLY
3780 && !(clause.TryOffset <= targOffset && targOffset < tryEndOffset))
// Execute this finally next; remember to continue the scan after it.
3782 m_ILCodePtr = m_methInfo->m_ILCode + clause.HandlerOffset;
3783 li.m_nextEHIndex = XTnum + 1;
3789 // Caller will handle popping the leave info stack.
3794 void Interpreter::GCScanRoots(promote_func* pf, ScanContext* sc, void* interp0)
3796 Interpreter* interp = reinterpret_cast<Interpreter*>(interp0);
3797 interp->GCScanRoots(pf, sc);
// Reports every GC root owned by this interpreter frame to the GC: incoming
// arguments, the "this" pointer, in-flight call/exception state, locals, the
// operand stack, and any outgoing call arguments.
// NOTE(review): interior lines are elided in this view; comments describe
// only the visible code.
3800 void Interpreter::GCScanRoots(promote_func* pf, ScanContext* sc)
3802 // Report inbound arguments, if the interpreter has not been invoked directly.
3803 // (In the latter case, the arguments are reported by the calling method.)
3806 for (unsigned i = 0; i < m_methInfo->m_numArgs; i++)
3808 GCScanRootAtLoc(reinterpret_cast<Object**>(GetArgAddr(i)), GetArgType(i), pf, sc);
// "this" is scanned as an object ref for reference types, as an interior
// (byref) pointer for value-type instance methods.
3812 if (m_methInfo->GetFlag<InterpreterMethodInfo::Flag_hasThisArg>())
3814 if (m_methInfo->GetFlag<InterpreterMethodInfo::Flag_thisArgIsObjPtr>())
3816 GCScanRootAtLoc(&m_thisArg, InterpreterType(CORINFO_TYPE_CLASS), pf, sc);
3820 GCScanRootAtLoc(&m_thisArg, InterpreterType(CORINFO_TYPE_BYREF), pf, sc);
3824 // This is the "this" argument passed in to DoCallWork. (Note that we treat this as a byref; it
3825 // might be, for a struct instance method, and this covers the object pointer case as well.)
3826 GCScanRootAtLoc(reinterpret_cast<Object**>(&m_callThisArg), InterpreterType(CORINFO_TYPE_BYREF), pf, sc);
3828 // Scan the exception object that we'll rethrow at the end of the finally block.
3829 GCScanRootAtLoc(reinterpret_cast<Object**>(&m_inFlightException), InterpreterType(CORINFO_TYPE_CLASS), pf, sc);
3831 // A retBufArg, may, in some cases, be a byref into the heap.
3832 if (m_retBufArg != NULL)
3834 GCScanRootAtLoc(reinterpret_cast<Object**>(&m_retBufArg), InterpreterType(CORINFO_TYPE_BYREF), pf, sc);
// If a struct return value has been "registered" for scanning, report it.
3837 if (m_structRetValITPtr != NULL)
3839 GCScanRootAtLoc(reinterpret_cast<Object**>(m_structRetValTempSpace), *m_structRetValITPtr, pf, sc);
3842 // We'll conservatively assume that we might have a security object.
3843 GCScanRootAtLoc(reinterpret_cast<Object**>(&m_securityObject), InterpreterType(CORINFO_TYPE_CLASS), pf, sc);
// Locals: large structs live out-of-line (the slot holds a pointer to the
// actual storage); everything else lives directly in the fixed-size slot.
3846 for (unsigned i = 0; i < m_methInfo->m_numLocals; i++)
3848 InterpreterType it = m_methInfo->m_localDescs[i].m_type;
3849 void* localPtr = NULL;
3850 if (it.IsLargeStruct(&m_interpCeeInfo))
3852 void* structPtr = ArgSlotEndiannessFixup(reinterpret_cast<ARG_SLOT*>(FixedSizeLocalSlot(i)), sizeof(void**));
3853 localPtr = *reinterpret_cast<void**>(structPtr);
3857 localPtr = ArgSlotEndiannessFixup(reinterpret_cast<ARG_SLOT*>(FixedSizeLocalSlot(i)), it.Size(&m_interpCeeInfo));
3859 GCScanRootAtLoc(reinterpret_cast<Object**>(localPtr), it, pf, sc, m_methInfo->GetPinningBit(i));
3862 // Do current ostack.
3863 for (unsigned i = 0; i < m_curStackHt; i++)
3865 InterpreterType it = OpStackTypeGet(i);
3866 if (it.IsLargeStruct(&m_interpCeeInfo))
3868 Object** structPtr = reinterpret_cast<Object**>(OpStackGet<void*>(i));
3869 // If the ostack value is a pointer to a local var value, don't scan, since we already
3870 // scanned the variable value above.
3871 if (!IsInLargeStructLocalArea(structPtr))
3873 GCScanRootAtLoc(structPtr, it, pf, sc);
3878 void* stackPtr = OpStackGetAddr(i, it.Size(&m_interpCeeInfo));
3879 GCScanRootAtLoc(reinterpret_cast<Object**>(stackPtr), it, pf, sc);
3883 // Any outgoing arguments for a call in progress.
3884 for (unsigned i = 0; i < m_argsSize; i++)
3886 // If a call has a large struct argument, we'll have pushed a pointer to the entry for that argument on the
3887 // largeStructStack of the current Interpreter. That will be scanned by the code above, so just skip it.
3888 InterpreterType undef(CORINFO_TYPE_UNDEF);
3889 InterpreterType it = m_argTypes[i];
3890 if (it != undef && !it.IsLargeStruct(&m_interpCeeInfo))
3892 BYTE* argPtr = ArgSlotEndiannessFixup(&m_args[i], it.Size(&m_interpCeeInfo));
3893 GCScanRootAtLoc(reinterpret_cast<Object**>(argPtr), it, pf, sc);
// Reports a single root at "loc" to the GC according to its interpreter type:
// object refs are promoted normally, byrefs/refanys as interior pointers, and
// value classes are cracked for embedded references. "pinningRef" adds
// GC_CALL_PINNED for pinned locals.
3898 void Interpreter::GCScanRootAtLoc(Object** loc, InterpreterType it, promote_func* pf, ScanContext* sc, bool pinningRef)
3900 switch (it.ToCorInfoType())
3902 case CORINFO_TYPE_CLASS:
3903 case CORINFO_TYPE_STRING:
3906 if (pinningRef) flags |= GC_CALL_PINNED;
3907 (*pf)(loc, sc, flags);
// Byref/refany slots may point into the middle of an object: report as
// interior pointers so the GC can find the containing object.
3911 case CORINFO_TYPE_BYREF:
3912 case CORINFO_TYPE_REFANY:
3914 DWORD flags = GC_CALL_INTERIOR;
3915 if (pinningRef) flags |= GC_CALL_PINNED;
3916 (*pf)(loc, sc, flags);
3920 case CORINFO_TYPE_VALUECLASS:
3921 _ASSERTE(!pinningRef);
3922 GCScanValueClassRootAtLoc(loc, it.ToClassHandle(), pf, sc);
// All other (non-GC) types: nothing to report.
3926 _ASSERTE(!pinningRef);
3931 void Interpreter::GCScanValueClassRootAtLoc(Object** loc, CORINFO_CLASS_HANDLE valueClsHnd, promote_func* pf, ScanContext* sc)
3933 MethodTable* valClsMT = GetMethodTableFromClsHnd(valueClsHnd);
3934 ReportPointersFromValueType(pf, sc, valClsMT, loc);
3937 // Returns "true" iff "cit" is "stack-normal": all integer types with byte size less than 4
3938 // are folded to CORINFO_TYPE_INT; all remaining unsigned types are folded to their signed counterparts.
3939 bool IsStackNormalType(CorInfoType cit)
3941 LIMITED_METHOD_CONTRACT;
3945 case CORINFO_TYPE_UNDEF:
3946 case CORINFO_TYPE_VOID:
3947 case CORINFO_TYPE_BOOL:
3948 case CORINFO_TYPE_CHAR:
3949 case CORINFO_TYPE_BYTE:
3950 case CORINFO_TYPE_UBYTE:
3951 case CORINFO_TYPE_SHORT:
3952 case CORINFO_TYPE_USHORT:
3953 case CORINFO_TYPE_UINT:
3954 case CORINFO_TYPE_NATIVEUINT:
3955 case CORINFO_TYPE_ULONG:
3956 case CORINFO_TYPE_VAR:
3957 case CORINFO_TYPE_STRING:
3958 case CORINFO_TYPE_PTR:
3961 case CORINFO_TYPE_INT:
3962 case CORINFO_TYPE_NATIVEINT:
3963 case CORINFO_TYPE_BYREF:
3964 case CORINFO_TYPE_CLASS:
3965 case CORINFO_TYPE_LONG:
3966 case CORINFO_TYPE_VALUECLASS:
3967 case CORINFO_TYPE_REFANY:
3968 // I chose to consider both float and double stack-normal; together these comprise
3969 // the "F" type of the ECMA spec. This means I have to consider these to freely
3971 case CORINFO_TYPE_FLOAT:
3972 case CORINFO_TYPE_DOUBLE:
// Folds "cit" to its stack-normal representation: small integers widen to
// INT, unsigned types fold to their signed counterparts, STRING folds to
// CLASS; already-normal types pass through. See IsStackNormalType.
3980 CorInfoType CorInfoTypeStackNormalize(CorInfoType cit)
3982 LIMITED_METHOD_CONTRACT;
3986 case CORINFO_TYPE_UNDEF:
3987 return CORINFO_TYPE_UNDEF;
3989 case CORINFO_TYPE_VOID:
3990 case CORINFO_TYPE_VAR:
3991 _ASSERTE_MSG(false, "Type that cannot be on the ostack.");
3992 return CORINFO_TYPE_UNDEF;
// All integral types of size <= 4 widen to INT on the operand stack.
3994 case CORINFO_TYPE_BOOL:
3995 case CORINFO_TYPE_CHAR:
3996 case CORINFO_TYPE_BYTE:
3997 case CORINFO_TYPE_UBYTE:
3998 case CORINFO_TYPE_SHORT:
3999 case CORINFO_TYPE_USHORT:
4000 case CORINFO_TYPE_UINT:
4001 return CORINFO_TYPE_INT;
4003 case CORINFO_TYPE_NATIVEUINT:
4004 case CORINFO_TYPE_PTR:
4005 return CORINFO_TYPE_NATIVEINT;
4007 case CORINFO_TYPE_ULONG:
4008 return CORINFO_TYPE_LONG;
4010 case CORINFO_TYPE_STRING:
4011 return CORINFO_TYPE_CLASS;
// These are already stack-normal and pass through unchanged.
4013 case CORINFO_TYPE_INT:
4014 case CORINFO_TYPE_NATIVEINT:
4015 case CORINFO_TYPE_BYREF:
4016 case CORINFO_TYPE_CLASS:
4017 case CORINFO_TYPE_LONG:
4018 case CORINFO_TYPE_VALUECLASS:
4019 case CORINFO_TYPE_REFANY:
4020 // I chose to consider both float and double stack-normal; together these comprise
4021 // the "F" type of the ECMA spec. This means I have to consider these to freely
4023 case CORINFO_TYPE_FLOAT:
4024 case CORINFO_TYPE_DOUBLE:
4025 _ASSERTE(IsStackNormalType(cit));
4033 InterpreterType InterpreterType::StackNormalize() const
4035 LIMITED_METHOD_CONTRACT;
4037 switch (ToCorInfoType())
4039 case CORINFO_TYPE_BOOL:
4040 case CORINFO_TYPE_CHAR:
4041 case CORINFO_TYPE_BYTE:
4042 case CORINFO_TYPE_UBYTE:
4043 case CORINFO_TYPE_SHORT:
4044 case CORINFO_TYPE_USHORT:
4045 case CORINFO_TYPE_UINT:
4046 return InterpreterType(CORINFO_TYPE_INT);
4048 case CORINFO_TYPE_NATIVEUINT:
4049 case CORINFO_TYPE_PTR:
4050 return InterpreterType(CORINFO_TYPE_NATIVEINT);
4052 case CORINFO_TYPE_ULONG:
4053 return InterpreterType(CORINFO_TYPE_LONG);
4055 case CORINFO_TYPE_STRING:
4056 return InterpreterType(CORINFO_TYPE_CLASS);
4058 case CORINFO_TYPE_INT:
4059 case CORINFO_TYPE_NATIVEINT:
4060 case CORINFO_TYPE_BYREF:
4061 case CORINFO_TYPE_CLASS:
4062 case CORINFO_TYPE_LONG:
4063 case CORINFO_TYPE_VALUECLASS:
4064 case CORINFO_TYPE_REFANY:
4065 case CORINFO_TYPE_FLOAT:
4066 case CORINFO_TYPE_DOUBLE:
4067 return *const_cast<InterpreterType*>(this);
4069 case CORINFO_TYPE_UNDEF:
4070 case CORINFO_TYPE_VOID:
4071 case CORINFO_TYPE_VAR:
4073 _ASSERTE_MSG(false, "should not reach here");
4074 return *const_cast<InterpreterType*>(this);
// Loose type-equality used when checking that a value's dynamic type matches
// an expected InterpreterType: exact match, size-equal value classes, a few
// VM-sanctioned primitive/byref equivalences, and primitive-wrapper value
// classes all count as matches.
// NOTE(review): interior lines are elided in this view.
4079 bool InterpreterType::MatchesWork(const InterpreterType it2, CEEInfo* info) const
4087 if (*this == it2) return true;
4090 CorInfoType cit1 = ToCorInfoType();
4091 CorInfoType cit2 = it2.ToCorInfoType();
4095 // An approximation: valueclasses of the same size match.
4096 if (cit1 == CORINFO_TYPE_VALUECLASS &&
4097 cit2 == CORINFO_TYPE_VALUECLASS &&
4098 Size(info) == it2.Size(info))
4103 // NativeInt matches byref. (In unsafe code).
4104 if ((cit1 == CORINFO_TYPE_BYREF && cit2 == CORINFO_TYPE_NATIVEINT))
4107 // apparently the VM may do the optimization of reporting the return type of a method that
4108 // returns a struct of a single nativeint field *as* nativeint; and similarly with at least some other primitive types.
4109 // So weaken this check to allow that.
4110 // (The check is actually a little weaker still, since I don't want to crack the return type and make sure
4111 // that it has only a single nativeint member -- so I just ensure that the total size is correct).
4114 case CORINFO_TYPE_NATIVEINT:
4115 case CORINFO_TYPE_NATIVEUINT:
4116 _ASSERTE(sizeof(NativeInt) == sizeof(NativeUInt));
4117 if (it2.Size(info) == sizeof(NativeInt))
4121 case CORINFO_TYPE_INT:
4122 case CORINFO_TYPE_UINT:
4123 _ASSERTE(sizeof(INT32) == sizeof(UINT32));
4124 if (it2.Size(info) == sizeof(INT32))
4132 // See if the second is a value type synonym for a primitive.
4133 if (cit2 == CORINFO_TYPE_VALUECLASS)
4135 CorInfoType cit2prim = info->getTypeForPrimitiveValueClass(it2.ToClassHandle());
4136 if (cit2prim != CORINFO_TYPE_UNDEF)
4138 InterpreterType it2prim(cit2prim);
// Compare against the primitive's stack-normal form, since "this" is
// typically already stack-normalized.
4139 if (*this == it2prim.StackNormalize())
// Size in bytes of each CorInfoType, indexed by the enum's numeric value.
// Entries of 0 mark types whose size is not statically known here
// (UNDEF/VOID/VALUECLASS/VAR) — value-class sizes come from the class handle.
4150 size_t CorInfoTypeSizeArray[] =
4152 /*CORINFO_TYPE_UNDEF = 0x0*/0,
4153 /*CORINFO_TYPE_VOID = 0x1*/0,
4154 /*CORINFO_TYPE_BOOL = 0x2*/1,
4155 /*CORINFO_TYPE_CHAR = 0x3*/2,
4156 /*CORINFO_TYPE_BYTE = 0x4*/1,
4157 /*CORINFO_TYPE_UBYTE = 0x5*/1,
4158 /*CORINFO_TYPE_SHORT = 0x6*/2,
4159 /*CORINFO_TYPE_USHORT = 0x7*/2,
4160 /*CORINFO_TYPE_INT = 0x8*/4,
4161 /*CORINFO_TYPE_UINT = 0x9*/4,
4162 /*CORINFO_TYPE_LONG = 0xa*/8,
4163 /*CORINFO_TYPE_ULONG = 0xb*/8,
4164 /*CORINFO_TYPE_NATIVEINT = 0xc*/sizeof(void*),
4165 /*CORINFO_TYPE_NATIVEUINT = 0xd*/sizeof(void*),
4166 /*CORINFO_TYPE_FLOAT = 0xe*/4,
4167 /*CORINFO_TYPE_DOUBLE = 0xf*/8,
4168 /*CORINFO_TYPE_STRING = 0x10*/sizeof(void*),
4169 /*CORINFO_TYPE_PTR = 0x11*/sizeof(void*),
4170 /*CORINFO_TYPE_BYREF = 0x12*/sizeof(void*),
4171 /*CORINFO_TYPE_VALUECLASS = 0x13*/0,
4172 /*CORINFO_TYPE_CLASS = 0x14*/sizeof(void*),
// REFANY (TypedReference) is a pointer pair: data byref + type handle.
4173 /*CORINFO_TYPE_REFANY = 0x15*/sizeof(void*)*2,
4174 /*CORINFO_TYPE_VAR = 0x16*/0,
4177 bool CorInfoTypeIsUnsigned(CorInfoType cit)
4179 LIMITED_METHOD_CONTRACT;
4183 case CORINFO_TYPE_UINT:
4184 case CORINFO_TYPE_NATIVEUINT:
4185 case CORINFO_TYPE_ULONG:
4186 case CORINFO_TYPE_UBYTE:
4187 case CORINFO_TYPE_USHORT:
4188 case CORINFO_TYPE_CHAR:
4196 bool CorInfoTypeIsIntegral(CorInfoType cit)
4198 LIMITED_METHOD_CONTRACT;
4202 case CORINFO_TYPE_UINT:
4203 case CORINFO_TYPE_NATIVEUINT:
4204 case CORINFO_TYPE_ULONG:
4205 case CORINFO_TYPE_UBYTE:
4206 case CORINFO_TYPE_USHORT:
4207 case CORINFO_TYPE_INT:
4208 case CORINFO_TYPE_NATIVEINT:
4209 case CORINFO_TYPE_LONG:
4210 case CORINFO_TYPE_BYTE:
4211 case CORINFO_TYPE_BOOL:
4212 case CORINFO_TYPE_SHORT:
4220 bool CorInfoTypeIsFloatingPoint(CorInfoType cit)
4222 return cit == CORINFO_TYPE_FLOAT || cit == CORINFO_TYPE_DOUBLE;
4225 bool CorInfoTypeIsFloatingPoint(CorInfoHFAElemType cihet)
4227 return cihet == CORINFO_HFA_ELEM_FLOAT || cihet == CORINFO_HFA_ELEM_DOUBLE;
4230 bool CorElemTypeIsUnsigned(CorElementType cet)
4232 LIMITED_METHOD_CONTRACT;
4236 case ELEMENT_TYPE_U1:
4237 case ELEMENT_TYPE_U2:
4238 case ELEMENT_TYPE_U4:
4239 case ELEMENT_TYPE_U8:
4240 case ELEMENT_TYPE_U:
4248 bool CorInfoTypeIsPointer(CorInfoType cit)
4250 LIMITED_METHOD_CONTRACT;
4253 case CORINFO_TYPE_PTR:
4254 case CORINFO_TYPE_BYREF:
4255 case CORINFO_TYPE_NATIVEINT:
4256 case CORINFO_TYPE_NATIVEUINT:
4259 // It seems like the ECMA spec doesn't allow this, but (at least) the managed C++
4260 // compiler expects the explicitly-sized pointer type of the platform pointer size to work:
4261 case CORINFO_TYPE_INT:
4262 case CORINFO_TYPE_UINT:
4263 return sizeof(NativeInt) == sizeof(INT32);
4264 case CORINFO_TYPE_LONG:
4265 case CORINFO_TYPE_ULONG:
4266 return sizeof(NativeInt) == sizeof(INT64);
// Implements ldarg: pushes the value of argument "argNum" onto the operand
// stack, loading it from the argument's home location with its declared type.
4273 void Interpreter::LdArg(int argNum)
4281 LdFromMemAddr(GetArgAddr(argNum), GetArgType(argNum));
// Implements ldarga: pushes the address of argument "argNum" onto the
// operand stack as a byref.
4284 void Interpreter::LdArgA(int argNum)
4292 OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_BYREF));
4293 OpStackSet<void*>(m_curStackHt, reinterpret_cast<void*>(GetArgAddr(argNum)));
// Implements starg: pops the top of the operand stack and stores it into
// argument "argNum"'s home location with the argument's declared type.
4297 void Interpreter::StArg(int argNum)
4305 StToLocalMemAddr(GetArgAddr(argNum), GetArgType(argNum));
// Implements ldloca: pushes the address of local "locNum" onto the operand
// stack as a byref. Large structs live out-of-line, so for them the slot
// holds a pointer to the actual storage, which is what gets pushed.
4309 void Interpreter::LdLocA(int locNum)
4317 InterpreterType tp = m_methInfo->m_localDescs[locNum].m_type;
4319 if (tp.IsLargeStruct(&m_interpCeeInfo))
4321 void* structPtr = ArgSlotEndiannessFixup(reinterpret_cast<ARG_SLOT*>(FixedSizeLocalSlot(locNum)), sizeof(void**));
4322 addr = *reinterpret_cast<void**>(structPtr)
4326 addr = ArgSlotEndiannessFixup(reinterpret_cast<ARG_SLOT*>(FixedSizeLocalSlot(locNum)), tp.Size(&m_interpCeeInfo));
4328 // The "addr" above, while a byref, is never a heap pointer, so we're robust if
4329 // any of these were to cause a GC.
4330 OpStackSet<void*>(m_curStackHt, addr);
4331 OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_BYREF));
// Implements ldc.i4*: pushes the 32-bit integer constant "c" onto the
// operand stack as an INT.
4335 void Interpreter::LdIcon(INT32 c)
4343 OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_INT));
4344 OpStackSet<INT32>(m_curStackHt, c);
// Implements ldc.r4: pushes a 32-bit float constant, supplied as its raw
// IEEE-754 bit pattern in "c", onto the operand stack as a FLOAT.
4348 void Interpreter::LdR4con(INT32 c)
4356 OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_FLOAT));
4357 OpStackSet<INT32>(m_curStackHt, c);
// Implements ldc.i8: pushes the 64-bit integer constant "c" onto the operand
// stack as a LONG.
4361 void Interpreter::LdLcon(INT64 c)
4369 OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_LONG));
4370 OpStackSet<INT64>(m_curStackHt, c);
// Implements ldc.r8: pushes a 64-bit double constant, supplied as its raw
// IEEE-754 bit pattern in "c", onto the operand stack as a DOUBLE.
4374 void Interpreter::LdR8con(INT64 c)
4382 OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_DOUBLE));
4383 OpStackSet<INT64>(m_curStackHt, c);
// Implements ldnull: pushes a null object reference onto the operand stack.
4387 void Interpreter::LdNull()
4395 OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_CLASS));
4396 OpStackSet<void*>(m_curStackHt, NULL);
// Implements ldind.* for full-width types: replaces the address on top of the
// stack with the T value it points to, typed as the stack-normal "cit".
// A memory barrier is issued afterwards if the load was marked volatile.
4400 template<typename T, CorInfoType cit>
4401 void Interpreter::LdInd()
4403 _ASSERTE(TOSIsPtr());
4404 _ASSERTE(IsStackNormalType(cit));
4405 unsigned curStackInd = m_curStackHt-1;
4406 T* ptr = OpStackGet<T*>(curStackInd);
// Translate a null/invalid dereference into a managed NullReferenceException.
4407 ThrowOnInvalidPointer(ptr);
4408 OpStackSet<T>(curStackInd, *ptr);
4409 OpStackTypeSet(curStackInd, InterpreterType(cit));
4410 BarrierIfVolatile();
// Implements ldind.* for sub-4-byte types: loads a small T through the
// address on top of the stack and widens it (zero- or sign-extended per
// "isUnsigned") to the stack-normal INT representation.
4413 template<typename T, bool isUnsigned>
4414 void Interpreter::LdIndShort()
4416 _ASSERTE(TOSIsPtr());
4417 _ASSERTE(sizeof(T) < 4);
4418 unsigned curStackInd = m_curStackHt-1;
4419 T* ptr = OpStackGet<T*>(curStackInd);
4420 ThrowOnInvalidPointer(ptr);
// Zero-extend unsigned sources; sign-extend signed ones.
4423 OpStackSet<UINT32>(curStackInd, *ptr);
4427 OpStackSet<INT32>(curStackInd, *ptr);
4429 // All short integers are normalized to INT as their stack type.
4430 OpStackTypeSet(curStackInd, InterpreterType(CORINFO_TYPE_INT));
4431 BarrierIfVolatile();
// Implements stind.* for non-object types: pops a T value and an address
// from the operand stack and stores the value through the address. The
// barrier precedes the store for volatile semantics.
4434 template<typename T>
4435 void Interpreter::StInd()
4437 _ASSERTE(m_curStackHt >= 2);
4438 _ASSERTE(CorInfoTypeIsPointer(OpStackTypeGet(m_curStackHt-2).ToCorInfoType()));
4439 BarrierIfVolatile();
4440 unsigned stackInd0 = m_curStackHt-2;
4441 unsigned stackInd1 = m_curStackHt-1;
4442 T val = OpStackGet<T>(stackInd1);
4443 T* ptr = OpStackGet<T*>(stackInd0);
4444 ThrowOnInvalidPointer(ptr);
// Optional IL tracing (debug/diagnostic builds only).
4449 if (s_TraceInterpreterILFlag.val(CLRConfig::INTERNAL_TraceInterpreterIL) &&
4454 #endif // INTERP_TRACING
// Implements stind.ref: pops an object reference and an address from the
// operand stack and stores the reference through the address via
// SetObjectReference, which performs the required GC write barrier.
4457 void Interpreter::StInd_Ref()
4459 _ASSERTE(m_curStackHt >= 2);
4460 _ASSERTE(CorInfoTypeIsPointer(OpStackTypeGet(m_curStackHt-2).ToCorInfoType()));
4461 BarrierIfVolatile();
4462 unsigned stackInd0 = m_curStackHt-2;
4463 unsigned stackInd1 = m_curStackHt-1;
4464 OBJECTREF val = ObjectToOBJECTREF(OpStackGet<Object*>(stackInd1));
4465 OBJECTREF* ptr = OpStackGet<OBJECTREF*>(stackInd0);
4466 ThrowOnInvalidPointer(ptr);
4467 SetObjectReference(ptr, val);
// Optional IL tracing (debug/diagnostic builds only).
4471 if (s_TraceInterpreterILFlag.val(CLRConfig::INTERNAL_TraceInterpreterIL) &&
4476 #endif // INTERP_TRACING
// Implements the non-overflow binary arithmetic opcodes (add/sub/mul/div/rem,
// selected by the compile-time template parameter "op"). Dispatches on the
// stack-normal types of the two operands, applying the ECMA-335 binary
// numeric operation table (plus the interpreter's "loose rules" extensions)
// and delegating the actual computation to BinaryArithOpWork.
// NOTE(review): interior lines are elided in this view; comments describe
// only the visible code.
4481 void Interpreter::BinaryArithOp()
4489 _ASSERTE(m_curStackHt >= 2);
4490 unsigned op1idx = m_curStackHt - 2;
4491 unsigned op2idx = m_curStackHt - 1;
4492 InterpreterType t1 = OpStackTypeGet(op1idx);
4493 _ASSERTE(IsStackNormalType(t1.ToCorInfoType()));
4494 // Looking at the generated code, it does seem to save some instructions to use the "shifted
4495 // types," though the effect on end-to-end time is variable. So I'll leave it set.
4496 InterpreterType t2 = OpStackTypeGet(op2idx);
4497 _ASSERTE(IsStackNormalType(t2.ToCorInfoType()));
4499 // In all cases belows, since "op" is compile-time constant, "if" chains on it should fold away.
4500 switch (t1.ToCorInfoTypeShifted())
4502 case CORINFO_TYPE_SHIFTED_INT:
4506 INT32 val1 = OpStackGet<INT32>(op1idx);
4507 INT32 val2 = OpStackGet<INT32>(op2idx);
4508 BinaryArithOpWork<op, INT32, /*IsIntType*/true, CORINFO_TYPE_INT, /*TypeIsUnchanged*/true>(val1, val2);
4512 CorInfoTypeShifted cits2 = t2.ToCorInfoTypeShifted();
4513 if (cits2 == CORINFO_TYPE_SHIFTED_NATIVEINT)
4515 // Int op NativeInt = NativeInt
4516 NativeInt val1 = static_cast<NativeInt>(OpStackGet<INT32>(op1idx));
4517 NativeInt val2 = OpStackGet<NativeInt>(op2idx);
4518 BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/false>(val1, val2);
4520 else if (s_InterpreterLooseRules && cits2 == CORINFO_TYPE_SHIFTED_LONG)
4522 // Int op Long = Long
4523 INT64 val1 = static_cast<INT64>(OpStackGet<INT32>(op1idx));
4524 INT64 val2 = OpStackGet<INT64>(op2idx);
4525 BinaryArithOpWork<op, INT64, /*IsIntType*/true, CORINFO_TYPE_LONG, /*TypeIsUnchanged*/false>(val1, val2);
4527 else if (cits2 == CORINFO_TYPE_SHIFTED_BYREF)
// Per ECMA-335, only add (and, loosely, sub) combine an int with a byref.
4529 if (op == BA_Add || (s_InterpreterLooseRules && op == BA_Sub))
4531 // Int + ByRef = ByRef
4532 NativeInt val1 = static_cast<NativeInt>(OpStackGet<INT32>(op1idx));
4533 NativeInt val2 = OpStackGet<NativeInt>(op2idx);
4534 BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_BYREF, /*TypeIsUnchanged*/false>(val1, val2);
4538 VerificationError("Operation not permitted on int and managed pointer.");
4543 VerificationError("Binary arithmetic operation type mismatch (int and ?)");
4548 case CORINFO_TYPE_SHIFTED_NATIVEINT:
4550 NativeInt val1 = OpStackGet<NativeInt>(op1idx);
4553 // NativeInt op NativeInt = NativeInt
4554 NativeInt val2 = OpStackGet<NativeInt>(op2idx);
4555 BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
4559 CorInfoTypeShifted cits2 = t2.ToCorInfoTypeShifted();
4560 if (cits2 == CORINFO_TYPE_SHIFTED_INT)
4562 // NativeInt op Int = NativeInt
4563 NativeInt val2 = static_cast<NativeInt>(OpStackGet<INT32>(op2idx));
4564 BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
4566 // CLI spec does not allow adding a native int and an int64. So use loose rules.
4567 else if (s_InterpreterLooseRules && cits2 == CORINFO_TYPE_SHIFTED_LONG)
4569 // NativeInt op Int = NativeInt
4570 NativeInt val2 = static_cast<NativeInt>(OpStackGet<INT64>(op2idx));
4571 BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
4573 else if (cits2 == CORINFO_TYPE_SHIFTED_BYREF)
4575 if (op == BA_Add || (s_InterpreterLooseRules && op == BA_Sub))
4577 // NativeInt + ByRef = ByRef
4578 NativeInt val2 = OpStackGet<NativeInt>(op2idx);
4579 BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_BYREF, /*TypeIsUnchanged*/false>(val1, val2);
4583 VerificationError("Operation not permitted on native int and managed pointer.");
4588 VerificationError("Binary arithmetic operation type mismatch (native int and ?)");
4594 case CORINFO_TYPE_SHIFTED_LONG:
4596 bool looseLong = false;
// On 64-bit hosts, loose rules let LONG mix with native-int-width operands.
4597 #if defined(HOST_AMD64)
4598 looseLong = (s_InterpreterLooseRules && (t2.ToCorInfoType() == CORINFO_TYPE_NATIVEINT ||
4599 t2.ToCorInfoType() == CORINFO_TYPE_BYREF));
4601 if (t1 == t2 || looseLong)
4603 // Long op Long = Long
4604 INT64 val1 = OpStackGet<INT64>(op1idx);
4605 INT64 val2 = OpStackGet<INT64>(op2idx);
4606 BinaryArithOpWork<op, INT64, /*IsIntType*/true, CORINFO_TYPE_LONG, /*TypeIsUnchanged*/true>(val1, val2);
4610 VerificationError("Binary arithmetic operation type mismatch (long and ?)");
4615 case CORINFO_TYPE_SHIFTED_FLOAT:
4619 // Float op Float = Float
4620 float val1 = OpStackGet<float>(op1idx);
4621 float val2 = OpStackGet<float>(op2idx);
4622 BinaryArithOpWork<op, float, /*IsIntType*/false, CORINFO_TYPE_FLOAT, /*TypeIsUnchanged*/true>(val1, val2);
4626 CorInfoTypeShifted cits2 = t2.ToCorInfoTypeShifted();
4627 if (cits2 == CORINFO_TYPE_SHIFTED_DOUBLE)
4629 // Float op Double = Double
4630 double val1 = static_cast<double>(OpStackGet<float>(op1idx));
4631 double val2 = OpStackGet<double>(op2idx);
4632 BinaryArithOpWork<op, double, /*IsIntType*/false, CORINFO_TYPE_DOUBLE, /*TypeIsUnchanged*/false>(val1, val2);
4636 VerificationError("Binary arithmetic operation type mismatch (float and ?)");
4642 case CORINFO_TYPE_SHIFTED_DOUBLE:
4646 // Double op Double = Double
4647 double val1 = OpStackGet<double>(op1idx);
4648 double val2 = OpStackGet<double>(op2idx);
4649 BinaryArithOpWork<op, double, /*IsIntType*/false, CORINFO_TYPE_DOUBLE, /*TypeIsUnchanged*/true>(val1, val2);
4653 CorInfoTypeShifted cits2 = t2.ToCorInfoTypeShifted();
4654 if (cits2 == CORINFO_TYPE_SHIFTED_FLOAT)
4656 // Double op Float = Double
4657 double val1 = OpStackGet<double>(op1idx);
4658 double val2 = static_cast<double>(OpStackGet<float>(op2idx));
4659 BinaryArithOpWork<op, double, /*IsIntType*/false, CORINFO_TYPE_DOUBLE, /*TypeIsUnchanged*/true>(val1, val2);
4663 VerificationError("Binary arithmetic operation type mismatch (double and ?)");
4669 case CORINFO_TYPE_SHIFTED_BYREF:
4671 NativeInt val1 = OpStackGet<NativeInt>(op1idx);
4672 CorInfoTypeShifted cits2 = t2.ToCorInfoTypeShifted();
4673 if (cits2 == CORINFO_TYPE_SHIFTED_INT)
4675 if (op == BA_Add || op == BA_Sub)
4677 // ByRef +- Int = ByRef
4678 NativeInt val2 = static_cast<NativeInt>(OpStackGet<INT32>(op2idx));
4679 BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_BYREF, /*TypeIsUnchanged*/true>(val1, val2);
4683 VerificationError("May only add/subtract managed pointer and integral value.");
4686 else if (cits2 == CORINFO_TYPE_SHIFTED_NATIVEINT)
4688 if (op == BA_Add || op == BA_Sub)
4690 // ByRef +- NativeInt = ByRef
4691 NativeInt val2 = OpStackGet<NativeInt>(op2idx);
4692 BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_BYREF, /*TypeIsUnchanged*/true>(val1, val2);
4696 VerificationError("May only add/subtract managed pointer and integral value.");
4699 else if (cits2 == CORINFO_TYPE_SHIFTED_BYREF)
4703 // ByRef - ByRef = NativeInt
4704 NativeInt val2 = OpStackGet<NativeInt>(op2idx);
4705 BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/false>(val1, val2);
4709 VerificationError("May only subtract managed pointer values.");
4712 // CLI spec does not allow adding a native int and an int64. So use loose rules.
4713 else if (s_InterpreterLooseRules && cits2 == CORINFO_TYPE_SHIFTED_LONG)
4715 // NativeInt op Int = NativeInt
4716 NativeInt val2 = static_cast<NativeInt>(OpStackGet<INT64>(op2idx));
4717 BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
4721 VerificationError("Binary arithmetic operation not permitted on byref");
4726 case CORINFO_TYPE_SHIFTED_CLASS:
4727 VerificationError("Can't do binary arithmetic on object references.");
4731 _ASSERTE_MSG(false, "Non-stack-normal type on stack.");
4738 template<int op, bool asUnsigned>
4739 void Interpreter::BinaryArithOvfOp()
4747 _ASSERTE(m_curStackHt >= 2);
4748 unsigned op1idx = m_curStackHt - 2;
4749 unsigned op2idx = m_curStackHt - 1;
4751 InterpreterType t1 = OpStackTypeGet(op1idx);
4752 CorInfoType cit1 = t1.ToCorInfoType();
4753 _ASSERTE(IsStackNormalType(cit1));
4755 InterpreterType t2 = OpStackTypeGet(op2idx);
4756 CorInfoType cit2 = t2.ToCorInfoType();
4757 _ASSERTE(IsStackNormalType(cit2));
4759 // In all cases belows, since "op" is compile-time constant, "if" chains on it should fold away.
4762 case CORINFO_TYPE_INT:
4763 if (cit2 == CORINFO_TYPE_INT)
4767 // UnsignedInt op UnsignedInt = UnsignedInt
4768 UINT32 val1 = OpStackGet<UINT32>(op1idx);
4769 UINT32 val2 = OpStackGet<UINT32>(op2idx);
4770 BinaryArithOvfOpWork<op, UINT32, CORINFO_TYPE_INT, /*TypeIsUnchanged*/true>(val1, val2);
4775 INT32 val1 = OpStackGet<INT32>(op1idx);
4776 INT32 val2 = OpStackGet<INT32>(op2idx);
4777 BinaryArithOvfOpWork<op, INT32, CORINFO_TYPE_INT, /*TypeIsUnchanged*/true>(val1, val2);
4780 else if (cit2 == CORINFO_TYPE_NATIVEINT)
4784 // UnsignedInt op UnsignedNativeInt = UnsignedNativeInt
4785 NativeUInt val1 = static_cast<NativeUInt>(OpStackGet<UINT32>(op1idx));
4786 NativeUInt val2 = OpStackGet<NativeUInt>(op2idx);
4787 BinaryArithOvfOpWork<op, NativeUInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/false>(val1, val2);
4791 // Int op NativeInt = NativeInt
4792 NativeInt val1 = static_cast<NativeInt>(OpStackGet<INT32>(op1idx));
4793 NativeInt val2 = OpStackGet<NativeInt>(op2idx);
4794 BinaryArithOvfOpWork<op, NativeInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/false>(val1, val2);
4797 else if (cit2 == CORINFO_TYPE_BYREF)
4799 if (asUnsigned && op == BA_Add)
4801 // UnsignedInt + ByRef = ByRef
4802 NativeUInt val1 = static_cast<NativeUInt>(OpStackGet<UINT32>(op1idx));
4803 NativeUInt val2 = OpStackGet<NativeUInt>(op2idx);
4804 BinaryArithOvfOpWork<op, NativeUInt, CORINFO_TYPE_BYREF, /*TypeIsUnchanged*/false>(val1, val2);
4808 VerificationError("Illegal arithmetic overflow operation for int and byref.");
4813 VerificationError("Binary arithmetic overflow operation type mismatch (int and ?)");
4817 case CORINFO_TYPE_NATIVEINT:
4818 if (cit2 == CORINFO_TYPE_INT)
4822 // UnsignedNativeInt op UnsignedInt = UnsignedNativeInt
4823 NativeUInt val1 = OpStackGet<NativeUInt>(op1idx);
4824 NativeUInt val2 = static_cast<NativeUInt>(OpStackGet<UINT32>(op2idx));
4825 BinaryArithOvfOpWork<op, NativeUInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
4829 // NativeInt op Int = NativeInt
4830 NativeInt val1 = OpStackGet<NativeInt>(op1idx);
4831 NativeInt val2 = static_cast<NativeInt>(OpStackGet<INT32>(op2idx));
4832 BinaryArithOvfOpWork<op, NativeInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
4835 else if (cit2 == CORINFO_TYPE_NATIVEINT)
4839 // UnsignedNativeInt op UnsignedNativeInt = UnsignedNativeInt
4840 NativeUInt val1 = OpStackGet<NativeUInt>(op1idx);
4841 NativeUInt val2 = OpStackGet<NativeUInt>(op2idx);
4842 BinaryArithOvfOpWork<op, NativeUInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
4846 // NativeInt op NativeInt = NativeInt
4847 NativeInt val1 = OpStackGet<NativeInt>(op1idx);
4848 NativeInt val2 = OpStackGet<NativeInt>(op2idx);
4849 BinaryArithOvfOpWork<op, NativeInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
4852 else if (cit2 == CORINFO_TYPE_BYREF)
4854 if (asUnsigned && op == BA_Add)
4856 // UnsignedNativeInt op ByRef = ByRef
4857 NativeUInt val1 = OpStackGet<UINT32>(op1idx);
4858 NativeUInt val2 = OpStackGet<NativeUInt>(op2idx);
4859 BinaryArithOvfOpWork<op, NativeUInt, CORINFO_TYPE_BYREF, /*TypeIsUnchanged*/false>(val1, val2);
4863 VerificationError("Illegal arithmetic overflow operation for native int and byref.");
4868 VerificationError("Binary arithmetic overflow operation type mismatch (native int and ?)");
4872 case CORINFO_TYPE_LONG:
4873 if (cit2 == CORINFO_TYPE_LONG || (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_NATIVEINT))
4877 // UnsignedLong op UnsignedLong = UnsignedLong
4878 UINT64 val1 = OpStackGet<UINT64>(op1idx);
4879 UINT64 val2 = OpStackGet<UINT64>(op2idx);
4880 BinaryArithOvfOpWork<op, UINT64, CORINFO_TYPE_LONG, /*TypeIsUnchanged*/true>(val1, val2);
4884 // Long op Long = Long
4885 INT64 val1 = OpStackGet<INT64>(op1idx);
4886 INT64 val2 = OpStackGet<INT64>(op2idx);
4887 BinaryArithOvfOpWork<op, INT64, CORINFO_TYPE_LONG, /*TypeIsUnchanged*/true>(val1, val2);
4892 VerificationError("Binary arithmetic overflow operation type mismatch (long and ?)");
4896 case CORINFO_TYPE_BYREF:
4897 if (asUnsigned && (op == BA_Add || op == BA_Sub))
4899 NativeUInt val1 = OpStackGet<NativeUInt>(op1idx);
4900 if (cit2 == CORINFO_TYPE_INT)
4902 // ByRef +- UnsignedInt = ByRef
4903 NativeUInt val2 = static_cast<NativeUInt>(OpStackGet<INT32>(op2idx));
4904 BinaryArithOvfOpWork<op, NativeUInt, CORINFO_TYPE_BYREF, /*TypeIsUnchanged*/true>(val1, val2);
4906 else if (cit2 == CORINFO_TYPE_NATIVEINT)
4908 // ByRef +- UnsignedNativeInt = ByRef
4909 NativeUInt val2 = OpStackGet<NativeUInt>(op2idx);
4910 BinaryArithOvfOpWork<op, NativeUInt, CORINFO_TYPE_BYREF, /*TypeIsUnchanged*/true>(val1, val2);
4912 else if (cit2 == CORINFO_TYPE_BYREF)
4916 // ByRef - ByRef = UnsignedNativeInt
4917 NativeUInt val2 = OpStackGet<NativeUInt>(op2idx);
4918 BinaryArithOvfOpWork<op, NativeUInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/false>(val1, val2);
4922 VerificationError("Illegal arithmetic overflow operation for byref and byref: may only subtract managed pointer values.");
4927 VerificationError("Binary arithmetic overflow operation not permitted on byref");
4934 VerificationError("Signed binary arithmetic overflow operation not permitted on managed pointer values.");
4938 _ASSERTE_MSG(op == BA_Mul, "Must be an overflow operation; tested for Add || Sub above.");
4939 VerificationError("Cannot multiply managed pointer values.");
4945 _ASSERTE_MSG(false, "Non-stack-normal type on stack.");
// Worker for the overflow-checked binary arithmetic opcodes (add.ovf, sub.ovf,
// mul.ovf and their .un forms). "op" is a compile-time constant (BA_Add/BA_Sub/
// BA_Mul), so the if-chain below folds away. T is the machine representation of
// both operands; "cit" is the stack-normal result type; TypeIsUnchanged skips
// rewriting the operand-stack type entry when the result type equals operand 1's.
4952 template<int op, typename T, CorInfoType cit, bool TypeIsUnchanged>
4953 void Interpreter::BinaryArithOvfOpWork(T val1, T val2)
// ClrSafeInt records overflow from the arithmetic instead of wrapping silently.
4962 ClrSafeInt<T> safeV1(val1);
4963 ClrSafeInt<T> safeV2(val2);
4966 res = safeV1 + safeV2;
4968 else if (op == BA_Sub)
4970 res = safeV1 - safeV2;
4972 else if (op == BA_Mul)
4974 res = safeV1 * safeV2;
// Any other "op" value is a caller bug: this worker handles only the *.ovf ops.
4978 _ASSERTE_MSG(false, "op should be one of the overflow ops...");
// IL semantics: a checked operation that overflows raises OverflowException.
4981 if (res.IsOverflow())
4983 ThrowOverflowException();
// Result replaces both operands: it lands in the lower slot (height - 2);
// the caller is responsible for adjusting m_curStackHt.
4986 unsigned residx = m_curStackHt - 2;
4987 OpStackSet<T>(residx, res.Value());
4988 if (!TypeIsUnchanged)
4990 OpStackTypeSet(residx, InterpreterType(cit));
// Implements the integer-only binary opcodes (and/or/xor, unsigned div/rem —
// see BinaryIntOpWork) for the operand pair on top of the stack. Dispatches on
// the stack-normal types of both operands, validating the CLI-permitted type
// combinations; "op" is a compile-time template constant.
4995 void Interpreter::BinaryIntOp()
4003 _ASSERTE(m_curStackHt >= 2);
// Operand 1 is the deeper slot; operand 2 is the top of stack.
5004 unsigned op1idx = m_curStackHt - 2;
5005 unsigned op2idx = m_curStackHt - 1;
5007 InterpreterType t1 = OpStackTypeGet(op1idx);
5008 CorInfoType cit1 = t1.ToCorInfoType();
5009 _ASSERTE(IsStackNormalType(cit1));
5011 InterpreterType t2 = OpStackTypeGet(op2idx);
5012 CorInfoType cit2 = t2.ToCorInfoType();
5013 _ASSERTE(IsStackNormalType(cit2));
5015 // In all cases belows, since "op" is compile-time constant, "if" chains on it should fold away.
5018 case CORINFO_TYPE_INT:
5019 if (cit2 == CORINFO_TYPE_INT)
// Int op Int = Int (bitwise/unsigned ops operate on the raw 32-bit pattern).
5022 UINT32 val1 = OpStackGet<UINT32>(op1idx);
5023 UINT32 val2 = OpStackGet<UINT32>(op2idx);
5024 BinaryIntOpWork<op, UINT32, CORINFO_TYPE_INT, /*TypeIsUnchanged*/true>(val1, val2);
5026 else if (cit2 == CORINFO_TYPE_NATIVEINT)
5028 // Int op NativeInt = NativeInt
// Sign-extend the 32-bit operand to native width before the operation.
5029 NativeUInt val1 = static_cast<NativeUInt>(OpStackGet<INT32>(op1idx));
5030 NativeUInt val2 = OpStackGet<NativeUInt>(op2idx);
5031 BinaryIntOpWork<op, NativeUInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/false>(val1, val2);
// Int-with-byref is outside strict CLI rules; permitted only in loose mode.
5033 else if (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_BYREF)
5035 // Int op NativeUInt = NativeUInt
5036 NativeUInt val1 = static_cast<NativeUInt>(OpStackGet<INT32>(op1idx));
5037 NativeUInt val2 = OpStackGet<NativeUInt>(op2idx);
5038 BinaryIntOpWork<op, NativeUInt, CORINFO_TYPE_BYREF, /*TypeIsUnchanged*/false>(val1, val2);
5042 VerificationError("Binary arithmetic operation type mismatch (int and ?)");
5046 case CORINFO_TYPE_NATIVEINT:
5047 if (cit2 == CORINFO_TYPE_NATIVEINT)
5049 // NativeInt op NativeInt = NativeInt
5050 NativeUInt val1 = OpStackGet<NativeUInt>(op1idx);
5051 NativeUInt val2 = OpStackGet<NativeUInt>(op2idx);
5052 BinaryIntOpWork<op, NativeUInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
5054 else if (cit2 == CORINFO_TYPE_INT)
5056 // NativeInt op Int = NativeInt
// Sign-extend the 32-bit second operand to native width.
5057 NativeUInt val1 = OpStackGet<NativeUInt>(op1idx);
5058 NativeUInt val2 = static_cast<NativeUInt>(OpStackGet<INT32>(op2idx));
5059 BinaryIntOpWork<op, NativeUInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
5061 // CLI spec does not allow adding a native int and an int64. So use loose rules.
5062 else if (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_LONG)
5064 // NativeInt op Int = NativeInt
// NOTE: the INT64 operand is truncated to native width here (lossy on 32-bit).
5065 NativeUInt val1 = OpStackGet<NativeUInt>(op1idx);
5066 NativeUInt val2 = static_cast<NativeUInt>(OpStackGet<INT64>(op2idx));
5067 BinaryIntOpWork<op, NativeUInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
5071 VerificationError("Binary arithmetic operation type mismatch (native int and ?)");
5075 case CORINFO_TYPE_LONG:
// Long pairs only with long (or native int under loose rules).
5076 if (cit2 == CORINFO_TYPE_LONG || (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_NATIVEINT))
5078 // Long op Long = Long
5079 UINT64 val1 = OpStackGet<UINT64>(op1idx);
5080 UINT64 val2 = OpStackGet<UINT64>(op2idx);
5081 BinaryIntOpWork<op, UINT64, CORINFO_TYPE_LONG, /*TypeIsUnchanged*/true>(val1, val2);
5085 VerificationError("Binary arithmetic operation type mismatch (long and ?)");
// Float/double/refs are not valid for the integer-only binary ops.
5090 VerificationError("Illegal operation for non-integral data type.");
// Worker for BinaryIntOp: performs the selected bitwise or division operation
// on two operands of representation T, then writes the result back to the
// stack. "op" is a compile-time constant (BIO_* values), so the dispatch below
// folds away. TypeIsUnchanged skips rewriting the operand-stack type entry.
5097 template<int op, typename T, CorInfoType cit, bool TypeIsUnchanged>
5098 void Interpreter::BinaryIntOpWork(T val1, T val2)
5105 else if (op == BIO_Or)
5109 else if (op == BIO_Xor)
5115 _ASSERTE(op == BIO_DivUn || op == BIO_RemUn);
// IL semantics: division/remainder by zero raises DivideByZeroException.
5118 ThrowDivideByZero();
// MinValue / -1 overflows the signed result; IL surfaces this as an
// ArithmeticException. NOTE(review): this guard appears alongside the
// unsigned-op assert above — the signed-div branch is presumably nearby;
// confirm against the full function.
5120 else if (val2 == static_cast<T>(-1) && val1 == static_cast<T>(((UINT64)1) << (sizeof(T)*8 - 1))) // min int / -1 is not representable.
5122 ThrowSysArithException();
5125 if (op == BIO_DivUn)
// Result replaces both operands in the lower slot; caller adjusts the height.
5135 unsigned residx = m_curStackHt - 2;
5136 OpStackSet<T>(residx, res);
5137 if (!TypeIsUnchanged)
5139 OpStackTypeSet(residx, InterpreterType(cit));
// Implements the shift opcodes (shl/shr/shr.un — "op" is a compile-time
// constant) for the value/shift-amount pair on top of the operand stack.
// Dispatches on the type of the value being shifted; the shift-amount type
// is validated inside ShiftOpWork.
5144 void Interpreter::ShiftOp()
5152 _ASSERTE(m_curStackHt >= 2);
// op1 is the value to shift (deeper slot); op2 is the shift amount (top).
5153 unsigned op1idx = m_curStackHt - 2;
5154 unsigned op2idx = m_curStackHt - 1;
5156 InterpreterType t1 = OpStackTypeGet(op1idx);
5157 CorInfoType cit1 = t1.ToCorInfoType();
5158 _ASSERTE(IsStackNormalType(cit1));
5160 InterpreterType t2 = OpStackTypeGet(op2idx);
5161 CorInfoType cit2 = t2.ToCorInfoType();
5162 _ASSERTE(IsStackNormalType(cit2));
5164 // In all cases belows, since "op" is compile-time constant, "if" chains on it should fold away.
// Each case instantiates the worker with the signed representation T and its
// unsigned counterpart UT (UT is used for the logical shift, shr.un).
5167 case CORINFO_TYPE_INT:
5168 ShiftOpWork<op, INT32, UINT32>(op1idx, cit2);
5171 case CORINFO_TYPE_NATIVEINT:
5172 ShiftOpWork<op, NativeInt, NativeUInt>(op1idx, cit2);
5175 case CORINFO_TYPE_LONG:
5176 ShiftOpWork<op, INT64, UINT64>(op1idx, cit2);
// Only int32/native int/int64 may be shifted.
5180 VerificationError("Illegal value type for shift operation.");
// Worker for ShiftOp: shifts the T-typed value at op1idx by the amount at the
// next stack slot. The shift amount may be int32 or native int (per CLI rules);
// shl/shr use T's (arithmetic) shift, shr.un shifts the value reinterpreted as
// unsigned UT. Result overwrites the value slot; caller adjusts stack height.
5187 template<int op, typename T, typename UT>
5188 void Interpreter::ShiftOpWork(unsigned op1idx, CorInfoType cit2)
5190 T val = OpStackGet<T>(op1idx);
// The shift amount sits directly above the value being shifted.
5191 unsigned op2idx = op1idx + 1;
5194 if (cit2 == CORINFO_TYPE_INT)
5196 INT32 shiftAmt = OpStackGet<INT32>(op2idx);
// NOTE: shift amounts >= bit-width of T are undefined in C++; IL defines
// shifts mod the operand width on common targets — hence the TODO below.
5199 res = val << shiftAmt; // TODO: Check that C++ semantics matches IL.
5201 else if (op == CEE_SHR)
// Arithmetic (sign-propagating) right shift via the signed type T.
5203 res = val >> shiftAmt;
5207 _ASSERTE(op == CEE_SHR_UN);
// Logical right shift: reinterpret as unsigned so zeroes are shifted in.
5208 res = (static_cast<UT>(val)) >> shiftAmt;
5211 else if (cit2 == CORINFO_TYPE_NATIVEINT)
// Same three cases, with a native-int shift amount.
5213 NativeInt shiftAmt = OpStackGet<NativeInt>(op2idx);
5216 res = val << shiftAmt; // TODO: Check that C++ semantics matches IL.
5218 else if (op == CEE_SHR)
5220 res = val >> shiftAmt;
5224 _ASSERTE(op == CEE_SHR_UN);
5225 res = (static_cast<UT>(val)) >> shiftAmt;
5230 VerificationError("Operand type mismatch for shift operator.");
// The shifted value keeps its original stack type, so only the value changes.
5232 OpStackSet<T>(op1idx, res);
// Implements the "neg" opcode: arithmetically negates the value on top of the
// operand stack in place. The stack type is unchanged, so only the value slot
// is rewritten.
5236 void Interpreter::Neg()
5244 _ASSERTE(m_curStackHt >= 1);
5245 unsigned opidx = m_curStackHt - 1;
5247 InterpreterType t1 = OpStackTypeGet(opidx);
5248 CorInfoType cit1 = t1.ToCorInfoType();
5249 _ASSERTE(IsStackNormalType(cit1));
// One case per stack-normal numeric type; each negates in the native width.
5253 case CORINFO_TYPE_INT:
5254 OpStackSet<INT32>(opidx, -OpStackGet<INT32>(opidx));
5257 case CORINFO_TYPE_NATIVEINT:
5258 OpStackSet<NativeInt>(opidx, -OpStackGet<NativeInt>(opidx));
5261 case CORINFO_TYPE_LONG:
5262 OpStackSet<INT64>(opidx, -OpStackGet<INT64>(opidx));
5265 case CORINFO_TYPE_FLOAT:
5266 OpStackSet<float>(opidx, -OpStackGet<float>(opidx));
5269 case CORINFO_TYPE_DOUBLE:
5270 OpStackSet<double>(opidx, -OpStackGet<double>(opidx));
// References/byrefs/structs cannot be negated.
5274 VerificationError("Illegal operand type for Neg operation.");
// Implements the "not" opcode: bitwise-complements the integer value on top of
// the operand stack in place. Unlike Neg, floating-point types are not valid
// operands. The stack type entry is unchanged.
5278 void Interpreter::Not()
5286 _ASSERTE(m_curStackHt >= 1);
5287 unsigned opidx = m_curStackHt - 1;
5289 InterpreterType t1 = OpStackTypeGet(opidx);
5290 CorInfoType cit1 = t1.ToCorInfoType();
5291 _ASSERTE(IsStackNormalType(cit1));
5295 case CORINFO_TYPE_INT:
5296 OpStackSet<INT32>(opidx, ~OpStackGet<INT32>(opidx));
5299 case CORINFO_TYPE_NATIVEINT:
5300 OpStackSet<NativeInt>(opidx, ~OpStackGet<NativeInt>(opidx));
5303 case CORINFO_TYPE_LONG:
5304 OpStackSet<INT64>(opidx, ~OpStackGet<INT64>(opidx));
// Only the three integer stack types may be complemented.
5308 VerificationError("Illegal operand type for Not operation.");
// Implements the unchecked conv.* opcodes. Template parameters: T is the
// destination representation; TIsUnsigned selects zero-extension of narrower
// sources; TCanHoldPtr says whether T is wide enough to hold a pointer
// (gates conversions from byref/object); TIsShort marks sub-int targets whose
// stack-normal result is still a 32-bit int; cit is the resulting stack type.
5312 template<typename T, bool TIsUnsigned, bool TCanHoldPtr, bool TIsShort, CorInfoType cit>
5313 void Interpreter::Conv()
5321 _ASSERTE(m_curStackHt >= 1);
5322 unsigned opidx = m_curStackHt - 1;
5324 InterpreterType t1 = OpStackTypeGet(opidx);
5325 CorInfoType cit1 = t1.ToCorInfoType();
5326 _ASSERTE(IsStackNormalType(cit1));
5331 case CORINFO_TYPE_INT:
5334 // Must convert the 32 bit value to unsigned first, so that we zero-extend if necessary.
5335 val = static_cast<T>(static_cast<UINT32>(OpStackGet<INT32>(opidx)));
// Signed target: plain conversion sign-extends.
5339 val = static_cast<T>(OpStackGet<INT32>(opidx));
5343 case CORINFO_TYPE_NATIVEINT:
// Same zero-vs-sign-extension split for native-int sources.
5346 // NativeInt might be 32 bits, so convert to unsigned before possibly widening.
5347 val = static_cast<T>(static_cast<NativeUInt>(OpStackGet<NativeInt>(opidx)));
5351 val = static_cast<T>(OpStackGet<NativeInt>(opidx));
5355 case CORINFO_TYPE_LONG:
5356 val = static_cast<T>(OpStackGet<INT64>(opidx));
5359 // TODO: Make sure that the C++ conversions do the right thing (truncate to zero...)
5360 case CORINFO_TYPE_FLOAT:
5361 val = static_cast<T>(OpStackGet<float>(opidx));
5364 case CORINFO_TYPE_DOUBLE:
5365 val = static_cast<T>(OpStackGet<double>(opidx));
// Pointer-ish sources: only legal if T can hold a pointer (or loose rules).
5368 case CORINFO_TYPE_BYREF:
5369 case CORINFO_TYPE_CLASS:
5370 case CORINFO_TYPE_STRING:
5371 if (!TCanHoldPtr && !s_InterpreterLooseRules)
5373 VerificationError("Conversion of pointer value to type that can't hold its value.");
5377 // (Must first convert to NativeInt, because the compiler believes this might be applied for T =
5378 // float or double. It won't, by the test above, and the extra cast shouldn't generate any code...)
5379 val = static_cast<T>(reinterpret_cast<NativeInt>(OpStackGet<void*>(opidx)));
5383 VerificationError("Illegal operand type for conv.* operation.");
// Sub-int results are stored stack-normalized as INT32; wider results as T.
5389 OpStackSet<INT32>(opidx, static_cast<INT32>(val));
5393 OpStackSet<T>(opidx, val);
5396 OpStackTypeSet(opidx, InterpreterType(cit));
// Implements "conv.r.un": converts the integer on top of the stack to a
// floating-point value, treating the source bits as UNSIGNED (hence the
// UINT32/NativeUInt/UINT64 reads). The result is stored as double, the
// stack-normal representation for floating point here.
5400 void Interpreter::ConvRUn()
5408 _ASSERTE(m_curStackHt >= 1);
5409 unsigned opidx = m_curStackHt - 1;
5411 InterpreterType t1 = OpStackTypeGet(opidx);
5412 CorInfoType cit1 = t1.ToCorInfoType();
5413 _ASSERTE(IsStackNormalType(cit1));
5417 case CORINFO_TYPE_INT:
5418 OpStackSet<double>(opidx, static_cast<double>(OpStackGet<UINT32>(opidx)));
5421 case CORINFO_TYPE_NATIVEINT:
5422 OpStackSet<double>(opidx, static_cast<double>(OpStackGet<NativeUInt>(opidx)));
5425 case CORINFO_TYPE_LONG:
5426 OpStackSet<double>(opidx, static_cast<double>(OpStackGet<UINT64>(opidx)));
// A double source is already in the target representation.
5429 case CORINFO_TYPE_DOUBLE:
5433 VerificationError("Illegal operand type for conv.r.un operation.");
5436 OpStackTypeSet(opidx, InterpreterType(CORINFO_TYPE_DOUBLE));
// Implements the conv.ovf.* opcodes (overflow-checked conversion with a SIGNED
// source interpretation). TMin/TMax bound T's representable range for the
// float/double fit checks; TCanHoldPtr gates pointer-source conversions; cit
// is the stack-normal result type. Out-of-range values raise OverflowException.
5439 template<typename T, INT64 TMin, UINT64 TMax, bool TCanHoldPtr, CorInfoType cit>
5440 void Interpreter::ConvOvf()
5448 _ASSERTE(m_curStackHt >= 1);
5449 unsigned opidx = m_curStackHt - 1;
5451 InterpreterType t1 = OpStackTypeGet(opidx);
5452 CorInfoType cit1 = t1.ToCorInfoType();
5453 _ASSERTE(IsStackNormalType(cit1));
// Each integer case: read signed, range-check (check elided here), convert.
5457 case CORINFO_TYPE_INT:
5459 INT32 i4 = OpStackGet<INT32>(opidx);
5462 ThrowOverflowException();
5464 OpStackSet<T>(opidx, static_cast<T>(i4));
5468 case CORINFO_TYPE_NATIVEINT:
5470 NativeInt i = OpStackGet<NativeInt>(opidx);
5473 ThrowOverflowException();
5475 OpStackSet<T>(opidx, static_cast<T>(i));
5479 case CORINFO_TYPE_LONG:
5481 INT64 i8 = OpStackGet<INT64>(opidx);
5484 ThrowOverflowException();
5486 OpStackSet<T>(opidx, static_cast<T>(i8));
5490 // Make sure that the C++ conversions do the right thing (truncate to zero...)
// Floating sources: verify the value fits in [TMin, TMax] before truncating.
5491 case CORINFO_TYPE_FLOAT:
5493 float f = OpStackGet<float>(opidx);
5494 if (!FloatFitsInIntType<TMin, TMax>(f))
5496 ThrowOverflowException();
5498 OpStackSet<T>(opidx, static_cast<T>(f));
5502 case CORINFO_TYPE_DOUBLE:
5504 double d = OpStackGet<double>(opidx);
5505 if (!DoubleFitsInIntType<TMin, TMax>(d))
5507 ThrowOverflowException();
5509 OpStackSet<T>(opidx, static_cast<T>(d));
// Pointer-ish sources: only legal if T is wide enough for a pointer.
5513 case CORINFO_TYPE_BYREF:
5514 case CORINFO_TYPE_CLASS:
5515 case CORINFO_TYPE_STRING:
5518 VerificationError("Conversion of pointer value to type that can't hold its value.");
5522 // (Must first convert to NativeInt, because the compiler believes this might be applied for T =
5523 // float or double. It won't, by the test above, and the extra cast shouldn't generate any code...
5524 OpStackSet<T>(opidx, static_cast<T>(reinterpret_cast<NativeInt>(OpStackGet<void*>(opidx))));
5528 VerificationError("Illegal operand type for conv.ovf.* operation.");
5531 _ASSERTE_MSG(IsStackNormalType(cit), "Precondition.");
5532 OpStackTypeSet(opidx, InterpreterType(cit));
// Implements the conv.ovf.*.un opcodes: identical shape to ConvOvf, but the
// integer source is interpreted as UNSIGNED (UINT32/NativeUInt/UINT64 reads,
// FitsIn<T> checks). Out-of-range values raise OverflowException.
5535 template<typename T, INT64 TMin, UINT64 TMax, bool TCanHoldPtr, CorInfoType cit>
5536 void Interpreter::ConvOvfUn()
5544 _ASSERTE(m_curStackHt >= 1);
5545 unsigned opidx = m_curStackHt - 1;
5547 InterpreterType t1 = OpStackTypeGet(opidx);
5548 CorInfoType cit1 = t1.ToCorInfoType();
5549 _ASSERTE(IsStackNormalType(cit1));
5553 case CORINFO_TYPE_INT:
// Read as unsigned; range-check against T before converting.
5555 UINT32 ui4 = OpStackGet<UINT32>(opidx);
5556 if (!FitsIn<T>(ui4))
5558 ThrowOverflowException();
5560 OpStackSet<T>(opidx, static_cast<T>(ui4));
5564 case CORINFO_TYPE_NATIVEINT:
5566 NativeUInt ui = OpStackGet<NativeUInt>(opidx);
5569 ThrowOverflowException();
5571 OpStackSet<T>(opidx, static_cast<T>(ui));
5575 case CORINFO_TYPE_LONG:
5577 UINT64 ui8 = OpStackGet<UINT64>(opidx);
5578 if (!FitsIn<T>(ui8))
5580 ThrowOverflowException();
5582 OpStackSet<T>(opidx, static_cast<T>(ui8));
5586 // Make sure that the C++ conversions do the right thing (truncate to zero...)
// Floating sources use the same [TMin, TMax] fit checks as ConvOvf.
5587 case CORINFO_TYPE_FLOAT:
5589 float f = OpStackGet<float>(opidx);
5590 if (!FloatFitsInIntType<TMin, TMax>(f))
5592 ThrowOverflowException();
5594 OpStackSet<T>(opidx, static_cast<T>(f));
5598 case CORINFO_TYPE_DOUBLE:
5600 double d = OpStackGet<double>(opidx);
5601 if (!DoubleFitsInIntType<TMin, TMax>(d))
5603 ThrowOverflowException();
5605 OpStackSet<T>(opidx, static_cast<T>(d));
// Pointer-ish sources: only legal if T is wide enough for a pointer.
5609 case CORINFO_TYPE_BYREF:
5610 case CORINFO_TYPE_CLASS:
5611 case CORINFO_TYPE_STRING:
5614 VerificationError("Conversion of pointer value to type that can't hold its value.");
5618 // (Must first convert to NativeInt, because the compiler believes this might be applied for T =
5619 // float or double. It won't, by the test above, and the extra cast shouldn't generate any code...
5620 OpStackSet<T>(opidx, static_cast<T>(reinterpret_cast<NativeInt>(OpStackGet<void*>(opidx))));
5624 VerificationError("Illegal operand type for conv.ovf.*.un operation.");
5627 _ASSERTE_MSG(IsStackNormalType(cit), "Precondition.");
5628 OpStackTypeSet(opidx, InterpreterType(cit));
// Implements "ldobj": loads the value of the type named by the inline token
// from the address on top of the stack. Value classes are copied onto the
// operand stack (via LdObjValueClassWork); reference types are dereferenced
// and pushed as an object reference.
5631 void Interpreter::LdObj()
// Honor any "volatile." prefix on the load.
5639 BarrierIfVolatile();
5641 _ASSERTE(m_curStackHt > 0);
5642 unsigned ind = m_curStackHt - 1;
5645 CorInfoType cit = OpStackTypeGet(ind).ToCorInfoType();
5646 _ASSERTE_MSG(IsValidPointerType(cit), "Expect pointer on stack");
5650 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_LdObj]);
5651 #endif // INTERP_TRACING
5653 // TODO: GetTypeFromToken also uses GCX_PREEMP(); can we merge it with the getClassAttribs() block below, and do it just once?
// The class token follows the one-byte opcode at m_ILCodePtr.
5654 CORINFO_CLASS_HANDLE clsHnd = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_LdObj));
5658 clsAttribs = m_interpCeeInfo.getClassAttribs(clsHnd);
5661 void* src = OpStackGet<void*>(ind);
5662 ThrowOnInvalidPointer(src);
5664 if (clsAttribs & CORINFO_FLG_VALUECLASS)
5666 LdObjValueClassWork(clsHnd, ind, src);
// Reference type: replace the address slot with the object reference it holds.
5670 OpStackSet<void*>(ind, *reinterpret_cast<void**>(src));
5671 OpStackTypeSet(ind, InterpreterType(CORINFO_TYPE_CLASS));
// Copies the value class at "src" onto the operand stack at slot "ind".
// Large structs (> 8 bytes) go to the large-struct side stack with a pointer
// in the slot; small structs are packed into the slot's INT64 directly.
5676 void Interpreter::LdObjValueClassWork(CORINFO_CLASS_HANDLE valueClsHnd, unsigned ind, void* src)
5684 // "src" is a byref, which may be into an object. GCPROTECT for the call below.
5685 GCPROTECT_BEGININTERIOR(src);
5687 InterpreterType it = InterpreterType(&m_interpCeeInfo, valueClsHnd);
5688 size_t sz = it.Size(&m_interpCeeInfo);
5689 // Note that the memcpy's below are permissible because the destination is in the operand stack.
5690 if (sz > sizeof(INT64))
5692 void* dest = LargeStructOperandStackPush(sz);
5693 memcpy(dest, src, sz);
5694 OpStackSet<void*>(ind, dest);
// Small struct: pack the bytes into the 64-bit stack slot itself.
5698 OpStackSet<INT64>(ind, GetSmallStructValue(src, sz));
5701 OpStackTypeSet(ind, it.StackNormalize());
// Resolves the 4-byte metadata token at "codePtr" (little-endian, read in
// place from the IL stream) to a class handle, using the given token kind.
5706 CORINFO_CLASS_HANDLE Interpreter::GetTypeFromToken(BYTE* codePtr, CorInfoTokenKind tokKind InterpTracingArg(ResolveTokenKind rtk))
5716 CORINFO_RESOLVED_TOKEN typeTok;
5717 ResolveToken(&typeTok, getU4LittleEndian(codePtr), tokKind InterpTracingArg(rtk));
5718 return typeTok.hClass;
// Returns whether "cit" is an acceptable stack type for a pointer operand:
// native int or byref, plus (AMD64 only, under loose interpretation rules)
// a 64-bit long, since it is pointer-sized there.
5721 bool Interpreter::IsValidPointerType(CorInfoType cit)
5723 bool isValid = (cit == CORINFO_TYPE_NATIVEINT || cit == CORINFO_TYPE_BYREF);
5724 #if defined(HOST_AMD64)
5725 isValid = isValid || (s_InterpreterLooseRules && cit == CORINFO_TYPE_LONG);
// Implements "cpobj": copies the value of the type named by the inline token
// from the source address (top of stack) to the destination address (below it).
// Value classes are copied field-wise with GC-safe semantics; reference types
// copy a single object reference with the proper write barrier.
5730 void Interpreter::CpObj()
5738 _ASSERTE(m_curStackHt >= 2);
// dest is the deeper slot, src the top of stack (IL pushes dest first).
5739 unsigned destInd = m_curStackHt - 2;
5740 unsigned srcInd = m_curStackHt - 1;
5743 // Check that src and dest are both pointer types.
5744 CorInfoType cit = OpStackTypeGet(destInd).ToCorInfoType();
5745 _ASSERTE_MSG(IsValidPointerType(cit), "Expect pointer on stack for dest of cpobj");
5747 cit = OpStackTypeGet(srcInd).ToCorInfoType();
5748 _ASSERTE_MSG(IsValidPointerType(cit), "Expect pointer on stack for src of cpobj");
5752 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_CpObj]);
5753 #endif // INTERP_TRACING
5755 CORINFO_CLASS_HANDLE clsHnd = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_CpObj));
5759 clsAttribs = m_interpCeeInfo.getClassAttribs(clsHnd);
5762 void* dest = OpStackGet<void*>(destInd);
5763 void* src = OpStackGet<void*>(srcInd);
5765 ThrowOnInvalidPointer(dest);
5766 ThrowOnInvalidPointer(src);
5768 // dest and src are vulnerable byrefs.
5771 if (clsAttribs & CORINFO_FLG_VALUECLASS)
// Struct copy that handles any embedded GC references correctly.
5773 CopyValueClassUnchecked(dest, src, GetMethodTableFromClsHnd(clsHnd));
// Reference type: copy one object reference, with write barrier.
5777 OBJECTREF val = *reinterpret_cast<OBJECTREF*>(src);
5778 SetObjectReference(reinterpret_cast<OBJECTREF*>(dest), val);
// Implements "stobj": stores the value on top of the stack to the address in
// the slot below it, for the type named by the inline token. Handles both
// value classes (small packed / large side-stack representations) and object
// references (with write barrier), with debug-only type validation.
5784 void Interpreter::StObj()
5792 _ASSERTE(m_curStackHt >= 2);
// dest address is the deeper slot; the value to store is on top.
5793 unsigned destInd = m_curStackHt - 2;
5794 unsigned valInd = m_curStackHt - 1;
5797 // Check that dest is a pointer type.
5798 CorInfoType cit = OpStackTypeGet(destInd).ToCorInfoType();
5799 _ASSERTE_MSG(IsValidPointerType(cit), "Expect pointer on stack for dest of stobj");
5803 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_StObj]);
5804 #endif // INTERP_TRACING
5806 CORINFO_CLASS_HANDLE clsHnd = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_StObj));
5810 clsAttribs = m_interpCeeInfo.getClassAttribs(clsHnd);
5813 if (clsAttribs & CORINFO_FLG_VALUECLASS)
5815 MethodTable* clsMT = GetMethodTableFromClsHnd(clsHnd);
5819 sz = getClassSize(clsHnd);
5822 // Note that "dest" might be a pointer into the heap. It is therefore important
5823 // to calculate it *after* any PREEMP transitions at which we might do a GC.
5824 void* dest = OpStackGet<void*>(destInd);
5825 ThrowOnInvalidPointer(dest);
5828 // Try and validate types
// Debug-only sanity check that the value's stack type matches the token's
// type: exact match, primitive-value-class match, or canonical shared match.
5829 InterpreterType vit = OpStackTypeGet(valInd);
5830 CorInfoType vitc = vit.ToCorInfoType();
5832 if (vitc == CORINFO_TYPE_VALUECLASS)
5834 CORINFO_CLASS_HANDLE vClsHnd = vit.ToClassHandle();
5835 const bool isClass = (vClsHnd == clsHnd);
5836 const bool isPrim = (vitc == CorInfoTypeStackNormalize(GetTypeForPrimitiveValueClass(clsHnd)));
5837 bool isShared = false;
5839 // If operand type is shared we need a more complex check;
5840 // the IL type may not be shared
5841 if (!isPrim && !isClass)
5846 vClsAttribs = m_interpCeeInfo.getClassAttribs(vClsHnd);
5849 if ((vClsAttribs & CORINFO_FLG_SHAREDINST) != 0)
// Compare against the canonical method table for shared generic instances.
5851 MethodTable* clsMT2 = clsMT->GetCanonicalMethodTable();
5852 if (((CORINFO_CLASS_HANDLE) clsMT2) == vClsHnd)
5859 _ASSERTE(isClass || isPrim || isShared);
// Loose-rules escape hatch: pointer-sized-or-smaller values may also match.
5863 const bool isSz = s_InterpreterLooseRules && sz <= sizeof(dest);
5871 if (sz > sizeof(INT64))
5873 // Large struct case -- ostack entry is pointer.
5874 void* src = OpStackGet<void*>(valInd);
5875 CopyValueClassUnchecked(dest, src, clsMT);
// Release the large-struct side-stack storage now that it's been copied out.
5876 LargeStructOperandStackPop(sz, src);
5880 // The ostack entry contains the struct value.
5881 CopyValueClassUnchecked(dest, OpStackGetAddr(valInd, sz), clsMT);
5886 // The ostack entry is an object reference.
5887 _ASSERTE(OpStackTypeGet(valInd).ToCorInfoType() == CORINFO_TYPE_CLASS);
5889 // Note that "dest" might be a pointer into the heap. It is therefore important
5890 // to calculate it *after* any PREEMP transitions at which we might do a GC. (Thus,
5891 // we have to duplicate this code with the case above.
5892 void* dest = OpStackGet<void*>(destInd);
5893 ThrowOnInvalidPointer(dest);
5897 OBJECTREF val = ObjectToOBJECTREF(OpStackGet<Object*>(valInd));
5898 SetObjectReference(reinterpret_cast<OBJECTREF*>(dest), val);
// Honor any "volatile." prefix: barrier after the store.
5904 BarrierIfVolatile();
// Implements "initobj": zero-initializes the location addressed by the top of
// stack for the type named by the inline token. Value classes are memset to
// zero; reference types get a null reference stored with the write barrier.
5907 void Interpreter::InitObj()
5915 _ASSERTE(m_curStackHt >= 1);
5916 unsigned destInd = m_curStackHt - 1;
5918 // Check that src and dest are both pointer types.
5919 CorInfoType cit = OpStackTypeGet(destInd).ToCorInfoType();
5920 _ASSERTE_MSG(IsValidPointerType(cit), "Expect pointer on stack");
5924 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_InitObj]);
5925 #endif // INTERP_TRACING
// The token is at offset 2: initobj is a two-byte (0xFE-prefixed) opcode.
5927 CORINFO_CLASS_HANDLE clsHnd = GetTypeFromToken(m_ILCodePtr + 2, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_InitObj));
5928 size_t valueClassSz = 0;
5933 clsAttribs = m_interpCeeInfo.getClassAttribs(clsHnd);
5934 if (clsAttribs & CORINFO_FLG_VALUECLASS)
5936 valueClassSz = getClassSize(clsHnd);
5940 void* dest = OpStackGet<void*>(destInd);
5941 ThrowOnInvalidPointer(dest);
5943 // dest is a vulnerable byref.
5946 if (clsAttribs & CORINFO_FLG_VALUECLASS)
5948 memset(dest, 0, valueClassSz);
5952 // The ostack entry is an object reference.
// Null out the reference via the barriered setter.
5953 SetObjectReference(reinterpret_cast<OBJECTREF*>(dest), NULL);
// Implements "ldstr": resolves the string token following the opcode to the
// module's interned string literal and pushes the string object reference.
5959 void Interpreter::LdStr()
5967 OBJECTHANDLE res = ConstructStringLiteral(m_methInfo->m_module, getU4LittleEndian(m_ILCodePtr + 1));
// Dereference the handle to get the current object pointer, then push it.
5970 OpStackSet<Object*>(m_curStackHt, *reinterpret_cast<Object**>(res));
5971 OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_CLASS)); // Stack-normal type for "string"
// Implements "newobj": resolves the constructor token, allocates the instance
// (or temp struct space), invokes the constructor via DoCallWork, and pushes
// the result. Four cases are handled, enumerated in the comment at 6029 below.
5977 void Interpreter::NewObj()
5979 #if INTERP_DYNAMIC_CONTRACTS
5986 // Dynamic contract occupies too much stack.
5987 STATIC_CONTRACT_THROWS;
5988 STATIC_CONTRACT_GC_TRIGGERS;
5989 STATIC_CONTRACT_MODE_COOPERATIVE;
5992 unsigned ctorTok = getU4LittleEndian(m_ILCodePtr + 1);
5995 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_NewObj]);
5996 #endif // INTERP_TRACING
5998 CORINFO_CALL_INFO callInfo;
5999 CORINFO_RESOLVED_TOKEN methTok;
6003 ResolveToken(&methTok, ctorTok, CORINFO_TOKENKIND_Ldtoken InterpTracingArg(RTK_NewObj));
6004 m_interpCeeInfo.getCallInfo(&methTok, NULL,
6005 m_methInfo->m_method,
6006 CORINFO_CALLINFO_FLAGS(0),
6010 unsigned mflags = callInfo.methodFlags;
// A newobj target must be a real instance constructor.
6012 if ((mflags & (CORINFO_FLG_STATIC|CORINFO_FLG_ABSTRACT)) != 0)
6014 VerificationError("newobj on static or abstract method");
6017 unsigned clsFlags = callInfo.classFlags;
6020 // What class are we allocating?
6021 const char* clsName;
6025 clsName = m_interpCeeInfo.getClassNameFromMetadata(methTok.hClass, NULL);
6029 // There are four cases:
6030 // 1) Value types (ordinary constructor, resulting VALUECLASS pushed)
6031 // 2) String (var-args constructor, result automatically pushed)
6032 // 3) MDArray (var-args constructor, resulting OBJECTREF pushed)
6033 // 4) Reference types (ordinary constructor, resulting OBJECTREF pushed)
6034 if (clsFlags & CORINFO_FLG_VALUECLASS)
// Case 1: construct the struct into temp space, then move it to the stack.
6037 INT64 smallTempDest = 0;
6041 sz = getClassSize(methTok.hClass);
6043 if (sz > sizeof(INT64))
6045 // TODO: Make sure this is deleted in the face of exceptions.
6046 tempDest = new BYTE[sz];
6050 tempDest = &smallTempDest;
6052 memset(tempDest, 0, sz);
// Register the temp space with the GC scanner while the ctor runs.
6053 InterpreterType structValRetIT(&m_interpCeeInfo, methTok.hClass);
6054 m_structRetValITPtr = &structValRetIT;
6055 m_structRetValTempSpace = tempDest;
6057 DoCallWork(/*virtCall*/false, tempDest, &methTok, &callInfo);
6059 if (sz > sizeof(INT64))
6061 void* dest = LargeStructOperandStackPush(sz);
6062 memcpy(dest, tempDest, sz);
6063 delete[] reinterpret_cast<BYTE*>(tempDest);
6064 OpStackSet<void*>(m_curStackHt, dest);
// Small struct: pack the constructed value into the 64-bit stack slot.
6068 OpStackSet<INT64>(m_curStackHt, GetSmallStructValue(tempDest, sz));
6070 if (m_structRetValITPtr->IsStruct())
6072 OpStackTypeSet(m_curStackHt, *m_structRetValITPtr);
6076 // Must stack-normalize primitive types.
6077 OpStackTypeSet(m_curStackHt, m_structRetValITPtr->StackNormalize());
6079 // "Unregister" the temp space for GC scanning...
6080 m_structRetValITPtr = NULL;
6083 else if ((clsFlags & CORINFO_FLG_VAROBJSIZE) && !(clsFlags & CORINFO_FLG_ARRAY))
// Case 2: String — the "pseudo-constructor" allocates and pushes the result.
6085 // For a VAROBJSIZE class (currently == String), pass NULL as this to "pseudo-constructor."
6086 void* specialFlagArg = reinterpret_cast<void*>(0x1); // Special value for "thisArg" argument of "DoCallWork": push NULL that's not on op stack.
6087 DoCallWork(/*virtCall*/false, specialFlagArg, &methTok, &callInfo); // pushes result automatically
6091 OBJECTREF thisArgObj = NULL;
6092 GCPROTECT_BEGIN(thisArgObj);
6094 if (clsFlags & CORINFO_FLG_ARRAY)
// Case 3: multi-dimensional array — pop the int dimension args and allocate.
6096 _ASSERTE(clsFlags & CORINFO_FLG_VAROBJSIZE);
6098 MethodDesc* methDesc = GetMethod(methTok.hMethod);
6100 PCCOR_SIGNATURE pSig;
6102 methDesc->GetSig(&pSig, &cbSigSize);
6103 MetaSig msig(pSig, cbSigSize, methDesc->GetModule(), NULL);
6105 unsigned dwNumArgs = msig.NumFixedArgs();
6106 _ASSERTE(m_curStackHt >= dwNumArgs);
6107 m_curStackHt -= dwNumArgs;
6109 INT32* args = (INT32*)_alloca(dwNumArgs * sizeof(INT32));
6112 for (dwArg = 0; dwArg < dwNumArgs; dwArg++)
6114 unsigned stkInd = m_curStackHt + dwArg;
// Loose rules additionally accept native-int dimensions, truncated to INT32.
6115 bool loose = s_InterpreterLooseRules && (OpStackTypeGet(stkInd).ToCorInfoType() == CORINFO_TYPE_NATIVEINT);
6116 if (OpStackTypeGet(stkInd).ToCorInfoType() != CORINFO_TYPE_INT && !loose)
6118 VerificationError("MD array dimension bounds and sizes must be int.");
6120 args[dwArg] = loose ? (INT32) OpStackGet<NativeInt>(stkInd) : OpStackGet<INT32>(stkInd);
6123 thisArgObj = AllocateArrayEx(TypeHandle(methTok.hClass), args, dwNumArgs);
// Case 4: ordinary reference type — allocate, then run the constructor.
6127 CorInfoHelpFunc newHelper;
6131 newHelper = m_interpCeeInfo.getNewHelper(methTok.hClass, &sideEffect);
6134 MethodTable * pNewObjMT = GetMethodTableFromClsHnd(methTok.hClass);
6137 case CORINFO_HELP_NEWFAST:
6139 thisArgObj = AllocateObject(pNewObjMT);
// Invoke the constructor on the freshly allocated instance.
6143 DoCallWork(/*virtCall*/false, OBJECTREFToObject(thisArgObj), &methTok, &callInfo);
6148 OpStackSet<Object*>(m_curStackHt, OBJECTREFToObject(thisArgObj));
6149 OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_CLASS));
6152 GCPROTECT_END(); // For "thisArgObj"
// Implements "newarr": pops the element count, resolves the element-type
// token, allocates a single-dimension (SZ) array, and pushes the reference
// into the same stack slot the count occupied.
6158 void Interpreter::NewArr()
6166 _ASSERTE(m_curStackHt > 0);
6167 unsigned stkInd = m_curStackHt-1;
6168 CorInfoType cit = OpStackTypeGet(stkInd).ToCorInfoType();
// ECMA allows int32 or native int as the size operand.
6172 case CORINFO_TYPE_INT:
6173 sz = static_cast<NativeInt>(OpStackGet<INT32>(stkInd));
6175 case CORINFO_TYPE_NATIVEINT:
6176 sz = OpStackGet<NativeInt>(stkInd);
6179 VerificationError("Size operand of 'newarr' must be int or native int.");
6182 unsigned elemTypeTok = getU4LittleEndian(m_ILCodePtr + 1);
6184 CORINFO_CLASS_HANDLE elemClsHnd;
6187 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_NewArr]);
6188 #endif // INTERP_TRACING
6190 CORINFO_RESOLVED_TOKEN elemTypeResolvedTok;
6194 ResolveToken(&elemTypeResolvedTok, elemTypeTok, CORINFO_TOKENKIND_Newarr InterpTracingArg(RTK_NewArr));
6195 elemClsHnd = elemTypeResolvedTok.hClass;
// A negative (or otherwise invalid) size raises OverflowException.
6201 COMPlusThrow(kOverflowException);
6205 // Even though ECMA allows using a native int as the argument to newarr instruction
6206 // (therefore size is INT_PTR), ArrayBase::m_NumComponents is 32-bit, so even on 64-bit
6207 // platforms we can't create an array whose size exceeds 32 bits.
6210 EX_THROW(EEMessageException, (kOverflowException, IDS_EE_ARRAY_DIMENSIONS_EXCEEDED));
// Ensure the element type's static constructor has run before allocation.
6214 TypeHandle th(elemClsHnd);
6215 MethodTable* pArrayMT = th.GetMethodTable();
6216 pArrayMT->CheckRunClassInitThrowing();
6218 INT32 size32 = (INT32)sz;
6219 Object* newarray = OBJECTREFToObject(AllocateSzArray(pArrayMT, size32));
// Reuse the size operand's slot for the new array reference.
6222 OpStackTypeSet(stkInd, InterpreterType(CORINFO_TYPE_CLASS));
6223 OpStackSet<Object*>(stkInd, newarray);
// Implements "isinst": tests whether the object on top of the stack is an
// instance of the type named by the inline token. On failure the slot is
// replaced with null; on success (or null input) the slot is left alone.
// Never throws for a failed test, unlike castclass.
6229 void Interpreter::IsInst()
6238 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_IsInst]);
6239 #endif // INTERP_TRACING
6241 CORINFO_CLASS_HANDLE cls = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Casting InterpTracingArg(RTK_IsInst));
6243 _ASSERTE(m_curStackHt >= 1);
6244 unsigned idx = m_curStackHt - 1;
6246 CorInfoType cit = OpStackTypeGet(idx).ToCorInfoType();
6247 _ASSERTE(cit == CORINFO_TYPE_CLASS || cit == CORINFO_TYPE_STRING);
6250 Object * pObj = OpStackGet<Object*>(idx);
6253 if (!ObjIsInstanceOf(pObj, TypeHandle(cls)))
6254 OpStackSet<Object*>(idx, NULL);
6257 // Type stack stays unmodified.
// Implements "castclass": verifies that the object on top of the stack is an
// instance of the type named by the inline token. Unlike isinst, a failed
// cast throws (ObjIsInstanceOf with throwCastException=TRUE does the throw);
// the stack slot and its type are left unchanged on success.
6262 void Interpreter::CastClass()
6271 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_CastClass]);
6272 #endif // INTERP_TRACING
6274 CORINFO_CLASS_HANDLE cls = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Casting InterpTracingArg(RTK_CastClass));
6276 _ASSERTE(m_curStackHt >= 1);
6277 unsigned idx = m_curStackHt - 1;
6279 CorInfoType cit = OpStackTypeGet(idx).ToCorInfoType();
6280 _ASSERTE(cit == CORINFO_TYPE_CLASS || cit == CORINFO_TYPE_STRING);
6283 Object * pObj = OpStackGet<Object*>(idx);
6286 if (!ObjIsInstanceOf(pObj, TypeHandle(cls), TRUE))
6288 UNREACHABLE(); //ObjIsInstanceOf will throw if cast can't be done
6293 // Type stack stays unmodified.
// Implements the 'localloc' IL opcode: pops a size from the operand stack, allocates
// that many bytes from the per-frame localloc data area, and pushes the resulting
// pointer as a native int. Throws StackOverflowException if the allocation fails.
6298 void Interpreter::LocAlloc()
6306     _ASSERTE(m_curStackHt >= 1);
6307     unsigned idx = m_curStackHt - 1;
6308     CorInfoType cit = OpStackTypeGet(idx).ToCorInfoType();
     // The size operand may be int32 or native int; under loose rules a long is
     // also tolerated (truncated to native width).
6310     if (cit == CORINFO_TYPE_INT || cit == CORINFO_TYPE_UINT)
6312         sz = static_cast<NativeUInt>(OpStackGet<UINT32>(idx));
6314     else if (cit == CORINFO_TYPE_NATIVEINT || cit == CORINFO_TYPE_NATIVEUINT)
6316         sz = OpStackGet<NativeUInt>(idx);
6318     else if (s_InterpreterLooseRules && cit == CORINFO_TYPE_LONG)
6320         sz = (NativeUInt) OpStackGet<INT64>(idx);
6324         VerificationError("localloc requires int or nativeint argument.");
     // Clear the slot before the allocation call so no stale value is observed
     // in this stack position while Alloc runs.
6328     OpStackSet<void*>(idx, NULL);
6332     void* res = GetLocAllocData()->Alloc(sz);
6333     if (res == NULL) ThrowStackOverflow();
6334     OpStackSet<void*>(idx, res);
6336     OpStackTypeSet(idx, InterpreterType(CORINFO_TYPE_NATIVEINT));
// Implements the 'mkrefany' IL opcode: combines the pointer on top of the stack with
// the class named by the following token into a TypedByRef (typed reference) value.
// The representation of the TypedByRef on the operand stack is platform-dependent:
// on AMD64 it is a large struct pushed on the large-struct operand stack; on x86/ARM
// it fits inline in the operand-stack slot.
6339 void Interpreter::MkRefany()
6348     InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_MkRefAny]);
6349 #endif // INTERP_TRACING
6351     CORINFO_CLASS_HANDLE cls = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_MkRefAny));
6352     _ASSERTE(m_curStackHt >= 1);
6353     unsigned idx = m_curStackHt - 1;
6355     CorInfoType cit = OpStackTypeGet(idx).ToCorInfoType();
6356     if (!(cit == CORINFO_TYPE_BYREF || cit == CORINFO_TYPE_NATIVEINT))
6357         VerificationError("MkRefany requires byref or native int (pointer) on the stack.");
6359     void* ptr = OpStackGet<void*>(idx);
6361     InterpreterType typedRefIT = GetTypedRefIT(&m_interpCeeInfo);
6363 #if defined(HOST_AMD64)
     // TypedByRef is a large struct on AMD64: allocate space on the large-struct
     // stack and make the operand slot point at it.
6364     _ASSERTE(typedRefIT.IsLargeStruct(&m_interpCeeInfo));
6365     tbr = (TypedByRef*) LargeStructOperandStackPush(GetTypedRefSize(&m_interpCeeInfo));
6366     OpStackSet<void*>(idx, tbr);
6367 #elif defined(HOST_X86) || defined(HOST_ARM)
     // Small-struct case: the TypedByRef lives inline in the operand-stack slot.
6368     _ASSERTE(!typedRefIT.IsLargeStruct(&m_interpCeeInfo));
6369     tbr = OpStackGetAddr<TypedByRef>(idx);
6370 #elif defined(HOST_ARM64)
6372     NYI_INTERP("Unimplemented code: MkRefAny");
6373 #elif defined(HOST_LOONGARCH64)
6375     NYI_INTERP("Unimplemented code: MkRefAny on LOONGARCH");
6376 #elif defined(HOST_RISCV64)
6378     NYI_INTERP("Unimplemented code: MkRefAny on RISCV64");
6380 #error "unsupported platform"
6383     tbr->type = TypeHandle(cls);
6384     OpStackTypeSet(idx, typedRefIT);
// Implements the 'refanytype' IL opcode: pops the TypedByRef on top of the operand
// stack and pushes the runtime type object (System.Type) for its embedded TypeHandle.
// Fix: the verification diagnostic previously said "RefAnyVal" (copied from the
// RefanyVal handler below); this is the refanytype handler, so the message now
// names the correct opcode.
6389 void Interpreter::RefanyType()
6397     _ASSERTE(m_curStackHt > 0);
6398     unsigned idx = m_curStackHt - 1;
6400     if (OpStackTypeGet(idx) != GetTypedRefIT(&m_interpCeeInfo))
6401         VerificationError("RefAnyType requires a TypedRef on the stack.");
6403     TypedByRef* ptbr = OpStackGet<TypedByRef*>(idx);
     // Pop the TypedByRef off the large-struct stack; the pointer remains valid
     // because popping does not deallocate large-struct storage.
6404     LargeStructOperandStackPop(sizeof(TypedByRef), ptbr);
6406     TypeHandle* pth = &ptbr->type;
6409     OBJECTREF classobj = TypeHandleToTypeRef(pth);
6411     OpStackSet<Object*>(idx, OBJECTREFToObject(classobj));
6412     OpStackTypeSet(idx, InterpreterType(CORINFO_TYPE_CLASS));
6417 // This (unfortunately) duplicates code in JIT_GetRuntimeTypeHandle, which
6418 // isn't callable because it sets up a Helper Method Frame.
// Converts a TypeHandle into its managed System.Type object. For non-TypeDesc
// handles, first checks for an already-created managed class object (the common,
// fast path) before falling back to creating one.
6419 OBJECTREF Interpreter::TypeHandleToTypeRef(TypeHandle* pth)
6421     OBJECTREF typePtr = NULL;
6422     if (!pth->IsTypeDesc())
6424         // Most common... and fastest case
6425         typePtr = pth->AsMethodTable()->GetManagedClassObjectIfExists();
6426         if (typePtr == NULL)
6428             typePtr = pth->GetManagedClassObject();
     // TypeDesc case (pointers, byrefs, generic variables, etc.): always go through
     // GetManagedClassObject.
6433         typePtr = pth->GetManagedClassObject();
// Thin wrapper over the EE interface: returns the primitive CorInfoType corresponding
// to a value class handle, or (per the JIT-EE contract) CORINFO_TYPE_UNDEF when the
// class is not a primitive value class.
6438 CorInfoType Interpreter::GetTypeForPrimitiveValueClass(CORINFO_CLASS_HANDLE clsHnd)
6448     return m_interpCeeInfo.getTypeForPrimitiveValueClass(clsHnd);
// Implements the 'refanyval' IL opcode: pops the TypedByRef on top of the stack,
// checks that its embedded type matches the class named by the following token
// (throwing InvalidCastException on mismatch), and pushes the address it wraps
// as a byref.
6451 void Interpreter::RefanyVal()
6459     _ASSERTE(m_curStackHt > 0);
6460     unsigned idx = m_curStackHt - 1;
6462     if (OpStackTypeGet(idx) != GetTypedRefIT(&m_interpCeeInfo))
6463         VerificationError("RefAnyVal requires a TypedRef on the stack.");
6466     InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_RefAnyVal]);
6467 #endif // INTERP_TRACING
6469     CORINFO_CLASS_HANDLE cls = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_RefAnyVal));
6470     TypeHandle expected(cls);
6472     TypedByRef* ptbr = OpStackGet<TypedByRef*>(idx);
     // Pop the TypedByRef off the large-struct stack before reading through ptbr;
     // popping does not deallocate the storage, so the pointer stays valid.
6473     LargeStructOperandStackPop(sizeof(TypedByRef), ptbr);
6474     if (expected != ptbr->type) ThrowInvalidCastException();
6476     OpStackSet<void*>(idx, static_cast<void*>(ptbr->data));
6477     OpStackTypeSet(idx, InterpreterType(CORINFO_TYPE_BYREF));
// Implements the 'ckfinite' IL opcode: checks that the floating-point value on top
// of the operand stack is finite (not NaN or infinity); throws an arithmetic
// exception otherwise. The operand is left on the stack unchanged.
6482 void Interpreter::CkFinite()
6490     _ASSERTE(m_curStackHt > 0);
6491     unsigned idx = m_curStackHt - 1;
6493     CorInfoType cit = OpStackTypeGet(idx).ToCorInfoType();
6498     case CORINFO_TYPE_FLOAT:
             // Widen float to double so one finiteness test below covers both cases.
6499         val = (double)OpStackGet<float>(idx);
6501     case CORINFO_TYPE_DOUBLE:
6502         val = OpStackGet<double>(idx);
6505         VerificationError("CkFinite requires a floating-point value on the stack.");
6510         ThrowSysArithException();
// Implements the 'ldtoken' IL opcode: resolves the following metadata token and
// pushes the corresponding runtime handle wrapper object (stub method info, stub
// field info, or the managed class object for a type) onto the operand stack.
6513 void Interpreter::LdToken()
6521     unsigned tokVal = getU4LittleEndian(m_ILCodePtr + 1);
6524     InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_LdToken]);
6525 #endif // INTERP_TRACING
6528     CORINFO_RESOLVED_TOKEN tok;
6531     ResolveToken(&tok, tokVal, CORINFO_TOKENKIND_Ldtoken InterpTracingArg(RTK_LdToken));
6534     // To save duplication of the factored code at the bottom, I don't do GCX_FORBID for
6535     // these Object* values, but this comment documents the intent.
     // The token kinds are checked in priority order: method, then field, then type.
6536     if (tok.hMethod != NULL)
6538         MethodDesc* pMethod = (MethodDesc*)tok.hMethod;
6539         Object* objPtr = OBJECTREFToObject((OBJECTREF)pMethod->GetStubMethodInfo());
6540         OpStackSet<Object*>(m_curStackHt, objPtr);
6542     else if (tok.hField != NULL)
6544         FieldDesc * pField = (FieldDesc *)tok.hField;
6545         Object* objPtr = OBJECTREFToObject((OBJECTREF)pField->GetStubFieldInfo());
6546         OpStackSet<Object*>(m_curStackHt, objPtr);
6550         TypeHandle th(tok.hClass);
6551         Object* objPtr = OBJECTREFToObject(th.GetManagedClassObject());
6552         OpStackSet<Object*>(m_curStackHt, objPtr);
6557     OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_CLASS));
// Implements the 'ldftn' IL opcode: resolves the method token following the (2-byte)
// opcode, obtains call info from the EE, and pushes the method's entry-point code
// address onto the operand stack as a native int. The method handle is also recorded
// in the parallel function-pointer stack so later calli/delegate logic can recover it.
6564 void Interpreter::LdFtn()
6572     unsigned tokVal = getU4LittleEndian(m_ILCodePtr + 2);
6575     InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_LdFtn]);
6576 #endif // INTERP_TRACING
6578     CORINFO_RESOLVED_TOKEN tok;
6579     CORINFO_CALL_INFO callInfo;
6582     ResolveToken(&tok, tokVal, CORINFO_TOKENKIND_Method InterpTracingArg(RTK_LdFtn));
6583     m_interpCeeInfo.getCallInfo(&tok, NULL, m_methInfo->m_method,
6584                   combine(CORINFO_CALLINFO_SECURITYCHECKS,CORINFO_CALLINFO_LDFTN),
6588     switch (callInfo.kind)
6592         PCODE pCode = ((MethodDesc *)callInfo.hMethod)->GetMultiCallableAddrOfCode();
6593         OpStackSet<void*>(m_curStackHt, (void *)pCode);
         // Keep the method handle alongside the raw code pointer for later consumers.
6594         GetFunctionPointerStack()[m_curStackHt] = callInfo.hMethod;
6597     case CORINFO_CALL_CODE_POINTER:
6598         NYI_INTERP("Indirect code pointer.");
6601         _ASSERTE_MSG(false, "Should not reach here: unknown call kind.");
6604     OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_NATIVEINT));
// Implements the 'ldvirtftn' IL opcode: resolves the method token, then replaces the
// object reference on top of the stack with the address of the method's implementation
// as virtualized for that object's dynamic type. Non-virtual targets fall back to the
// method's ordinary multi-callable address.
6609 void Interpreter::LdVirtFtn()
6617     _ASSERTE(m_curStackHt >= 1);
6618     unsigned ind = m_curStackHt - 1;
6620     unsigned tokVal = getU4LittleEndian(m_ILCodePtr + 2);
6623     InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_LdVirtFtn]);
6624 #endif // INTERP_TRACING
6626     CORINFO_RESOLVED_TOKEN tok;
6627     CORINFO_CALL_INFO callInfo;
6628     CORINFO_CLASS_HANDLE classHnd;
6629     CORINFO_METHOD_HANDLE methodHnd;
6632     ResolveToken(&tok, tokVal, CORINFO_TOKENKIND_Method InterpTracingArg(RTK_LdVirtFtn));
6633     m_interpCeeInfo.getCallInfo(&tok, NULL, m_methInfo->m_method,
6634                                 combine(CORINFO_CALLINFO_CALLVIRT,
6635                                         combine(CORINFO_CALLINFO_SECURITYCHECKS,
6636                                                 CORINFO_CALLINFO_LDFTN)),
6640     classHnd = tok.hClass;
6641     methodHnd = tok.hMethod;
6644     MethodDesc * pMD = (MethodDesc *)methodHnd;
6646     if (pMD->IsVtableMethod())
6648         Object* obj = OpStackGet<Object*>(ind);
6649         ThrowOnInvalidPointer(obj);
         // GC-protect the object reference while resolving the virtualized code
         // address, which may trigger a collection.
6651         OBJECTREF objRef = ObjectToOBJECTREF(obj);
6652         GCPROTECT_BEGIN(objRef);
6653         pCode = pMD->GetMultiCallableAddrOfVirtualizedCode(&objRef, TypeHandle(classHnd));
         // Map the resolved entry point back to the concrete MethodDesc it targets.
6656         pMD = Entry2MethodDesc(pCode, TypeHandle(classHnd).GetMethodTable());
6660         pCode = pMD->GetMultiCallableAddrOfCode();
6662     OpStackSet<void*>(ind, (void *)pCode);
6663     GetFunctionPointerStack()[ind] = (CORINFO_METHOD_HANDLE)pMD;
6665     OpStackTypeSet(ind, InterpreterType(CORINFO_TYPE_NATIVEINT));
// Implements the 'sizeof' IL opcode: pushes the size in bytes of the type named by
// the token following the (2-byte) opcode, as an int32. Reference types report the
// size of an object reference per ECMA-335; value types report their layout size.
6669 void Interpreter::Sizeof()
6678     InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_Sizeof]);
6679 #endif // INTERP_TRACING
6681     CORINFO_CLASS_HANDLE cls = GetTypeFromToken(m_ILCodePtr + 2, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_Sizeof));
6685     CorInfoType cit = ::asCorInfoType(cls);
6686     // For class types, the ECMA spec says to return the size of the object reference, not the referent
6687     // object.  Everything else should be a value type, for which we can just return the size as reported
6691     case CORINFO_TYPE_CLASS:
6692         sz = sizeof(Object*);
6695         sz = getClassSize(cls);
6700     OpStackSet<UINT32>(m_curStackHt, sz);
6701     OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_INT));
// Definitions of the Interpreter's process-wide static state.
// s_initialized guards one-time Initialize/Terminate; the three TypedRef statics
// cache TypedByRef metadata and are valid only after InitializeCompilerStatics
// sets s_compilerStaticsInitialized.
6708 bool Interpreter::s_initialized = false;
6709 bool Interpreter::s_compilerStaticsInitialized = false;
6710 size_t Interpreter::s_TypedRefSize;
6711 CORINFO_CLASS_HANDLE Interpreter::s_TypedRefClsHnd;
6712 InterpreterType Interpreter::s_TypedRefIT;
6714 // Must call GetTypedRefIT
// Returns the cached size of the TypedByRef struct. Requires that
// InitializeCompilerStatics has already run (asserted below).
6715 size_t Interpreter::GetTypedRefSize(CEEInfo* info)
6717     _ASSERTE_MSG(s_compilerStaticsInitialized, "Precondition");
6718     return s_TypedRefSize;
// Returns the cached InterpreterType for TypedByRef. Requires that
// InitializeCompilerStatics has already run (asserted below).
6721 InterpreterType Interpreter::GetTypedRefIT(CEEInfo* info)
6723     _ASSERTE_MSG(s_compilerStaticsInitialized, "Precondition");
6724     return s_TypedRefIT;
// Returns the cached class handle for TypedByRef. Requires that
// InitializeCompilerStatics has already run (asserted below).
6727 CORINFO_CLASS_HANDLE Interpreter::GetTypedRefClsHnd(CEEInfo* info)
6729     _ASSERTE_MSG(s_compilerStaticsInitialized, "Precondition");
6730     return s_TypedRefClsHnd;
// One-time process-wide interpreter initialization: reads the configuration knobs
// (which methods to interpret, caching / loose-rules / loop-method flags), creates
// the two leaf locks, and optionally sets up IL-instruction profiling categories.
// Must be called exactly once (asserted below).
6733 void Interpreter::Initialize()
6735     _ASSERTE(!s_initialized);
6737     s_InterpretMeths.ensureInit(CLRConfig::INTERNAL_Interpret);
6738     s_InterpretMethsExclude.ensureInit(CLRConfig::INTERNAL_InterpretExclude);
6739     s_InterpreterUseCaching = (s_InterpreterUseCachingFlag.val(CLRConfig::INTERNAL_InterpreterUseCaching) != 0);
6740     s_InterpreterLooseRules = (s_InterpreterLooseRulesFlag.val(CLRConfig::INTERNAL_InterpreterLooseRules) != 0);
6741     s_InterpreterDoLoopMethods = (s_InterpreterDoLoopMethodsFlag.val(CLRConfig::INTERNAL_InterpreterDoLoopMethods) != 0);
6743     // Initialize the lock used to protect method locks.
6744     // TODO: it would be better if this were a reader/writer lock.
6745     s_methodCacheLock.Init(CrstLeafLock, CRST_DEFAULT);
6747     // Similarly, initialize the lock used to protect the map from
6748     // interpreter stub addresses to their method descs.
6749     s_interpStubToMDMapLock.Init(CrstLeafLock, CRST_DEFAULT);
6751     s_initialized = true;
6753 #if INTERP_ILINSTR_PROFILE
6754     SetILInstrCategories();
6755 #endif // INTERP_ILINSTR_PROFILE
// Lazily initializes the cached TypedByRef statics (class handle, InterpreterType,
// size) on first use. The initialization is idempotent; as the TODO notes, it relies
// on idempotent writes rather than explicit synchronization, which is only safe on
// strong memory models such as x86.
6758 void Interpreter::InitializeCompilerStatics(CEEInfo* info)
6760     if (!s_compilerStaticsInitialized)
6762         // TODO: I believe I need no synchronization around this on x86, but I do
6763         // on more permissive memory models.  (Why it's OK on x86: each thread executes this
6764         // before any access to the initialized static variables; if several threads do
6765         // so, they perform idempotent initializing writes to the statics.
6767         s_TypedRefClsHnd = info->getBuiltinClass(CLASSID_TYPED_BYREF);
6768         s_TypedRefIT = InterpreterType(info, s_TypedRefClsHnd);
6769         s_TypedRefSize = getClassSize(s_TypedRefClsHnd);
     // The "initialized" flag must become visible only after the statics above;
     // see the TODO below about the missing store-store barrier.
6770         s_compilerStaticsInitialized = true;
6771         // TODO: Need store-store memory barrier here.
// Process-wide interpreter shutdown: destroys the locks created by Initialize and
// clears the initialized flag so a subsequent Initialize would be legal.
6775 void Interpreter::Terminate()
6779     s_methodCacheLock.Destroy();
6780     s_interpStubToMDMapLock.Destroy();
6781     s_initialized = false;
6785 #if INTERP_ILINSTR_PROFILE
// Builds the IL-instruction profiling category table: each opcode initially maps to
// itself, then families of related opcodes (short forms, per-index variants, typed
// variants) are collapsed onto a single representative opcode so profile counts
// aggregate by instruction family.
6786 void Interpreter::SetILInstrCategories()
6788     // Start with the identity maps
6789     for (unsigned short instr = 0; instr < 512; instr++) s_ILInstrCategories[instr] = instr;
6790     // Now make exceptions.
6791     for (unsigned instr = CEE_LDARG_0; instr <= CEE_LDARG_3; instr++) s_ILInstrCategories[instr] = CEE_LDARG;
6792     s_ILInstrCategories[CEE_LDARG_S] = CEE_LDARG;
6794     for (unsigned instr = CEE_LDLOC_0; instr <= CEE_LDLOC_3; instr++) s_ILInstrCategories[instr] = CEE_LDLOC;
6795     s_ILInstrCategories[CEE_LDLOC_S] = CEE_LDLOC;
6797     for (unsigned instr = CEE_STLOC_0; instr <= CEE_STLOC_3; instr++) s_ILInstrCategories[instr] = CEE_STLOC;
6798     s_ILInstrCategories[CEE_STLOC_S] = CEE_STLOC;
6800     s_ILInstrCategories[CEE_LDLOCA_S] = CEE_LDLOCA;
6802     for (unsigned instr = CEE_LDC_I4_M1; instr <= CEE_LDC_I4_S; instr++) s_ILInstrCategories[instr] = CEE_LDC_I4;
6804     for (unsigned instr = CEE_BR_S; instr <= CEE_BLT_UN; instr++) s_ILInstrCategories[instr] = CEE_BR;
6806     for (unsigned instr = CEE_LDIND_I1; instr <= CEE_LDIND_REF; instr++) s_ILInstrCategories[instr] = CEE_LDIND_I;
6808     for (unsigned instr = CEE_STIND_REF; instr <= CEE_STIND_R8; instr++) s_ILInstrCategories[instr] = CEE_STIND_I;
6810     for (unsigned instr = CEE_ADD; instr <= CEE_REM_UN; instr++) s_ILInstrCategories[instr] = CEE_ADD;
6812     for (unsigned instr = CEE_AND; instr <= CEE_NOT; instr++) s_ILInstrCategories[instr] = CEE_AND;
6814     for (unsigned instr = CEE_CONV_I1; instr <= CEE_CONV_U8; instr++) s_ILInstrCategories[instr] = CEE_CONV_I;
6815     for (unsigned instr = CEE_CONV_OVF_I1_UN; instr <= CEE_CONV_OVF_U_UN; instr++) s_ILInstrCategories[instr] = CEE_CONV_I;
6817     for (unsigned instr = CEE_LDELEM_I1; instr <= CEE_LDELEM_REF; instr++) s_ILInstrCategories[instr] = CEE_LDELEM;
6818     for (unsigned instr = CEE_STELEM_I; instr <= CEE_STELEM_REF; instr++) s_ILInstrCategories[instr] = CEE_STELEM;
6820     for (unsigned instr = CEE_CONV_OVF_I1; instr <= CEE_CONV_OVF_U8; instr++) s_ILInstrCategories[instr] = CEE_CONV_I;
6821     for (unsigned instr = CEE_CONV_U2; instr <= CEE_CONV_U1; instr++) s_ILInstrCategories[instr] = CEE_CONV_I;
6822     for (unsigned instr = CEE_CONV_OVF_I; instr <= CEE_CONV_OVF_U; instr++) s_ILInstrCategories[instr] = CEE_CONV_I;
6824     for (unsigned instr = CEE_ADD_OVF; instr <= CEE_SUB_OVF; instr++) s_ILInstrCategories[instr] = CEE_ADD_OVF;
6826     s_ILInstrCategories[CEE_LEAVE_S] = CEE_LEAVE;
6827     s_ILInstrCategories[CEE_CONV_U] = CEE_CONV_I;
6829 #endif // INTERP_ILINSTR_PROFILE
// Implements the comparison opcodes (ceq/cgt/cgt.un/clt/clt.un, selected by the 'op'
// template parameter): pops the two operands, computes the 0/1 result via
// CompareOpRes, and pushes it back as an int32.
6833 void Interpreter::CompareOp()
6841     _ASSERTE(m_curStackHt >= 2);
6842     unsigned op1idx = m_curStackHt - 2;
6843     INT32 res = CompareOpRes<op>(op1idx);
6844     OpStackSet<INT32>(op1idx, res);
6845     OpStackTypeSet(op1idx, InterpreterType(CORINFO_TYPE_INT));
// Computes the result (0 or 1) of comparison operator 'op' (template parameter; one
// of the CO_* BranchComparisonOp values) applied to the operands at op1idx and
// op1idx+1 on the operand stack. Dispatches on the stack-normal type of the first
// operand, then validates/coerces the second operand's type, permitting extra
// combinations (e.g. byref vs. native int, long vs. native int) under
// s_InterpreterLooseRules. Raises VerificationError on a type mismatch.
6850 INT32 Interpreter::CompareOpRes(unsigned op1idx)
6858     _ASSERTE(m_curStackHt >= op1idx + 2);
6859     unsigned op2idx = op1idx + 1;
6860     InterpreterType t1 = OpStackTypeGet(op1idx);
6861     CorInfoType cit1 = t1.ToCorInfoType();
6862     _ASSERTE(IsStackNormalType(cit1));
6863     InterpreterType t2 = OpStackTypeGet(op2idx);
6864     CorInfoType cit2 = t2.ToCorInfoType();
6865     _ASSERTE(IsStackNormalType(cit2));
     // --- First operand is int32 ---
6870     case CORINFO_TYPE_INT:
6871         if (cit2 == CORINFO_TYPE_INT)
6873             INT32 val1 = OpStackGet<INT32>(op1idx);
6874             INT32 val2 = OpStackGet<INT32>(op2idx);
6877                 if (val1 == val2) res = 1;
6879             else if (op == CO_GT)
6881                 if (val1 > val2) res = 1;
6883             else if (op == CO_GT_UN)
                     // Unsigned comparisons reinterpret the bits, per ECMA-335.
6885                 if (static_cast<UINT32>(val1) > static_cast<UINT32>(val2)) res = 1;
6887             else if (op == CO_LT)
6889                 if (val1 < val2) res = 1;
6893                 _ASSERTE(op == CO_LT_UN);
6894                 if (static_cast<UINT32>(val1) < static_cast<UINT32>(val2)) res = 1;
         // int32 vs. native int (or byref under loose rules, or a value class that
         // is a punning name for a native-int-sized primitive): widen and compare
         // at native-int width.
6897         else if (cit2 == CORINFO_TYPE_NATIVEINT ||
6898                  (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_BYREF) ||
6899                  (cit2 == CORINFO_TYPE_VALUECLASS
6900                   && CorInfoTypeStackNormalize(GetTypeForPrimitiveValueClass(t2.ToClassHandle())) == CORINFO_TYPE_NATIVEINT))
6902             NativeInt val1 = OpStackGet<NativeInt>(op1idx);
6903             NativeInt val2 = OpStackGet<NativeInt>(op2idx);
6906                 if (val1 == val2) res = 1;
6908             else if (op == CO_GT)
6910                 if (val1 > val2) res = 1;
6912             else if (op == CO_GT_UN)
6914                 if (static_cast<NativeUInt>(val1) > static_cast<NativeUInt>(val2)) res = 1;
6916             else if (op == CO_LT)
6918                 if (val1 < val2) res = 1;
6922                 _ASSERTE(op == CO_LT_UN);
6923                 if (static_cast<NativeUInt>(val1) < static_cast<NativeUInt>(val2)) res = 1;
         // int32 vs. a primitive-punning value class of int width: read the second
         // operand at its true (possibly sub-int) size with correct signedness.
6926         else if (cit2 == CORINFO_TYPE_VALUECLASS)
6928             cit2 = GetTypeForPrimitiveValueClass(t2.ToClassHandle());
6929             INT32 val1 = OpStackGet<INT32>(op1idx);
6931             if (CorInfoTypeStackNormalize(cit2) == CORINFO_TYPE_INT)
6934                 size_t sz = t2.Size(&m_interpCeeInfo);
6938                     if (CorInfoTypeIsUnsigned(cit2))
6940                         val2 = OpStackGet<UINT8>(op2idx);
6944                         val2 = OpStackGet<INT8>(op2idx);
6948                     if (CorInfoTypeIsUnsigned(cit2))
6950                         val2 = OpStackGet<UINT16>(op2idx);
6954                         val2 = OpStackGet<INT16>(op2idx);
6958                     val2 = OpStackGet<INT32>(op2idx);
6966                 VerificationError("Can't compare with struct type.");
6970                 if (val1 == val2) res = 1;
6972             else if (op == CO_GT)
6974                 if (val1 > val2) res = 1;
6976             else if (op == CO_GT_UN)
6978                 if (static_cast<UINT32>(val1) > static_cast<UINT32>(val2)) res = 1;
6980             else if (op == CO_LT)
6982                 if (val1 < val2) res = 1;
6986                 _ASSERTE(op == CO_LT_UN);
6987                 if (static_cast<UINT32>(val1) < static_cast<UINT32>(val2)) res = 1;
6992             VerificationError("Binary comparison operation: type mismatch.");
     // --- First operand is native int ---
6995     case CORINFO_TYPE_NATIVEINT:
6996         if (cit2 == CORINFO_TYPE_NATIVEINT || cit2 == CORINFO_TYPE_INT
6997             || (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_LONG)
6998             || (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_BYREF)
             // A null object reference may be compared against a native int under
             // loose rules (value is required to be 0, asserted below).
6999             || (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_CLASS && OpStackGet<void*>(op2idx) == 0))
7001             NativeInt val1 = OpStackGet<NativeInt>(op1idx);
7003             if (cit2 == CORINFO_TYPE_NATIVEINT)
7005                 val2 = OpStackGet<NativeInt>(op2idx);
7007             else if (cit2 == CORINFO_TYPE_INT)
7009                 val2 = static_cast<NativeInt>(OpStackGet<INT32>(op2idx));
7011             else if (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_LONG)
7013                 val2 = static_cast<NativeInt>(OpStackGet<INT64>(op2idx));
7015             else if (cit2 == CORINFO_TYPE_CLASS)
7017                 _ASSERTE(OpStackGet<void*>(op2idx) == 0);
7022                 _ASSERTE(s_InterpreterLooseRules && cit2 == CORINFO_TYPE_BYREF);
7023                 val2 = reinterpret_cast<NativeInt>(OpStackGet<void*>(op2idx));
7027                 if (val1 == val2) res = 1;
7029             else if (op == CO_GT)
7031                 if (val1 > val2) res = 1;
7033             else if (op == CO_GT_UN)
7035                 if (static_cast<NativeUInt>(val1) > static_cast<NativeUInt>(val2)) res = 1;
7037             else if (op == CO_LT)
7039                 if (val1 < val2) res = 1;
7043                 _ASSERTE(op == CO_LT_UN);
7044                 if (static_cast<NativeUInt>(val1) < static_cast<NativeUInt>(val2)) res = 1;
7049             VerificationError("Binary comparison operation: type mismatch.");
     // --- First operand is int64 ---
7052     case CORINFO_TYPE_LONG:
7054         bool looseLong = false;
7055 #if defined(HOST_AMD64)
         // On 64-bit, native int and byref are the same width as long, so loose
         // rules can permit the mix.
7056         looseLong = s_InterpreterLooseRules && (cit2 == CORINFO_TYPE_NATIVEINT || cit2 == CORINFO_TYPE_BYREF);
7058         if (cit2 == CORINFO_TYPE_LONG || looseLong)
7060             INT64 val1 = OpStackGet<INT64>(op1idx);
7061             INT64 val2 = OpStackGet<INT64>(op2idx);
7064                 if (val1 == val2) res = 1;
7066             else if (op == CO_GT)
7068                 if (val1 > val2) res = 1;
7070             else if (op == CO_GT_UN)
7072                 if (static_cast<UINT64>(val1) > static_cast<UINT64>(val2)) res = 1;
7074             else if (op == CO_LT)
7076                 if (val1 < val2) res = 1;
7080                 _ASSERTE(op == CO_LT_UN);
7081                 if (static_cast<UINT64>(val1) < static_cast<UINT64>(val2)) res = 1;
7086             VerificationError("Binary comparison operation: type mismatch.");
     // --- First operand is an object reference ---
7091     case CORINFO_TYPE_CLASS:
7092     case CORINFO_TYPE_STRING:
7093         if (cit2 == CORINFO_TYPE_CLASS || cit2 == CORINFO_TYPE_STRING)
7096             Object* val1 = OpStackGet<Object*>(op1idx);
7097             Object* val2 = OpStackGet<Object*>(op2idx);
             // Object references support only identity comparisons: CO_EQ, and
             // CO_GT_UN which ECMA-335 defines as the non-equality test for refs.
7100                 if (val1 == val2) res = 1;
7102             else if (op == CO_GT_UN)
7104                 if (val1 != val2) res  = 1;
7108                 VerificationError("Binary comparison operation: type mismatch.");
7113             VerificationError("Binary comparison operation: type mismatch.");
     // --- First operand is float ---
7118     case CORINFO_TYPE_FLOAT:
7120         bool isDouble = (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_DOUBLE);
7121         if (cit2 == CORINFO_TYPE_FLOAT || isDouble)
7123             float val1 = OpStackGet<float>(op1idx);
7124             float val2 = (isDouble) ? (float) OpStackGet<double>(op2idx) : OpStackGet<float>(op2idx);
7127                 // I'm assuming IEEE math here, so that if at least one is a NAN, the comparison will fail...
7128                 if (val1 == val2) res = 1;
7130             else if (op == CO_GT)
7132                 // I'm assuming that C++ arithmetic does the right thing here with infinities and NANs.
7133                 if (val1 > val2) res = 1;
7135             else if (op == CO_GT_UN)
7137                 // Check for NAN's here: if either is a NAN, they're unordered, so this comparison returns true.
7138                 if (_isnan(val1) || _isnan(val2)) res = 1;
7139                 else if (val1 > val2) res = 1;
7141             else if (op == CO_LT)
7143                 if (val1 < val2) res = 1;
7147                 _ASSERTE(op == CO_LT_UN);
7148                 // Check for NAN's here: if either is a NAN, they're unordered, so this comparison returns true.
7149                 if (_isnan(val1) || _isnan(val2)) res = 1;
7150                 else if (val1 < val2) res = 1;
7155             VerificationError("Binary comparison operation: type mismatch.");
     // --- First operand is double ---
7160     case CORINFO_TYPE_DOUBLE:
7162         bool isFloat = (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_FLOAT);
7163         if (cit2 == CORINFO_TYPE_DOUBLE || isFloat)
7165             double val1 = OpStackGet<double>(op1idx);
7166             double val2 = (isFloat) ? (double) OpStackGet<float>(op2idx) : OpStackGet<double>(op2idx);
7169                 // I'm assuming IEEE math here, so that if at least one is a NAN, the comparison will fail...
7170                 if (val1 == val2) res = 1;
7172             else if (op == CO_GT)
7174                 // I'm assuming that C++ arithmetic does the right thing here with infinities and NANs.
7175                 if (val1 > val2) res = 1;
7177             else if (op == CO_GT_UN)
7179                 // Check for NAN's here: if either is a NAN, they're unordered, so this comparison returns true.
7180                 if (_isnan(val1) || _isnan(val2)) res = 1;
7181                 else if (val1 > val2) res = 1;
7183             else if (op == CO_LT)
7185                 if (val1 < val2) res = 1;
7189                 _ASSERTE(op == CO_LT_UN);
7190                 // Check for NAN's here: if either is a NAN, they're unordered, so this comparison returns true.
7191                 if (_isnan(val1) || _isnan(val2)) res = 1;
7192                 else if (val1 < val2) res = 1;
7197             VerificationError("Binary comparison operation: type mismatch.");
     // --- First operand is a managed pointer ---
7202     case CORINFO_TYPE_BYREF:
7203         if (cit2 == CORINFO_TYPE_BYREF || (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_NATIVEINT))
7205             NativeInt val1 = reinterpret_cast<NativeInt>(OpStackGet<void*>(op1idx));
7207             if (cit2 == CORINFO_TYPE_BYREF)
7209                 val2 = reinterpret_cast<NativeInt>(OpStackGet<void*>(op2idx));
7213                 _ASSERTE(s_InterpreterLooseRules && cit2 == CORINFO_TYPE_NATIVEINT);
7214                 val2 = OpStackGet<NativeInt>(op2idx);
7218                 if (val1 == val2) res = 1;
7220             else if (op == CO_GT)
7222                 if (val1 > val2) res = 1;
7224             else if (op == CO_GT_UN)
7226                 if (static_cast<NativeUInt>(val1) > static_cast<NativeUInt>(val2)) res = 1;
7228             else if (op == CO_LT)
7230                 if (val1 < val2) res = 1;
7234                 _ASSERTE(op == CO_LT_UN);
7235                 if (static_cast<NativeUInt>(val1) < static_cast<NativeUInt>(val2)) res = 1;
7240             VerificationError("Binary comparison operation: type mismatch.");
     // --- First operand is a value class: only primitive-punning value classes
     //     are comparable; true structs are a verification error.
7244     case CORINFO_TYPE_VALUECLASS:
7246         CorInfoType newCit1 = GetTypeForPrimitiveValueClass(t1.ToClassHandle());
7247         if (newCit1 == CORINFO_TYPE_UNDEF)
7249             VerificationError("Can't compare a value class.");
7253             NYI_INTERP("Must eliminate 'punning' value classes from the ostack.");
7259         _ASSERTE(false); // Should not be here if the type is stack-normal.
// Implements brtrue/brfalse (and their short forms): pops the top of the operand
// stack and branches when (value != 0) matches the 'val' template parameter.
// 'targetLen' is the branch-offset width in bytes (1 for short form, 4 for long).
// Value classes that are punning names for primitive integral types are unwrapped
// first; float/double operands are a verification error.
7265 template<bool val, int targetLen>
7266 void Interpreter::BrOnValue()
7268     _ASSERTE(targetLen == 1 || targetLen == 4);
7269     _ASSERTE(m_curStackHt > 0);
7270     unsigned stackInd = m_curStackHt - 1;
7271     InterpreterType it = OpStackTypeGet(stackInd);
7273     // It shouldn't be a value class, unless it's a punning name for a primitive integral type.
7274     if (it.ToCorInfoType() == CORINFO_TYPE_VALUECLASS)
7277         CorInfoType cit = m_interpCeeInfo.getTypeForPrimitiveValueClass(it.ToClassHandle());
7278         if (CorInfoTypeIsIntegral(cit))
7280             it = InterpreterType(cit);
7284             VerificationError("Can't branch on the value of a value type that is not a primitive type.");
7289     switch (it.ToCorInfoType())
7291     case CORINFO_TYPE_FLOAT:
7292     case CORINFO_TYPE_DOUBLE:
7293         VerificationError("Can't branch on the value of a float or double.");
     // Read the operand at its actual size so the zero test is exact.
7300     switch (it.SizeNotStruct())
7304         INT32 branchVal = OpStackGet<INT32>(stackInd);
7305         BrOnValueTakeBranch((branchVal != 0) == val, targetLen);
7310         INT64 branchVal = OpStackGet<INT64>(stackInd);
7311         BrOnValueTakeBranch((branchVal != 0) == val, targetLen);
7315     // The value-class case handled above makes sizes 1 and 2 possible.
7318         INT8 branchVal = OpStackGet<INT8>(stackInd);
7319         BrOnValueTakeBranch((branchVal != 0) == val, targetLen);
7324         INT16 branchVal = OpStackGet<INT16>(stackInd);
7325         BrOnValueTakeBranch((branchVal != 0) == val, targetLen);
     // The operand is consumed: shrink the stack by one.
7332     m_curStackHt = stackInd;
7335 // compOp is a member of the BranchComparisonOp enumeration.
// Implements the two-operand conditional branches (beq/bgt/blt/bne.un/... and their
// short forms): computes the comparison via CompareOpRes<compOp>, optionally inverts
// it ('reverse'), and either takes the relative branch or falls through to the next
// instruction. 'targetLen' is the offset width (1 or 4 bytes).
7336 template<int compOp, bool reverse, int targetLen>
7337 void Interpreter::BrOnComparison()
7345     _ASSERTE(targetLen == 1 || targetLen == 4);
7346     _ASSERTE(m_curStackHt >= 2);
7347     unsigned v1Ind = m_curStackHt - 2;
7349     INT32 res = CompareOpRes<compOp>(v1Ind);
     // 'reverse' turns e.g. beq into bne.un by negating the comparison result.
7352         res = (res == 0) ? 1 : 0;
7360         // BYTE is unsigned...
7361         offset = getI1(m_ILCodePtr + 1);
7365         offset = getI4LittleEndian(m_ILCodePtr + 1);
7367     // 1 is the size of the current instruction; offset is relative to start of next.
7370         // Backwards branch; enable caching.
7371         BackwardsBranchActions(offset);
7373     ExecuteBranch(m_ILCodePtr + 1 + targetLen + offset);
     // Not taken: skip over the opcode byte plus the offset operand.
7377         m_ILCodePtr += targetLen + 1;
// Implements the 'ldfld' IL opcode: replaces the object reference / pointer / value
// type on top of the operand stack with the value of the named instance field.
// When 'fldIn' is non-NULL the caller has already resolved the field (a pseudo-ldfld)
// and remains responsible for advancing the IL pointer; when NULL, the token after
// the opcode is resolved (with optional per-offset caching) and m_ILCodePtr is
// advanced here. The ordering of operations is GC-sensitive: struct InterpreterTypes
// are constructed before any raw interior pointers are taken, and value/type pushes
// are kept adjacent so stack and type stack stay consistent across GC.
7382 void Interpreter::LdFld(FieldDesc* fldIn)
7390     BarrierIfVolatile();
7392     FieldDesc* fld = fldIn;
7393     CORINFO_CLASS_HANDLE valClsHnd = NULL;
7397     unsigned ilOffset = CurOffset();
7398     if (fld == NULL && s_InterpreterUseCaching)
7401         InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_LdFld]);
7402 #endif // INTERP_TRACING
7403         fld = GetCachedInstanceField(ilOffset);
7407         unsigned tok = getU4LittleEndian(m_ILCodePtr + sizeof(BYTE));
7408         fld = FindField(tok InterpTracingArg(RTK_LdFld));
7409         _ASSERTE(fld != NULL);
7411         fldOffset = fld->GetOffset();
7412         if (s_InterpreterUseCaching && fldOffset < FIELD_OFFSET_LAST_REAL_OFFSET)
7413             CacheInstanceField(ilOffset, fld);
7417         fldOffset = fld->GetOffset();
7420     CorInfoType valCit = CEEInfo::asCorInfoType(fld->GetFieldType());
7422     // If "fldIn" is non-NULL, it's not a "real" LdFld -- the caller should handle updating the instruction pointer.
7424         m_ILCodePtr += 5;  // Last use above, so update now.
7426     // We need to construct the interpreter type for a struct type before we try to do coordinated
7427     // pushes of the value and type on the opstacks -- these must be atomic wrt GC, and constructing
7428     // a struct InterpreterType transitions to preemptive mode.
7429     InterpreterType structValIT;
7430     if (valCit == CORINFO_TYPE_VALUECLASS)
7433         valCit = m_interpCeeInfo.getFieldType(CORINFO_FIELD_HANDLE(fld), &valClsHnd, nullptr);
7434         structValIT = InterpreterType(&m_interpCeeInfo, valClsHnd);
7437     UINT sz = fld->GetSize();
7439     // Live vars: valCit, structValIt
7440     _ASSERTE(m_curStackHt > 0);
7441     unsigned stackInd = m_curStackHt - 1;
7442     InterpreterType addrIt = OpStackTypeGet(stackInd);
7443     CorInfoType addrCit = addrIt.ToCorInfoType();
     // Case 1: the "this" operand is an object reference.
7446     if (addrCit == CORINFO_TYPE_CLASS)
7448         OBJECTREF obj = OBJECTREF(OpStackGet<Object*>(stackInd));
7449         ThrowOnInvalidPointer(OBJECTREFToObject(obj));
7450         if (valCit == CORINFO_TYPE_VALUECLASS)
7452             void* srcPtr = fld->GetInstanceAddress(obj);
7454             // srcPtr is now vulnerable.
7457             MethodTable* valClsMT = GetMethodTableFromClsHnd(valClsHnd);
7458             if (sz > sizeof(INT64))
7460                 // Large struct case: allocate space on the large struct operand stack.
7461                 void* destPtr = LargeStructOperandStackPush(sz);
7462                 OpStackSet<void*>(stackInd, destPtr);
7463                 CopyValueClass(destPtr, srcPtr, valClsMT);
7467                 // Small struct case -- is inline in operand stack.
7468                 OpStackSet<INT64>(stackInd, GetSmallStructValue(srcPtr, sz));
         // Non-struct field of an object: compute the interior pointer and load
         // at the field's true size/signedness.
7473             BYTE* fldStart = dac_cast<PTR_BYTE>(OBJECTREFToObject(obj)) + sizeof(Object) + fldOffset;
7474             // fldStart is now a vulnerable byref
7480                 isUnsigned = CorInfoTypeIsUnsigned(valCit);
7483                     OpStackSet<UINT32>(stackInd, *reinterpret_cast<UINT8*>(fldStart));
7487                     OpStackSet<INT32>(stackInd, *reinterpret_cast<INT8*>(fldStart));
7491                 isUnsigned = CorInfoTypeIsUnsigned(valCit);
7494                     OpStackSet<UINT32>(stackInd, *reinterpret_cast<UINT16*>(fldStart));
7498                     OpStackSet<INT32>(stackInd, *reinterpret_cast<INT16*>(fldStart));
7502                 OpStackSet<INT32>(stackInd, *reinterpret_cast<INT32*>(fldStart));
7505                 OpStackSet<INT64>(stackInd, *reinterpret_cast<INT64*>(fldStart));
7508                 _ASSERTE_MSG(false, "Should not reach here.");
     // Case 2: the operand is a pointer (byref/native int) or a value-type instance.
7516         if (addrCit == CORINFO_TYPE_VALUECLASS)
7518             size_t addrSize = addrIt.Size(&m_interpCeeInfo);
7519             // The ECMA spec allows ldfld to be applied to "an instance of a value type."
7520             // We will take the address of the ostack entry.
7521             if (addrIt.IsLargeStruct(&m_interpCeeInfo))
7523                 ptr = reinterpret_cast<INT8*>(OpStackGet<void*>(stackInd));
7524                 // This is delicate.  I'm going to pop the large struct off the large-struct stack
7525                 // now, even though the field value we push may go back on the large object stack.
7526                 // We rely on the fact that this instruction doesn't do any other pushing, and
7527                 // we assume that LargeStructOperandStackPop does not actually deallocate any memory,
7528                 // and we rely on memcpy properly handling possibly-overlapping regions being copied.
7529                 // Finally (wow, this really *is* delicate), we rely on the property that the large-struct
7530                 // stack pop operation doesn't deallocate memory (the size of the allocated memory for the
7531                 // large-struct stack only grows in a method execution), and that if we push the field value
7532                 // on the large struct stack below, the size of the pushed item is at most the size of the
7533                 // popped item, so the stack won't grow (which would allow a dealloc/realloc).
7534                 // (All in all, maybe it would be better to just copy the value elsewhere then pop...but
7535                 // that wouldn't be very aggressive.)
7536                 LargeStructOperandStackPop(addrSize, ptr);
7540                 ptr = reinterpret_cast<INT8*>(OpStackGetAddr(stackInd, addrSize));
7545             _ASSERTE(CorInfoTypeIsPointer(addrCit));
7546             ptr = OpStackGet<INT8*>(stackInd);
7547             ThrowOnInvalidPointer(ptr);
7550         _ASSERTE(ptr != NULL);
     // 'ptr' now addresses the start of the containing instance; load the field value.
7553         if (valCit == CORINFO_TYPE_VALUECLASS)
7555             if (sz > sizeof(INT64))
7557                 // Large struct case.
7558                 void* dstPtr = LargeStructOperandStackPush(sz);
7559                 memcpy(dstPtr, ptr, sz);
7560                 OpStackSet<void*>(stackInd, dstPtr);
7564                 // Small struct case -- is inline in operand stack.
7565                 OpStackSet<INT64>(stackInd, GetSmallStructValue(ptr, sz));
7567             OpStackTypeSet(stackInd, structValIT.StackNormalize());
7574             isUnsigned = CorInfoTypeIsUnsigned(valCit);
7577                 OpStackSet<UINT32>(stackInd, *reinterpret_cast<UINT8*>(ptr));
7581                 OpStackSet<INT32>(stackInd, *reinterpret_cast<INT8*>(ptr));
7585             isUnsigned = CorInfoTypeIsUnsigned(valCit);
7588                 OpStackSet<UINT32>(stackInd, *reinterpret_cast<UINT16*>(ptr));
7592                 OpStackSet<INT32>(stackInd, *reinterpret_cast<INT16*>(ptr));
7596             OpStackSet<INT32>(stackInd, *reinterpret_cast<INT32*>(ptr));
7599             OpStackSet<INT64>(stackInd, *reinterpret_cast<INT64*>(ptr));
     // Finally, record the (stack-normalized) type of the loaded value.
7603     if (valCit == CORINFO_TYPE_VALUECLASS)
7605         OpStackTypeSet(stackInd, structValIT.StackNormalize());
7609         OpStackTypeSet(stackInd, InterpreterType(valCit).StackNormalize());
// Implements the 'ldflda' IL opcode: replaces the object reference / managed pointer
// / native int on top of the operand stack with the address of the named instance
// field within it. The result is a byref, except when computed from a native int,
// in which case it stays a native int.
7613 void Interpreter::LdFldA()
7621     unsigned tok = getU4LittleEndian(m_ILCodePtr + sizeof(BYTE));
7624     InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_LdFldA]);
7625 #endif // INTERP_TRACING
7627     unsigned offset = CurOffset();
7628     m_ILCodePtr += 5;  // Last use above, so update now.
7630     FieldDesc* fld = NULL;
7631     if (s_InterpreterUseCaching) fld = GetCachedInstanceField(offset);
7635         fld = FindField(tok InterpTracingArg(RTK_LdFldA));
7636         if (s_InterpreterUseCaching) CacheInstanceField(offset, fld);
7638     _ASSERTE(m_curStackHt > 0);
7639     unsigned stackInd = m_curStackHt - 1;
7640     CorInfoType addrCit = OpStackTypeGet(stackInd).ToCorInfoType();
7641     if (addrCit == CORINFO_TYPE_BYREF || addrCit == CORINFO_TYPE_CLASS || addrCit == CORINFO_TYPE_NATIVEINT)
7643         NativeInt ptr = OpStackGet<NativeInt>(stackInd);
7644         ThrowOnInvalidPointer((void*)ptr);
7645         // The "offset" below does not include the Object (i.e., the MethodTable pointer) for object pointers, so add that in first.
7646         if (addrCit == CORINFO_TYPE_CLASS) ptr += sizeof(Object);
7647         // Now add the offset.
7648         ptr += fld->GetOffset();
7649         OpStackSet<NativeInt>(stackInd, ptr);
7650         if (addrCit == CORINFO_TYPE_NATIVEINT)
7652             OpStackTypeSet(stackInd, InterpreterType(CORINFO_TYPE_NATIVEINT));
7656             OpStackTypeSet(stackInd, InterpreterType(CORINFO_TYPE_BYREF));
7661         VerificationError("LdfldA requires object reference, managed or unmanaged pointer type.");
// Implements the 'stfld' IL opcode: pops a value and an object reference (or
// byref / native int destination) and stores the value into the named
// instance field, with write barriers for GC references and struct copies.
// NOTE(review): braces and switch labels are elided in this view; the size
// switch arms below correspond to field sizes 1/2/4/8 — confirm in full source.
7665 void Interpreter::StFld()
7674 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_StFld]);
7675 #endif // INTERP_TRACING
// Resolve the FieldDesc, preferring the per-IL-offset cache.
7677 FieldDesc* fld = NULL;
7680 unsigned ilOffset = CurOffset();
7681 if (s_InterpreterUseCaching) fld = GetCachedInstanceField(ilOffset);
7684 unsigned tok = getU4LittleEndian(m_ILCodePtr + sizeof(BYTE));
7686 fld = FindField(tok InterpTracingArg(RTK_StFld));
7687 _ASSERTE(fld != NULL);
7688 fldOffset = fld->GetOffset();
// Only cache fields with real (non-sentinel) offsets.
7689 if (s_InterpreterUseCaching && fldOffset < FIELD_OFFSET_LAST_REAL_OFFSET)
7690 CacheInstanceField(ilOffset, fld);
7694 fldOffset = fld->GetOffset();
7697 m_ILCodePtr += 5; // Last use above, so update now.
7699 UINT sz = fld->GetSize();
// Operand stack: [... obj/addr, value] -- value on top.
7700 _ASSERTE(m_curStackHt >= 2);
7701 unsigned addrInd = m_curStackHt - 2;
7702 CorInfoType addrCit = OpStackTypeGet(addrInd).ToCorInfoType();
7703 unsigned valInd = m_curStackHt - 1;
7704 CorInfoType valCit = OpStackTypeGet(valInd).ToCorInfoType();
7705 _ASSERTE(IsStackNormalType(addrCit) && IsStackNormalType(valCit));
// Case 1: destination is an object reference -- use FieldDesc helpers so GC
// write barriers are applied.
7709 if (addrCit == CORINFO_TYPE_CLASS)
7711 OBJECTREF obj = OBJECTREF(OpStackGet<Object*>(addrInd));
7712 ThrowOnInvalidPointer(OBJECTREFToObject(obj));
7714 if (valCit == CORINFO_TYPE_CLASS)
7716 fld->SetRefValue(obj, ObjectToOBJECTREF(OpStackGet<Object*>(valInd)));
7718 else if (valCit == CORINFO_TYPE_VALUECLASS)
7720 MethodTable* valClsMT = GetMethodTableFromClsHnd(OpStackTypeGet(valInd).ToClassHandle());
7721 void* destPtr = fld->GetInstanceAddress(obj);
7723 // destPtr is now a vulnerable byref, so can't do GC.
7726 // I use GCSafeMemCpy below to ensure that write barriers happen for the case in which
7727 // the value class contains GC pointers. We could do better...
7728 if (sz > sizeof(INT64))
7730 // Large struct case: stack slot contains pointer...
7731 void* srcPtr = OpStackGet<void*>(valInd);
7732 CopyValueClassUnchecked(destPtr, srcPtr, valClsMT);
7733 LargeStructOperandStackPop(sz, srcPtr);
7737 // Small struct case -- is inline in operand stack.
7738 CopyValueClassUnchecked(destPtr, OpStackGetAddr(valInd, sz), valClsMT);
7740 BarrierIfVolatile();
// Primitive-valued field in an object: compute the raw field address
// (fldOffset excludes the MethodTable pointer, hence + sizeof(Object)).
7745 BYTE* fldStart = dac_cast<PTR_BYTE>(OBJECTREFToObject(obj)) + sizeof(Object) + fldOffset;
7746 // fldStart is now a vulnerable byref
7752 *reinterpret_cast<INT8*>(fldStart) = OpStackGet<INT8>(valInd);
7755 *reinterpret_cast<INT16*>(fldStart) = OpStackGet<INT16>(valInd);
7758 *reinterpret_cast<INT32*>(fldStart) = OpStackGet<INT32>(valInd);
7761 *reinterpret_cast<INT64*>(fldStart) = OpStackGet<INT64>(valInd);
// Case 2: destination is a byref or unmanaged pointer.
7768 _ASSERTE(addrCit == CORINFO_TYPE_BYREF || addrCit == CORINFO_TYPE_NATIVEINT);
7770 INT8* destPtr = OpStackGet<INT8*>(addrInd);
7771 ThrowOnInvalidPointer(destPtr);
7772 destPtr += fldOffset;
7774 if (valCit == CORINFO_TYPE_VALUECLASS)
7776 MethodTable* valClsMT = GetMethodTableFromClsHnd(OpStackTypeGet(valInd).ToClassHandle());
7777 // I use GCSafeMemCpy below to ensure that write barriers happen for the case in which
7778 // the value class contains GC pointers. We could do better...
7779 if (sz > sizeof(INT64))
7781 // Large struct case: stack slot contains pointer...
7782 void* srcPtr = OpStackGet<void*>(valInd);
7783 CopyValueClassUnchecked(destPtr, srcPtr, valClsMT);
7784 LargeStructOperandStackPop(sz, srcPtr);
7788 // Small struct case -- is inline in operand stack.
7789 CopyValueClassUnchecked(destPtr, OpStackGetAddr(valInd, sz), valClsMT);
7791 BarrierIfVolatile();
7794 else if (valCit == CORINFO_TYPE_CLASS)
// Storing an object reference through a byref: must use SetObjectReference
// so the GC write barrier fires.
7796 OBJECTREF val = ObjectToOBJECTREF(OpStackGet<Object*>(valInd));
7797 SetObjectReference(reinterpret_cast<OBJECTREF*>(destPtr), val);
7804 *reinterpret_cast<INT8*>(destPtr) = OpStackGet<INT8>(valInd);
7807 *reinterpret_cast<INT16*>(destPtr) = OpStackGet<INT16>(valInd);
7810 *reinterpret_cast<INT32*>(destPtr) = OpStackGet<INT32>(valInd);
7813 *reinterpret_cast<INT64*>(destPtr) = OpStackGet<INT64>(valInd);
7818 BarrierIfVolatile();
// Shared worker for ldsfld/ldsflda/stsfld: resolves the static field token at
// the current IL pointer, ensures the declaring class is initialized, and
// returns (via out-params) the field's address, interpreter type, size, and
// whether it lives in managed memory. Returns whether the result is cacheable
// (false for shared/generics statics and boxed value-type statics, whose
// addresses may not be stable enough to cache).
7821 bool Interpreter::StaticFldAddrWork(CORINFO_ACCESS_FLAGS accessFlgs, /*out (byref)*/void** pStaticFieldAddr, /*out*/InterpreterType* pit, /*out*/UINT* pFldSize, /*out*/bool* pManagedMem)
7829 bool isCacheable = true;
7830 *pManagedMem = true; // Default result.
7832 unsigned tok = getU4LittleEndian(m_ILCodePtr + sizeof(BYTE));
7833 m_ILCodePtr += 5; // Above is last use of m_ILCodePtr in this method, so update now.
7836 CORINFO_FIELD_INFO fldInfo;
7837 CORINFO_RESOLVED_TOKEN fldTok;
7839 void* pFldAddr = NULL;
7844 ResolveToken(&fldTok, tok, CORINFO_TOKENKIND_Field InterpTracingArg(RTK_SFldAddr));
7845 fld = reinterpret_cast<FieldDesc*>(fldTok.hField);
7847 m_interpCeeInfo.getFieldInfo(&fldTok, m_methInfo->m_method, accessFlgs, &fldInfo);
// Accessing a static may trigger the class constructor.
7850 EnsureClassInit(GetMethodTableFromClsHnd(fldTok.hClass));
7852 if ((fldInfo.fieldAccessor == CORINFO_FIELD_STATIC_TLS) || (fldInfo.fieldAccessor == CORINFO_FIELD_STATIC_TLS_MANAGED))
7854 NYI_INTERP("Thread-local static.");
7856 else if (fldInfo.fieldAccessor == CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER
7857 || fldInfo.fieldAccessor == CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER)
7859 *pStaticFieldAddr = fld->GetCurrentStaticAddress();
7860 isCacheable = false;
7864 *pStaticFieldAddr = fld->GetCurrentStaticAddress();
// Value-type statics (other than pointer-like ones) need the full struct type.
7867 if (fldInfo.structType != NULL && fldInfo.fieldType != CORINFO_TYPE_CLASS && fldInfo.fieldType != CORINFO_TYPE_PTR)
7869 *pit = InterpreterType(&m_interpCeeInfo, fldInfo.structType);
7871 if ((fldInfo.fieldFlags & CORINFO_FLG_FIELD_UNMANAGED) == 0)
7873 // For valuetypes in managed memory, the address returned contains a pointer into the heap, to a boxed version of the
7874 // static variable; return a pointer to the boxed struct.
7875 isCacheable = false;
7879 *pManagedMem = false;
7884 *pit = InterpreterType(fldInfo.fieldType);
7886 *pFldSize = fld->GetSize();
// Implements the 'ldsfld' IL opcode: pushes the value of a static field onto
// the operand stack. Struct-typed fields are copied (large structs onto the
// large-struct stack); primitives are loaded with widening of sub-int types.
// NOTE(review): switch labels for the size/type dispatch are elided in this view.
7891 void Interpreter::LdSFld()
7899 InterpreterType fldIt;
7902 void* srcPtr = NULL;
7904 BarrierIfVolatile();
// srcPtr may point into the GC heap (boxed value-type static), so it must be
// GC-protected across the helper call below.
7906 GCPROTECT_BEGININTERIOR(srcPtr);
7908 StaticFldAddr(CORINFO_ACCESS_GET, &srcPtr, &fldIt, &sz, &managedMem);
7912 if (fldIt.IsStruct())
7914 // Large struct case.
7915 CORINFO_CLASS_HANDLE sh = fldIt.ToClassHandle();
7916 // This call is GC_TRIGGERS, so do it before we copy the value: no GC after this,
7917 // until the op stacks and ht are consistent.
7918 OpStackTypeSet(m_curStackHt, InterpreterType(&m_interpCeeInfo, sh).StackNormalize());
7919 if (fldIt.IsLargeStruct(&m_interpCeeInfo))
7921 void* dstPtr = LargeStructOperandStackPush(sz);
7922 memcpy(dstPtr, srcPtr, sz);
7923 OpStackSet<void*>(m_curStackHt, dstPtr);
7927 OpStackSet<INT64>(m_curStackHt, GetSmallStructValue(srcPtr, sz));
// Primitive case: load by size, zero- or sign-extending narrow values.
7932 CorInfoType valCit = fldIt.ToCorInfoType();
7936 isUnsigned = CorInfoTypeIsUnsigned(valCit);
7939 OpStackSet<UINT32>(m_curStackHt, *reinterpret_cast<UINT8*>(srcPtr));
7943 OpStackSet<INT32>(m_curStackHt, *reinterpret_cast<INT8*>(srcPtr));
7947 isUnsigned = CorInfoTypeIsUnsigned(valCit);
7950 OpStackSet<UINT32>(m_curStackHt, *reinterpret_cast<UINT16*>(srcPtr));
7954 OpStackSet<INT32>(m_curStackHt, *reinterpret_cast<INT16*>(srcPtr));
7958 OpStackSet<INT32>(m_curStackHt, *reinterpret_cast<INT32*>(srcPtr));
7961 OpStackSet<INT64>(m_curStackHt, *reinterpret_cast<INT64*>(srcPtr));
7964 _ASSERTE_MSG(false, "LdSFld: this should have exhausted all the possible sizes.");
7967 OpStackTypeSet(m_curStackHt, fldIt.StackNormalize());
// Runs the class constructor (cctor) for pMT if it has not yet been run.
// No-op when the class is already initialized. The cctor execution is treated
// like a call and excluded from the interpreter's IL cycle-count profile.
7973 void Interpreter::EnsureClassInit(MethodTable* pMT)
7975 if (!pMT->IsClassInited())
7977 pMT->CheckRestore();
7978 // This is tantamount to a call, so exempt it from the cycle count.
7979 #if INTERP_ILCYCLE_PROFILE
7980 unsigned __int64 startCycles;
7981 bool b = CycleTimer::GetThreadCyclesS(&startCycles); _ASSERTE(b);
7982 #endif // INTERP_ILCYCLE_PROFILE
// May throw (e.g. TypeInitializationException) if the cctor fails.
7984 pMT->CheckRunClassInitThrowing();
7986 #if INTERP_ILCYCLE_PROFILE
7987 unsigned __int64 endCycles;
7988 b = CycleTimer::GetThreadCyclesS(&endCycles); _ASSERTE(b);
7989 m_exemptCycles += (endCycles - startCycles);
7990 #endif // INTERP_ILCYCLE_PROFILE
// Implements the 'ldsflda' IL opcode: pushes the address of a static field.
// The stack type is BYREF for managed-memory statics and NATIVEINT for
// unmanaged (e.g. RVA-based) statics.
7994 void Interpreter::LdSFldA()
8002 InterpreterType fldIt;
8005 void* srcPtr = NULL;
// Protect the interior pointer across the (GC_TRIGGERS) address computation.
8006 GCPROTECT_BEGININTERIOR(srcPtr);
8008 StaticFldAddr(CORINFO_ACCESS_ADDRESS, &srcPtr, &fldIt, &fldSz, &managedMem);
8010 OpStackSet<void*>(m_curStackHt, srcPtr);
8013 // Static variable in managed memory...
8014 OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_BYREF));
8018 // RVA is in unmanaged memory.
8019 OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_NATIVEINT));
// Implements the 'stsfld' IL opcode: pops the top operand-stack value and
// stores it into a static field. Object references go through
// SetObjectReference (write barrier); structs through CopyValueClassUnchecked.
// NOTE(review): the size-switch labels (1/2/4/8 bytes) are elided in this view.
8026 void Interpreter::StSFld()
8033 InterpreterType fldIt;
8036 void* dstPtr = NULL;
// dstPtr may be an interior pointer into the GC heap; protect it while
// resolving the field (which can trigger GC via class init).
8037 GCPROTECT_BEGININTERIOR(dstPtr);
8039 StaticFldAddr(CORINFO_ACCESS_SET, &dstPtr, &fldIt, &sz, &managedMem);
8042 InterpreterType valIt = OpStackTypeGet(m_curStackHt);
8043 CorInfoType valCit = valIt.ToCorInfoType();
8045 if (valCit == CORINFO_TYPE_VALUECLASS)
8047 MethodTable* valClsMT = GetMethodTableFromClsHnd(valIt.ToClassHandle());
8048 if (sz > sizeof(INT64))
8050 // Large struct case: value in operand stack is indirect pointer.
8051 void* srcPtr = OpStackGet<void*>(m_curStackHt);
8052 CopyValueClassUnchecked(dstPtr, srcPtr, valClsMT);
8053 LargeStructOperandStackPop(sz, srcPtr);
8057 // Struct value is inline in the operand stack.
8058 CopyValueClassUnchecked(dstPtr, OpStackGetAddr(m_curStackHt, sz), valClsMT);
8061 else if (valCit == CORINFO_TYPE_CLASS)
8063 SetObjectReference(reinterpret_cast<OBJECTREF*>(dstPtr), ObjectToOBJECTREF(OpStackGet<Object*>(m_curStackHt)));
8070 *reinterpret_cast<UINT8*>(dstPtr) = OpStackGet<UINT8>(m_curStackHt);
8073 *reinterpret_cast<UINT16*>(dstPtr) = OpStackGet<UINT16>(m_curStackHt);
8076 *reinterpret_cast<UINT32*>(dstPtr) = OpStackGet<UINT32>(m_curStackHt);
8079 *reinterpret_cast<UINT64*>(dstPtr) = OpStackGet<UINT64>(m_curStackHt);
8082 _ASSERTE_MSG(false, "This should have exhausted all the possible sizes.");
8088 BarrierIfVolatile();
// Implements the typed 'ldelem.*' IL opcodes (ldelem.i1/u1/.../ref) for
// element type T. IsObjType selects the object-reference path; cit is the
// stack-normal CorInfoType pushed as the result. Narrow integral types are
// widened to INT32 on the operand stack, choosing sign/zero extension from T.
8091 template<typename T, bool IsObjType, CorInfoType cit>
8092 void Interpreter::LdElemWithType()
// Stack: [... array, index].
8100 _ASSERTE(m_curStackHt >= 2);
8101 unsigned arrInd = m_curStackHt - 2;
8102 unsigned indexInd = m_curStackHt - 1;
8104 _ASSERTE(OpStackTypeGet(arrInd).ToCorInfoType() == CORINFO_TYPE_CLASS);
8106 ArrayBase* a = OpStackGet<ArrayBase*>(arrInd);
8107 ThrowOnInvalidPointer(a);
8108 int len = a->GetNumComponents();
// The index may be an INT32 or a native int; both paths bounds-check.
8110 CorInfoType indexCit = OpStackTypeGet(indexInd).ToCorInfoType();
8111 if (indexCit == CORINFO_TYPE_INT)
8113 int index = OpStackGet<INT32>(indexInd);
8114 if (index < 0 || index >= len) ThrowArrayBoundsException();
8120 OBJECTREF res = reinterpret_cast<PtrArray*>(a)->GetAt(index);
8121 OpStackSet<OBJECTREF>(arrInd, res);
8125 intptr_t res_ptr = reinterpret_cast<intptr_t>(reinterpret_cast<Array<T>*>(a)->GetDirectConstPointerToNonObjectElements());
8126 if (cit == CORINFO_TYPE_INT)
8128 _ASSERTE(std::is_integral<T>::value);
8130 // Widen narrow types.
// Dispatch on sizeof(T); signedness of T picks sign- vs zero-extension.
8135 ires = std::is_same<T, INT8>::value ?
8136 static_cast<int>(reinterpret_cast<INT8*>(res_ptr)[index]) :
8137 static_cast<int>(reinterpret_cast<UINT8*>(res_ptr)[index]);
8140 ires = std::is_same<T, INT16>::value ?
8141 static_cast<int>(reinterpret_cast<INT16*>(res_ptr)[index]) :
8142 static_cast<int>(reinterpret_cast<UINT16*>(res_ptr)[index]);
8145 ires = std::is_same<T, INT32>::value ?
8146 static_cast<int>(reinterpret_cast<INT32*>(res_ptr)[index]) :
8147 static_cast<int>(reinterpret_cast<UINT32*>(res_ptr)[index]);
8150 _ASSERTE_MSG(false, "This should have exhausted all the possible sizes.");
8154 OpStackSet<int>(arrInd, ires);
8158 OpStackSet<T>(arrInd, ((T*) res_ptr)[index]);
// Native-int index path: same logic with a wider bounds check.
8164 _ASSERTE(indexCit == CORINFO_TYPE_NATIVEINT);
8165 NativeInt index = OpStackGet<NativeInt>(indexInd);
8166 if (index < 0 || index >= NativeInt(len)) ThrowArrayBoundsException();
8172 OBJECTREF res = reinterpret_cast<PtrArray*>(a)->GetAt(index);
8173 OpStackSet<OBJECTREF>(arrInd, res);
8177 T res = reinterpret_cast<Array<T>*>(a)->GetDirectConstPointerToNonObjectElements()[index];
8178 OpStackSet<T>(arrInd, res);
// Result replaces the array slot; caller logic (elided) pops the index.
8182 OpStackTypeSet(arrInd, InterpreterType(cit));
// Implements the typed 'stelem.*' IL opcodes for element type T. Pops
// [array, index, value]; bounds-checks, and for object arrays performs the
// covariance (array-type-mismatch) check before storing via SetAt (which
// applies the GC write barrier).
8186 template<typename T, bool IsObjType>
8187 void Interpreter::StElemWithType()
// Stack: [... array, index, value].
8196 _ASSERTE(m_curStackHt >= 3);
8197 unsigned arrInd = m_curStackHt - 3;
8198 unsigned indexInd = m_curStackHt - 2;
8199 unsigned valInd = m_curStackHt - 1;
8201 _ASSERTE(OpStackTypeGet(arrInd).ToCorInfoType() == CORINFO_TYPE_CLASS);
8203 ArrayBase* a = OpStackGet<ArrayBase*>(arrInd);
8204 ThrowOnInvalidPointer(a);
8205 int len = a->GetNumComponents();
8207 CorInfoType indexCit = OpStackTypeGet(indexInd).ToCorInfoType();
8208 if (indexCit == CORINFO_TYPE_INT)
8210 int index = OpStackGet<INT32>(indexInd);
8211 if (index < 0 || index >= len) ThrowArrayBoundsException();
// Object-element path: GC-protect value and array across the type check,
// which can trigger GC.
8218 gc.val = ObjectToOBJECTREF(OpStackGet<Object*>(valInd));
8219 gc.a = ObjectToOBJECTREF(a);
8220 GCPROTECT_BEGIN(gc);
8221 if (gc.val != NULL &&
8222 !ObjIsInstanceOf(OBJECTREFToObject(gc.val), reinterpret_cast<PtrArray*>(a)->GetArrayElementTypeHandle()))
8223 COMPlusThrow(kArrayTypeMismatchException);
8224 reinterpret_cast<PtrArray*>(OBJECTREFToObject(gc.a))->SetAt(index, gc.val);
// Primitive-element path: direct store into the data area.
8230 T val = OpStackGet<T>(valInd);
8231 reinterpret_cast<Array<T>*>(a)->GetDirectPointerToNonObjectElements()[index] = val;
// Native-int index variant of the same two paths.
8236 _ASSERTE(indexCit == CORINFO_TYPE_NATIVEINT);
8237 NativeInt index = OpStackGet<NativeInt>(indexInd);
8238 if (index < 0 || index >= NativeInt(len)) ThrowArrayBoundsException();
8245 gc.val = ObjectToOBJECTREF(OpStackGet<Object*>(valInd));
8246 gc.a = ObjectToOBJECTREF(a);
8247 GCPROTECT_BEGIN(gc);
8248 if (gc.val != NULL &&
8249 !ObjIsInstanceOf(OBJECTREFToObject(gc.val), reinterpret_cast<PtrArray*>(a)->GetArrayElementTypeHandle()))
8250 COMPlusThrow(kArrayTypeMismatchException);
8251 reinterpret_cast<PtrArray*>(OBJECTREFToObject(gc.a))->SetAt(index, gc.val);
8257 T val = OpStackGet<T>(valInd);
8258 reinterpret_cast<Array<T>*>(a)->GetDirectPointerToNonObjectElements()[index] = val;
// Implements the token-based 'ldelem'/'ldelema' IL opcodes. takeAddress=true
// is 'ldelema' (push a byref to the element, with an exact-type check unless
// the readonly. prefix was seen); takeAddress=false is 'ldelem' (load the
// element value via LdFromMemAddr).
8265 template<bool takeAddress>
8266 void Interpreter::LdElem()
// Stack: [... array, index].
8274 _ASSERTE(m_curStackHt >= 2);
8275 unsigned arrInd = m_curStackHt - 2;
8276 unsigned indexInd = m_curStackHt - 1;
8278 unsigned elemTypeTok = getU4LittleEndian(m_ILCodePtr + 1);
8281 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_LdElem]);
8282 #endif // INTERP_TRACING
// Resolve the element-type token, preferring the per-IL-offset class cache.
8284 unsigned ilOffset = CurOffset();
8285 CORINFO_CLASS_HANDLE clsHnd = NULL;
8286 if (s_InterpreterUseCaching) clsHnd = GetCachedClassHandle(ilOffset);
8291 CORINFO_RESOLVED_TOKEN elemTypeResolvedTok;
8294 ResolveToken(&elemTypeResolvedTok, elemTypeTok, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_LdElem));
8295 clsHnd = elemTypeResolvedTok.hClass;
8297 if (s_InterpreterUseCaching) CacheClassHandle(ilOffset, clsHnd);
8300 CorInfoType elemCit = ::asCorInfoType(clsHnd);
8305 InterpreterType elemIt;
8306 if (elemCit == CORINFO_TYPE_VALUECLASS)
8308 elemIt = InterpreterType(&m_interpCeeInfo, clsHnd);
8312 elemIt = InterpreterType(elemCit);
8315 _ASSERTE(OpStackTypeGet(arrInd).ToCorInfoType() == CORINFO_TYPE_CLASS);
8318 ArrayBase* a = OpStackGet<ArrayBase*>(arrInd);
8319 ThrowOnInvalidPointer(a);
8320 int len = a->GetNumComponents();
8326 CorInfoType indexCit = OpStackTypeGet(indexInd).ToCorInfoType();
8327 if (indexCit == CORINFO_TYPE_INT)
8329 index = static_cast<NativeInt>(OpStackGet<INT32>(indexInd));
8333 _ASSERTE(indexCit == CORINFO_TYPE_NATIVEINT);
8334 index = OpStackGet<NativeInt>(indexInd);
8337 if (index < 0 || index >= len) ThrowArrayBoundsException();
// BUG? 'NULL' used to initialize a bool (should be 'false'); works because
// NULL converts to false, but fix in full source.
8339 bool throwTypeMismatch = NULL;
8341 void* elemPtr = a->GetDataPtr() + a->GetComponentSize() * index;
8342 // elemPtr is now a vulnerable byref.
8347 // If the element type is a class type, may have to do a type check.
8348 if (elemCit == CORINFO_TYPE_CLASS)
8350 // Unless there was a readonly prefix, which removes the need to
8351 // do the (dynamic) type check.
8354 // Consume the readonly prefix, and don't do the type check below.
8355 m_readonlyFlag = false;
8359 PtrArray* pa = reinterpret_cast<PtrArray*>(a);
8360 // The element array type must be exactly the referent type of the managed
8361 // pointer we'll be creating.
8362 if (pa->GetArrayElementTypeHandle() != TypeHandle(clsHnd))
8364 throwTypeMismatch = true;
8368 if (!throwTypeMismatch)
8370 // If we're not going to throw the exception, we can take the address.
8371 OpStackSet<void*>(arrInd, elemPtr);
8372 OpStackTypeSet(arrInd, InterpreterType(CORINFO_TYPE_BYREF));
// ldelem (value) path: load from the element address.
8379 LdFromMemAddr(elemPtr, elemIt);
8384 // If we're going to throw, we do the throw outside the GCX_FORBID region above, since it requires GC_TRIGGERS.
8385 if (throwTypeMismatch)
8387 COMPlusThrow(kArrayTypeMismatchException);
// Implements the token-based 'stelem' IL opcode: pops [array, index, value],
// resolves the element type token, verifies the value's stack-normal type,
// bounds-checks, and stores (with covariance check for object elements and
// write-barrier-safe copies for value classes).
8391 void Interpreter::StElem()
8399 _ASSERTE(m_curStackHt >= 3);
8400 unsigned arrInd = m_curStackHt - 3;
8401 unsigned indexInd = m_curStackHt - 2;
8402 unsigned valInd = m_curStackHt - 1;
8404 CorInfoType valCit = OpStackTypeGet(valInd).ToCorInfoType();
8407 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_StElem]);
8408 #endif // INTERP_TRACING
8410 CORINFO_CLASS_HANDLE typeFromTok = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_StElem));
8414 CorInfoType typeFromTokCit;
8417 typeFromTokCit = ::asCorInfoType(typeFromTok);
8422 InterpreterType typeFromTokIt;
// Element size: struct types need getClassSize; primitives use the fixed size.
8425 if (typeFromTokCit == CORINFO_TYPE_VALUECLASS)
8428 sz = getClassSize(typeFromTok);
8430 typeFromTokIt = InterpreterType(&m_interpCeeInfo, typeFromTok);
8435 sz = CorInfoTypeSize(typeFromTokCit);
8437 typeFromTokIt = InterpreterType(typeFromTokCit);
8442 // Instead of debug, I need to parameterize the interpreter at the top level over whether
8443 // to do checks corresponding to verification.
8444 if (typeFromTokIt.StackNormalize().ToCorInfoType() != valCit)
8446 // This is obviously only a partial test of the required condition.
8447 VerificationError("Value in stelem does not have the required type.");
8451 _ASSERTE(OpStackTypeGet(arrInd).ToCorInfoType() == CORINFO_TYPE_CLASS);
8453 ArrayBase* a = OpStackGet<ArrayBase*>(arrInd);
8454 ThrowOnInvalidPointer(a);
8455 int len = a->GetNumComponents();
8457 CorInfoType indexCit = OpStackTypeGet(indexInd).ToCorInfoType();
8458 NativeInt index = 0;
8459 if (indexCit == CORINFO_TYPE_INT)
8461 index = static_cast<NativeInt>(OpStackGet<INT32>(indexInd));
8465 index = OpStackGet<NativeInt>(indexInd);
8468 if (index < 0 || index >= len) ThrowArrayBoundsException();
// Object-reference element: GC-protect operands, do the covariance check,
// then store via SetAt (write barrier).
8470 if (typeFromTokCit == CORINFO_TYPE_CLASS)
8476 gc.val = ObjectToOBJECTREF(OpStackGet<Object*>(valInd));
8477 gc.a = ObjectToOBJECTREF(a);
8478 GCPROTECT_BEGIN(gc);
8479 if (gc.val != NULL &&
8480 !ObjIsInstanceOf(OBJECTREFToObject(gc.val), reinterpret_cast<PtrArray*>(a)->GetArrayElementTypeHandle()))
8481 COMPlusThrow(kArrayTypeMismatchException);
8482 reinterpret_cast<PtrArray*>(OBJECTREFToObject(gc.a))->SetAt(index, gc.val);
// Non-object element: compute the raw destination address.
// STYLE: stray double semicolon at end of next line -- harmless; fix in full source.
8489 void* destPtr = a->GetDataPtr() + index * sz;;
8491 if (typeFromTokCit == CORINFO_TYPE_VALUECLASS)
8493 MethodTable* valClsMT = GetMethodTableFromClsHnd(OpStackTypeGet(valInd).ToClassHandle());
8494 // I use GCSafeMemCpy below to ensure that write barriers happen for the case in which
8495 // the value class contains GC pointers. We could do better...
8496 if (sz > sizeof(UINT64))
8498 // Large struct case: stack slot contains pointer...
8499 void* src = OpStackGet<void*>(valInd);
8500 CopyValueClassUnchecked(destPtr, src, valClsMT);
8501 LargeStructOperandStackPop(sz, src);
8505 // Small struct case -- is inline in operand stack.
8506 CopyValueClassUnchecked(destPtr, OpStackGetAddr(valInd, sz), valClsMT);
// Primitive sizes 1/2/4/8 (switch labels elided in this view).
8514 *reinterpret_cast<INT8*>(destPtr) = OpStackGet<INT8>(valInd);
8517 *reinterpret_cast<INT16*>(destPtr) = OpStackGet<INT16>(valInd);
8520 *reinterpret_cast<INT32*>(destPtr) = OpStackGet<INT32>(valInd);
8523 *reinterpret_cast<INT64*>(destPtr) = OpStackGet<INT64>(valInd);
// Implements the 'initblk' IL opcode: pops [addr, value, size] and fills
// 'size' bytes at 'addr' with the low byte of 'value'. Loose-rules mode
// additionally tolerates LONG-typed addr (AMD64) and size operands.
8532 void Interpreter::InitBlk()
8540 _ASSERTE(m_curStackHt >= 3);
8541 unsigned addrInd = m_curStackHt - 3;
8542 unsigned valInd = m_curStackHt - 2;
8543 unsigned sizeInd = m_curStackHt - 1;
// Verification: address must be native int or byref (or LONG under loose rules).
8546 CorInfoType addrCIT = OpStackTypeGet(addrInd).ToCorInfoType();
8547 bool addrValidType = (addrCIT == CORINFO_TYPE_NATIVEINT || addrCIT == CORINFO_TYPE_BYREF);
8548 #if defined(HOST_AMD64)
8549 if (s_InterpreterLooseRules && addrCIT == CORINFO_TYPE_LONG)
8550 addrValidType = true;
8553 VerificationError("Addr of InitBlk must be native int or &.");
8555 CorInfoType valCIT = OpStackTypeGet(valInd).ToCorInfoType();
8556 if (valCIT != CORINFO_TYPE_INT)
8557 VerificationError("Value of InitBlk must be int");
8561 CorInfoType sizeCIT = OpStackTypeGet(sizeInd).ToCorInfoType();
8562 bool isLong = s_InterpreterLooseRules && (sizeCIT == CORINFO_TYPE_LONG);
8565 if (sizeCIT != CORINFO_TYPE_INT && !isLong)
8566 VerificationError("Size of InitBlk must be int");
8569 void* addr = OpStackGet<void*>(addrInd);
8570 ThrowOnInvalidPointer(addr);
8571 GCX_FORBID(); // addr is a potentially vulnerable byref.
8572 INT8 val = OpStackGet<INT8>(valInd);
8573 size_t size = (size_t) ((isLong) ? OpStackGet<UINT64>(sizeInd) : OpStackGet<UINT32>(sizeInd));
8574 memset(addr, val, size);
// Pop all three operands.
8576 m_curStackHt = addrInd;
8579 BarrierIfVolatile();
// Implements the 'cpblk' IL opcode: pops [dest, src, size] and copies 'size'
// bytes from src to dest with memcpyNoGCRefs (no write barriers -- the copy
// is raw bytes, performed inside a GC-forbidden region). Loose-rules mode
// tolerates LONG-typed address (AMD64) and size operands.
8582 void Interpreter::CpBlk()
8590 _ASSERTE(m_curStackHt >= 3);
8591 unsigned destInd = m_curStackHt - 3;
8592 unsigned srcInd = m_curStackHt - 2;
8593 unsigned sizeInd = m_curStackHt - 1;
// Verification: both addresses must be native int or byref.
8596 CorInfoType destCIT = OpStackTypeGet(destInd).ToCorInfoType();
8597 bool destValidType = (destCIT == CORINFO_TYPE_NATIVEINT || destCIT == CORINFO_TYPE_BYREF);
8598 #if defined(HOST_AMD64)
8599 if (s_InterpreterLooseRules && destCIT == CORINFO_TYPE_LONG)
8600 destValidType = true;
8604 VerificationError("Dest addr of CpBlk must be native int or &.");
8606 CorInfoType srcCIT = OpStackTypeGet(srcInd).ToCorInfoType();
8607 bool srcValidType = (srcCIT == CORINFO_TYPE_NATIVEINT || srcCIT == CORINFO_TYPE_BYREF);
8608 #if defined(HOST_AMD64)
8609 if (s_InterpreterLooseRules && srcCIT == CORINFO_TYPE_LONG)
8610 srcValidType = true;
8613 VerificationError("Src addr of CpBlk must be native int or &.");
8616 CorInfoType sizeCIT = OpStackTypeGet(sizeInd).ToCorInfoType();
8617 bool isLong = s_InterpreterLooseRules && (sizeCIT == CORINFO_TYPE_LONG);
8620 if (sizeCIT != CORINFO_TYPE_INT && !isLong)
8621 VerificationError("Size of CpBlk must be int");
8625 void* destAddr = OpStackGet<void*>(destInd);
8626 void* srcAddr = OpStackGet<void*>(srcInd);
8627 ThrowOnInvalidPointer(destAddr);
8628 ThrowOnInvalidPointer(srcAddr);
8629 GCX_FORBID(); // destAddr & srcAddr are potentially vulnerable byrefs.
8630 size_t size = (size_t)((isLong) ? OpStackGet<UINT64>(sizeInd) : OpStackGet<UINT32>(sizeInd));
8631 memcpyNoGCRefs(destAddr, srcAddr, size);
// Pop all three operands.
8633 m_curStackHt = destInd;
8636 BarrierIfVolatile();
// Implements the 'box' IL opcode: if the token names a value class, allocates
// a boxed copy of the value on top of the operand stack (via MethodTable::Box)
// and replaces the stack entry with the object reference. Non-value-class
// tokens leave the stack unchanged (boxing a reference type is a no-op).
8639 void Interpreter::Box()
8647 _ASSERTE(m_curStackHt >= 1);
8648 unsigned ind = m_curStackHt - 1;
8650 DWORD boxTypeAttribs = 0;
8653 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_Box]);
8654 #endif // INTERP_TRACING
8656 CORINFO_CLASS_HANDLE boxTypeClsHnd = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_Box));
8660 boxTypeAttribs = m_interpCeeInfo.getClassAttribs(boxTypeClsHnd);
8665 if (boxTypeAttribs & CORINFO_FLG_VALUECLASS)
8667 InterpreterType valIt = OpStackTypeGet(ind);
// Source of the value: large structs are referenced indirectly from the
// operand stack; small ones live inline in the stack slot.
8670 if (valIt.IsLargeStruct(&m_interpCeeInfo))
8672 // Operand stack entry is pointer to the data.
8673 valPtr = OpStackGet<void*>(ind);
8677 // Operand stack entry *is* the data.
8678 size_t classSize = getClassSize(boxTypeClsHnd);
8679 valPtr = OpStackGetAddr(ind, classSize);
8682 TypeHandle th(boxTypeClsHnd);
8683 if (th.IsTypeDesc())
8685 COMPlusThrow(kInvalidOperationException, W("InvalidOperation_TypeCannotBeBoxed"));
8688 MethodTable* pMT = th.AsMethodTable();
// Box allocates on the GC heap and copies the value in (may trigger GC).
8691 Object* res = OBJECTREFToObject(pMT->Box(valPtr));
8695 // If we're popping a large struct off the operand stack, make sure we clean up.
8696 if (valIt.IsLargeStruct(&m_interpCeeInfo))
8698 LargeStructOperandStackPop(valIt.Size(&m_interpCeeInfo), valPtr);
8700 OpStackSet<Object*>(ind, res);
8701 OpStackTypeSet(ind, InterpreterType(CORINFO_TYPE_CLASS));
// Boxes the value class pointed to by the BYREF at operand-stack index 'ind',
// replacing that stack entry with the resulting object reference. Helper used
// when a struct must be boxed through an existing managed pointer.
// Preconditions: ind is a live stack slot of type BYREF, valCls is a value class.
8706 void Interpreter::BoxStructRefAt(unsigned ind, CORINFO_CLASS_HANDLE valCls)
8714 _ASSERTE_MSG(ind < m_curStackHt, "Precondition");
8717 _ASSERTE_MSG(m_interpCeeInfo.getClassAttribs(valCls) & CORINFO_FLG_VALUECLASS, "Precondition");
8719 _ASSERTE_MSG(OpStackTypeGet(ind).ToCorInfoType() == CORINFO_TYPE_BYREF, "Precondition");
8721 InterpreterType valIt = InterpreterType(&m_interpCeeInfo, valCls);
8723 void* valPtr = OpStackGet<void*>(ind);
8725 TypeHandle th(valCls);
// TypeDescs (pointers, byrefs, etc.) cannot be boxed.
8726 if (th.IsTypeDesc())
8727 COMPlusThrow(kInvalidOperationException,W("InvalidOperation_TypeCannotBeBoxed"));
8729 MethodTable* pMT = th.AsMethodTable();
// Allocate the box and copy the value (may trigger GC).
8732 Object* res = OBJECTREFToObject(pMT->Box(valPtr));
8736 OpStackSet<Object*>(ind, res);
8737 OpStackTypeSet(ind, InterpreterType(CORINFO_TYPE_CLASS));
// Implements the 'unbox' IL opcode: replaces the object reference on top of
// the stack with a BYREF to the boxed value's data. Exact type match is
// required, except enums are interchangeable with their underlying primitive
// type. Nullable<T> unboxing is not yet implemented.
8742 void Interpreter::Unbox()
8750 _ASSERTE(m_curStackHt > 0);
8751 unsigned tos = m_curStackHt - 1;
8754 CorInfoType tosCIT = OpStackTypeGet(tos).ToCorInfoType();
8755 if (tosCIT != CORINFO_TYPE_CLASS)
8756 VerificationError("Unbox requires that TOS is an object pointer.");
8760 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_Unbox]);
8761 #endif // INTERP_TRACING
8763 CORINFO_CLASS_HANDLE boxTypeClsHnd = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_Unbox));
// The JIT-interface helper selection tells us whether this is a plain unbox
// or the Nullable<T> variant.
8765 CorInfoHelpFunc unboxHelper;
8769 unboxHelper = m_interpCeeInfo.getUnBoxHelper(boxTypeClsHnd);
8773 Object* obj = OpStackGet<Object*>(tos);
8775 switch (unboxHelper)
8777 case CORINFO_HELP_UNBOX:
8779 ThrowOnInvalidPointer(obj);
8781 MethodTable* pMT1 = (MethodTable*)boxTypeClsHnd;
8782 MethodTable* pMT2 = obj->GetMethodTable();
// Exact (or type-equivalent) match: take the interior pointer directly.
8784 if (pMT1->IsEquivalentTo(pMT2))
8786 res = OpStackGet<Object*>(tos)->UnBox();
8790 CorElementType type1 = pMT1->GetInternalCorElementType();
8791 CorElementType type2 = pMT2->GetInternalCorElementType();
8793 // we allow enums and their primitive type to be interchangeable
8796 if ((pMT1->IsEnum() || pMT1->IsTruePrimitive()) &&
8797 (pMT2->IsEnum() || pMT2->IsTruePrimitive()))
8799 res = OpStackGet<Object*>(tos)->UnBox();
8806 COMPlusThrow(kInvalidCastException);
8811 case CORINFO_HELP_UNBOX_NULLABLE:
8813 // For "unbox Nullable<T>", we need to create a new object (maybe in some temporary local
8814 // space (that we reuse every time we hit this IL instruction?), that gets reported to the GC,
8815 // maybe in the GC heap itself). That object will contain an embedded Nullable<T>. Then, we need to
8816 // get a byref to the data within the object.
8818 NYI_INTERP("Unhandled 'unbox' of Nullable<T>.");
8823 NYI_INTERP("Unhandled 'unbox' helper.");
// Replace the object reference with the interior pointer to its payload.
8828 OpStackSet<void*>(tos, res);
8829 OpStackTypeSet(tos, InterpreterType(CORINFO_TYPE_BYREF));
// Implements the 'throw' IL opcode: throws the object reference on top of the
// operand stack as a managed exception. Non-Exception-derived objects are
// wrapped (CLS non-compliant exception wrapping) before being thrown.
8836 void Interpreter::Throw()
8844 _ASSERTE(m_curStackHt >= 1);
8846 // Note that we can't decrement the stack height here, since the operand stack
8847 // protects the thrown object. Nor do we need to, since the ostack will be cleared on
8848 // any catch within this method.
8849 unsigned exInd = m_curStackHt - 1;
8852 CorInfoType exCIT = OpStackTypeGet(exInd).ToCorInfoType();
8853 if (exCIT != CORINFO_TYPE_CLASS)
8855 VerificationError("Can only throw an object.");
// Throwing a null reference is invalid; ThrowOnInvalidPointer handles it.
8859 Object* obj = OpStackGet<Object*>(exInd);
8860 ThrowOnInvalidPointer(obj);
8862 OBJECTREF oref = ObjectToOBJECTREF(obj);
8863 if (!IsException(oref->GetMethodTable()))
8865 GCPROTECT_BEGIN(oref);
8866 WrapNonCompliantException(&oref);
// Implements the 'rethrow' IL opcode: re-raises the exception most recently
// thrown on this thread (only valid inside a catch handler per ECMA-335).
8872 void Interpreter::Rethrow()
8880 OBJECTREF throwable = GetThread()->LastThrownObject();
8881 COMPlusThrow(throwable);
// Implements the 'unbox.any' IL opcode. For reference types this is a
// 'castclass' (type check, stack unchanged); for value types it is
// 'unbox' followed by 'ldobj' (value copied onto the operand stack).
// Nullable<T> gets the dedicated helper via Nullable::UnBox.
8884 void Interpreter::UnboxAny()
8892 _ASSERTE(m_curStackHt > 0);
8893 unsigned tos = m_curStackHt - 1;
8895 unsigned boxTypeTok = getU4LittleEndian(m_ILCodePtr + 1);
8899 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_UnboxAny]);
8900 #endif // INTERP_TRACING
8902 CORINFO_RESOLVED_TOKEN boxTypeResolvedTok;
8903 CORINFO_CLASS_HANDLE boxTypeClsHnd;
8904 DWORD boxTypeAttribs = 0;
8908 ResolveToken(&boxTypeResolvedTok, boxTypeTok, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_UnboxAny));
8909 boxTypeClsHnd = boxTypeResolvedTok.hClass;
8910 boxTypeAttribs = m_interpCeeInfo.getClassAttribs(boxTypeClsHnd);
8913 CorInfoType unboxCIT = OpStackTypeGet(tos).ToCorInfoType();
8914 if (unboxCIT != CORINFO_TYPE_CLASS)
8915 VerificationError("Type mismatch in UNBOXANY.");
// Reference-type target: behaves as castclass; a failed check throws.
8917 if ((boxTypeAttribs & CORINFO_FLG_VALUECLASS) == 0)
8919 Object* obj = OpStackGet<Object*>(tos);
8920 if (obj != NULL && !ObjIsInstanceOf(obj, TypeHandle(boxTypeClsHnd), TRUE))
8922 UNREACHABLE(); //ObjIsInstanceOf will throw if cast can't be done
// Value-type target: pick the unbox helper kind from the JIT interface.
8927 CorInfoHelpFunc unboxHelper;
8931 unboxHelper = m_interpCeeInfo.getUnBoxHelper(boxTypeClsHnd);
8934 // Important that this *not* be factored out with the identical statement in the "if" branch:
8935 // delay read from GC-protected operand stack until after COOP-->PREEMP transition above.
8936 Object* obj = OpStackGet<Object*>(tos);
8938 switch (unboxHelper)
8940 case CORINFO_HELP_UNBOX:
8942 ThrowOnInvalidPointer(obj);
8944 MethodTable* pMT1 = (MethodTable*)boxTypeClsHnd;
8945 MethodTable* pMT2 = obj->GetMethodTable();
8948 if (pMT1->IsEquivalentTo(pMT2))
8950 res = OpStackGet<Object*>(tos)->UnBox();
// Enums and their underlying primitive type are interchangeable.
8954 if (pMT1->GetInternalCorElementType() == pMT2->GetInternalCorElementType() &&
8955 (pMT1->IsEnum() || pMT1->IsTruePrimitive()) &&
8956 (pMT2->IsEnum() || pMT2->IsTruePrimitive()))
8958 res = OpStackGet<Object*>(tos)->UnBox();
8964 COMPlusThrow(kInvalidCastException);
8967 // As the ECMA spec says, the rest is like a "ldobj".
8968 LdObjValueClassWork(boxTypeClsHnd, tos, res);
8972 case CORINFO_HELP_UNBOX_NULLABLE:
8974 InterpreterType it = InterpreterType(&m_interpCeeInfo, boxTypeClsHnd);
8975 size_t sz = it.Size(&m_interpCeeInfo);
// Large Nullable<T>: unbox directly into a large-struct stack slot.
8976 if (sz > sizeof(INT64))
8978 void* destPtr = LargeStructOperandStackPush(sz);
8979 if (!Nullable::UnBox(destPtr, ObjectToOBJECTREF(obj), (MethodTable*)boxTypeClsHnd))
8981 COMPlusThrow(kInvalidCastException);
8983 OpStackSet<void*>(tos, destPtr);
// Small Nullable<T>: unbox into an inline 8-byte stack slot.
8988 if (!Nullable::UnBox(&dest, ObjectToOBJECTREF(obj), (MethodTable*)boxTypeClsHnd))
8990 COMPlusThrow(kInvalidCastException);
8992 OpStackSet<INT64>(tos, dest);
8994 OpStackTypeSet(tos, it.StackNormalize());
8999 NYI_INTERP("Unhandled 'unbox.any' helper.");
// Implements the 'ldlen' IL opcode: replaces the array reference on top of
// the operand stack with its element count.
9004 void Interpreter::LdLen()
9012 _ASSERTE(m_curStackHt >= 1);
9013 unsigned arrInd = m_curStackHt - 1;
9015 _ASSERTE(OpStackTypeGet(arrInd).ToCorInfoType() == CORINFO_TYPE_CLASS);
9019 ArrayBase* a = OpStackGet<ArrayBase*>(arrInd);
9020 ThrowOnInvalidPointer(a);
9021 int len = a->GetNumComponents();
9023 OpStackSet<NativeUInt>(arrInd, NativeUInt(len));
9024 // The ECMA spec says that the type of the length value is NATIVEUINT, but this
9025 // doesn't make any sense -- unsigned types are not stack-normalized. So I'm
9026 // using NATIVEINT, to get the width right.
9027 OpStackTypeSet(arrInd, InterpreterType(CORINFO_TYPE_NATIVEINT));
// Implements the 'call'/'callvirt' IL opcodes (virtualCall selects which).
// Thin wrapper that records tracing stats and forwards to DoCallWork.
9031 void Interpreter::DoCall(bool virtualCall)
9033 #if INTERP_DYNAMIC_CONTRACTS
9040 // Dynamic contract occupies too much stack.
9041 STATIC_CONTRACT_THROWS;
9042 STATIC_CONTRACT_GC_TRIGGERS;
9043 STATIC_CONTRACT_MODE_COOPERATIVE;
9047 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_Call]);
9048 #endif // INTERP_TRACING
9050 DoCallWork(virtualCall);
// Computes the exact generics context for this method invocation, given the
// 'this' argument and the hidden generics-context argument (if any).
// Returns a method context for generic methods, a class context derived from
// the context arg or from 'this' for generic types, and the plain method
// context otherwise.
9055 CORINFO_CONTEXT_HANDLE InterpreterMethodInfo::GetPreciseGenericsContext(Object* thisArg, void* genericsCtxtArg)
9057 // If the caller has a generic argument, then we need to get the exact methodContext.
9058 // There are several possibilities that lead to a generic argument:
9059 // 1) Static method of generic class: generic argument is the method table of the class.
9060 // 2) generic method of a class: generic argument is the precise MethodDesc* of the method.
9061 if (GetFlag<InterpreterMethodInfo::Flag_hasGenericsContextArg>())
9063 _ASSERTE(GetFlag<InterpreterMethodInfo::Flag_methHasGenericArgs>() || GetFlag<InterpreterMethodInfo::Flag_typeHasGenericArgs>());
9064 if (GetFlag<InterpreterMethodInfo::Flag_methHasGenericArgs>())
// Generic method: the context arg is the exact MethodDesc.
9066 return MAKE_METHODCONTEXT(reinterpret_cast<CORINFO_METHOD_HANDLE>(genericsCtxtArg));
// Generic type: the context arg is a MethodTable; map it to the instantiation
// of the method's declaring class.
9070 MethodTable* methodClass = reinterpret_cast<MethodDesc*>(m_method)->GetMethodTable();
9071 MethodTable* contextClass = reinterpret_cast<MethodTable*>(genericsCtxtArg)->GetMethodTableMatchingParentClass(methodClass);
9072 return MAKE_CLASSCONTEXT(contextClass);
9075 // TODO: This condition isn't quite right. If the actual class is a subtype of the declaring type of the method,
9076 // then it might be in another module, the scope and context won't agree.
9077 else if (GetFlag<InterpreterMethodInfo::Flag_typeHasGenericArgs>()
9078 && !GetFlag<InterpreterMethodInfo::Flag_methHasGenericArgs>()
9079 && GetFlag<InterpreterMethodInfo::Flag_hasThisArg>()
9080 && GetFlag<InterpreterMethodInfo::Flag_thisArgIsObjPtr>() && thisArg != NULL)
// No hidden context arg: recover the class context from 'this'.
9082 MethodTable* methodClass = reinterpret_cast<MethodDesc*>(m_method)->GetMethodTable();
9083 MethodTable* contextClass = thisArg->GetMethodTable()->GetMethodTableMatchingParentClass(methodClass);
9084 return MAKE_CLASSCONTEXT(contextClass);
// Default: no generic instantiation involved.
9088 return MAKE_METHODCONTEXT(m_method);
9092 void Interpreter::DoCallWork(bool virtualCall, void* thisArg, CORINFO_RESOLVED_TOKEN* methTokPtr, CORINFO_CALL_INFO* callInfoPtr)
9094 #if INTERP_DYNAMIC_CONTRACTS
9101 // Dynamic contract occupies too much stack.
9102 STATIC_CONTRACT_THROWS;
9103 STATIC_CONTRACT_GC_TRIGGERS;
9104 STATIC_CONTRACT_MODE_COOPERATIVE;
9107 #if INTERP_ILCYCLE_PROFILE
9110 unsigned __int64 callStartCycles;
9111 bool b = CycleTimer::GetThreadCyclesS(&callStartCycles); _ASSERTE(b);
9112 unsigned __int64 callStartExemptCycles = m_exemptCycles;
9114 #endif // INTERP_ILCYCLE_PROFILE
9117 InterlockedIncrement(&s_totalInterpCalls);
9118 #endif // INTERP_TRACING
9119 unsigned tok = getU4LittleEndian(m_ILCodePtr + sizeof(BYTE));
9121 // It's possible for an IL method to push a capital-F Frame. If so, we pop it and save it;
9122 // we'll push it back on after our GCPROTECT frame is popped.
9123 Frame* ilPushedFrame = NULL;
9125 // We can't protect "thisArg" with a GCPROTECT, because this pushes a Frame, and there
9126 // exist managed methods that push (and pop) Frames -- so that the Frame chain does not return
9127 // to its original state after a call. Therefore, we can't have a Frame on the stack over the duration
9128 // of a call. (I assume that any method that calls a Frame-pushing IL method performs a matching
9129 // call to pop that Frame before the caller method completes. If this were not true, if one method could push
9130 // a Frame, but defer the pop to its caller, then we could *never* use a Frame in the interpreter, and
9131 // our implementation plan would be doomed.)
9132 _ASSERTE(m_callThisArg == NULL);
9133 m_callThisArg = thisArg;
9135 // Have we already cached a MethodDescCallSite for this call? (We do this only in loops
9136 // in the current execution).
9137 unsigned iloffset = CurOffset();
9138 CallSiteCacheData* pCscd = NULL;
9139 if (s_InterpreterUseCaching) pCscd = GetCachedCallInfo(iloffset);
9141 // If this is true, then we should not cache this call site.
9144 CORINFO_RESOLVED_TOKEN methTok;
9145 CORINFO_CALL_INFO callInfo;
9146 MethodDesc* methToCall = NULL;
9147 CORINFO_CLASS_HANDLE exactClass = NULL;
9148 CORINFO_SIG_INFO_SMALL sigInfo;
9152 methToCall = pCscd->m_pMD;
9153 sigInfo = pCscd->m_sigInfo;
9155 doNotCache = true; // We already have a cache entry.
9159 doNotCache = false; // Until we determine otherwise.
9160 if (callInfoPtr == NULL)
9164 // callInfoPtr and methTokPtr must either both be NULL, or neither.
9165 _ASSERTE(methTokPtr == NULL);
9167 methTokPtr = &methTok;
9168 ResolveToken(methTokPtr, tok, CORINFO_TOKENKIND_Method InterpTracingArg(RTK_Call));
9169 OPCODE opcode = (OPCODE)(*m_ILCodePtr);
9171 m_interpCeeInfo.getCallInfo(methTokPtr,
9172 m_constrainedFlag ? & m_constrainedResolvedToken : NULL,
9173 m_methInfo->m_method,
9174 //this is how impImportCall invokes getCallInfo
9175 combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM,
9176 CORINFO_CALLINFO_SECURITYCHECKS),
9177 (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT
9178 : CORINFO_CALLINFO_NONE),
9180 #if INTERP_ILCYCLE_PROFILE
9184 unsigned __int64 callEndCycles;
9185 b = CycleTimer::GetThreadCyclesS(&callEndCycles); _ASSERTE(b);
9186 unsigned __int64 delta = (callEndCycles - callStartCycles);
9187 delta -= (m_exemptCycles - callStartExemptCycles);
9188 s_callCycles += delta;
9192 #endif // INTERP_ILCYCLE_PROFILE
9194 callInfoPtr = &callInfo;
9196 _ASSERTE(!callInfoPtr->exactContextNeedsRuntimeLookup);
9198 methToCall = reinterpret_cast<MethodDesc*>(methTok.hMethod);
9199 exactClass = methTok.hClass;
9203 // callInfoPtr and methTokPtr must either both be NULL, or neither.
9204 _ASSERTE(methTokPtr != NULL);
9206 _ASSERTE(!callInfoPtr->exactContextNeedsRuntimeLookup);
9208 methToCall = reinterpret_cast<MethodDesc*>(callInfoPtr->hMethod);
9209 exactClass = methTokPtr->hClass;
9212 // We used to take the sigInfo from the callInfo here, but that isn't precise, since
9213 // we may have made "methToCall" more precise wrt generics than the method handle in
9214 // the callinfo. So look up the more precise signature.
9217 CORINFO_SIG_INFO sigInfoFull;
9218 m_interpCeeInfo.getMethodSig(CORINFO_METHOD_HANDLE(methToCall), &sigInfoFull, nullptr);
9219 sigInfo.retTypeClass = sigInfoFull.retTypeClass;
9220 sigInfo.numArgs = sigInfoFull.numArgs;
9221 sigInfo.callConv = sigInfoFull.callConv;
9222 sigInfo.retType = sigInfoFull.retType;
9225 // Point A in our cycle count.
9228 // TODO: enable when NamedIntrinsic is available to interpreter
9231 // Is the method an intrinsic? If so, and if it's one we've written special-case code for
9232 // handle intrinsically.
9233 NamedIntrinsic intrinsicName;
9236 intrinsicName = getIntrinsicName(CORINFO_METHOD_HANDLE(methToCall), nullptr);
9240 if (intrinsicName == NI_Illegal)
9241 InterlockedIncrement(&s_totalInterpCallsToIntrinsics);
9242 #endif // INTERP_TRACING
9243 bool didIntrinsic = false;
9244 if (!m_constrainedFlag)
9246 switch (intrinsicId)
9249 case NI_System_StubHelpers_GetStubContext:
9250 OpStackSet<void*>(m_curStackHt, GetStubContext());
9251 OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_NATIVEINT));
9252 m_curStackHt++; didIntrinsic = true;
9254 #endif // INTERP_ILSTUBS
9257 InterlockedIncrement(&s_totalInterpCallsToIntrinsicsUnhandled);
9258 #endif // INTERP_TRACING
9262 // Plus some other calls that we're going to treat "like" intrinsics...
9263 if (methToCall == CoreLibBinder::GetMethod(METHOD__STUBHELPERS__SET_LAST_ERROR))
9265 // If we're interpreting a method that calls "SetLastError", it's very likely that the call(i) whose
9266 // error we're trying to capture was performed with MethodDescCallSite machinery that itself trashes
9267 // the last error. We solve this by saving the last error in a special interpreter-specific field of
9268 // "Thread" in that case, and essentially implement SetLastError here, taking that field as the
9269 // source for the last error.
9270 Thread* thrd = GetThread();
9271 thrd->m_dwLastError = thrd->m_dwLastErrorInterp;
9272 didIntrinsic = true;
9275 // TODO: The following check for hardware intrinsics is not a production-level
9276 // solution and may produce incorrect results.
9277 static ConfigDWORD s_InterpreterHWIntrinsicsIsSupportedFalse;
9278 if (s_InterpreterHWIntrinsicsIsSupportedFalse.val(CLRConfig::INTERNAL_InterpreterHWIntrinsicsIsSupportedFalse) != 0)
9282 // Hardware intrinsics are recognized by name.
9283 const char* namespaceName = NULL;
9284 const char* className = NULL;
9285 const char* methodName = m_interpCeeInfo.getMethodNameFromMetadata((CORINFO_METHOD_HANDLE)methToCall, &className, &namespaceName, NULL);
9287 #if defined(TARGET_X86) || defined(TARGET_AMD64)
9288 strcmp(namespaceName, "System.Runtime.Intrinsics.X86") == 0 &&
9289 #elif defined(TARGET_ARM64)
9290 strcmp(namespaceName, "System.Runtime.Intrinsics.Arm") == 0 &&
9291 #endif // defined(TARGET_X86) || defined(TARGET_AMD64)
9292 strcmp(methodName, "get_IsSupported") == 0
9297 didIntrinsic = true;
9302 // Check for the simd class...
9303 _ASSERTE(exactClass != NULL);
9305 bool isIntrinsicType = m_interpCeeInfo.isIntrinsicType(exactClass);
9307 if (isIntrinsicType)
9309 // SIMD intrinsics are recognized by name.
9310 const char* namespaceName = NULL;
9311 const char* className = NULL;
9312 const char* methodName = m_interpCeeInfo.getMethodNameFromMetadata((CORINFO_METHOD_HANDLE)methToCall, &className, &namespaceName, NULL);
9313 if ((strcmp(methodName, "get_IsHardwareAccelerated") == 0) && (strcmp(className, "Vector") == 0) && (strcmp(namespaceName, "System.Numerics") == 0))
9316 DoSIMDHwAccelerated();
9317 didIntrinsic = true;
9323 // Must block caching or we lose easy access to the class
9326 #endif // FEATURE_SIMD
9332 if (s_InterpreterUseCaching && !doNotCache)
9334 // Cache the token resolution result...
9335 pCscd = new CallSiteCacheData(methToCall, sigInfo);
9336 CacheCallInfo(iloffset, pCscd);
9338 // Now we can return.
9343 // Handle other simple special cases:
9345 #if FEATURE_INTERPRETER_DEADSIMPLE_OPT
9346 #ifndef DACCESS_COMPILE
9347 // Dead simple static getters.
9348 InterpreterMethodInfo* calleeInterpMethInfo;
9349 if (GetMethodHandleToInterpMethInfoPtrMap()->Lookup(CORINFO_METHOD_HANDLE(methToCall), &calleeInterpMethInfo))
9351 if (calleeInterpMethInfo->GetFlag<InterpreterMethodInfo::Flag_methIsDeadSimpleGetter>())
9353 if (methToCall->IsStatic())
9359 ILOffsetToItemCache* calleeCache;
9361 Object* thisArg = OpStackGet<Object*>(m_curStackHt-1);
9363 // We pass NULL for the generic context arg, because a dead simple getter takes none, by definition.
9364 calleeCache = calleeInterpMethInfo->GetCacheForCall(thisArg, /*genericsContextArg*/NULL);
9366 // We've interpreted the getter at least once, so the cache for *some* generics context is populated -- but maybe not
9367 // this one. We're hoping that it usually is.
9368 if (calleeCache != NULL)
9370 CachedItem cachedItem;
9371 unsigned offsetOfLd;
9372 if (calleeInterpMethInfo->GetFlag<InterpreterMethodInfo::Flag_methIsDeadSimpleGetterIsDbgForm>())
9373 offsetOfLd = ILOffsetOfLdFldInDeadSimpleInstanceGetterOpt;
9375 offsetOfLd = ILOffsetOfLdFldInDeadSimpleInstanceGetterOpt;
9377 bool b = calleeCache->GetItem(offsetOfLd, cachedItem);
9378 _ASSERTE_MSG(b, "If the cache exists for this generic context, it should an entry for the LdFld.");
9379 _ASSERTE_MSG(cachedItem.m_tag == CIK_InstanceField, "If it's there, it should be an instance field cache.");
9380 LdFld(cachedItem.m_value.m_instanceField);
9382 InterlockedIncrement(&s_totalInterpCallsToDeadSimpleGetters);
9383 InterlockedIncrement(&s_totalInterpCallsToDeadSimpleGettersShortCircuited);
9384 #endif // INTERP_TRACING
9390 #endif // DACCESS_COMPILE
9391 #endif // FEATURE_INTERPRETER_DEADSIMPLE_OPT
9393 unsigned totalSigArgs;
9394 CORINFO_VARARGS_HANDLE vaSigCookie = nullptr;
9395 if ((sigInfo.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
9396 (sigInfo.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
9399 CORINFO_SIG_INFO sig;
9400 m_interpCeeInfo.findCallSiteSig(m_methInfo->m_module, methTokPtr->token, MAKE_METHODCONTEXT(m_methInfo->m_method), &sig);
9401 sigInfo.retTypeClass = sig.retTypeClass;
9402 sigInfo.numArgs = sig.numArgs;
9403 sigInfo.callConv = sig.callConv;
9404 sigInfo.retType = sig.retType;
9405 // Adding 'this' pointer because, numArgs doesn't include the this pointer.
9406 totalSigArgs = sigInfo.numArgs + sigInfo.hasThis();
9408 if ((sigInfo.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
9410 Module* module = GetModule(sig.scope);
9411 vaSigCookie = CORINFO_VARARGS_HANDLE(module->GetVASigCookie(Signature(sig.pSig, sig.cbSig)));
9417 totalSigArgs = sigInfo.totalILArgs();
9420 // Note that "totalNativeArgs()" includes space for ret buff arg.
9421 unsigned nSlots = totalSigArgs + 1;
9422 if (sigInfo.hasTypeArg()) nSlots++;
9423 if (sigInfo.isVarArg()) nSlots++;
9425 DelegateCtorArgs ctorData;
9426 // If any of these are non-null, they will be pushed as extra arguments (see the code below).
9427 ctorData.pArg3 = NULL;
9428 ctorData.pArg4 = NULL;
9429 ctorData.pArg5 = NULL;
9431 // Since we make "doNotCache" true below, we'll never have a non-null "pCscd" for a delegate
9432 // constructor. But we have to check for a cached method first, since callInfoPtr may be null in the cached case.
9433 if (pCscd == NULL && callInfoPtr->classFlags & CORINFO_FLG_DELEGATE && callInfoPtr->methodFlags & CORINFO_FLG_CONSTRUCTOR)
9435 // We won't cache this case.
9438 _ASSERTE_MSG(!sigInfo.hasTypeArg(), "I assume that this isn't possible.");
9441 ctorData.pMethod = methToCall;
9443 // Second argument to delegate constructor will be code address of the function the delegate wraps.
9444 _ASSERTE(TOSIsPtr() && OpStackTypeGet(m_curStackHt-1).ToCorInfoType() != CORINFO_TYPE_BYREF);
9445 CORINFO_METHOD_HANDLE targetMethodHnd = GetFunctionPointerStack()[m_curStackHt-1];
9446 _ASSERTE(targetMethodHnd != NULL);
9447 CORINFO_METHOD_HANDLE alternateCtorHnd = m_interpCeeInfo.GetDelegateCtor(reinterpret_cast<CORINFO_METHOD_HANDLE>(methToCall), methTokPtr->hClass, targetMethodHnd, &ctorData);
9448 MethodDesc* alternateCtor = reinterpret_cast<MethodDesc*>(alternateCtorHnd);
9449 if (alternateCtor != methToCall)
9451 methToCall = alternateCtor;
9453 // Translate the method address argument from a method handle to the actual callable code address.
9454 void* val = (void *)((MethodDesc *)targetMethodHnd)->GetMultiCallableAddrOfCode();
9455 // Change the method argument to the code pointer.
9456 OpStackSet<void*>(m_curStackHt-1, val);
9458 // Now if there are extra arguments, add them to the number of slots; we'll push them on the
9460 if (ctorData.pArg3) nSlots++;
9461 if (ctorData.pArg4) nSlots++;
9462 if (ctorData.pArg5) nSlots++;
9466 // Make sure that the operand stack has the required number of arguments.
9467 // (Note that this is IL args, not native.)
9470 // The total number of arguments on the IL stack. Initially we assume that all the IL arguments
9471 // the callee expects are on the stack, but may be adjusted downwards if the "this" argument
9472 // is provided by an allocation (the call is to a constructor).
9473 unsigned totalArgsOnILStack = totalSigArgs;
9474 if (m_callThisArg != NULL)
9476 _ASSERTE(totalArgsOnILStack > 0);
9477 totalArgsOnILStack--;
9480 #if defined(FEATURE_HFA)
9481 // Does the callee have an HFA return type?
9482 unsigned HFAReturnArgSlots = 0;
9486 if (sigInfo.retType == CORINFO_TYPE_VALUECLASS
9487 && (m_interpCeeInfo.getHFAType(sigInfo.retTypeClass) != CORINFO_HFA_ELEM_NONE)
9488 && (sigInfo.getCallConv() & CORINFO_CALLCONV_VARARG) == 0)
9490 HFAReturnArgSlots = getClassSize(sigInfo.retTypeClass);
9491 // Round up to a multiple of double size.
9492 HFAReturnArgSlots = (HFAReturnArgSlots + sizeof(ARG_SLOT) - 1) / sizeof(ARG_SLOT);
9495 #elif defined(UNIX_AMD64_ABI) || defined(TARGET_RISCV64)
9496 unsigned HasTwoSlotBuf = sigInfo.retType == CORINFO_TYPE_VALUECLASS &&
9497 getClassSize(sigInfo.retTypeClass) == 16;
9502 const unsigned LOCAL_ARG_SLOTS = 8;
9503 ARG_SLOT localArgs[LOCAL_ARG_SLOTS];
9504 InterpreterType localArgTypes[LOCAL_ARG_SLOTS];
9507 InterpreterType* argTypes;
9508 #if defined(HOST_X86)
9509 unsigned totalArgSlots = nSlots;
9510 #elif defined(HOST_ARM) || defined(HOST_ARM64)
9511 // ARM64TODO: Verify that the following statement is correct for ARM64.
9512 unsigned totalArgSlots = nSlots + HFAReturnArgSlots;
9513 #elif defined(HOST_AMD64)
9514 unsigned totalArgSlots = nSlots;
9515 #elif defined(HOST_LOONGARCH64)
9516 unsigned totalArgSlots = nSlots;
9517 #elif defined(HOST_RISCV64)
9518 unsigned totalArgSlots = nSlots;
9520 #error "unsupported platform"
9523 if (totalArgSlots <= LOCAL_ARG_SLOTS)
9525 args = &localArgs[0];
9526 argTypes = &localArgTypes[0];
9530 args = (ARG_SLOT*)_alloca(totalArgSlots * sizeof(ARG_SLOT));
9531 #if defined(HOST_ARM)
9532 // The HFA return buffer, if any, is assumed to be at a negative
9533 // offset from the IL arg pointer, so adjust that pointer upward.
9534 args = args + HFAReturnArgSlots;
9535 #endif // defined(HOST_ARM)
9536 argTypes = (InterpreterType*)_alloca(nSlots * sizeof(InterpreterType));
9538 // Make sure that we don't scan any of these until we overwrite them with
9539 // the real types of the arguments.
9540 InterpreterType undefIt(CORINFO_TYPE_UNDEF);
9541 for (unsigned i = 0; i < nSlots; i++) argTypes[i] = undefIt;
9543 // GC-protect the argument array (as byrefs).
9544 m_args = args; m_argsSize = nSlots; m_argTypes = argTypes;
9546 // This is the index into the "args" array (where we copy the value to).
9549 // The operand stack index of the first IL argument.
9550 _ASSERTE(m_curStackHt >= totalArgsOnILStack);
9551 int argsBase = m_curStackHt - totalArgsOnILStack;
9553 // Current on-stack argument index.
9556 // We do "this" -- in the case of a constructor, we "shuffle" the "m_callThisArg" argument in as the first
9557 // argument -- it isn't on the IL operand stack.
9559 if (m_constrainedFlag)
9561 _ASSERTE(m_callThisArg == NULL); // "m_callThisArg" non-null only for .ctor, which are not callvirts.
9563 CorInfoType argCIT = OpStackTypeGet(argsBase + arg).ToCorInfoType();
9564 if (argCIT != CORINFO_TYPE_BYREF)
9565 VerificationError("This arg of constrained call must be managed pointer.");
9567 // We only cache for the CORINFO_NO_THIS_TRANSFORM case, so we may assume that if we have a cached call site,
9568 // there's no thisTransform to perform.
9571 switch (callInfoPtr->thisTransform)
9573 case CORINFO_NO_THIS_TRANSFORM:
9574 // It is a constrained call on a method implemented by a value type; this is already the proper managed pointer.
9577 case CORINFO_DEREF_THIS:
9581 DWORD clsAttribs = m_interpCeeInfo.getClassAttribs(m_constrainedResolvedToken.hClass);
9582 _ASSERTE((clsAttribs & CORINFO_FLG_VALUECLASS) == 0);
9586 // As per the spec, dereference the byref to the "this" pointer, and substitute it as the new "this" pointer.
9588 Object** objPtrPtr = OpStackGet<Object**>(argsBase + arg);
9589 OpStackSet<Object*>(argsBase + arg, *objPtrPtr);
9590 OpStackTypeSet(argsBase + arg, InterpreterType(CORINFO_TYPE_CLASS));
9595 case CORINFO_BOX_THIS:
9596 // This is the case where the call is to a virtual method of Object the given
9597 // struct class does not override -- the struct must be boxed, so that the
9598 // method can be invoked as a virtual.
9599 BoxStructRefAt(argsBase + arg, m_constrainedResolvedToken.hClass);
9604 exactClass = m_constrainedResolvedToken.hClass;
9607 DWORD exactClassAttribs = m_interpCeeInfo.getClassAttribs(exactClass);
9608 // If the constraint type is a value class, then it is the exact class (which will be the
9609 // "owner type" in the MDCS below.) If it is not, leave it as the (precise) interface method.
9610 if (exactClassAttribs & CORINFO_FLG_VALUECLASS)
9612 MethodTable* exactClassMT = GetMethodTableFromClsHnd(exactClass);
9613 // Find the method on exactClass corresponding to methToCall.
9614 methToCall = MethodDesc::FindOrCreateAssociatedMethodDesc(
9615 reinterpret_cast<MethodDesc*>(callInfoPtr->hMethod), // pPrimaryMD
9616 exactClassMT, // pExactMT
9617 FALSE, // forceBoxedEntryPoint
9618 methToCall->GetMethodInstantiation(), // methodInst
9619 FALSE); // allowInstParam
9623 exactClass = methTokPtr->hClass;
9628 // We've consumed the constraint, so reset the flag.
9629 m_constrainedFlag = false;
9634 if (callInfoPtr->methodFlags & CORINFO_FLG_STATIC)
9636 MethodDesc* pMD = reinterpret_cast<MethodDesc*>(callInfoPtr->hMethod);
9637 EnsureClassInit(pMD->GetMethodTable());
9643 // We must do anything that might make a COOP->PREEMP transition before copying arguments out of the
9644 // operand stack (where they are GC-protected) into the args array (where they are not).
9646 const char* clsOfMethToCallName;;
9647 const char* methToCallName = NULL;
9650 methToCallName = m_interpCeeInfo.getMethodNameFromMetadata(CORINFO_METHOD_HANDLE(methToCall), &clsOfMethToCallName, NULL, NULL);
9653 if (strncmp(methToCallName, "get_", 4) == 0)
9655 InterlockedIncrement(&s_totalInterpCallsToGetters);
9657 if (IsDeadSimpleGetter(&m_interpCeeInfo, methToCall, &offsetOfLd))
9659 InterlockedIncrement(&s_totalInterpCallsToDeadSimpleGetters);
9662 else if (strncmp(methToCallName, "set_", 4) == 0)
9664 InterlockedIncrement(&s_totalInterpCallsToSetters);
9666 #endif // INTERP_TRACING
9668 // Only do this check on the first call, since it should be the same each time.
9671 // Ensure that any value types used as argument types are loaded. This property is checked
9672 // by the MethodDescCall site mechanisms. Since enums are freely convertible with their underlying
9673 // integer type, this is at least one case where a caller may push a value convertible to a value type
9674 // without any code having caused the value type to be loaded. This is DEBUG-only because if the callee treats
9675 // the integer-type value as the enum value type, it will have loaded the value type.
9676 MetaSig ms(methToCall);
9677 CorElementType argType;
9678 while ((argType = ms.NextArg()) != ELEMENT_TYPE_END)
9680 if (argType == ELEMENT_TYPE_VALUETYPE)
9682 TypeHandle th = ms.GetLastTypeHandleThrowing(ClassLoader::LoadTypes);
9683 CONSISTENCY_CHECK(th.CheckFullyLoaded());
9684 CONSISTENCY_CHECK(th.IsRestored());
9690 // CYCLE PROFILE: BEFORE ARG PROCESSING.
9692 if (sigInfo.hasThis())
9694 if (m_callThisArg != NULL)
9696 if (size_t(m_callThisArg) == 0x1)
9698 args[curArgSlot] = NULL;
9702 args[curArgSlot] = PtrToArgSlot(m_callThisArg);
9704 argTypes[curArgSlot] = InterpreterType(CORINFO_TYPE_BYREF);
9708 args[curArgSlot] = PtrToArgSlot(OpStackGet<void*>(argsBase + arg));
9709 argTypes[curArgSlot] = OpStackTypeGet(argsBase + arg);
9712 // AV -> NullRef translation is NYI for the interpreter,
9713 // so we should manually check and throw the correct exception.
9714 if (args[curArgSlot] == NULL)
9716 // If we're calling a constructor, we bypass this check since the runtime
9717 // should have thrown OOM if it was unable to allocate an instance.
9718 if (m_callThisArg == NULL)
9720 _ASSERTE(!methToCall->IsStatic());
9721 ThrowNullPointerException();
9723 // ...except in the case of strings, which are both
9724 // allocated and initialized by their special constructor.
9727 _ASSERTE(methToCall->IsCtor() && methToCall->GetMethodTable()->IsString());
9733 // This is the argument slot that will be used to hold the return value.
9734 // In UNIX_AMD64_ABI, the return type may need two ARG_SLOTs.
9735 ARG_SLOT retVals[2] = {0, 0};
9736 #if !defined(HOST_ARM) && !defined(UNIX_AMD64_ABI) && !defined(TARGET_RISCV64)
9737 _ASSERTE (NUMBER_RETURNVALUE_SLOTS == 1);
9740 // If the return type is a structure, then these will be initialized.
9741 CORINFO_CLASS_HANDLE retTypeClsHnd = NULL;
9742 InterpreterType retTypeIt;
9743 size_t retTypeSz = 0;
9745 // If non-null, space allocated to hold a large struct return value. Should be deleted later.
9746 // (I could probably optimize this pop all the arguments first, then allocate space for the return value
9747 // on the large structure operand stack, and pass a pointer directly to that space, avoiding the extra
9748 // copy we have below. But this seemed more expedient, and this should be a pretty rare case.)
9749 BYTE* pLargeStructRetVal = NULL;
9751 // If there's a "GetFlag<Flag_hasRetBuffArg>()" struct return value, it will be stored in this variable if it fits,
9752 // otherwise, we'll dynamically allocate memory for it.
9753 ARG_SLOT smallStructRetVal = 0;
9755 // We should have no return buffer temp space registered here...unless this is a constructor, in which
9756 // case it will return void. In particular, if the return type VALUE_CLASS, then this should be NULL.
9757 _ASSERTE_MSG((pCscd != NULL) || sigInfo.retType == CORINFO_TYPE_VOID || m_structRetValITPtr == NULL, "Invariant.");
9759 // Is it the return value a struct with a ret buff?
9760 _ASSERTE_MSG(methToCall != NULL, "assumption");
9761 bool hasRetBuffArg = false;
9762 if (sigInfo.retType == CORINFO_TYPE_VALUECLASS || sigInfo.retType == CORINFO_TYPE_REFANY)
9764 hasRetBuffArg = !!methToCall->HasRetBuffArg();
9765 retTypeClsHnd = sigInfo.retTypeClass;
9767 MetaSig ms(methToCall);
9770 // On ARM, if there's an HFA return type, we must also allocate a return buffer, since the
9771 // MDCS calling convention requires it.
9773 #if defined(HOST_ARM)
9774 || HFAReturnArgSlots > 0
9775 #endif // defined(HOST_ARM)
9778 _ASSERTE(retTypeClsHnd != NULL);
9779 retTypeIt = InterpreterType(&m_interpCeeInfo, retTypeClsHnd);
9780 retTypeSz = retTypeIt.Size(&m_interpCeeInfo);
9782 #if defined(HOST_ARM)
9783 if (HFAReturnArgSlots > 0)
9785 args[curArgSlot] = PtrToArgSlot(args - HFAReturnArgSlots);
9788 #endif // defined(HOST_ARM)
9790 if (retTypeIt.IsLargeStruct(&m_interpCeeInfo))
9792 size_t retBuffSize = retTypeSz;
9793 // If the target architecture can sometimes return a struct in several registers,
9794 // MethodDescCallSite will reserve a return value array big enough to hold the maximum.
9795 // It will then copy *all* of this into the return buffer area we allocate. So make sure
9796 // we allocate at least that much.
9797 #ifdef ENREGISTERED_RETURNTYPE_MAXSIZE
9798 retBuffSize = max(retTypeSz, ENREGISTERED_RETURNTYPE_MAXSIZE);
9799 #endif // ENREGISTERED_RETURNTYPE_MAXSIZE
9800 pLargeStructRetVal = (BYTE*)_alloca(retBuffSize);
9801 // Clear this in case a GC happens.
9802 for (unsigned i = 0; i < retTypeSz; i++) pLargeStructRetVal[i] = 0;
9803 // Register this as location needing GC.
9804 m_structRetValTempSpace = pLargeStructRetVal;
9805 // Set it as the return buffer.
9806 args[curArgSlot] = PtrToArgSlot(pLargeStructRetVal);
9810 // Clear this in case a GC happens.
9811 smallStructRetVal = 0;
9812 // Register this as location needing GC.
9813 m_structRetValTempSpace = &smallStructRetVal;
9814 // Set it as the return buffer.
9815 args[curArgSlot] = PtrToArgSlot(&smallStructRetVal);
9817 m_structRetValITPtr = &retTypeIt;
9818 argTypes[curArgSlot] = InterpreterType(CORINFO_TYPE_NATIVEINT);
9823 // The struct type might "normalize" to a primitive type.
9824 if (retTypeClsHnd == NULL)
9826 retTypeIt = InterpreterType(CEEInfo::asCorInfoType(ms.GetReturnTypeNormalized()));
9830 retTypeIt = InterpreterType(&m_interpCeeInfo, retTypeClsHnd);
9835 if (((sigInfo.callConv & CORINFO_CALLCONV_VARARG) != 0) && sigInfo.isVarArg())
9837 _ASSERTE(vaSigCookie != nullptr);
9838 args[curArgSlot] = PtrToArgSlot(vaSigCookie);
9839 argTypes[curArgSlot] = InterpreterType(CORINFO_TYPE_NATIVEINT);
9845 if (sigInfo.hasTypeArg())
9848 // We will find the instantiating stub for the method, and call that instead.
9849 CORINFO_SIG_INFO sigInfoFull;
9850 Instantiation methodInst = methToCall->GetMethodInstantiation();
9851 BOOL fNeedUnboxingStub = virtualCall && TypeHandle(exactClass).IsValueType() && methToCall->IsVirtual();
9852 methToCall = MethodDesc::FindOrCreateAssociatedMethodDesc(methToCall,
9853 TypeHandle(exactClass).GetMethodTable(), fNeedUnboxingStub, methodInst, FALSE, TRUE);
9854 m_interpCeeInfo.getMethodSig(CORINFO_METHOD_HANDLE(methToCall), &sigInfoFull, nullptr);
9855 sigInfo.retTypeClass = sigInfoFull.retTypeClass;
9856 sigInfo.numArgs = sigInfoFull.numArgs;
9857 sigInfo.callConv = sigInfoFull.callConv;
9858 sigInfo.retType = sigInfoFull.retType;
9861 if (sigInfo.hasTypeArg())
9863 // If we still have a type argument, we're calling an ArrayOpStub and need to pass the array TypeHandle.
9864 _ASSERTE(methToCall->IsArray());
9866 args[curArgSlot] = PtrToArgSlot(exactClass);
9867 argTypes[curArgSlot] = InterpreterType(CORINFO_TYPE_NATIVEINT);
9872 // Now we do the non-this arguments.
9873 size_t largeStructSpaceToPop = 0;
9874 for (; arg < totalArgsOnILStack; arg++)
9876 InterpreterType argIt = OpStackTypeGet(argsBase + arg);
9877 size_t sz = OpStackTypeGet(argsBase + arg).Size(&m_interpCeeInfo);
9881 args[curArgSlot] = OpStackGet<INT8>(argsBase + arg);
9884 args[curArgSlot] = OpStackGet<INT16>(argsBase + arg);
9887 args[curArgSlot] = OpStackGet<INT32>(argsBase + arg);
9893 void* srcPtr = OpStackGet<void*>(argsBase + arg);
9894 args[curArgSlot] = PtrToArgSlot(srcPtr);
9895 if (!IsInLargeStructLocalArea(srcPtr))
9896 largeStructSpaceToPop += sz;
9900 args[curArgSlot] = OpStackGet<INT64>(argsBase + arg);
9904 argTypes[curArgSlot] = argIt;
9910 args[curArgSlot] = PtrToArgSlot(ctorData.pArg3);
9911 argTypes[curArgSlot] = InterpreterType(CORINFO_TYPE_NATIVEINT);
9916 args[curArgSlot] = PtrToArgSlot(ctorData.pArg4);
9917 argTypes[curArgSlot] = InterpreterType(CORINFO_TYPE_NATIVEINT);
9922 args[curArgSlot] = PtrToArgSlot(ctorData.pArg5);
9923 argTypes[curArgSlot] = InterpreterType(CORINFO_TYPE_NATIVEINT);
9927 // CYCLE PROFILE: AFTER ARG PROCESSING.
9929 Thread* thr = GetThread();
9931 Object** thisArgHnd = NULL;
9932 ARG_SLOT nullThisArg = NULL;
9933 if (sigInfo.hasThis())
9935 if (m_callThisArg != NULL)
9937 if (size_t(m_callThisArg) == 0x1)
9939 thisArgHnd = reinterpret_cast<Object**>(&nullThisArg);
9943 thisArgHnd = reinterpret_cast<Object**>(&m_callThisArg);
9948 thisArgHnd = OpStackGetAddr<Object*>(argsBase);
9952 Frame* topFrameBefore = thr->GetFrame();
9954 #if INTERP_ILCYCLE_PROFILE
9955 unsigned __int64 startCycles;
9956 #endif // INTERP_ILCYCLE_PROFILE
9958 // CYCLE PROFILE: BEFORE MDCS CREATION.
9960 PCODE target = NULL;
9961 MethodDesc *exactMethToCall = methToCall;
9963 // Determine the target of virtual calls.
9964 if (virtualCall && methToCall->IsVtableMethod())
9968 _ASSERTE(thisArgHnd != NULL);
9969 OBJECTREF objRef = ObjectToOBJECTREF(*thisArgHnd);
9970 GCPROTECT_BEGIN(objRef);
9971 pCode = methToCall->GetMultiCallableAddrOfVirtualizedCode(&objRef, methToCall->GetMethodTable());
9974 exactMethToCall = Entry2MethodDesc(pCode, objRef->GetMethodTable());
9977 // Compile the target in advance of calling.
9978 if (exactMethToCall->IsPointingToPrestub())
9980 MethodTable* dispatchingMT = NULL;
9981 if (exactMethToCall->IsVtableMethod())
9983 _ASSERTE(thisArgHnd != NULL);
9984 dispatchingMT = (*thisArgHnd)->GetMethodTable();
9987 target = exactMethToCall->DoPrestub(dispatchingMT);
9991 target = exactMethToCall->GetMethodEntryPoint();
9994 // If we're interpreting the method, simply call it directly.
9995 if (InterpretationStubToMethodInfo(target) == exactMethToCall)
9997 _ASSERTE(!exactMethToCall->IsILStub());
9998 InterpreterMethodInfo* methInfo = MethodHandleToInterpreterMethInfoPtr(CORINFO_METHOD_HANDLE(exactMethToCall));
9999 _ASSERTE(methInfo != NULL);
10000 #if INTERP_ILCYCLE_PROFILE
10001 bool b = CycleTimer::GetThreadCyclesS(&startCycles); _ASSERTE(b);
10002 #endif // INTERP_ILCYCLE_PROFILE
10003 retVals[0] = InterpretMethodBody(methInfo, true, reinterpret_cast<BYTE*>(args), NULL);
10004 pCscd = NULL; // Nothing to cache.
10008 MetaSig msig(exactMethToCall);
10009 // We've already resolved the virtual call target above, so there is no need to do it again.
10010 MethodDescCallSite mdcs(exactMethToCall, &msig, target);
10011 #if INTERP_ILCYCLE_PROFILE
10012 bool b = CycleTimer::GetThreadCyclesS(&startCycles); _ASSERTE(b);
10013 #endif // INTERP_ILCYCLE_PROFILE
10015 #if defined(UNIX_AMD64_ABI) || defined(TARGET_RISCV64)
10016 mdcs.CallTargetWorker(args, retVals, HasTwoSlotBuf ? 16: 8);
10018 mdcs.CallTargetWorker(args, retVals, 8);
10023 // We will do a check at the end to determine whether to cache pCscd, to set
10024 // to NULL here to make sure we don't.
10029 // For now, we won't cache virtual calls to virtual methods.
10030 // TODO: fix this somehow.
10031 if (virtualCall && (callInfoPtr->methodFlags & CORINFO_FLG_VIRTUAL)) doNotCache = true;
10033 if (s_InterpreterUseCaching && !doNotCache)
10035 // We will add this to the cache later; the locking provokes a GC,
10036 // and "retVal" is vulnerable.
10037 pCscd = new CallSiteCacheData(exactMethToCall, sigInfo);
10041 #if INTERP_ILCYCLE_PROFILE
10042 unsigned __int64 endCycles;
10043 bool b = CycleTimer::GetThreadCyclesS(&endCycles); _ASSERTE(b);
10044 m_exemptCycles += (endCycles - startCycles);
10045 #endif // INTERP_ILCYCLE_PROFILE
10047 // retVal is now vulnerable.
10050 // Some managed methods, believe it or not, can push capital-F Frames on the Frame chain.
10051 // If this happens, executing the EX_CATCH below will pop it, which is bad.
10052 // So detect that case, pop the explicitly-pushed frame, and push it again after the EX_CATCH.
10053 // (Asserting that there is only 1 such frame!)
10054 if (thr->GetFrame() != topFrameBefore)
10056 ilPushedFrame = thr->GetFrame();
10057 if (ilPushedFrame != NULL)
10059 ilPushedFrame->Pop(thr);
10060 if (thr->GetFrame() != topFrameBefore)
10062 // This wasn't an IL-pushed frame, so restore.
10063 ilPushedFrame->Push(thr);
10064 ilPushedFrame = NULL;
10070 // retVal is still vulnerable.
10075 // At this point, the call has happened successfully. We can delete the arguments from the operand stack.
10076 m_curStackHt -= totalArgsOnILStack;
10077 // We've already checked that "largeStructSpaceToPop
10078 LargeStructOperandStackPop(largeStructSpaceToPop, NULL);
10080 if (size_t(m_callThisArg) == 0x1)
10082 _ASSERTE_MSG(sigInfo.retType == CORINFO_TYPE_VOID, "Constructor for var-sized object becomes factory method that returns result.");
10083 OpStackSet<Object*>(m_curStackHt, reinterpret_cast<Object*>(retVals[0]));
10084 OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_CLASS));
10087 else if (sigInfo.retType != CORINFO_TYPE_VOID)
10089 switch (sigInfo.retType)
10091 case CORINFO_TYPE_BOOL:
10092 case CORINFO_TYPE_BYTE:
10093 OpStackSet<INT32>(m_curStackHt, static_cast<INT8>(retVals[0]));
10095 case CORINFO_TYPE_UBYTE:
10096 OpStackSet<UINT32>(m_curStackHt, static_cast<UINT8>(retVals[0]));
10098 case CORINFO_TYPE_SHORT:
10099 OpStackSet<INT32>(m_curStackHt, static_cast<INT16>(retVals[0]));
10101 case CORINFO_TYPE_USHORT:
10102 case CORINFO_TYPE_CHAR:
10103 OpStackSet<UINT32>(m_curStackHt, static_cast<UINT16>(retVals[0]));
10105 case CORINFO_TYPE_INT:
10106 case CORINFO_TYPE_UINT:
10107 case CORINFO_TYPE_FLOAT:
10108 OpStackSet<INT32>(m_curStackHt, static_cast<INT32>(retVals[0]));
10110 case CORINFO_TYPE_LONG:
10111 case CORINFO_TYPE_ULONG:
10112 case CORINFO_TYPE_DOUBLE:
10113 OpStackSet<INT64>(m_curStackHt, static_cast<INT64>(retVals[0]));
10115 case CORINFO_TYPE_NATIVEINT:
10116 case CORINFO_TYPE_NATIVEUINT:
10117 case CORINFO_TYPE_PTR:
10118 OpStackSet<NativeInt>(m_curStackHt, static_cast<NativeInt>(retVals[0]));
10120 case CORINFO_TYPE_CLASS:
10121 OpStackSet<Object*>(m_curStackHt, reinterpret_cast<Object*>(retVals[0]));
10123 case CORINFO_TYPE_BYREF:
10124 OpStackSet<void*>(m_curStackHt, reinterpret_cast<void*>(retVals[0]));
10126 case CORINFO_TYPE_VALUECLASS:
10127 case CORINFO_TYPE_REFANY:
10129 // We must be careful here to write the value, the type, and update the stack height in one
10130 // sequence that has no COOP->PREEMP transitions in it, so no GC's happen until the value
10131 // is protected by being fully "on" the operandStack.
10132 #if defined(HOST_ARM)
10133 // Is the return type an HFA?
10134 if (HFAReturnArgSlots > 0)
10136 ARG_SLOT* hfaRetBuff = args - HFAReturnArgSlots;
10137 if (retTypeIt.IsLargeStruct(&m_interpCeeInfo))
10139 void* dst = LargeStructOperandStackPush(retTypeSz);
10140 memcpy(dst, hfaRetBuff, retTypeSz);
10141 OpStackSet<void*>(m_curStackHt, dst);
10145 memcpy(OpStackGetAddr<UINT64>(m_curStackHt), hfaRetBuff, retTypeSz);
10149 #endif // defined(HOST_ARM)
10150 if (pLargeStructRetVal != NULL)
10152 _ASSERTE(hasRetBuffArg);
10153 void* dst = LargeStructOperandStackPush(retTypeSz);
10154 CopyValueClassUnchecked(dst, pLargeStructRetVal, GetMethodTableFromClsHnd(retTypeClsHnd));
10155 OpStackSet<void*>(m_curStackHt, dst);
10157 else if (hasRetBuffArg)
10159 OpStackSet<INT64>(m_curStackHt, GetSmallStructValue(&smallStructRetVal, retTypeSz));
10161 #if defined(UNIX_AMD64_ABI) || defined(TARGET_RISCV64)
10162 else if (HasTwoSlotBuf)
10164 void* dst = LargeStructOperandStackPush(16);
10165 CopyValueClassUnchecked(dst, retVals, GetMethodTableFromClsHnd(retTypeClsHnd));
10166 OpStackSet<void*>(m_curStackHt, dst);
10171 OpStackSet<UINT64>(m_curStackHt, retVals[0]);
10173 // We already created this interpreter type, so use it.
10174 OpStackTypeSet(m_curStackHt, retTypeIt.StackNormalize());
10178 // In the value-class case, the call might have used a ret buff, which we would have registered for GC scanning.
10179 // Make sure it's unregistered.
10180 m_structRetValITPtr = NULL;
10184 NYI_INTERP("Unhandled return type");
10187 _ASSERTE_MSG(m_structRetValITPtr == NULL, "Invariant.");
10189 // The valueclass case is handled fully in the switch above.
10190 if (sigInfo.retType != CORINFO_TYPE_VALUECLASS &&
10191 sigInfo.retType != CORINFO_TYPE_REFANY)
10193 OpStackTypeSet(m_curStackHt, InterpreterType(sigInfo.retType).StackNormalize());
10199 // Originally, this assertion was in the ValueClass case above, but it does a COOP->PREEMP
10200 // transition, and therefore causes a GC, and we're GCX_FORBIDden from doing a GC while retVal
10201 // is vulnerable. So, for completeness, do it here.
10202 _ASSERTE(sigInfo.retType != CORINFO_TYPE_VALUECLASS || retTypeIt == InterpreterType(&m_interpCeeInfo, retTypeClsHnd));
10204 // If we created a cached call site, cache it now (when it's safe to take a GC).
10205 if (pCscd != NULL && !doNotCache)
10207 CacheCallInfo(iloffset, pCscd);
10210 m_callThisArg = NULL;
10212 // If the call we just made pushed a Frame, we popped it above, so re-push it.
10213 if (ilPushedFrame != NULL) ilPushedFrame->Push();
10216 #include "metadata.h"
// Implements the CALLI IL opcode: pops a function pointer (pushed last) plus the
// signature's arguments from the operand stack, resolves the stand-alone sig token,
// marshals the arguments into ARG_SLOTs, makes the indirect call, and pushes any
// return value back onto the operand stack.
// NOTE(review): this chunk is an elided rendering of the file — closing braces,
// break statements, and some #else/#endif lines are not visible here.
10218 void Interpreter::CallI()
10220 #if INTERP_DYNAMIC_CONTRACTS
10227 // Dynamic contract occupies too much stack.
10228 STATIC_CONTRACT_THROWS;
10229 STATIC_CONTRACT_GC_TRIGGERS;
10230 STATIC_CONTRACT_MODE_COOPERATIVE;
10234 InterlockedIncrement(&s_totalInterpCalls);
10235 #endif // INTERP_TRACING
// The calli's operand is a 4-byte stand-alone signature token following the opcode byte.
10237 unsigned tok = getU4LittleEndian(m_ILCodePtr + sizeof(BYTE));
10239 CORINFO_SIG_INFO sigInfo;
10243 m_interpCeeInfo.findSig(m_methInfo->m_module, tok, GetPreciseGenericsContext(), &sigInfo);
10246 // I'm assuming that a calli can't depend on the generics context, so the simple form of type
10247 // context should suffice?
10248 MethodDesc* pMD = reinterpret_cast<MethodDesc*>(m_methInfo->m_method);
10249 SigTypeContext sigTypeCtxt(pMD);
10250 MetaSig mSig(sigInfo.pSig, sigInfo.cbSig, GetModule(sigInfo.scope), &sigTypeCtxt);
10252 unsigned totalSigArgs = sigInfo.totalILArgs();
10254 // Note that "totalNativeArgs()" includes space for ret buff arg.
// +1 reserves a slot beyond the IL args (ret-buff / vararg cookie use below).
10255 unsigned nSlots = totalSigArgs + 1;
10256 if ((sigInfo.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
10261 // Make sure that the operand stack has the required number of arguments.
10262 // (Note that this is IL args, not native.)
10265 // The total number of arguments on the IL stack. Initially we assume that all the IL arguments
10266 // the callee expects are on the stack, but may be adjusted downwards if the "this" argument
10267 // is provided by an allocation (the call is to a constructor).
10268 unsigned totalArgsOnILStack = totalSigArgs;
// Small calls use stack-local slots; larger ones _alloca. Either way the
// storage lives for the duration of this frame only.
10270 const unsigned LOCAL_ARG_SLOTS = 8;
10271 ARG_SLOT localArgs[LOCAL_ARG_SLOTS];
10272 InterpreterType localArgTypes[LOCAL_ARG_SLOTS];
10275 InterpreterType* argTypes;
10276 if (nSlots <= LOCAL_ARG_SLOTS)
10278 args = &localArgs[0];
10279 argTypes = &localArgTypes[0];
10283 args = (ARG_SLOT*)_alloca(nSlots * sizeof(ARG_SLOT));
10284 argTypes = (InterpreterType*)_alloca(nSlots * sizeof(InterpreterType));
10286 // Make sure that we don't scan any of these until we overwrite them with
10287 // the real types of the arguments.
10288 InterpreterType undefIt(CORINFO_TYPE_UNDEF);
10289 for (unsigned i = 0; i < nSlots; i++)
10291 argTypes[i] = undefIt;
10294 // GC-protect the argument array (as byrefs).
10296 m_argsSize = nSlots;
10297 m_argTypes = argTypes;
10299 // This is the index into the "args" array (where we copy the value to).
10300 int curArgSlot = 0;
10302 // The operand stack index of the first IL argument.
10303 unsigned totalArgPositions = totalArgsOnILStack + 1; // + 1 for the ftn argument.
10304 _ASSERTE(m_curStackHt >= totalArgPositions);
10305 int argsBase = m_curStackHt - totalArgPositions;
10307 // Current on-stack argument index.
// The "this" pointer (if present) is the first IL argument.
10310 if (sigInfo.hasThis())
10312 args[curArgSlot] = PtrToArgSlot(OpStackGet<void*>(argsBase + arg));
10313 argTypes[curArgSlot] = OpStackTypeGet(argsBase + arg);
10314 // AV -> NullRef translation is NYI for the interpreter,
10315 // so we should manually check and throw the correct exception.
10316 ThrowOnInvalidPointer((void*)args[curArgSlot]);
10321 // This is the argument slot that will be used to hold the return value.
10322 ARG_SLOT retVal = 0;
10324 // If the return type is a structure, then these will be initialized.
10325 CORINFO_CLASS_HANDLE retTypeClsHnd = NULL;
10326 InterpreterType retTypeIt;
10327 size_t retTypeSz = 0;
10329 // If non-null, space allocated to hold a large struct return value. Should be deleted later.
10330 // (I could probably optimize this pop all the arguments first, then allocate space for the return value
10331 // on the large structure operand stack, and pass a pointer directly to that space, avoiding the extra
10332 // copy we have below. But this seemed more expedient, and this should be a pretty rare case.)
10333 BYTE* pLargeStructRetVal = NULL;
10335 // If there's a "GetFlag<Flag_hasRetBuffArg>()" struct return value, it will be stored in this variable if it fits,
10336 // otherwise, we'll dynamically allocate memory for it.
10337 ARG_SLOT smallStructRetVal = 0;
10339 // We should have no return buffer temp space registered here...unless this is a constructor, in which
10340 // case it will return void. In particular, if the return type VALUE_CLASS, then this should be NULL.
10341 _ASSERTE_MSG(sigInfo.retType == CORINFO_TYPE_VOID || m_structRetValITPtr == NULL, "Invariant.");
10343 // Is it the return value a struct with a ret buff?
10344 bool hasRetBuffArg = false;
// Value-class returns: decide (per-ABI) whether a return buffer is needed and
// set one up, registering it for GC scanning via m_structRetValTempSpace.
10345 if (sigInfo.retType == CORINFO_TYPE_VALUECLASS)
10347 retTypeClsHnd = sigInfo.retTypeClass;
10348 retTypeIt = InterpreterType(&m_interpCeeInfo, retTypeClsHnd);
10349 retTypeSz = retTypeIt.Size(&m_interpCeeInfo);
10351 #if defined(UNIX_AMD64_ABI)
10353 #elif defined(HOST_AMD64)
10354 // TODO: Investigate why HasRetBuffArg can't be used. pMD is a hacked up MD for the
10355 // calli because it belongs to the current method. Doing what the JIT does.
// A ret buff is needed when the struct is larger than a pointer or its size is
// not a power of two (the "(sz & (sz-1)) != 0" test).
10356 hasRetBuffArg = (retTypeSz > sizeof(void*)) || ((retTypeSz & (retTypeSz - 1)) != 0);
10358 hasRetBuffArg = !!pMD->HasRetBuffArg();
10362 if (retTypeIt.IsLargeStruct(&m_interpCeeInfo))
10364 size_t retBuffSize = retTypeSz;
10365 // If the target architecture can sometimes return a struct in several registers,
10366 // MethodDescCallSite will reserve a return value array big enough to hold the maximum.
10367 // It will then copy *all* of this into the return buffer area we allocate. So make sure
10368 // we allocate at least that much.
10369 #ifdef ENREGISTERED_RETURNTYPE_MAXSIZE
10370 retBuffSize = max(retTypeSz, ENREGISTERED_RETURNTYPE_MAXSIZE);
10371 #endif // ENREGISTERED_RETURNTYPE_MAXSIZE
10372 pLargeStructRetVal = (BYTE*)_alloca(retBuffSize);
10374 // Clear this in case a GC happens.
10375 for (unsigned i = 0; i < retTypeSz; i++)
10377 pLargeStructRetVal[i] = 0;
10380 // Register this as location needing GC.
10381 m_structRetValTempSpace = pLargeStructRetVal;
10383 // Set it as the return buffer.
10384 args[curArgSlot] = PtrToArgSlot(pLargeStructRetVal);
10388 // Clear this in case a GC happens.
10389 smallStructRetVal = 0;
10391 // Register this as location needing GC.
10392 m_structRetValTempSpace = &smallStructRetVal;
10394 // Set it as the return buffer.
10395 args[curArgSlot] = PtrToArgSlot(&smallStructRetVal);
10397 m_structRetValITPtr = &retTypeIt;
10398 argTypes[curArgSlot] = InterpreterType(CORINFO_TYPE_NATIVEINT);
// Vararg calls get a VASigCookie handle as a hidden argument.
10403 if ((sigInfo.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
10405 Module* module = GetModule(sigInfo.scope);
10406 CORINFO_VARARGS_HANDLE handle = CORINFO_VARARGS_HANDLE(module->GetVASigCookie(Signature(sigInfo.pSig, sigInfo.cbSig)));
10407 args[curArgSlot] = PtrToArgSlot(handle);
10408 argTypes[curArgSlot] = InterpreterType(CORINFO_TYPE_NATIVEINT);
10412 // Now we do the non-this arguments.
10413 size_t largeStructSpaceToPop = 0;
// Copy each remaining IL argument into its ARG_SLOT, widening by size.
// (The switch over the argument size is elided in this view.)
10414 for (; arg < totalArgsOnILStack; arg++)
10416 InterpreterType argIt = OpStackTypeGet(argsBase + arg);
10417 size_t sz = OpStackTypeGet(argsBase + arg).Size(&m_interpCeeInfo);
10421 args[curArgSlot] = OpStackGet<INT8>(argsBase + arg);
10424 args[curArgSlot] = OpStackGet<INT16>(argsBase + arg);
10427 args[curArgSlot] = OpStackGet<INT32>(argsBase + arg);
// Large structs are passed by pointer; account for the large-struct stack
// space they occupy unless they live in the local area.
10433 void* srcPtr = OpStackGet<void*>(argsBase + arg);
10434 args[curArgSlot] = PtrToArgSlot(srcPtr);
10435 if (!IsInLargeStructLocalArea(srcPtr))
10437 largeStructSpaceToPop += sz;
10442 args[curArgSlot] = OpStackGet<INT64>(argsBase + arg);
10446 argTypes[curArgSlot] = argIt;
10450 // Finally, we get the code pointer.
10451 unsigned ftnInd = m_curStackHt - 1;
10453 CorInfoType ftnType = OpStackTypeGet(ftnInd).ToCorInfoType();
10454 _ASSERTE(ftnType == CORINFO_TYPE_NATIVEINT
10455 || ftnType == CORINFO_TYPE_INT
10456 || ftnType == CORINFO_TYPE_LONG);
10459 PCODE ftnPtr = OpStackGet<PCODE>(ftnInd);
10462 MethodDesc* methToCall;
10463 // If we're interpreting the target, simply call it directly.
10464 if ((methToCall = InterpretationStubToMethodInfo((PCODE)ftnPtr)) != NULL)
10466 InterpreterMethodInfo* methInfo = MethodHandleToInterpreterMethInfoPtr(CORINFO_METHOD_HANDLE(methToCall));
10467 _ASSERTE(methInfo != NULL);
10468 #if INTERP_ILCYCLE_PROFILE
10469 bool b = CycleTimer::GetThreadCyclesS(&startCycles); _ASSERTE(b);
10470 #endif // INTERP_ILCYCLE_PROFILE
10471 retVal = InterpretMethodBody(methInfo, true, reinterpret_cast<BYTE*>(args), NULL);
10475 // This is not a great workaround. For the most part, we really don't care what method desc we're using, since
10476 // we're providing the signature and function pointer -- other than that it's well-formed and "activated."
10477 // And also, one more thing: whether it is static or not. Which is actually determined by the signature.
10478 // So we query the signature we have to determine whether we need a static or instance MethodDesc, and then
10479 // use one of the appropriate staticness that happens to be sitting around in global variables. For static
10480 // we use "RuntimeHelpers.PrepareConstrainedRegions", for instance we use the default constructor of "Object."
10481 // TODO: make this cleaner -- maybe invent a couple of empty methods with instructive names, just for this purpose.
10483 if (mSig.HasThis())
10485 pMD = g_pObjectFinalizerMD;
10489 pMD = CoreLibBinder::GetMethod(METHOD__INTERLOCKED__COMPARE_EXCHANGE_OBJECT); // A random static method.
10491 MethodDescCallSite mdcs(pMD, &mSig, ftnPtr);
10493 // If the current method being interpreted is an IL stub, we're calling native code, so
10494 // change the GC mode. (We'll only do this at the call if the calling convention turns out
10495 // to be a managed calling convention.)
10496 MethodDesc* pStubContextMD = reinterpret_cast<MethodDesc*>(m_stubContext);
10497 bool transitionToPreemptive = (pStubContextMD != NULL && !pStubContextMD->IsIL());
10498 mdcs.CallTargetWorker(args, &retVal, sizeof(retVal), transitionToPreemptive);
10500 // TODO The code above triggers assertion at threads.cpp:6861:
10501 // _ASSERTE(thread->PreemptiveGCDisabled()); // Should have been in managed code
10502 // The workaround will likely break more things than what it is fixing:
10503 // just do not make transition to preemptive GC for now.
10504 mdcs.CallTargetWorker(args, &retVal, sizeof(retVal));
10507 // retVal is now vulnerable.
10511 // retVal is still vulnerable.
10516 // At this point, the call has happened successfully. We can delete the arguments from the operand stack.
10517 m_curStackHt -= totalArgPositions;
10519 // We've already checked that "largeStructSpaceToPop
10520 LargeStructOperandStackPop(largeStructSpaceToPop, NULL);
10522 if (size_t(m_callThisArg) == 0x1)
10524 _ASSERTE_MSG(sigInfo.retType == CORINFO_TYPE_VOID, "Constructor for var-sized object becomes factory method that returns result.");
10525 OpStackSet<Object*>(m_curStackHt, reinterpret_cast<Object*>(retVal));
10526 OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_CLASS));
// Push the return value, normalized per its CorInfoType. Narrow integer
// results are sign/zero-extended via the intermediate static_cast.
// (break statements between cases are elided in this view.)
10529 else if (sigInfo.retType != CORINFO_TYPE_VOID)
10531 switch (sigInfo.retType)
10533 case CORINFO_TYPE_BOOL:
10534 case CORINFO_TYPE_BYTE:
10535 OpStackSet<INT32>(m_curStackHt, static_cast<INT8>(retVal));
10537 case CORINFO_TYPE_UBYTE:
10538 OpStackSet<UINT32>(m_curStackHt, static_cast<UINT8>(retVal));
10540 case CORINFO_TYPE_SHORT:
10541 OpStackSet<INT32>(m_curStackHt, static_cast<INT16>(retVal));
10543 case CORINFO_TYPE_USHORT:
10544 case CORINFO_TYPE_CHAR:
10545 OpStackSet<UINT32>(m_curStackHt, static_cast<UINT16>(retVal));
10547 case CORINFO_TYPE_INT:
10548 case CORINFO_TYPE_UINT:
10549 case CORINFO_TYPE_FLOAT:
10550 OpStackSet<INT32>(m_curStackHt, static_cast<INT32>(retVal));
10552 case CORINFO_TYPE_LONG:
10553 case CORINFO_TYPE_ULONG:
10554 case CORINFO_TYPE_DOUBLE:
10555 OpStackSet<INT64>(m_curStackHt, static_cast<INT64>(retVal));
10557 case CORINFO_TYPE_NATIVEINT:
10558 case CORINFO_TYPE_NATIVEUINT:
10559 case CORINFO_TYPE_PTR:
10560 OpStackSet<NativeInt>(m_curStackHt, static_cast<NativeInt>(retVal));
10562 case CORINFO_TYPE_CLASS:
10563 OpStackSet<Object*>(m_curStackHt, reinterpret_cast<Object*>(retVal));
10565 case CORINFO_TYPE_VALUECLASS:
10567 // We must be careful here to write the value, the type, and update the stack height in one
10568 // sequence that has no COOP->PREEMP transitions in it, so no GC's happen until the value
10569 // is protected by being fully "on" the operandStack.
10570 if (pLargeStructRetVal != NULL)
10572 _ASSERTE(hasRetBuffArg);
10573 void* dst = LargeStructOperandStackPush(retTypeSz);
10574 CopyValueClassUnchecked(dst, pLargeStructRetVal, GetMethodTableFromClsHnd(retTypeClsHnd));
10575 OpStackSet<void*>(m_curStackHt, dst);
10577 else if (hasRetBuffArg)
10579 OpStackSet<INT64>(m_curStackHt, GetSmallStructValue(&smallStructRetVal, retTypeSz));
10583 OpStackSet<UINT64>(m_curStackHt, retVal);
10585 // We already created this interpreter type, so use it.
10586 OpStackTypeSet(m_curStackHt, retTypeIt.StackNormalize());
10589 // In the value-class case, the call might have used a ret buff, which we would have registered for GC scanning.
10590 // Make sure it's unregistered.
10591 m_structRetValITPtr = NULL;
10595 NYI_INTERP("Unhandled return type");
10598 _ASSERTE_MSG(m_structRetValITPtr == NULL, "Invariant.");
10600 // The valueclass case is handled fully in the switch above.
10601 if (sigInfo.retType != CORINFO_TYPE_VALUECLASS)
10603 OpStackTypeSet(m_curStackHt, InterpreterType(sigInfo.retType).StackNormalize());
10609 // Originally, this assertion was in the ValueClass case above, but it does a COOP->PREEMP
10610 // transition, and therefore causes a GC, and we're GCX_FORBIDden from doing a GC while retVal
10611 // is vulnerable. So, for completeness, do it here.
10612 _ASSERTE(sigInfo.retType != CORINFO_TYPE_VALUECLASS || retTypeIt == InterpreterType(&m_interpCeeInfo, retTypeClsHnd));
// Returns true iff pMD's IL is a trivial field getter (static: ldsfld/ret;
// instance: [nop] ldarg.0 ldfld ... ret, in either DBG or OPT shape).
// On success *offsetOfLd receives the IL offset of the ldfld instruction,
// so callers can fetch the field token directly.
// NOTE(review): the early "return false;" lines and braces between the
// opcode checks are elided in this rendering.
10618 bool Interpreter::IsDeadSimpleGetter(CEEInfo* info, MethodDesc* pMD, size_t* offsetOfLd)
10626 DWORD flags = pMD->GetAttrs();
10627 CORINFO_METHOD_INFO methInfo;
10630 bool b = info->getMethodInfo(CORINFO_METHOD_HANDLE(pMD), &methInfo, NULL);
10631 if (!b) return false;
10634 // If the method takes a generic type argument, it's not dead simple...
10635 if (methInfo.args.callConv & CORINFO_CALLCONV_PARAMTYPE) return false;
10637 BYTE* codePtr = methInfo.ILCode;
10639 if (flags & CORINFO_FLG_STATIC)
// Static getter must be exactly 6 bytes: ldsfld <token> (5) + ret (1).
10641 if (methInfo.ILCodeSize != 6)
10643 if (*codePtr != CEE_LDSFLD)
10645 _ASSERTE(ILOffsetOfLdSFldInDeadSimpleStaticGetter == 0);
10648 return (*codePtr == CEE_RET);
10652 // We handle two forms, one for DBG IL, and one for OPT IL.
// DBG IL (0xc bytes) has nop/stloc.0/br/ldloc.0 scaffolding; OPT IL is 7 bytes.
10654 if (methInfo.ILCodeSize == 0xc)
10656 else if (methInfo.ILCodeSize != 7)
10661 if (*codePtr != CEE_NOP)
10665 if (*codePtr != CEE_LDARG_0)
10668 if (*codePtr != CEE_LDFLD)
10670 *offsetOfLd = codePtr - methInfo.ILCode;
10671 _ASSERTE((dbg && ILOffsetOfLdFldInDeadSimpleInstanceGetterDbg == *offsetOfLd)
10672 || (!dbg && ILOffsetOfLdFldInDeadSimpleInstanceGetterOpt == *offsetOfLd));
// DBG-only tail: stloc.0 / br 0 / ldloc.0 before the final ret.
10676 if (*codePtr != CEE_STLOC_0)
10679 if (*codePtr != CEE_BR)
10681 if (getU4LittleEndian(codePtr + 1) != 0)
10684 if (*codePtr != CEE_LDLOC_0)
10687 return (*codePtr == CEE_RET);
// Intrinsic for String.Length: replaces the string reference on top of the
// operand stack with its INT32 length. Throws NullReference for a null
// receiver; raises a verification error if the operand is not a string.
10691 void Interpreter::DoStringLength()
10699 _ASSERTE(m_curStackHt > 0);
10700 unsigned ind = m_curStackHt - 1;
10703 CorInfoType stringCIT = OpStackTypeGet(ind).ToCorInfoType();
10704 if (stringCIT != CORINFO_TYPE_CLASS)
10706 VerificationError("StringLength called on non-string.");
10710 Object* obj = OpStackGet<Object*>(ind);
// (The null check guarding this throw is elided in this rendering.)
10714 ThrowNullPointerException();
10718 if (obj->GetMethodTable() != g_pStringClass)
10720 VerificationError("StringLength called on non-string.");
10724 StringObject* str = reinterpret_cast<StringObject*>(obj);
10725 INT32 len = str->GetStringLength();
// Overwrite the receiver slot in place; stack height is unchanged.
10726 OpStackSet<INT32>(ind, len);
10727 OpStackTypeSet(ind, InterpreterType(CORINFO_TYPE_INT));
// Intrinsic for String.get_Chars(int): pops string and index, pushes the
// character (as INT). Throws NullReference for a null string and
// IndexOutOfRange for a negative or too-large index.
10730 void Interpreter::DoStringGetChar()
10738 _ASSERTE(m_curStackHt >= 2);
// Stack layout: [... string, index] — string below the index.
10739 unsigned strInd = m_curStackHt - 2;
10740 unsigned indexInd = strInd + 1;
10743 CorInfoType stringCIT = OpStackTypeGet(strInd).ToCorInfoType();
10744 if (stringCIT != CORINFO_TYPE_CLASS)
10746 VerificationError("StringGetChar called on non-string.");
10750 Object* obj = OpStackGet<Object*>(strInd);
// (The null check guarding this throw is elided in this rendering.)
10754 ThrowNullPointerException();
10758 if (obj->GetMethodTable() != g_pStringClass)
10760 VerificationError("StringGetChar called on non-string.");
10764 StringObject* str = reinterpret_cast<StringObject*>(obj);
10767 CorInfoType indexCIT = OpStackTypeGet(indexInd).ToCorInfoType();
10768 if (indexCIT != CORINFO_TYPE_INT)
10770 VerificationError("StringGetChar needs integer index.");
10774 INT32 ind = OpStackGet<INT32>(indexInd);
// (Negative-index check guarding this throw is elided in this rendering.)
10776 ThrowArrayBoundsException();
10777 UINT32 uind = static_cast<UINT32>(ind);
10778 if (uind >= str->GetStringLength())
10779 ThrowArrayBoundsException();
10782 GCX_FORBID(); // str is vulnerable.
// Read the UTF-16 code unit directly from the string buffer.
10783 UINT16* dataPtr = reinterpret_cast<UINT16*>(reinterpret_cast<INT8*>(str) + StringObject::GetBufferOffset());
10784 UINT32 filledChar = dataPtr[ind];
10785 OpStackSet<UINT32>(strInd, filledChar);
10786 OpStackTypeSet(strInd, InterpreterType(CORINFO_TYPE_INT));
// Net effect: two operands popped, one pushed.
10787 m_curStackHt = indexInd;
// Intrinsic for Type.GetTypeFromHandle: validates that the top of stack holds
// a RuntimeType object and retypes the slot as CORINFO_TYPE_CLASS in place.
10790 void Interpreter::DoGetTypeFromHandle()
10798 _ASSERTE(m_curStackHt > 0);
10799 unsigned ind = m_curStackHt - 1;
10802 CorInfoType handleCIT = OpStackTypeGet(ind).ToCorInfoType();
10803 if (handleCIT != CORINFO_TYPE_VALUECLASS && handleCIT != CORINFO_TYPE_CLASS)
10805 VerificationError("HandleGetTypeFromHandle called on non-RuntimeTypeHandle/non-RuntimeType.");
10807 Object* obj = OpStackGet<Object*>(ind);
10808 if (obj->GetMethodTable() != g_pRuntimeTypeClass)
10810 VerificationError("HandleGetTypeFromHandle called on non-RuntimeTypeHandle/non-RuntimeType.");
// The object itself is already the RuntimeType; only the slot's type changes.
10814 OpStackTypeSet(ind, InterpreterType(CORINFO_TYPE_CLASS));
// Intrinsic for System.Numerics.Vector.IsHardwareAccelerated; the pushed
// result is elided in this rendering — only the tracing path is visible.
10817 void Interpreter::DoSIMDHwAccelerated()
10826 if (s_TraceInterpreterILFlag.val(CLRConfig::INTERNAL_TraceInterpreterIL))
10828 fprintf(GetLogFile(), " System.Numerics.Vector.IsHardwareAccelerated -- intrinsic\n");
10830 #endif // INTERP_TRACING
// Intrinsic for Isa.IsSupported-style queries: pushes false (as INT), since
// the interpreter does not use hardware-intrinsic code paths.
// (The stack-height increment is elided in this rendering.)
10836 void Interpreter::DoGetIsSupported()
10844 OpStackSet<BOOL>(m_curStackHt, false);
10845 OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_INT));
// Handles the "constrained." prefix: resolves the constraint type token that
// follows the prefix and sets m_constrainedFlag so the subsequent call opcode
// applies it.
10849 void Interpreter::RecordConstrainedCall()
10858 InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_Constrained]);
10859 #endif // INTERP_TRACING
// Token is at +2: one byte for each of the two prefix opcode bytes.
10863 ResolveToken(&m_constrainedResolvedToken, getU4LittleEndian(m_ILCodePtr + 2), CORINFO_TOKENKIND_Constrained InterpTracingArg(RTK_Constrained));
10866 m_constrainedFlag = true;
// Grows the large-struct operand stack (if needed) so that "sz" more bytes
// can be pushed. Growth policy: at least double, and at least 4*sz beyond the
// current allocation. Existing contents are copied to the new buffer.
10871 void Interpreter::LargeStructOperandStackEnsureCanPush(size_t sz)
10873 size_t remaining = m_largeStructOperandStackAllocSize - m_largeStructOperandStackHt;
10874 if (remaining < sz)
10876 size_t newAllocSize = max(m_largeStructOperandStackAllocSize + sz * 4, m_largeStructOperandStackAllocSize * 2);
10877 BYTE* newStack = new BYTE[newAllocSize];
10878 m_largeStructOperandStackAllocSize = newAllocSize;
10879 if (m_largeStructOperandStack != NULL)
// Only the live portion (current height) needs copying.
10881 memcpy(newStack, m_largeStructOperandStack, m_largeStructOperandStackHt);
10882 delete[] m_largeStructOperandStack;
10884 m_largeStructOperandStack = newStack;
// Reserves "sz" bytes on the large-struct operand stack and returns a pointer
// to the reserved space. (The return statement is elided in this rendering.)
10888 void* Interpreter::LargeStructOperandStackPush(size_t sz)
10890 LargeStructOperandStackEnsureCanPush(sz);
10891 _ASSERTE(m_largeStructOperandStackAllocSize >= m_largeStructOperandStackHt + sz);
10892 void* res = &m_largeStructOperandStack[m_largeStructOperandStackHt];
10893 m_largeStructOperandStackHt += sz;
// Releases "sz" bytes from the large-struct operand stack — unless "fromAddr"
// points into the large-struct *local* area, in which case the value never
// occupied stack space and nothing is popped.
10897 void Interpreter::LargeStructOperandStackPop(size_t sz, void* fromAddr)
10899 if (!IsInLargeStructLocalArea(fromAddr))
10901 _ASSERTE(m_largeStructOperandStackHt >= sz);
10902 m_largeStructOperandStackHt -= sz;
// Debug check: recomputes the total size of large-struct operands currently
// on the operand stack (excluding those living in the local area) and
// verifies it matches the tracked large-struct stack height.
10907 bool Interpreter::LargeStructStackHeightIsValid()
10910 for (unsigned k = 0; k < m_curStackHt; k++)
10912 if (OpStackTypeGet(k).IsLargeStruct(&m_interpCeeInfo) && !IsInLargeStructLocalArea(OpStackGet<void*>(k)))
10914 sz2 += OpStackTypeGet(k).Size(&m_interpCeeInfo);
10917 _ASSERTE(sz2 == m_largeStructOperandStackHt);
10918 return sz2 == m_largeStructOperandStackHt;
// Reports an IL verification failure. Currently only asserts in debug builds
// with a prefixed message; per the TODO it should eventually raise a managed
// exception.
10922 void Interpreter::VerificationError(const char* msg)
10924 // TODO: Should raise an exception eventually; for now:
10925 const char* const msgPrefix = "Verification Error: ";
// +1 for the NUL terminator; buffer is stack-allocated.
10926 size_t len = strlen(msgPrefix) + strlen(msg) + 1;
10927 char* msgFinal = (char*)_alloca(len);
10928 strcpy_s(msgFinal, len, msgPrefix);
10929 strcat_s(msgFinal, len, msg);
10930 _ASSERTE_MSG(false, msgFinal);
// Throws the managed DivideByZeroException.
10933 void Interpreter::ThrowDivideByZero()
10941 COMPlusThrow(kDivideByZeroException);
// Throws for system arithmetic errors; deliberately uses OverflowException
// (not ArithmeticException) to match JIT behavior — see comment below.
10944 void Interpreter::ThrowSysArithException()
10952 // According to the ECMA spec, this should be an ArithmeticException; however,
10953 // the JITs throw an OverflowException and consistency is top priority...
10954 COMPlusThrow(kOverflowException);
// Throws the managed NullReferenceException.
10957 void Interpreter::ThrowNullPointerException()
10965 COMPlusThrow(kNullReferenceException);
// Throws the managed OverflowException.
10968 void Interpreter::ThrowOverflowException()
10976 COMPlusThrow(kOverflowException);
// Throws the managed IndexOutOfRangeException.
10979 void Interpreter::ThrowArrayBoundsException()
10987 COMPlusThrow(kIndexOutOfRangeException);
// Throws the managed InvalidCastException.
10990 void Interpreter::ThrowInvalidCastException()
10998 COMPlusThrow(kInvalidCastException);
// Throws the managed StackOverflowException.
11001 void Interpreter::ThrowStackOverflow()
11009 COMPlusThrow(kStackOverflowException);
// IL "rem" helper for single-precision floats: IEEE remainder via fmodf.
11012 float Interpreter::RemFunc(float v1, float v2)
11014 return fmodf(v1, v2);
// IL "rem" helper for double-precision floats: IEEE remainder via fmod.
11017 double Interpreter::RemFunc(double v1, double v2)
11019 return fmod(v1, v2);
11022 // Static members and methods.
// Lazily-allocated map from interpreter-stub address to method handle
// (see GetAddrToMdMap / InterpretationStubToMethodInfo below).
11023 Interpreter::AddrToMDMap* Interpreter::s_addrToMDMap = NULL;
// Monotonic counter used when naming/numbering interpreter stubs.
11025 unsigned Interpreter::s_interpreterStubNum = 0;
11027 // TODO: contracts and synchronization for the AddrToMDMap methods.
11028 // Requires caller to hold "s_interpStubToMDMapLock".
// Lazily allocates and returns the stub-address -> method-handle map.
11029 Interpreter::AddrToMDMap* Interpreter::GetAddrToMdMap()
11038 if (s_addrToMDMap == NULL)
11040 s_addrToMDMap = new AddrToMDMap();
11042 return s_addrToMDMap;
// Records that interpreter stub "addr" corresponds to method "md", so later
// stub-address lookups (InterpretationStubToMethodInfo) can recover the method.
// Asserts that the address has not been registered before.
11045 void Interpreter::RecordInterpreterStubForMethodDesc(CORINFO_METHOD_HANDLE md, void* addr)
11054 CrstHolder ch(&s_interpStubToMDMapLock);
11056 AddrToMDMap* map = Interpreter::GetAddrToMdMap();
11058 CORINFO_METHOD_HANDLE dummy;
11059 _ASSERTE(!map->Lookup(addr, &dummy));
11061 map->AddOrReplace(KeyValuePair<void*,CORINFO_METHOD_HANDLE>(addr, md));
// If "addr" is a registered interpretation stub, returns its MethodDesc;
// otherwise returns NULL. Pure query: never allocates the map.
11064 MethodDesc* Interpreter::InterpretationStubToMethodInfo(PCODE addr)
11072 // This query function will never allocate the table...
11073 if (s_addrToMDMap == NULL)
11076 // Otherwise...if we observe s_addrToMdMap non-null, the lock below must be initialized.
// NOTE(review): the lock acquisition is commented out — lookup proceeds
// without holding s_interpStubToMDMapLock; confirm this is intentional.
11077 // CrstHolder ch(&s_interpStubToMDMapLock);
11079 AddrToMDMap* map = Interpreter::GetAddrToMdMap();
11080 CORINFO_METHOD_HANDLE result = NULL;
// Lookup failure leaves result NULL, which maps to a NULL return.
11081 (void)map->Lookup((void*)addr, &result);
11082 return (MethodDesc*)result;
// Lazily-allocated map from method handle to its InterpreterMethodInfo record.
11085 Interpreter::MethodHandleToInterpMethInfoPtrMap* Interpreter::s_methodHandleToInterpMethInfoPtrMap = NULL;
11087 // Requires caller to hold "s_interpStubToMDMapLock".
// Lazily allocates and returns the method-handle -> InterpreterMethodInfo map.
11088 Interpreter::MethodHandleToInterpMethInfoPtrMap* Interpreter::GetMethodHandleToInterpMethInfoPtrMap()
11097 if (s_methodHandleToInterpMethInfoPtrMap == NULL)
11099 s_methodHandleToInterpMethInfoPtrMap = new MethodHandleToInterpMethInfoPtrMap();
11101 return s_methodHandleToInterpMethInfoPtrMap;
// Registers "methInfo" for method handle "md", handling the race where another
// thread registered one first (in that case the existing record wins and this
// one is discarded — the discard/return lines are elided in this rendering).
11104 InterpreterMethodInfo* Interpreter::RecordInterpreterMethodInfoForMethodHandle(CORINFO_METHOD_HANDLE md, InterpreterMethodInfo* methInfo)
11113 CrstHolder ch(&s_interpStubToMDMapLock);
11115 MethodHandleToInterpMethInfoPtrMap* map = Interpreter::GetMethodHandleToInterpMethInfoPtrMap();
11118 if (map->Lookup(md, &mi))
11120 // If there's already an entry, make sure it was created by another thread -- the same thread shouldn't create two
11122 _ASSERTE_MSG(mi.m_thread != GetThread(), "Two InterpMethInfo's for same meth by same thread.");
11123 // If we were creating an interpreter stub at the same time as another thread, and we lost the race to
11124 // insert it, use the already-existing one, and delete this one.
// Record the info pointer and the registering thread (used by the assert above).
11129 mi.m_info = methInfo;
11131 mi.m_thread = GetThread();
11134 _ASSERTE_MSG(map->LookupPtr(md) == NULL, "Multiple InterpMethInfos for method desc.");
// Returns the InterpreterMethodInfo registered for "md", or a default value if
// none exists. Pure query: never allocates the map.
// (The return statement is elided in this rendering.)
11139 InterpreterMethodInfo* Interpreter::MethodHandleToInterpreterMethInfoPtr(CORINFO_METHOD_HANDLE md)
11146 // This query function will never allocate the table...
11147 if (s_methodHandleToInterpMethInfoPtrMap == NULL)
11150 // Otherwise...if we observe s_addrToMdMap non-null, the lock below must be initialized.
11151 CrstHolder ch(&s_interpStubToMDMapLock);
11153 MethodHandleToInterpMethInfoPtrMap* map = Interpreter::GetMethodHandleToInterpMethInfoPtrMap();
11157 (void)map->Lookup(md, &mi);
11162 #ifndef DACCESS_COMPILE
11164 // Requires that the current thread holds "s_methodCacheLock."
// Returns the ILOffset->Item cache appropriate for this invocation's generics
// context. Non-generic (or statically-known) contexts use m_methodCache
// directly as the cache; dynamic generic contexts go through an outer
// per-context map first. "alloc" controls whether missing caches are created.
// Lazy initialization uses compare-exchange so the field is published at most
// once; the loser of a race deletes its freshly-built cache.
11165 ILOffsetToItemCache* InterpreterMethodInfo::GetCacheForCall(Object* thisArg, void* genericsCtxtArg, bool alloc)
11167 // First, does the current method have dynamic generic information, and, if so,
11169 CORINFO_CONTEXT_HANDLE context = GetPreciseGenericsContext(thisArg, genericsCtxtArg);
11170 if (context == MAKE_METHODCONTEXT(m_method))
11172 // No dynamic generics context information. The caching field in "m_methInfo" is the
11173 // ILoffset->Item cache directly.
11174 // First, ensure that it's allocated.
11175 if (m_methodCache == NULL && alloc)
11177 // Lazy init via compare-exchange.
11178 ILOffsetToItemCache* cache = new ILOffsetToItemCache();
11179 void* prev = InterlockedCompareExchangeT<void*>(&m_methodCache, cache, NULL);
11180 if (prev != NULL) delete cache;
11182 return reinterpret_cast<ILOffsetToItemCache*>(m_methodCache);
11186 // Otherwise, it does have generic info, so find the right cache.
11187 // First ensure that the top-level generics-context --> cache cache exists.
11188 GenericContextToInnerCache* outerCache = reinterpret_cast<GenericContextToInnerCache*>(m_methodCache);
11189 if (outerCache == NULL)
11193 // Lazy init via compare-exchange.
11194 outerCache = new GenericContextToInnerCache();
11195 void* prev = InterlockedCompareExchangeT<void*>(&m_methodCache, outerCache, NULL);
// Lost the race: adopt the winner's outer cache.
// (Deletion of the losing instance is elided in this rendering.)
11199 outerCache = reinterpret_cast<GenericContextToInnerCache*>(prev);
11207 // Does the outerCache already have an entry for this instantiation?
11208 ILOffsetToItemCache* innerCache = NULL;
11209 if (!outerCache->GetItem(size_t(context), innerCache) && alloc)
11211 innerCache = new ILOffsetToItemCache();
11212 outerCache->AddItem(size_t(context), innerCache);
// Record the resolved call-site information for the call at "iloffset" in the
// per-execution cache, taking the global method-cache lock.
11218 void Interpreter::CacheCallInfo(unsigned iloffset, CallSiteCacheData* callInfo)
11220     CrstHolder ch(&s_methodCacheLock);
11222     ILOffsetToItemCache* cache = GetThisExecCache(true);
11223     // Insert, but if the item is already there, delete "mdcs" (which would have been owned
11225     // (Duplicate entries can happen because of recursive calls -- F makes a recursive call to F, and when it
11226     // returns wants to cache it, but the recursive call makes a further recursive call, and caches that, so the
11227     // first call finds the iloffset already occupied.)
11228     if (!cache->AddItem(iloffset, CachedItem(callInfo)))
// Return the cached call-site data for the call at "iloffset", or NULL when the
// cache does not exist or has no entry for that offset.
11234 CallSiteCacheData* Interpreter::GetCachedCallInfo(unsigned iloffset)
11236     CrstHolder ch(&s_methodCacheLock);
      // "false": a lookup never allocates the cache.
11238     ILOffsetToItemCache* cache = GetThisExecCache(false);
11239     if (cache == NULL) return NULL;
11242     if (cache->GetItem(iloffset, item))
11244         _ASSERTE_MSG(item.m_tag == CIK_CallSite, "Wrong cached item tag.");
11245         return item.m_value.m_callSiteInfo;
// Cache the resolved FieldDesc for the instance-field access at "iloffset".
11253 void Interpreter::CacheInstanceField(unsigned iloffset, FieldDesc* fld)
11255     CrstHolder ch(&s_methodCacheLock);
11257     ILOffsetToItemCache* cache = GetThisExecCache(true);
11258     cache->AddItem(iloffset, CachedItem(fld));
// Return the cached FieldDesc for the instance-field access at "iloffset",
// or NULL when the cache is absent or has no entry.
11261 FieldDesc* Interpreter::GetCachedInstanceField(unsigned iloffset)
11263     CrstHolder ch(&s_methodCacheLock);
11265     ILOffsetToItemCache* cache = GetThisExecCache(false);
11266     if (cache == NULL) return NULL;
11269     if (cache->GetItem(iloffset, item))
11271         _ASSERTE_MSG(item.m_tag == CIK_InstanceField, "Wrong cached item tag.");
11272         return item.m_value.m_instanceField;
// Cache the static-field entry for the access at "iloffset". On a lost race the
// cache keeps the first entry and "pEntry" is disposed (on an elided line).
11280 void Interpreter::CacheStaticField(unsigned iloffset, StaticFieldCacheEntry* pEntry)
11282     CrstHolder ch(&s_methodCacheLock);
11284     ILOffsetToItemCache* cache = GetThisExecCache(true);
11285     // If (say) a concurrent thread has beaten us to this, delete the entry (which otherwise would have
11286     // been owned by the cache).
11287     if (!cache->AddItem(iloffset, CachedItem(pEntry)))
// Return the cached static-field entry for the access at "iloffset", or NULL if
// not cached. (The NULL-cache early-return is on an elided line.)
11293 StaticFieldCacheEntry* Interpreter::GetCachedStaticField(unsigned iloffset)
11295     CrstHolder ch(&s_methodCacheLock);
11297     ILOffsetToItemCache* cache = GetThisExecCache(false);
11303     if (cache->GetItem(iloffset, item))
11305         _ASSERTE_MSG(item.m_tag == CIK_StaticField, "Wrong cached item tag.");
11306         return item.m_value.m_staticFieldAddr;
// Cache the resolved class handle for the token at "iloffset".
11315 void Interpreter::CacheClassHandle(unsigned iloffset, CORINFO_CLASS_HANDLE clsHnd)
11317     CrstHolder ch(&s_methodCacheLock);
11319     ILOffsetToItemCache* cache = GetThisExecCache(true);
11320     cache->AddItem(iloffset, CachedItem(clsHnd));
// Return the cached class handle for the token at "iloffset", or NULL if not
// cached. (The NULL-cache early-return is on an elided line.)
11323 CORINFO_CLASS_HANDLE Interpreter::GetCachedClassHandle(unsigned iloffset)
11325     CrstHolder ch(&s_methodCacheLock);
11327     ILOffsetToItemCache* cache = GetThisExecCache(false);
11333     if (cache->GetItem(iloffset, item))
11335         _ASSERTE_MSG(item.m_tag == CIK_ClassHandle, "Wrong cached item tag.");
11336         return item.m_value.m_clsHnd;
11343 #endif // DACCESS_COMPILE
11347 // These are not debug-only.
// Configuration knobs controlling which methods are interpreted and how.
11348 ConfigMethodSet Interpreter::s_InterpretMeths;
11349 ConfigMethodSet Interpreter::s_InterpretMethsExclude;
11350 ConfigDWORD Interpreter::s_InterpretMethHashMin;
11351 ConfigDWORD Interpreter::s_InterpretMethHashMax;
11352 ConfigDWORD Interpreter::s_InterpreterJITThreshold;
11353 ConfigDWORD Interpreter::s_InterpreterDoLoopMethodsFlag;
11354 ConfigDWORD Interpreter::s_InterpreterUseCachingFlag;
11355 ConfigDWORD Interpreter::s_InterpreterLooseRulesFlag;
// Cached boolean values of the flags above.
11357 bool Interpreter::s_InterpreterDoLoopMethods;
11358 bool Interpreter::s_InterpreterUseCaching;
11359 bool Interpreter::s_InterpreterLooseRules;
// Locks guarding, respectively, the per-method item caches and the stub<->MethodDesc map.
11361 CrstExplicitInit Interpreter::s_methodCacheLock;
11362 CrstExplicitInit Interpreter::s_interpStubToMDMapLock;
11364 // The static variables below are debug-only.
// Global invocation/call counters, updated during interpretation.
11366 LONG Interpreter::s_totalInvocations = 0;
11367 LONG Interpreter::s_totalInterpCalls = 0;
11368 LONG Interpreter::s_totalInterpCallsToGetters = 0;
11369 LONG Interpreter::s_totalInterpCallsToDeadSimpleGetters = 0;
11370 LONG Interpreter::s_totalInterpCallsToDeadSimpleGettersShortCircuited = 0;
11371 LONG Interpreter::s_totalInterpCallsToSetters = 0;
11372 LONG Interpreter::s_totalInterpCallsToIntrinsics = 0;
11373 LONG Interpreter::s_totalInterpCallsToIntrinsicsUnhandled = 0;
// Token-resolution statistics, indexed by ResolveTokenKind (RTK_*).
11375 LONG Interpreter::s_tokenResolutionOpportunities[RTK_Count] = {0, };
11376 LONG Interpreter::s_tokenResolutionCalls[RTK_Count] = {0, };
// Printable names for each RTK_* kind (initializer list elided in this fragment).
11377 const char* Interpreter::s_tokenResolutionKindNames[RTK_Count] =
// Tracing configuration (INTERP_TRACING); log destination plus per-feature flags.
11409 FILE* Interpreter::s_InterpreterLogFile = NULL;
11410 ConfigDWORD Interpreter::s_DumpInterpreterStubsFlag;
11411 ConfigDWORD Interpreter::s_TraceInterpreterEntriesFlag;
11412 ConfigDWORD Interpreter::s_TraceInterpreterILFlag;
11413 ConfigDWORD Interpreter::s_TraceInterpreterOstackFlag;
11414 ConfigDWORD Interpreter::s_TraceInterpreterVerboseFlag;
11415 ConfigDWORD Interpreter::s_TraceInterpreterJITTransitionFlag;
11416 ConfigDWORD Interpreter::s_InterpreterStubMin;
11417 ConfigDWORD Interpreter::s_InterpreterStubMax;
11418 #endif // INTERP_TRACING
11420 #if INTERP_ILINSTR_PROFILE
// Per-opcode profiling tables: 256 one-byte opcodes plus 0x100-biased two-byte opcodes.
11421 unsigned short Interpreter::s_ILInstrCategories[512];
11423 int Interpreter::s_ILInstrExecs[256] = {0, };
11424 int Interpreter::s_ILInstrExecsByCategory[512] = {0, };
11425 int Interpreter::s_ILInstr2ByteExecs[Interpreter::CountIlInstr2Byte] = {0, };
11426 #if INTERP_ILCYCLE_PROFILE
11427 unsigned __int64 Interpreter::s_ILInstrCycles[512] = { 0, };
11428 unsigned __int64 Interpreter::s_ILInstrCyclesByCategory[512] = { 0, };
// Aggregate cycle/count totals for call instructions.
11430 unsigned __int64 Interpreter::s_callCycles = 0;
11431 unsigned Interpreter::s_calls = 0;
// Charge the cycles elapsed since the previous call to the previously executing
// IL instruction (minus any exempted cycles), then start timing the instruction
// at the current IL code pointer. Used only under INTERP_ILCYCLE_PROFILE.
11433 void Interpreter::UpdateCycleCount()
11435     unsigned __int64 endCycles;
11436     bool b = CycleTimer::GetThreadCyclesS(&endCycles); _ASSERTE(b);
      // CEE_COUNT acts as the "no instruction being timed" sentinel.
11437     if (m_instr != CEE_COUNT)
11439         unsigned __int64 delta = (endCycles - m_startCycles);
          // Subtract cycles explicitly exempted from attribution (e.g. time in callees).
11440         if (m_exemptCycles > 0)
11442             delta = delta - m_exemptCycles;
11443             m_exemptCycles = 0;
11445         CycleTimer::InterlockedAddU64(&s_ILInstrCycles[m_instr], delta);
11447     // In any case, set the instruction to the current one, and record its start time.
11448     m_instr = (*m_ILCodePtr);
      // Two-byte opcodes (0xFE prefix) are biased by 0x100 into the 512-entry tables.
11449     if (m_instr == CEE_PREFIX1) {
11450         m_instr = *(m_ILCodePtr + 1) + 0x100;
11452     b = CycleTimer::GetThreadCyclesS(&m_startCycles); _ASSERTE(b);
11455 #endif // INTERP_ILCYCLE_PROFILE
11456 #endif // INTERP_ILINSTR_PROFILE
// Growable global registry of all InterpreterMethodInfos (see AddInterpMethInfo).
11459 InterpreterMethodInfo** Interpreter::s_interpMethInfos = NULL;
11460 unsigned Interpreter::s_interpMethInfosAllocSize = 0;
11461 unsigned Interpreter::s_interpMethInfosCount = 0;
// Returns true iff the top of the operand stack holds a pointer-like value
// (by CorInfoType classification); the empty-stack case returns on an elided line.
11463 bool Interpreter::TOSIsPtr()
11465     if (m_curStackHt == 0)
11468     return CorInfoTypeIsPointer(OpStackTypeGet(m_curStackHt - 1).ToCorInfoType());
// Config flag enabling PrintPostMortemData() at shutdown.
11472 ConfigDWORD Interpreter::s_PrintPostMortemFlag;
11474 // InterpreterCache.
// Default-construct an empty cache (no storage yet) and account for the object
// itself in the cache-allocation statistics.
11475 template<typename Key, typename Val>
11476 InterpreterCache<Key,Val>::InterpreterCache() : m_pairs(NULL), m_allocSize(0), m_count(0)
11479     AddAllocBytes(sizeof(*this));
// Counters for optional tracking of total cache allocation (see AddAllocBytes).
11485 static unsigned InterpreterCacheAllocBytes = 0;
11486 const unsigned KBYTE = 1024;
11487 const unsigned MBYTE = KBYTE*KBYTE;
// Report progress every additional 16 KB allocated.
11488 const unsigned InterpreterCacheAllocBytesIncrement = 16*KBYTE;
11489 static unsigned InterpreterCacheAllocBytesNextTarget = InterpreterCacheAllocBytesIncrement;
// Accumulate "bytes" into the global cache-allocation counter and print a progress
// line each time a new 16 KB threshold is crossed. The body is normally compiled
// out (the enclosing #if is elided in this fragment).
11491 template<typename Key, typename Val>
11492 void InterpreterCache<Key,Val>::AddAllocBytes(unsigned bytes)
11494     // Reinstate this code if you want to track bytes attributable to caching.
11496     InterpreterCacheAllocBytes += bytes;
11497     if (InterpreterCacheAllocBytes > InterpreterCacheAllocBytesNextTarget)
11499         printf("Total cache alloc = %d bytes.\n", InterpreterCacheAllocBytes);
11501         InterpreterCacheAllocBytesNextTarget += InterpreterCacheAllocBytesIncrement;
// Guarantee room for one more pair: allocate the initial array on first use,
// otherwise double capacity (capped at USHRT_MAX) and copy the existing pairs.
11507 template<typename Key, typename Val>
11508 void InterpreterCache<Key,Val>::EnsureCanInsert()
11510     if (m_count < m_allocSize)
11513     // Otherwise, must make room.
11514     if (m_allocSize == 0)
11516         _ASSERTE(m_count == 0);
11517         m_pairs = new KeyValPair[InitSize];
11518         m_allocSize = InitSize;
11520         AddAllocBytes(m_allocSize * sizeof(KeyValPair));
      // Capacity is a 16-bit quantity, hence the USHRT_MAX cap on growth.
11525         unsigned short newSize = min(m_allocSize * 2, USHRT_MAX);
11527         KeyValPair* newPairs = new KeyValPair[newSize];
11528         memcpy(newPairs, m_pairs, m_count * sizeof(KeyValPair));
          // The old array is released on an elided line before this reassignment.
11530         m_pairs = newPairs;
          // Only the delta is newly-allocated bytes.
11532         AddAllocBytes((newSize - m_allocSize) * sizeof(KeyValPair));
11534         m_allocSize = newSize;
// Insert (key, val) keeping m_pairs sorted by key. If the key is already present
// the existing entry is kept (and asserted to hold the same value) and the insert
// fails; otherwise later entries are shifted up and the pair is written in place.
11538 template<typename Key, typename Val>
11539 bool InterpreterCache<Key,Val>::AddItem(Key key, Val val)
11542     // Find the index to insert before.
11543     unsigned firstGreaterOrEqual = 0;
11544     for (; firstGreaterOrEqual < m_count; firstGreaterOrEqual++)
11546         if (m_pairs[firstGreaterOrEqual].m_key >= key)
11549     if (firstGreaterOrEqual < m_count && m_pairs[firstGreaterOrEqual].m_key == key)
11551         _ASSERTE(m_pairs[firstGreaterOrEqual].m_val == val);
11554     // Move everything starting at firstGreater up one index (if necessary)
      // NOTE(review): with an unsigned loop variable, "k >= firstGreaterOrEqual" never
      // fails when firstGreaterOrEqual == 0, so k would wrap below 0 — presumably an
      // elided guard (e.g. on m_count/firstGreaterOrEqual) prevents that; confirm.
11557     for (unsigned k = m_count-1; k >= firstGreaterOrEqual; k--)
11559         m_pairs[k + 1] = m_pairs[k];
11564     // Now we can insert the new element.
11565     m_pairs[firstGreaterOrEqual].m_key = key;
11566     m_pairs[firstGreaterOrEqual].m_val = val;
// Binary-search the sorted pair array for "key"; on success copy the value into
// "v" and return true (the returns and "lo" initialization are on elided lines).
11571 template<typename Key, typename Val>
11572 bool InterpreterCache<Key,Val>::GetItem(Key key, Val& v)
11575     unsigned hi = m_count;
11576     // Invariant: we've determined that the pair for "iloffset", if present,
11577     // is in the index interval [lo, hi).
11580         unsigned mid = (hi + lo)/2;
11581         Key midKey = m_pairs[mid].m_key;
          // Exact hit: hand the value back to the caller.
11584             v = m_pairs[mid].m_val;
11587         else if (key < midKey)
11593             _ASSERTE(key > midKey);
11597     // If we reach here without returning, it's not here.
11601 // Normalizes the operand stack so that no entry points into the large-struct
// local-variable area: each large-struct operand whose storage lives in the local
// area is copied onto the large-struct operand stack (preserving stack order),
// and the operand-stack slot is repointed at the copy. After this, the operand
// stack is self-contained and survives mutation of the locals.
11602 void Interpreter::OpStackNormalize()
11604     size_t largeStructStackOffset = 0;
11605     // Yes, I've written a quadratic algorithm here.  I don't think it will matter in practice.
11606     for (unsigned i = 0; i < m_curStackHt; i++)
11608         InterpreterType tp = OpStackTypeGet(i);
11609         if (tp.IsLargeStruct(&m_interpCeeInfo))
11611             size_t sz = tp.Size(&m_interpCeeInfo);
11613             void* addr = OpStackGet<void*>(i);
11614             if (IsInLargeStructLocalArea(addr))
11616                 // We're going to allocate space at the top for the new value, then copy everything above the current slot
11617                 // up into that new space, then copy the value into the vacated space.
11618                 // How much will we have to copy?
11619                 size_t toCopy = m_largeStructOperandStackHt - largeStructStackOffset;
11621                 // Allocate space for the new value.
11622                 void* dummy = LargeStructOperandStackPush(sz);
11624                 // Remember where we're going to write to.
11625                 BYTE* fromAddr = m_largeStructOperandStack + largeStructStackOffset;
11626                 BYTE* toAddr = fromAddr + sz;
                  // Shift the operands above this slot up by "sz" to vacate room.
11627                 memcpy(toAddr, fromAddr, toCopy);
11629                 // Now copy the local variable value.
11630                 memcpy(fromAddr, addr, sz);
11631                 OpStackSet<void*>(i, fromAddr);
              // Track how much of the large-struct stack lies below the next candidate.
11633             largeStructStackOffset += sz;
11636     // When we've normalized the stack, it contains no pointers to locals.
11637     m_orOfPushedInterpreterTypes = 0;
11642 // Code copied from eeinterface.cpp in "compiler".  Should be common...
// Printable names indexed by CorInfoType (initializer list elided in this fragment).
11644 static const char* CorInfoTypeNames[] = {
// Builds "Class:Method(argTy,...)" (plus ":retTy" when non-void) for the given
// method handle into a freshly new[]'d buffer, optionally also returning the
// class name via "clsName". Two passes: measure the total length, then format.
// NOTE(review): the returned buffer is heap-allocated and apparently never freed
// by callers — presumably acceptable for debug/tracing use; confirm.
11670 const char* eeGetMethodFullName(CEEInfo* info, CORINFO_METHOD_HANDLE hnd, const char** clsName)
11680     const char* returnType = NULL;
11682     const char* className;
11683     const char* methodName = info->getMethodNameFromMetadata(hnd, &className, NULL, NULL);
11684     if (clsName != NULL)
11686         *clsName = className;
11692     /* Generating the full signature is a two-pass process. First we have to walk
11693        the components in order to assess the total size, then we allocate the buffer
11694        and copy the elements into it.
11697     /* Right now there is a race-condition in the EE, className can be NULL */
11699     /* initialize length with length of className and '.' */
11703         length = strlen(className) + 1;
      // NULL class names are rendered as the 7-character placeholder "<NULL>.".
11707         _ASSERTE(strlen("<NULL>.") == 7);
11711     /* add length of methodName and opening bracket */
11712     length += strlen(methodName) + 1;
11714     CORINFO_SIG_INFO sig;
11715     info->getMethodSig(hnd, &sig, nullptr);
11716     CORINFO_ARG_LIST_HANDLE argLst = sig.args;
11718     CORINFO_CLASS_HANDLE dummyCls;
      // Pass 1 continued: measure each argument's type-name contribution.
11719     for (i = 0; i < sig.numArgs; i++)
11721         CorInfoType type = strip(info->getArgType(&sig, argLst, &dummyCls));
11723         length += strlen(CorInfoTypeNames[type]);
11724         argLst = info->getArgNext(argLst);
11727     /* add ',' if there is more than one argument */
11729     if (sig.numArgs > 1)
11731         length += (sig.numArgs - 1);
11734     if (sig.retType != CORINFO_TYPE_VOID)
11736         returnType = CorInfoTypeNames[sig.retType];
11737         length += strlen(returnType) + 1; // don't forget the delimiter ':'
11740     /* add closing bracket and null terminator */
11744     char* retName = new char[length];
11746     /* Now generate the full signature string in the allocated buffer */
11750         strcpy_s(retName, length, className);
11751         strcat_s(retName, length, ":");
      // className was NULL (see race-condition note above): use the placeholder.
11755         strcpy_s(retName, length, "<NULL>.");
11758     strcat_s(retName, length, methodName);
11760     // append the signature
11761     strcat_s(retName, length, "(");
      // Pass 2: re-walk the arg list (argLst is reset on an elided line) and emit names.
11765     for (i = 0; i < sig.numArgs; i++)
11767         CorInfoType type = strip(info->getArgType(&sig, argLst, &dummyCls));
11768         strcat_s(retName, length, CorInfoTypeNames[type]);
11770         argLst = info->getArgNext(argLst);
11771         if (i + 1 < sig.numArgs)
11773             strcat_s(retName, length, ",");
11777     strcat_s(retName, length, ")");
      // Non-void return type is appended as ":retTy" (guard is on an elided line).
11781         strcat_s(retName, length, ":");
11782         strcat_s(retName, length, returnType);
      // Sanity: the measured length exactly matches the formatted string + NUL.
11785     _ASSERTE(strlen(retName) == length - 1);
// Member convenience wrapper: format a full method name using this interpreter's
// CEEInfo (class name not separately returned).
11790 const char* Interpreter::eeGetMethodFullName(CORINFO_METHOD_HANDLE hnd)
11792     return ::eeGetMethodFullName(&m_interpCeeInfo, hnd);
// Lazily-initialized table of IL opcode mnemonics: entries [0,256) hold two-byte
// (0xFE-prefixed) opcodes, [256,512) hold one-byte opcodes (0xFF bias; see ILOp*).
11795 const char* ILOpNames[256*2];
11796 bool ILOpNamesInited = false;
// Populate ILOpNames from opcode.def on first call; subsequent calls are no-ops.
// NOTE(review): the check-then-set is not thread-safe — presumably callers are
// serialized or a benign race on identical writes is tolerated; confirm.
11798 void InitILOpNames()
11800     if (!ILOpNamesInited)
11802         // Initialize the array.
11803 #define OPDEF(c,s,pop,push,args,type,l,s1,s2,ctrl) if (s1 == 0xfe || s1 == 0xff) { int ind ((unsigned(s1) << 8) + unsigned(s2)); ind -= 0xfe00; ILOpNames[ind] = s; }
11804 #include "opcode.def"
11806         ILOpNamesInited = true;
// Return the mnemonic for the IL instruction at the given code pointer, handling
// the 0xFE two-byte prefix (table layout: two-byte ops at [0,256), one-byte at
// [256,512); InitILOpNames() is called on an elided line).
11809 const char* Interpreter::ILOp(BYTE* m_ILCodePtr)
11812     BYTE b = *m_ILCodePtr;
      // Two-byte opcode: index by the second byte directly (0xFE00 bias already applied).
11815         return ILOpNames[*(m_ILCodePtr + 1)];
      // One-byte opcode: stored at 0x100 + opcode.
11819         return ILOpNames[(0x1 << 8) + b];
// Mnemonic for a one-byte IL opcode value (stored at offset 0x100 in ILOpNames).
11822 const char* Interpreter::ILOp1Byte(unsigned short ilInstrVal)
11825     return ILOpNames[(0x1 << 8) + ilInstrVal];
// Mnemonic for a two-byte (0xFE-prefixed) IL opcode's second byte.
11827 const char* Interpreter::ILOp2Byte(unsigned short ilInstrVal)
11830     return ILOpNames[ilInstrVal];
// Debug aid: dump every operand-stack slot (index, stack-normal type, value) to
// the interpreter log file.
11833 void Interpreter::PrintOStack()
11835     if (m_curStackHt == 0)
11837         fprintf(GetLogFile(), "     <empty>\n");
11841         for (unsigned k = 0; k < m_curStackHt; k++)
11843             CorInfoType cit = OpStackTypeGet(k).ToCorInfoType();
              // The operand stack only ever holds stack-normal types.
11844             _ASSERTE(IsStackNormalType(cit));
11845             fprintf(GetLogFile(), "    %4d: %10s: ", k, CorInfoTypeNames[cit]);
11846             PrintOStackValue(k);
11847             fprintf(GetLogFile(), "\n");
11850     fflush(GetLogFile());
// Debug aid: print the value of operand-stack slot "index". Large structs are
// stored by pointer; everything else lives inline in the stack slot.
11853 void Interpreter::PrintOStackValue(unsigned index)
11855     _ASSERTE_MSG(index < m_curStackHt, "precondition");
11856     InterpreterType it = OpStackTypeGet(index);
11857     if (it.IsLargeStruct(&m_interpCeeInfo))
11859         PrintValue(it, OpStackGet<BYTE*>(index));
11863         PrintValue(it, reinterpret_cast<BYTE*>(OpStackGetAddr(index, it.Size(&m_interpCeeInfo))));
// Debug aid: dump every local variable (index, type, value) to the log file.
// Large-struct locals are stored indirectly (the fixed-size slot holds a pointer).
11867 void Interpreter::PrintLocals()
11869     if (m_methInfo->m_numLocals == 0)
11871         fprintf(GetLogFile(), "       <no locals>\n");
11875         for (unsigned i = 0; i < m_methInfo->m_numLocals; i++)
11877             InterpreterType it = m_methInfo->m_localDescs[i].m_type;
11878             CorInfoType cit = it.ToCorInfoType();
11879             void* localPtr = NULL;
11880             if (it.IsLargeStruct(&m_interpCeeInfo))
                  // Slot holds a pointer to the struct storage; dereference it.
11882                 void* structPtr = ArgSlotEndiannessFixup(reinterpret_cast<ARG_SLOT*>(FixedSizeLocalSlot(i)), sizeof(void**));
11883                 localPtr = *reinterpret_cast<void**>(structPtr);
                  // Small value: it lives directly in the fixed-size slot.
11887                 localPtr = ArgSlotEndiannessFixup(reinterpret_cast<ARG_SLOT*>(FixedSizeLocalSlot(i)), it.Size(&m_interpCeeInfo));
11889             fprintf(GetLogFile(), "    loc%-4d: %10s: ", i, CorInfoTypeNames[cit]);
11890             PrintValue(it, reinterpret_cast<BYTE*>(localPtr));
11891             fprintf(GetLogFile(), "\n");
11894     fflush(GetLogFile());
// Debug aid: dump every argument (index, type, value) to the log file.
11897 void Interpreter::PrintArgs()
11899     for (unsigned k = 0; k < m_methInfo->m_numArgs; k++)
11901         CorInfoType cit = GetArgType(k).ToCorInfoType();
11902         fprintf(GetLogFile(), "    %4d: %10s: ", k, CorInfoTypeNames[cit]);
          // The value itself is printed by a PrintArgValue call on an elided line.
11904         fprintf(GetLogFile(), "\n");
11906     fprintf(GetLogFile(), "\n");
11907     fflush(GetLogFile());
// Debug aid: print the value of argument "argNum" via PrintValue.
11910 void Interpreter::PrintArgValue(unsigned argNum)
11912     _ASSERTE_MSG(argNum < m_methInfo->m_numArgs, "precondition");
11913     InterpreterType it = GetArgType(argNum);
11914     PrintValue(it, GetArgAddr(argNum));
11917 // Note that this is used to print non-stack-normal values, so
11918 // it must handle all cases.
// Debug aid: print the value at "valAddr" according to the interpreter type "it".
// Objects are dumped as address + method table (+ raw bytes); value classes as
// raw bytes; primitives via the matching fprintf format.
11919 void Interpreter::PrintValue(InterpreterType it, BYTE* valAddr)
11921     switch (it.ToCorInfoType())
11923     case CORINFO_TYPE_BOOL:
11924         fprintf(GetLogFile(), "%s", ((*reinterpret_cast<INT8*>(valAddr)) ? "true" : "false"));
11926     case CORINFO_TYPE_BYTE:
11927         fprintf(GetLogFile(), "%d", *reinterpret_cast<INT8*>(valAddr));
11929     case CORINFO_TYPE_UBYTE:
11930         fprintf(GetLogFile(), "%u", *reinterpret_cast<UINT8*>(valAddr));
11933     case CORINFO_TYPE_SHORT:
11934         fprintf(GetLogFile(), "%d", *reinterpret_cast<INT16*>(valAddr));
11936     case CORINFO_TYPE_USHORT: case CORINFO_TYPE_CHAR:
11937         fprintf(GetLogFile(), "%u", *reinterpret_cast<UINT16*>(valAddr));
11940     case CORINFO_TYPE_INT:
11941         fprintf(GetLogFile(), "%d", *reinterpret_cast<INT32*>(valAddr));
11943     case CORINFO_TYPE_UINT:
11944         fprintf(GetLogFile(), "%u", *reinterpret_cast<UINT32*>(valAddr));
11947     case CORINFO_TYPE_NATIVEINT:
11949         INT64 val = static_cast<INT64>(*reinterpret_cast<NativeInt*>(valAddr));
11950         fprintf(GetLogFile(), "%lld (= 0x%llx)", val, val);
11953     case CORINFO_TYPE_NATIVEUINT:
11955         UINT64 val = static_cast<UINT64>(*reinterpret_cast<NativeUInt*>(valAddr));
          // NOTE(review): "%lld" formats this unsigned value as signed — large values
          // print negative; "%llu" looks intended. Confirm before changing debug output.
11956         fprintf(GetLogFile(), "%lld (= 0x%llx)", val, val);
11960     case CORINFO_TYPE_BYREF:
11961         fprintf(GetLogFile(), "0x%p", *reinterpret_cast<void**>(valAddr));
11964     case CORINFO_TYPE_LONG:
11966         INT64 val = *reinterpret_cast<INT64*>(valAddr);
11967         fprintf(GetLogFile(), "%lld (= 0x%llx)", val, val);
11970     case CORINFO_TYPE_ULONG:
          // NOTE(review): same signed/unsigned mismatch as NATIVEUINT above ("%lld" for UINT64).
11971         fprintf(GetLogFile(), "%lld", *reinterpret_cast<UINT64*>(valAddr));
11974     case CORINFO_TYPE_CLASS:
11976         Object* obj = *reinterpret_cast<Object**>(valAddr);
11979             fprintf(GetLogFile(), "null");
          // Debug builds can show the class name; otherwise print the raw method table.
11984             fprintf(GetLogFile(), "0x%p (%s) [", obj, obj->GetMethodTable()->GetDebugClassName());
11986             fprintf(GetLogFile(), "0x%p (MT=0x%p) [", obj, obj->GetMethodTable());
          // Hex-dump the object's base-size bytes.
11988             unsigned sz = obj->GetMethodTable()->GetBaseSize();
11989             BYTE* objBytes = reinterpret_cast<BYTE*>(obj);
11990             for (unsigned i = 0; i < sz; i++)
11994                     fprintf(GetLogFile(), " ");
11996                 fprintf(GetLogFile(), "0x%x", objBytes[i]);
11998             fprintf(GetLogFile(), "]");
12002     case CORINFO_TYPE_VALUECLASS:
          // Hex-dump the value class's bytes, prefixed by its metadata name.
12005         fprintf(GetLogFile(), "<%s>: [", m_interpCeeInfo.getClassNameFromMetadata(it.ToClassHandle(), NULL));
12006         unsigned sz = getClassSize(it.ToClassHandle());
12007         for (unsigned i = 0; i < sz; i++)
12011                 fprintf(GetLogFile(), " ");
12013             fprintf(GetLogFile(), "0x%02x", valAddr[i]);
12015         fprintf(GetLogFile(), "]");
12018     case CORINFO_TYPE_REFANY:
12019         fprintf(GetLogFile(), "<refany>");
12021     case CORINFO_TYPE_FLOAT:
12022         fprintf(GetLogFile(), "%f", *reinterpret_cast<float*>(valAddr));
12024     case CORINFO_TYPE_DOUBLE:
12025         fprintf(GetLogFile(), "%g", *reinterpret_cast<double*>(valAddr));
12027     case CORINFO_TYPE_PTR:
12028         fprintf(GetLogFile(), "0x%p", *reinterpret_cast<void**>(valAddr));
12031         _ASSERTE_MSG(false, "Unknown type in PrintValue.");
12035 #endif // INTERP_TRACING
// Append "methInfo" to the global s_interpMethInfos registry, growing the array
// (double on full, initial capacity 128) as needed.
12038 void Interpreter::AddInterpMethInfo(InterpreterMethodInfo* methInfo)
12040     typedef InterpreterMethodInfo* InterpreterMethodInfoPtr;
12041     // TODO: this requires synchronization.
12042     const unsigned InitSize = 128;
12043     if (s_interpMethInfos == NULL)
12045         s_interpMethInfos = new InterpreterMethodInfoPtr[InitSize];
12046         s_interpMethInfosAllocSize = InitSize;
12048     if (s_interpMethInfosAllocSize == s_interpMethInfosCount)
          // Full: double the capacity and copy the existing pointers over.
12050         unsigned newSize = s_interpMethInfosAllocSize * 2;
12051         InterpreterMethodInfoPtr* tmp = new InterpreterMethodInfoPtr[newSize];
12052         memcpy(tmp, s_interpMethInfos, s_interpMethInfosCount * sizeof(InterpreterMethodInfoPtr));
12053         delete[] s_interpMethInfos;
12054         s_interpMethInfos = tmp;
12055         s_interpMethInfosAllocSize = newSize;
12057     s_interpMethInfos[s_interpMethInfosCount] = methInfo;
12058     s_interpMethInfosCount++;
// qsort comparator: orders InterpreterMethodInfo* by ascending invocation count
// (the -1/0/+1 returns are on elided lines).
12061 int _cdecl Interpreter::CompareMethInfosByInvocations(const void* mi0in, const void* mi1in)
12063     const InterpreterMethodInfo* mi0 = *((const InterpreterMethodInfo**)mi0in);
12064     const InterpreterMethodInfo* mi1 = *((const InterpreterMethodInfo**)mi1in);
12065     if (mi0->m_invocations < mi1->m_invocations)
12069     else if (mi0->m_invocations == mi1->m_invocations)
12075         _ASSERTE(mi0->m_invocations > mi1->m_invocations);
// qsort comparator: orders InterpreterMethodInfo* by DESCENDING total IL
// instructions executed (note the inverted sign relative to the comparator above).
12081 int _cdecl Interpreter::CompareMethInfosByILInstrs(const void* mi0in, const void* mi1in)
12083     const InterpreterMethodInfo* mi0 = *((const InterpreterMethodInfo**)mi0in);
12084     const InterpreterMethodInfo* mi1 = *((const InterpreterMethodInfo**)mi1in);
12085     if (mi0->m_totIlInstructionsExeced < mi1->m_totIlInstructionsExeced) return 1;
12086     else if (mi0->m_totIlInstructionsExeced == mi1->m_totIlInstructionsExeced) return 0;
12089         _ASSERTE(mi0->m_totIlInstructionsExeced > mi1->m_totIlInstructionsExeced);
12093 #endif // INTERP_PROFILE
// Scale factor for reporting cycle counts in MCycles.
12096 const int MIL = 1000000;
12098 // Leaving this disabled for now.
// Accumulator for cycles spent in signature walking (reported in PrintPostMortemData).
12100 unsigned __int64 ForceSigWalkCycles = 0;
// Shutdown-time report writer (gated by the InterpreterPrintPostMortem config):
// dumps invocation histograms, per-method execution stats, call/token-resolution
// counters, and (under INTERP_ILINSTR_PROFILE / INTERP_ILCYCLE_PROFILE) per-IL-
// instruction execution and cycle profiles to the interpreter log file.
12103 void Interpreter::PrintPostMortemData()
12105     if (s_PrintPostMortemFlag.val(CLRConfig::INTERNAL_InterpreterPrintPostMortem) == 0)
12111     // Let's print two things: the number of methods that are 0-10, or more, and
12112     // For each 10% of methods, cumulative % of invocations they represent.  By 1% for last 10%.
12114     // First one doesn't require any sorting.
      // Histogram buckets 0..9 count exact invocation counts; bucket 10 is "10 or more".
12115     const unsigned HistoMax = 11;
12116     unsigned histo[HistoMax];
12117     unsigned numExecs[HistoMax];
12118     for (unsigned k = 0; k < HistoMax; k++)
12120         histo[k] = 0; numExecs[k] = 0;
12122     for (unsigned k = 0; k < s_interpMethInfosCount; k++)
12124         unsigned invokes = s_interpMethInfos[k]->m_invocations;
12125         if (invokes > HistoMax - 1)
12127             invokes = HistoMax - 1;
12130         numExecs[invokes] += s_interpMethInfos[k]->m_invocations;
12133     fprintf(GetLogFile(), "Histogram of method executions:\n");
12134     fprintf(GetLogFile(), "   # of execs | # meths (%%)  | cum %% | %% cum execs\n");
12135     fprintf(GetLogFile(), "   -------------------------------------------------------\n");
12136     float fTotMeths = float(s_interpMethInfosCount);
12137     float fTotExecs = float(s_totalInvocations);
12138     float numPct = 0.0f;
12139     float numExecPct = 0.0f;
12140     for (unsigned k = 0; k < HistoMax; k++)
12142         fprintf(GetLogFile(), "   %10d", k);
          // The last bucket is open-ended, hence the "+" suffix.
12145             fprintf(GetLogFile(), "+ ");
12149             fprintf(GetLogFile(), "  ");
12151         float pct = float(histo[k])*100.0f/fTotMeths;
12153         float execPct = float(numExecs[k])*100.0f/fTotExecs;
12154         numExecPct += execPct;
12155         fprintf(GetLogFile(), "| %7d (%5.2f%%) | %6.2f%% | %6.2f%%\n", histo[k], pct, numPct, numExecPct);
12158     // This sorts them in ascending order of number of invocations.
12159     qsort(&s_interpMethInfos[0], s_interpMethInfosCount, sizeof(InterpreterMethodInfo*), &CompareMethInfosByInvocations);
12161     fprintf(GetLogFile(), "\nFor methods sorted in ascending # of executions order, cumulative %% of executions:\n");
12162     if (s_totalInvocations > 0)
12164         fprintf(GetLogFile(), "   %% of methods  | max execs | cum %% of execs\n");
12165         fprintf(GetLogFile(), "   ------------------------------------------\n");
12166         unsigned methNum = 0;
12167         unsigned nNumExecs = 0;
12168         float totExecsF = float(s_totalInvocations);
          // Report in 10% strides of the (sorted) method population up to 90%...
12169         for (unsigned k = 10; k < 100; k += 10)
12171             unsigned targ = unsigned((float(k)/100.0f)*float(s_interpMethInfosCount));
12172             unsigned targLess1 = (targ > 0 ? targ - 1 : 0);
12173             while (methNum < targ)
12175                 nNumExecs += s_interpMethInfos[methNum]->m_invocations;
12178             float pctExecs = float(nNumExecs) * 100.0f / totExecsF;
12180             fprintf(GetLogFile(), "   %8d%%     | %9d | %8.2f%%\n", k, s_interpMethInfos[targLess1]->m_invocations, pctExecs);
          // ...then by 1% steps through the heavy tail (k continues from the loop above).
12185         for (; k < 100; k++)
12187             unsigned targ = unsigned((float(k)/100.0f)*float(s_interpMethInfosCount));
12188             while (methNum < targ)
12190                 nNumExecs += s_interpMethInfos[methNum]->m_invocations;
12193             pctExecs = float(nNumExecs) * 100.0f / totExecsF;
12195             fprintf(GetLogFile(), "   %8d%%     | %9d | %8.2f%%\n", k, s_interpMethInfos[targLess1]->m_invocations, pctExecs);
          // Finally the 100% row covering every remaining method.
12199         targ = s_interpMethInfosCount;
12200         while (methNum < targ)
12202             nNumExecs += s_interpMethInfos[methNum]->m_invocations;
12205         pctExecs = float(nNumExecs) * 100.0f / totExecsF;
12206         fprintf(GetLogFile(), "   %8d%%     | %9d | %8.2f%%\n", k, s_interpMethInfos[targLess1]->m_invocations, pctExecs);
      // Call-site statistics (counters maintained during interpretation).
12211     fprintf(GetLogFile(), "\nTotal number of calls from interpreted code: %d.\n", s_totalInterpCalls);
12212     fprintf(GetLogFile(), "    Also, %d are intrinsics; %d of these are not currently handled intrinsically.\n",
12213             s_totalInterpCallsToIntrinsics, s_totalInterpCallsToIntrinsicsUnhandled);
12214     fprintf(GetLogFile(), "    Of these, %d to potential property getters (%d of these dead simple), %d to setters.\n",
12215             s_totalInterpCallsToGetters, s_totalInterpCallsToDeadSimpleGetters, s_totalInterpCallsToSetters);
12216     fprintf(GetLogFile(), "    Of the dead simple getter calls, %d have been short-circuited.\n",
12217             s_totalInterpCallsToDeadSimpleGettersShortCircuited);
      // Token-resolution statistics, one row per RTK_* category.
12219     fprintf(GetLogFile(), "\nToken resolutions by category:\n");
12220     fprintf(GetLogFile(), "Category     |  opportunities |      calls |    %%\n");
12221     fprintf(GetLogFile(), "---------------------------------------------------\n");
12222     for (unsigned i = RTK_Undefined; i < RTK_Count; i++)
12225         if (s_tokenResolutionOpportunities[i] > 0)
12226             pct = 100.0f * float(s_tokenResolutionCalls[i]) / float(s_tokenResolutionOpportunities[i]);
12227         fprintf(GetLogFile(), "%12s | %15d | %9d | %6.2f%%\n",
12228             s_tokenResolutionKindNames[i], s_tokenResolutionOpportunities[i], s_tokenResolutionCalls[i], pct);
      // Per-method IL-instruction totals (INTERP_PROFILE section).
12232     fprintf(GetLogFile(), "Information on num of execs:\n");
12234     UINT64 totILInstrs = 0;
12235     for (unsigned i = 0; i < s_interpMethInfosCount; i++) totILInstrs += s_interpMethInfos[i]->m_totIlInstructionsExeced;
12237     float totILInstrsF = float(totILInstrs);
12239     fprintf(GetLogFile(), "\nTotal instructions = %lld.\n", totILInstrs);
12240     fprintf(GetLogFile(), "\nTop <=10 methods by # of IL instructions executed.\n");
12241     fprintf(GetLogFile(), "%10s | %9s | %10s | %10s | %8s | %s\n", "tot execs", "# invokes", "code size", "ratio", "% of tot", "Method");
12242     fprintf(GetLogFile(), "----------------------------------------------------------------------------\n");
12244     qsort(&s_interpMethInfos[0], s_interpMethInfosCount, sizeof(InterpreterMethodInfo*), &CompareMethInfosByILInstrs);
12246     for (unsigned i = 0; i < min(10, s_interpMethInfosCount); i++)
12248         unsigned ilCodeSize = unsigned(s_interpMethInfos[i]->m_ILCodeEnd - s_interpMethInfos[i]->m_ILCode);
12249         fprintf(GetLogFile(), "%10lld | %9d | %10d | %10.2f | %8.2f%% | %s:%s\n",
12250                 s_interpMethInfos[i]->m_totIlInstructionsExeced,
12251                 s_interpMethInfos[i]->m_invocations,
12253                 float(s_interpMethInfos[i]->m_totIlInstructionsExeced) / float(ilCodeSize),
12254                 float(s_interpMethInfos[i]->m_totIlInstructionsExeced) * 100.0f / totILInstrsF,
12255                 s_interpMethInfos[i]->m_clsName,
12256                 s_interpMethInfos[i]->m_methName);
12258 #endif // INTERP_PROFILE
12261 #if INTERP_ILINSTR_PROFILE
12262     fprintf(GetLogFile(), "\nIL instruction profiling:\n");
12263     // First, classify by categories.
12264     unsigned totInstrs = 0;
12265 #if INTERP_ILCYCLE_PROFILE
12266     unsigned __int64 totCycles = 0;
      // Each cycle measurement carries a fixed timer overhead; subtract it per exec.
12267     unsigned __int64 perMeasurementOverhead = CycleTimer::QueryOverhead();
12268 #endif // INTERP_ILCYCLE_PROFILE
      // Accumulate the one-byte opcodes into their categories.
12269     for (unsigned i = 0; i < 256; i++)
12271         s_ILInstrExecsByCategory[s_ILInstrCategories[i]] += s_ILInstrExecs[i];
12272         totInstrs += s_ILInstrExecs[i];
12273 #if INTERP_ILCYCLE_PROFILE
12274         unsigned __int64 cycles = s_ILInstrCycles[i];
          // Clamp at zero: overhead correction must not drive the count negative.
12275         if (cycles > s_ILInstrExecs[i] * perMeasurementOverhead) cycles -= s_ILInstrExecs[i] * perMeasurementOverhead;
12277         s_ILInstrCycles[i] = cycles;
12278         s_ILInstrCyclesByCategory[s_ILInstrCategories[i]] += cycles;
12279         totCycles += cycles;
12280 #endif // INTERP_ILCYCLE_PROFILE
      // Accumulate the two-byte (0x100-biased) opcodes.
12282     unsigned totInstrs2Byte = 0;
12283 #if INTERP_ILCYCLE_PROFILE
12284     unsigned __int64 totCycles2Byte = 0;
12285 #endif // INTERP_ILCYCLE_PROFILE
12286     for (unsigned i = 0; i < CountIlInstr2Byte; i++)
12288         unsigned ind = 0x100 + i;
12289         s_ILInstrExecsByCategory[s_ILInstrCategories[ind]] += s_ILInstr2ByteExecs[i];
12290         totInstrs += s_ILInstr2ByteExecs[i];
12291         totInstrs2Byte += s_ILInstr2ByteExecs[i];
12292 #if INTERP_ILCYCLE_PROFILE
12293         unsigned __int64 cycles = s_ILInstrCycles[ind];
12294         if (cycles > s_ILInstrExecs[ind] * perMeasurementOverhead) cycles -= s_ILInstrExecs[ind] * perMeasurementOverhead;
          // NOTE(review): writes back to [i] where the 1-byte loop above writes [i] for
          // index i — here the entry read was [ind]; "[ind] = cycles" looks intended,
          // as written this clobbers the 1-byte opcode's corrected value. Confirm.
12296         s_ILInstrCycles[i] = cycles;
12297         s_ILInstrCyclesByCategory[s_ILInstrCategories[ind]] += cycles;
12298         totCycles += cycles;
12299         totCycles2Byte += cycles;
12300 #endif // INTERP_ILCYCLE_PROFILE
12303     // Now sort the categories by # of occurrences.
12305     InstrExecRecord ieps[256 + CountIlInstr2Byte];
12306     for (unsigned short i = 0; i < 256; i++)
12308         ieps[i].m_instr = i; ieps[i].m_is2byte = false; ieps[i].m_execs = s_ILInstrExecs[i];
12309 #if INTERP_ILCYCLE_PROFILE
12310         if (i == CEE_BREAK)
12312             ieps[i].m_cycles = 0;
12313             continue; // Don't count these if they occur...
12315         ieps[i].m_cycles = s_ILInstrCycles[i];
12316         _ASSERTE((ieps[i].m_execs != 0) || (ieps[i].m_cycles == 0)); // Cycles can be zero for non-zero execs because of measurement correction.
12317 #endif // INTERP_ILCYCLE_PROFILE
12319     for (unsigned short i = 0; i < CountIlInstr2Byte; i++)
12322         ieps[ind].m_instr = i; ieps[ind].m_is2byte = true; ieps[ind].m_execs = s_ILInstr2ByteExecs[i];
12323 #if INTERP_ILCYCLE_PROFILE
12324         ieps[ind].m_cycles = s_ILInstrCycles[ind];
          // NOTE(review): asserts ieps[i] but this loop populates ieps[ind] — presumably
          // ieps[ind] was meant (cf. the 1-byte loop above). Confirm.
12325         _ASSERTE((ieps[i].m_execs != 0) || (ieps[i].m_cycles == 0)); // Cycles can be zero for non-zero execs because of measurement correction.
12326 #endif // INTERP_ILCYCLE_PROFILE
12329     qsort(&ieps[0], 256 + CountIlInstr2Byte, sizeof(InstrExecRecord), &InstrExecRecord::Compare);
12331     fprintf(GetLogFile(), "\nInstructions (%d total, %d 1-byte):\n", totInstrs, totInstrs - totInstrs2Byte);
12332 #if INTERP_ILCYCLE_PROFILE
      // Correct the aggregate call-cycle counter for measurement overhead, too.
12333     if (s_callCycles > s_calls * perMeasurementOverhead) s_callCycles -= s_calls * perMeasurementOverhead;
12334     else s_callCycles = 0;
12335     fprintf(GetLogFile(), "  MCycles (%lld total, %lld 1-byte, %lld calls (%d calls, %10.2f cyc/call):\n",
12336             totCycles/MIL, (totCycles - totCycles2Byte)/MIL, s_callCycles/MIL, s_calls, float(s_callCycles)/float(s_calls));
12338     extern unsigned __int64 MetaSigCtor1Cycles;
12339     fprintf(GetLogFile(), "  MetaSig(MethodDesc, TypeHandle) ctor: %lld MCycles.\n",
12340             MetaSigCtor1Cycles/MIL);
12341     fprintf(GetLogFile(), "  ForceSigWalk: %lld MCycles.\n",
12342             ForceSigWalkCycles/MIL);
12344 #endif // INTERP_ILCYCLE_PROFILE
      // First report: per-instruction profile (call continues on elided lines).
12346     PrintILProfile(&ieps[0], totInstrs
12347 #if INTERP_ILCYCLE_PROFILE
12349 #endif // INTERP_ILCYCLE_PROFILE
12352     fprintf(GetLogFile(), "\nInstructions grouped by category: (%d total, %d 1-byte):\n", totInstrs, totInstrs - totInstrs2Byte);
12353 #if INTERP_ILCYCLE_PROFILE
12354     fprintf(GetLogFile(), "  MCycles (%lld total, %lld 1-byte):\n",
12355             totCycles/MIL, (totCycles - totCycles2Byte)/MIL);
12356 #endif // INTERP_ILCYCLE_PROFILE
      // Re-fill ieps with per-category totals, then sort and report again.
12357     for (unsigned short i = 0; i < 256 + CountIlInstr2Byte; i++)
12361             ieps[i].m_instr = i; ieps[i].m_is2byte = false;
12365             ieps[i].m_instr = i - 256; ieps[i].m_is2byte = true;
12367         ieps[i].m_execs = s_ILInstrExecsByCategory[i];
12368 #if INTERP_ILCYCLE_PROFILE
12369         ieps[i].m_cycles = s_ILInstrCyclesByCategory[i];
12370 #endif // INTERP_ILCYCLE_PROFILE
12372     qsort(&ieps[0], 256 + CountIlInstr2Byte, sizeof(InstrExecRecord), &InstrExecRecord::Compare);
12373     PrintILProfile(&ieps[0], totInstrs
12374 #if INTERP_ILCYCLE_PROFILE
12376 #endif // INTERP_ILCYCLE_PROFILE
12380     // Early debugging code.
      // NOTE(review): this format string has no conversion specifiers but two extra
      // arguments are passed — harmless (surplus args are ignored) but worth tidying.
12381     fprintf(GetLogFile(), "\nInstructions grouped category mapping:\n", totInstrs, totInstrs - totInstrs2Byte);
12382     for (unsigned short i = 0; i < 256; i++)
12384         unsigned short cat = s_ILInstrCategories[i];
          // Categories >= 256 denote two-byte representative opcodes.
12386             fprintf(GetLogFile(), "Instr: %12s ==> %12s.\n", ILOp1Byte(i), ILOp1Byte(cat));
12388             fprintf(GetLogFile(), "Instr: %12s ==> %12s.\n", ILOp1Byte(i), ILOp2Byte(cat - 256));
12391     for (unsigned short i = 0; i < CountIlInstr2Byte; i++)
12393         unsigned ind = 256 + i;
12394         unsigned short cat = s_ILInstrCategories[ind];
12396             fprintf(GetLogFile(), "Instr: %12s ==> %12s.\n", ILOp2Byte(i), ILOp1Byte(cat));
12398             fprintf(GetLogFile(), "Instr: %12s ==> %12s.\n", ILOp2Byte(i), ILOp2Byte(cat - 256));
12402 #endif // INTERP_ILINSTR_PROFILE
12405 #if INTERP_ILINSTR_PROFILE
// Scale factor for reporting cycle counts in thousands (the "KCycles"
// column printed by PrintILProfile below).
const int K = 1000;
12410 void Interpreter::PrintILProfile(Interpreter::InstrExecRecord *recs, unsigned int totInstrs
12411 #if INTERP_ILCYCLE_PROFILE
12412 , unsigned __int64 totCycles
12413 #endif // INTERP_ILCYCLE_PROFILE
12416 float fTotInstrs = float(totInstrs);
12417 fprintf(GetLogFile(), "Instruction | execs | %% | cum %%");
12418 #if INTERP_ILCYCLE_PROFILE
12419 float fTotCycles = float(totCycles);
12420 fprintf(GetLogFile(), "| KCycles | %% | cum %% | cyc/inst\n");
12421 fprintf(GetLogFile(), "--------------------------------------------------"
12422 "-----------------------------------------\n");
12424 fprintf(GetLogFile(), "\n-------------------------------------------\n");
12426 float numPct = 0.0f;
12427 #if INTERP_ILCYCLE_PROFILE
12428 float numCyclePct = 0.0f;
12429 #endif // INTERP_ILCYCLE_PROFILE
12430 for (unsigned i = 0; i < 256 + CountIlInstr2Byte; i++)
12433 if (totInstrs > 0) pct = float(recs[i].m_execs) * 100.0f / fTotInstrs;
12435 if (recs[i].m_execs > 0)
12437 fprintf(GetLogFile(), "%12s | %9d | %6.2f%% | %6.2f%%",
12438 (recs[i].m_is2byte ? ILOp2Byte(recs[i].m_instr) : ILOp1Byte(recs[i].m_instr)), recs[i].m_execs,
12440 #if INTERP_ILCYCLE_PROFILE
12442 if (totCycles > 0) pct = float(recs[i].m_cycles) * 100.0f / fTotCycles;
12443 numCyclePct += pct;
12444 float cyclesPerInst = float(recs[i].m_cycles) / float(recs[i].m_execs);
12445 fprintf(GetLogFile(), "| %12llu | %6.2f%% | %6.2f%% | %11.2f",
12446 recs[i].m_cycles/K, pct, numCyclePct, cyclesPerInst);
12447 #endif // INTERP_ILCYCLE_PROFILE
12448 fprintf(GetLogFile(), "\n");
12452 #endif // INTERP_ILINSTR_PROFILE
12454 #endif // FEATURE_INTERPRETER