1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
4
5 /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
6 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
7 XX                                                                           XX
8 XX                           Importer                                        XX
9 XX                                                                           XX
10 XX   Imports the given method and converts it to semantic trees              XX
11 XX                                                                           XX
12 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
13 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
14 */
15
16 #include "jitpch.h"
17 #ifdef _MSC_VER
18 #pragma hdrstop
19 #endif
20
21 #include "corexcep.h"
22
23 #define Verify(cond, msg)                                                                                              \
24     do                                                                                                                 \
25     {                                                                                                                  \
26         if (!(cond))                                                                                                   \
27         {                                                                                                              \
28             verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
29         }                                                                                                              \
30     } while (0)
31
32 #define VerifyOrReturn(cond, msg)                                                                                      \
33     do                                                                                                                 \
34     {                                                                                                                  \
35         if (!(cond))                                                                                                   \
36         {                                                                                                              \
37             verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
38             return;                                                                                                    \
39         }                                                                                                              \
40     } while (0)
41
42 #define VerifyOrReturnSpeculative(cond, msg, speculative)                                                              \
43     do                                                                                                                 \
44     {                                                                                                                  \
45         if (speculative)                                                                                               \
46         {                                                                                                              \
47             if (!(cond))                                                                                               \
48             {                                                                                                          \
49                 return false;                                                                                          \
50             }                                                                                                          \
51         }                                                                                                              \
52         else                                                                                                           \
53         {                                                                                                              \
54             if (!(cond))                                                                                               \
55             {                                                                                                          \
56                 verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                   \
57                 return false;                                                                                          \
58             }                                                                                                          \
59         }                                                                                                              \
60     } while (0)
61
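/*
 * Illustrative sketch of how these macros are meant to be used (a hypothetical helper,
 * not a routine from this file): a verification check guards each structural condition
 * with one of the macros above, e.g.
 *
 *     void Compiler::verCheckObjRefExample(const typeInfo& ti)   // hypothetical
 *     {
 *         VerifyOrReturn(ti.IsObjRef(), "object reference expected");
 *         ...
 *     }
 *
 * Verify() records the failure (via verRaiseVerifyExceptionIfNeeded) and continues,
 * VerifyOrReturn() additionally returns from the caller, and VerifyOrReturnSpeculative()
 * suppresses the exception and just returns false when the check is speculative.
 */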
62 /*****************************************************************************/
63
64 void Compiler::impInit()
65 {
66
67 #ifdef DEBUG
68     impTreeList        = nullptr;
69     impTreeLast        = nullptr;
70     impInlinedCodeSize = 0;
71 #endif
72 }
73
74 /*****************************************************************************
75  *
76  *  Pushes the given tree on the stack.
77  */
78
79 void Compiler::impPushOnStack(GenTreePtr tree, typeInfo ti)
80 {
81     /* Check for overflow. If inlining, we may be using a bigger stack */
82
83     if ((verCurrentState.esStackDepth >= info.compMaxStack) &&
84         (verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0)))
85     {
86         BADCODE("stack overflow");
87     }
88
89 #ifdef DEBUG
90     // If we are pushing a struct, make certain we know the precise type!
91     if (tree->TypeGet() == TYP_STRUCT)
92     {
93         assert(ti.IsType(TI_STRUCT));
94         CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle();
95         assert(clsHnd != NO_CLASS_HANDLE);
96     }
97
98     if (tiVerificationNeeded && !ti.IsDead())
99     {
100         assert(typeInfo::AreEquivalent(NormaliseForStack(ti), ti)); // types are normalized
101
102         // The ti type is consistent with the tree type.
103         //
104
105         // On 64-bit systems, nodes whose "proper" type is "native int" get labeled TYP_LONG.
106         // In the verification type system, we always transform "native int" to "TI_INT".
107         // Ideally, we would keep track of which nodes labeled "TYP_LONG" are really "native int", but
108         // attempts to do that have proved too difficult.  Instead, we'll assume that in checks like this,
109         // when there's a mismatch, it's because of this reason -- the typeInfo::AreEquivalentModuloNativeInt
110         // method used in the last disjunct allows exactly this mismatch.
111         assert(ti.IsDead() || ti.IsByRef() && (tree->TypeGet() == TYP_I_IMPL || tree->TypeGet() == TYP_BYREF) ||
112                ti.IsUnboxedGenericTypeVar() && tree->TypeGet() == TYP_REF ||
113                ti.IsObjRef() && tree->TypeGet() == TYP_REF || ti.IsMethod() && tree->TypeGet() == TYP_I_IMPL ||
114                ti.IsType(TI_STRUCT) && tree->TypeGet() != TYP_REF ||
115                typeInfo::AreEquivalentModuloNativeInt(NormaliseForStack(ti),
116                                                       NormaliseForStack(typeInfo(tree->TypeGet()))));
117
118         // If it is a struct type, make certain we normalized the primitive types
119         assert(!ti.IsType(TI_STRUCT) ||
120                info.compCompHnd->getTypeForPrimitiveValueClass(ti.GetClassHandle()) == CORINFO_TYPE_UNDEF);
121     }
122
123 #if VERBOSE_VERIFY
124     if (VERBOSE && tiVerificationNeeded)
125     {
126         printf("\n");
127         printf(TI_DUMP_PADDING);
128         printf("About to push to stack: ");
129         ti.Dump();
130     }
131 #endif // VERBOSE_VERIFY
132
133 #endif // DEBUG
134
135     verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti;
136     verCurrentState.esStack[verCurrentState.esStackDepth++].val      = tree;
137
138     if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
139     {
140         compLongUsed = true;
141     }
142     else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
143     {
144         compFloatingPointUsed = true;
145     }
146 }
147
148 /******************************************************************************/
149 // Used in the inliner, where we can assume type-safe code. Please don't use in the importer!
150 inline void Compiler::impPushOnStackNoType(GenTreePtr tree)
151 {
152     assert(verCurrentState.esStackDepth < impStkSize);
153     INDEBUG(verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = typeInfo());
154     verCurrentState.esStack[verCurrentState.esStackDepth++].val              = tree;
155
156     if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
157     {
158         compLongUsed = true;
159     }
160     else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
161     {
162         compFloatingPointUsed = true;
163     }
164 }
165
166 inline void Compiler::impPushNullObjRefOnStack()
167 {
168     impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL));
169 }
170
171 // This method gets called when we run into unverifiable code
172 // (and we are verifying the method)
173
174 inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file)
175                                                           DEBUGARG(unsigned line))
176 {
177     // Remember that the code is not verifiable
178     // Note that the method may yet pass canSkipMethodVerification(),
179     // and so the presence of unverifiable code may not be an issue.
180     tiIsVerifiableCode = FALSE;
181
182 #ifdef DEBUG
183     const char* tail = strrchr(file, '\\');
184     if (tail)
185     {
186         file = tail + 1;
187     }
188
189     if (JitConfig.JitBreakOnUnsafeCode())
190     {
191         assert(!"Unsafe code detected");
192     }
193 #endif
194
195     JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
196             msg, info.compFullName, impCurOpcName, impCurOpcOffs));
197
198     if (verNeedsVerification() || compIsForImportOnly())
199     {
200         JITLOG((LL_ERROR, "Verification failure:  %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
201                 msg, info.compFullName, impCurOpcName, impCurOpcOffs));
202         verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line));
203     }
204 }
205
206 inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file)
207                                                                     DEBUGARG(unsigned line))
208 {
209     JITLOG((LL_ERROR, "Verification failure:  %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
210             msg, info.compFullName, impCurOpcName, impCurOpcOffs));
211
212 #ifdef DEBUG
213     //    BreakIfDebuggerPresent();
214     if (getBreakOnBadCode())
215     {
216         assert(!"Typechecking error");
217     }
218 #endif
219
220     RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr);
221     UNREACHABLE();
222 }
223
224 // Helper function that tells us whether the IL instruction at the given address
225 // consumes an address from the top of the stack. We use it to avoid unnecessarily
226 // marking locals as address-taken (lvAddrTaken).
227 bool Compiler::impILConsumesAddr(const BYTE* codeAddr, CORINFO_METHOD_HANDLE fncHandle, CORINFO_MODULE_HANDLE scpHandle)
228 {
229     assert(!compIsForInlining());
230
231     OPCODE opcode;
232
233     opcode = (OPCODE)getU1LittleEndian(codeAddr);
234
235     switch (opcode)
236     {
237         // case CEE_LDFLDA: We're leaving this one out because, for a sequence
238         // like
239         //
240         //          ldloca.0
241         //          ldflda whatever
242         //
243         // on a primitive-like struct, you end up after morphing with the address of a
244         // local that's not marked as address-taken, which is wrong. Also, ldflda is
245         // usually used for structs that contain other structs, which isn't a case we
246         // handle very well right now for other reasons.
247
248         case CEE_LDFLD:
249         {
250             // We won't collapse small fields. This is probably not the right place to have this
251             // check, but we're only using the function for this purpose, and it is easy to factor
252             // it out if we need to do so.
253
254             CORINFO_RESOLVED_TOKEN resolvedToken;
255             impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field);
256
257             CORINFO_CLASS_HANDLE clsHnd;
258             var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField, &clsHnd));
259
260             // Preserve 'small' int types
261             if (lclTyp > TYP_INT)
262             {
263                 lclTyp = genActualType(lclTyp);
264             }
265
266             if (varTypeIsSmall(lclTyp))
267             {
268                 return false;
269             }
270
271             return true;
272         }
273         default:
274             break;
275     }
276
277     return false;
278 }
279
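/*
 * An illustrative sketch of the pattern impILConsumesAddr looks for (hypothetical IL and
 * field name, not taken from a real test):
 *
 *          ldloca.s   0
 *          ldfld      int32 SomeStruct::someIntField
 *
 * Here the ldfld consumes the address produced by ldloca, and since the field is not a
 * small type the function returns true, so the local need not be marked address-taken.
 * For a byte/short field (or any opcode other than CEE_LDFLD) it returns false.
 */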
280 void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind)
281 {
282     pResolvedToken->tokenContext = impTokenLookupContextHandle;
283     pResolvedToken->tokenScope   = info.compScopeHnd;
284     pResolvedToken->token        = getU4LittleEndian(addr);
285     pResolvedToken->tokenType    = kind;
286
287     if (!tiVerificationNeeded)
288     {
289         info.compCompHnd->resolveToken(pResolvedToken);
290     }
291     else
292     {
293         Verify(eeTryResolveToken(pResolvedToken), "Token resolution failed");
294     }
295 }
296
297 /*****************************************************************************
298  *
299  *  Pop one tree from the stack.
300  */
301
302 StackEntry Compiler::impPopStack()
303 {
304     if (verCurrentState.esStackDepth == 0)
305     {
306         BADCODE("stack underflow");
307     }
308
309 #ifdef DEBUG
310 #if VERBOSE_VERIFY
311     if (VERBOSE && tiVerificationNeeded)
312     {
313         JITDUMP("\n");
314         printf(TI_DUMP_PADDING);
315         printf("About to pop from the stack: ");
316         const typeInfo& ti = verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo;
317         ti.Dump();
318     }
319 #endif // VERBOSE_VERIFY
320 #endif // DEBUG
321
322     return verCurrentState.esStack[--verCurrentState.esStackDepth];
323 }
324
325 StackEntry Compiler::impPopStack(CORINFO_CLASS_HANDLE& structType)
326 {
327     StackEntry ret = impPopStack();
328     structType     = verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo.GetClassHandle();
329     return (ret);
330 }
331
332 GenTreePtr Compiler::impPopStack(typeInfo& ti)
333 {
334     StackEntry ret = impPopStack();
335     ti             = ret.seTypeInfo;
336     return (ret.val);
337 }
338
339 /*****************************************************************************
340  *
341  *  Peek at the n'th (0-based) tree from the top of the stack.
342  */
343
344 StackEntry& Compiler::impStackTop(unsigned n)
345 {
346     if (verCurrentState.esStackDepth <= n)
347     {
348         BADCODE("stack underflow");
349     }
350
351     return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1];
352 }
353 /*****************************************************************************
354  *  Some of the trees are spilled specially. While unspilling them, or
355  *  making a copy, these need special handling. The function below
356  *  enumerates the operators possible after spilling.
357  */
358
359 #ifdef DEBUG // only used in asserts
360 static bool impValidSpilledStackEntry(GenTreePtr tree)
361 {
362     if (tree->gtOper == GT_LCL_VAR)
363     {
364         return true;
365     }
366
367     if (tree->OperIsConst())
368     {
369         return true;
370     }
371
372     return false;
373 }
374 #endif
375
376 /*****************************************************************************
377  *
378  *  The following logic is used to save/restore stack contents.
379  *  If 'copy' is true, then we make a copy of the trees on the stack. These
380  *  have to all be cloneable/spilled values.
381  */
382
383 void Compiler::impSaveStackState(SavedStack* savePtr, bool copy)
384 {
385     savePtr->ssDepth = verCurrentState.esStackDepth;
386
387     if (verCurrentState.esStackDepth)
388     {
389         savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth];
390         size_t saveSize  = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees);
391
392         if (copy)
393         {
394             StackEntry* table = savePtr->ssTrees;
395
396             /* Make a fresh copy of all the stack entries */
397
398             for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++)
399             {
400                 table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo;
401                 GenTreePtr tree   = verCurrentState.esStack[level].val;
402
403                 assert(impValidSpilledStackEntry(tree));
404
405                 switch (tree->gtOper)
406                 {
407                     case GT_CNS_INT:
408                     case GT_CNS_LNG:
409                     case GT_CNS_DBL:
410                     case GT_CNS_STR:
411                     case GT_LCL_VAR:
412                         table->val = gtCloneExpr(tree);
413                         break;
414
415                     default:
416                         assert(!"Bad oper - Not covered by impValidSpilledStackEntry()");
417                         break;
418                 }
419             }
420         }
421         else
422         {
423             memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize);
424         }
425     }
426 }
427
428 void Compiler::impRestoreStackState(SavedStack* savePtr)
429 {
430     verCurrentState.esStackDepth = savePtr->ssDepth;
431
432     if (verCurrentState.esStackDepth)
433     {
434         memcpy(verCurrentState.esStack, savePtr->ssTrees,
435                verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack));
436     }
437 }
438
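/*
 * Minimal usage sketch (an assumed caller pattern, not copied from this file): code that
 * needs to import something without disturbing the evaluation stack brackets the work
 * with a save/restore pair, e.g.
 *
 *     SavedStack savedState;
 *     impSaveStackState(&savedState, true);   // 'true': clone entries so they stay valid
 *     ... import code that may push/pop or rewrite stack entries ...
 *     impRestoreStackState(&savedState);
 *
 * With copy == false only the StackEntry records are memcpy'd, so the original trees
 * must still be valid (and unmodified) when the state is restored.
 */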
439 /*****************************************************************************
440  *
441  *  Get the tree list started for a new basic block.
442  */
443 inline void Compiler::impBeginTreeList()
444 {
445     assert(impTreeList == nullptr && impTreeLast == nullptr);
446
447     impTreeList = impTreeLast = new (this, GT_BEG_STMTS) GenTree(GT_BEG_STMTS, TYP_VOID);
448 }
449
450 /*****************************************************************************
451  *
452  *  Store the given start and end stmt in the given basic block. This is
453  *  mostly called by impEndTreeList(BasicBlock *block). It is called
454  *  directly only for handling CEE_LEAVEs out of finally-protected try regions.
455  */
456
457 inline void Compiler::impEndTreeList(BasicBlock* block, GenTreePtr firstStmt, GenTreePtr lastStmt)
458 {
459     assert(firstStmt->gtOper == GT_STMT);
460     assert(lastStmt->gtOper == GT_STMT);
461
462     /* Make the list circular, so that we can easily walk it backwards */
463
464     firstStmt->gtPrev = lastStmt;
465
466     /* Store the tree list in the basic block */
467
468     block->bbTreeList = firstStmt;
469
470     /* The block should not already be marked as imported */
471     assert((block->bbFlags & BBF_IMPORTED) == 0);
472
473     block->bbFlags |= BBF_IMPORTED;
474 }
475
476 /*****************************************************************************
477  *
478  *  Store the current tree list in the given basic block.
479  */
480
481 inline void Compiler::impEndTreeList(BasicBlock* block)
482 {
483     assert(impTreeList->gtOper == GT_BEG_STMTS);
484
485     GenTreePtr firstTree = impTreeList->gtNext;
486
487     if (!firstTree)
488     {
489         /* The block should not already be marked as imported */
490         assert((block->bbFlags & BBF_IMPORTED) == 0);
491
492         // Empty block. Just mark it as imported
493         block->bbFlags |= BBF_IMPORTED;
494     }
495     else
496     {
497         // Ignore the GT_BEG_STMTS
498         assert(firstTree->gtPrev == impTreeList);
499
500         impEndTreeList(block, firstTree, impTreeLast);
501     }
502
503 #ifdef DEBUG
504     if (impLastILoffsStmt != nullptr)
505     {
506         impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
507         impLastILoffsStmt                          = nullptr;
508     }
509
510     impTreeList = impTreeLast = nullptr;
511 #endif
512 }
513
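/*
 * Sketch of the per-block lifecycle these helpers assume (illustrative, not a verbatim
 * caller from this file):
 *
 *     impBeginTreeList();                                              // start an empty GT_BEG_STMTS list
 *     impAppendTree(tree1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
 *     impAppendTree(tree2, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
 *     impEndTreeList(block);                                           // hand the statements to 'block'
 *
 * impEndTreeList also marks the block with BBF_IMPORTED, so each block goes through
 * this sequence at most once.
 */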
514 /*****************************************************************************
515  *
516  *  Check that storing the given tree doesn't mess up the semantic order. Note
517  *  that this has only limited value as we can only check [0..chkLevel).
518  */
519
520 inline void Compiler::impAppendStmtCheck(GenTreePtr stmt, unsigned chkLevel)
521 {
522 #ifndef DEBUG
523     return;
524 #else
525     assert(stmt->gtOper == GT_STMT);
526
527     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
528     {
529         chkLevel = verCurrentState.esStackDepth;
530     }
531
532     if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE)
533     {
534         return;
535     }
536
537     GenTreePtr tree = stmt->gtStmt.gtStmtExpr;
538
539     // Calls can only be appended if there are no trees with GTF_GLOB_EFFECT on the stack
540
541     if (tree->gtFlags & GTF_CALL)
542     {
543         for (unsigned level = 0; level < chkLevel; level++)
544         {
545             assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0);
546         }
547     }
548
549     if (tree->gtOper == GT_ASG)
550     {
551         // For an assignment to a local variable, all references of that
552         // variable have to be spilled. If it is aliased, all calls and
553         // indirect accesses have to be spilled
554
555         if (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR)
556         {
557             unsigned lclNum = tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
558             for (unsigned level = 0; level < chkLevel; level++)
559             {
560                 assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum, false));
561                 assert(!lvaTable[lclNum].lvAddrExposed ||
562                        (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0);
563             }
564         }
565
566         // If the access may be to global memory, all side effects have to be spilled.
567
568         else if (tree->gtOp.gtOp1->gtFlags & GTF_GLOB_REF)
569         {
570             for (unsigned level = 0; level < chkLevel; level++)
571             {
572                 assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0);
573             }
574         }
575     }
576 #endif
577 }
578
579 /*****************************************************************************
580  *
581  *  Append the given GT_STMT node to the current block's tree list.
582  *  [0..chkLevel) is the portion of the stack which we will check for
583  *    interference with stmt and spill if needed.
584  */
585
586 inline void Compiler::impAppendStmt(GenTreePtr stmt, unsigned chkLevel)
587 {
588     assert(stmt->gtOper == GT_STMT);
589     noway_assert(impTreeLast != nullptr);
590
591     /* If the statement being appended has any side-effects, check the stack
592        to see if anything needs to be spilled to preserve correct ordering. */
593
594     GenTreePtr expr  = stmt->gtStmt.gtStmtExpr;
595     unsigned   flags = expr->gtFlags & GTF_GLOB_EFFECT;
596
597     // Assignments to (unaliased) locals don't count as side effects, as
598     // we handle them specially using impSpillLclRefs(). Temp locals should
599     // be fine too.
600
601     if ((expr->gtOper == GT_ASG) && (expr->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
602         !(expr->gtOp.gtOp1->gtFlags & GTF_GLOB_REF) && !gtHasLocalsWithAddrOp(expr->gtOp.gtOp2))
603     {
604         unsigned op2Flags = expr->gtOp.gtOp2->gtFlags & GTF_GLOB_EFFECT;
605         assert(flags == (op2Flags | GTF_ASG));
606         flags = op2Flags;
607     }
608
609     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
610     {
611         chkLevel = verCurrentState.esStackDepth;
612     }
613
614     if (chkLevel && chkLevel != (unsigned)CHECK_SPILL_NONE)
615     {
616         assert(chkLevel <= verCurrentState.esStackDepth);
617
618         if (flags)
619         {
620             // If there is a call, we have to spill global refs
621             bool spillGlobEffects = (flags & GTF_CALL) ? true : false;
622
623             if (expr->gtOper == GT_ASG)
624             {
625                 GenTree* lhs = expr->gtGetOp1();
626                 // If we are assigning to a global ref, we have to spill global refs on stack.
627                 // TODO-1stClassStructs: Previously, spillGlobEffects was set to true for
628                 // GT_INITBLK and GT_COPYBLK, but this is overly conservative, and should be
629                 // revisited. (Note that it was NOT set to true for GT_COPYOBJ.)
630                 if (!expr->OperIsBlkOp())
631                 {
632                     // If we are assigning to a global ref, we have to spill global refs on stack
633                     if ((lhs->gtFlags & GTF_GLOB_REF) != 0)
634                     {
635                         spillGlobEffects = true;
636                     }
637                 }
638                 else if ((lhs->OperIsBlk() && !lhs->AsBlk()->HasGCPtr()) ||
639                          ((lhs->OperGet() == GT_LCL_VAR) &&
640                           (lvaTable[lhs->AsLclVarCommon()->gtLclNum].lvStructGcCount == 0)))
641                 {
642                     spillGlobEffects = true;
643                 }
644             }
645
646             impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt"));
647         }
648         else
649         {
650             impSpillSpecialSideEff();
651         }
652     }
653
654     impAppendStmtCheck(stmt, chkLevel);
655
656     /* Point 'prev' at the previous node, so that we can walk backwards */
657
658     stmt->gtPrev = impTreeLast;
659
660     /* Append the expression statement to the list */
661
662     impTreeLast->gtNext = stmt;
663     impTreeLast         = stmt;
664
665 #ifdef FEATURE_SIMD
666     impMarkContiguousSIMDFieldAssignments(stmt);
667 #endif
668
669     /* Once we set impCurStmtOffs in an appended tree, we are ready to
670        report the following offsets. So reset impCurStmtOffs */
671
672     if (impTreeLast->gtStmt.gtStmtILoffsx == impCurStmtOffs)
673     {
674         impCurStmtOffsSet(BAD_IL_OFFSET);
675     }
676
677 #ifdef DEBUG
678     if (impLastILoffsStmt == nullptr)
679     {
680         impLastILoffsStmt = stmt;
681     }
682
683     if (verbose)
684     {
685         printf("\n\n");
686         gtDispTree(stmt);
687     }
688 #endif
689 }
690
691 /*****************************************************************************
692  *
693  *  Insert the given GT_STMT "stmt" before GT_STMT "stmtBefore"
694  */
695
696 inline void Compiler::impInsertStmtBefore(GenTreePtr stmt, GenTreePtr stmtBefore)
697 {
698     assert(stmt->gtOper == GT_STMT);
699     assert(stmtBefore->gtOper == GT_STMT);
700
701     GenTreePtr stmtPrev = stmtBefore->gtPrev;
702     stmt->gtPrev        = stmtPrev;
703     stmt->gtNext        = stmtBefore;
704     stmtPrev->gtNext    = stmt;
705     stmtBefore->gtPrev  = stmt;
706 }
707
708 /*****************************************************************************
709  *
710  *  Append the given expression tree to the current block's tree list.
711  *  Return the newly created statement.
712  */
713
714 GenTreePtr Compiler::impAppendTree(GenTreePtr tree, unsigned chkLevel, IL_OFFSETX offset)
715 {
716     assert(tree);
717
718     /* Allocate an 'expression statement' node */
719
720     GenTreePtr expr = gtNewStmt(tree, offset);
721
722     /* Append the statement to the current block's stmt list */
723
724     impAppendStmt(expr, chkLevel);
725
726     return expr;
727 }
728
729 /*****************************************************************************
730  *
731  *  Insert the given expression tree before GT_STMT "stmtBefore"
732  */
733
734 void Compiler::impInsertTreeBefore(GenTreePtr tree, IL_OFFSETX offset, GenTreePtr stmtBefore)
735 {
736     assert(stmtBefore->gtOper == GT_STMT);
737
738     /* Allocate an 'expression statement' node */
739
740     GenTreePtr expr = gtNewStmt(tree, offset);
741
742     /* Insert the statement before 'stmtBefore' */
743
744     impInsertStmtBefore(expr, stmtBefore);
745 }
746
747 /*****************************************************************************
748  *
749  *  Append an assignment of the given value to a temp to the current tree list.
750  *  curLevel is the stack level for which the spill to the temp is being done.
751  */
752
753 void Compiler::impAssignTempGen(unsigned    tmp,
754                                 GenTreePtr  val,
755                                 unsigned    curLevel,
756                                 GenTreePtr* pAfterStmt, /* = NULL */
757                                 IL_OFFSETX  ilOffset,   /* = BAD_IL_OFFSET */
758                                 BasicBlock* block       /* = NULL */
759                                 )
760 {
761     GenTreePtr asg = gtNewTempAssign(tmp, val);
762
763     if (!asg->IsNothingNode())
764     {
765         if (pAfterStmt)
766         {
767             GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
768             *pAfterStmt        = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
769         }
770         else
771         {
772             impAppendTree(asg, curLevel, impCurStmtOffs);
773         }
774     }
775 }
776
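/*
 * Usage sketch (an assumed spill pattern, not quoted from this file): to spill the value
 * sitting at evaluation-stack level 'level' into a fresh temp one would do something like
 *
 *     unsigned tmpNum = lvaGrabTemp(true DEBUGARG("stack spill temp"));   // hypothetical temp name
 *     impAssignTempGen(tmpNum, verCurrentState.esStack[level].val, level);
 *     verCurrentState.esStack[level].val = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType);
 *
 * The curLevel argument is forwarded as the chkLevel for impAppendTree's interference
 * check over stack entries [0..curLevel).
 */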
777 /*****************************************************************************
778  * same as above, but handle the valueclass case too
779  */
780
781 void Compiler::impAssignTempGen(unsigned             tmpNum,
782                                 GenTreePtr           val,
783                                 CORINFO_CLASS_HANDLE structType,
784                                 unsigned             curLevel,
785                                 GenTreePtr*          pAfterStmt, /* = NULL */
786                                 IL_OFFSETX           ilOffset,   /* = BAD_IL_OFFSET */
787                                 BasicBlock*          block       /* = NULL */
788                                 )
789 {
790     GenTreePtr asg;
791
792     if (varTypeIsStruct(val))
793     {
794         assert(tmpNum < lvaCount);
795         assert(structType != NO_CLASS_HANDLE);
796
797         // if the method is non-verifiable the assert is not true
798         // so at least ignore it in the case when verification is turned on
799         // since any block that tries to use the temp would have failed verification.
800         var_types varType = lvaTable[tmpNum].lvType;
801         assert(tiVerificationNeeded || varType == TYP_UNDEF || varTypeIsStruct(varType));
802         lvaSetStruct(tmpNum, structType, false);
803
804         // Now, set the type of the struct value. Note that lvaSetStruct may modify the type
805         // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType)
806         // that has been passed in for the value being assigned to the temp, in which case we
807         // need to set 'val' to that same type.
808         // Note also that if we always normalized the types of any node that might be a struct
809         // type, this would not be necessary - but that requires additional JIT/EE interface
810         // calls that may not actually be required - e.g. if we only access a field of a struct.
811
812         val->gtType = lvaTable[tmpNum].lvType;
813
814         GenTreePtr dst = gtNewLclvNode(tmpNum, val->gtType);
815         asg            = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, block);
816     }
817     else
818     {
819         asg = gtNewTempAssign(tmpNum, val);
820     }
821
822     if (!asg->IsNothingNode())
823     {
824         if (pAfterStmt)
825         {
826             GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
827             *pAfterStmt        = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
828         }
829         else
830         {
831             impAppendTree(asg, curLevel, impCurStmtOffs);
832         }
833     }
834 }
835
836 /*****************************************************************************
837  *
838  *  Pop the given number of values from the stack and return a list node with
839  *  their values.
840  *  The 'prefixTree' argument may optionally contain an argument
841  *  list that is prepended to the list returned from this function.
842  *
843  *  The notion of prepended is a bit misleading in that the list is backwards
844  *  from the way I would expect: The first element popped is at the end of
845  *  the returned list, and prefixTree is 'before' that, meaning closer to
846  *  the end of the list.  To get to prefixTree, you have to walk to the
847  *  end of the list.
848  *
849  *  For ARG_ORDER_R2L prefixTree is only used to insert extra arguments, as
850  *  such we reverse its meaning such that returnValue has a reversed
851  *  prefixTree at the head of the list.
852  */
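/*
 * Ordering sketch (hypothetical three-argument call, prefixTree == nullptr): if the IL
 * has pushed a0, a1, a2 (a2 on top), then impPopList(3, &flags, sig) pops a2 first and
 * prepends each popped value, so the returned list reads
 *
 *     a0 -> a1 -> a2
 *
 * i.e. the first element popped ends up at the tail, which is also where a non-null
 * prefixTree lands for ARG_ORDER_L2R targets (and at the head, reversed, for R2L).
 */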
853
854 GenTreeArgList* Compiler::impPopList(unsigned          count,
855                                      unsigned*         flagsPtr,
856                                      CORINFO_SIG_INFO* sig,
857                                      GenTreeArgList*   prefixTree)
858 {
859     assert(sig == nullptr || count == sig->numArgs);
860
861     unsigned             flags = 0;
862     CORINFO_CLASS_HANDLE structType;
863     GenTreeArgList*      treeList;
864
865     if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
866     {
867         treeList = nullptr;
868     }
869     else
870     { // ARG_ORDER_L2R
871         treeList = prefixTree;
872     }
873
874     while (count--)
875     {
876         StackEntry se   = impPopStack();
877         typeInfo   ti   = se.seTypeInfo;
878         GenTreePtr temp = se.val;
879
880         if (varTypeIsStruct(temp))
881         {
882             // Morph trees that aren't already OBJs or MKREFANY to be OBJs
883             assert(ti.IsType(TI_STRUCT));
884             structType = ti.GetClassHandleForValueClass();
885             temp       = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL);
886         }
887
888         /* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */
889         flags |= temp->gtFlags;
890         treeList = gtNewListNode(temp, treeList);
891     }
892
893     *flagsPtr = flags;
894
895     if (sig != nullptr)
896     {
897         if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
898             sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR)
899         {
900             // Make sure that all valuetypes (including enums) that we push are loaded.
901             // This is to guarantee that if a GC is triggered from the prestub of this method,
902             // all valuetypes in the method signature are already loaded.
903             // We need to be able to find the size of the valuetypes, but we cannot
904             // do a class-load from within GC.
905             info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass);
906         }
907
908         CORINFO_ARG_LIST_HANDLE argLst = sig->args;
909         CORINFO_CLASS_HANDLE    argClass;
910         CORINFO_CLASS_HANDLE    argRealClass;
911         GenTreeArgList*         args;
912         unsigned                sigSize;
913
914         for (args = treeList, count = sig->numArgs; count > 0; args = args->Rest(), count--)
915         {
916             PREFIX_ASSUME(args != nullptr);
917
918             CorInfoType corType = strip(info.compCompHnd->getArgType(sig, argLst, &argClass));
919
920             // insert implied casts (from float to double or double to float)
921
922             if (corType == CORINFO_TYPE_DOUBLE && args->Current()->TypeGet() == TYP_FLOAT)
923             {
924                 args->Current() = gtNewCastNode(TYP_DOUBLE, args->Current(), TYP_DOUBLE);
925             }
926             else if (corType == CORINFO_TYPE_FLOAT && args->Current()->TypeGet() == TYP_DOUBLE)
927             {
928                 args->Current() = gtNewCastNode(TYP_FLOAT, args->Current(), TYP_FLOAT);
929             }
930
931             // insert any widening or narrowing casts for backwards compatibility
932
933             args->Current() = impImplicitIorI4Cast(args->Current(), JITtype2varType(corType));
934
935             if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR &&
936                 corType != CORINFO_TYPE_VAR && (argRealClass = info.compCompHnd->getArgClass(sig, argLst)) != nullptr)
937             {
938                 // Everett MC++ could generate IL with mismatched valuetypes. It used to work with the Everett JIT,
939                 // but it stopped working in Whidbey when we started passing simple valuetypes as underlying
940                 // primitive types.
941                 // We will try to adjust for this case here to avoid breaking customers' code (see VSW 485789 for
942                 // details).
943                 if (corType == CORINFO_TYPE_VALUECLASS && !varTypeIsStruct(args->Current()))
944                 {
945                     args->Current() = impNormStructVal(args->Current(), argRealClass, (unsigned)CHECK_SPILL_ALL, true);
946                 }
947
948                 // Make sure that all valuetypes (including enums) that we push are loaded.
949                 // This is to guarantee that if a GC is triggered from the prestub of this method,
950                 // all valuetypes in the method signature are already loaded.
951                 // We need to be able to find the size of the valuetypes, but we cannot
952                 // do a class-load from within GC.
953                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass);
954             }
955
956             argLst = info.compCompHnd->getArgNext(argLst);
957         }
958     }
959
960     if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
961     {
962         // Prepend the prefixTree
963
964         // Simple in-place reversal to place treeList
965         // at the end of a reversed prefixTree
966         while (prefixTree != nullptr)
967         {
968             GenTreeArgList* next = prefixTree->Rest();
969             prefixTree->Rest()   = treeList;
970             treeList             = prefixTree;
971             prefixTree           = next;
972         }
973     }
974     return treeList;
975 }
976
977 /*****************************************************************************
978  *
979  *  Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.)
980  *  The first "skipReverseCount" items are not reversed.
981  */
982
983 GenTreeArgList* Compiler::impPopRevList(unsigned          count,
984                                         unsigned*         flagsPtr,
985                                         CORINFO_SIG_INFO* sig,
986                                         unsigned          skipReverseCount)
987
988 {
989     assert(skipReverseCount <= count);
990
991     GenTreeArgList* list = impPopList(count, flagsPtr, sig);
992
993     // reverse the list
994     if (list == nullptr || skipReverseCount == count)
995     {
996         return list;
997     }
998
999     GenTreeArgList* ptr          = nullptr; // Initialized to the first node that needs to be reversed
1000     GenTreeArgList* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed
1001
1002     if (skipReverseCount == 0)
1003     {
1004         ptr = list;
1005     }
1006     else
1007     {
1008         lastSkipNode = list;
1009         // Get to the first node that needs to be reversed
1010         for (unsigned i = 0; i < skipReverseCount - 1; i++)
1011         {
1012             lastSkipNode = lastSkipNode->Rest();
1013         }
1014
1015         PREFIX_ASSUME(lastSkipNode != nullptr);
1016         ptr = lastSkipNode->Rest();
1017     }
1018
1019     GenTreeArgList* reversedList = nullptr;
1020
1021     do
1022     {
1023         GenTreeArgList* tmp = ptr->Rest();
1024         ptr->Rest()         = reversedList;
1025         reversedList        = ptr;
1026         ptr                 = tmp;
1027     } while (ptr != nullptr);
1028
1029     if (skipReverseCount)
1030     {
1031         lastSkipNode->Rest() = reversedList;
1032         return list;
1033     }
1034     else
1035     {
1036         return reversedList;
1037     }
1038 }
1039
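/*
 * Sketch (hypothetical five-argument call): impPopRevList(5, &flags, sig, 2) first pops
 * the list a0 -> a1 -> a2 -> a3 -> a4 as above and then reverses only the tail, giving
 *
 *     a0 -> a1 -> a4 -> a3 -> a2
 *
 * The first skipReverseCount == 2 nodes keep their order; with skipReverseCount == 0 the
 * whole list would be reversed.
 */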
1040 /*****************************************************************************
1041    Assign (copy) the structure from 'src' to 'dest'.  The structure is a value
1042    class of type 'structHnd'.  It returns the tree, representing the assignment,
1043    that should be appended to the statement list.
1044    Temp assignments may be appended to impTreeList if spilling is necessary.
1045    curLevel is the stack level for which a spill may be done.
1046  */
1047
1048 GenTreePtr Compiler::impAssignStruct(GenTreePtr           dest,
1049                                      GenTreePtr           src,
1050                                      CORINFO_CLASS_HANDLE structHnd,
1051                                      unsigned             curLevel,
1052                                      GenTreePtr*          pAfterStmt, /* = NULL */
1053                                      BasicBlock*          block       /* = NULL */
1054                                      )
1055 {
1056     assert(varTypeIsStruct(dest));
1057
1058     while (dest->gtOper == GT_COMMA)
1059     {
1060         assert(varTypeIsStruct(dest->gtOp.gtOp2)); // Second thing is the struct
1061
1062         // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree.
1063         if (pAfterStmt)
1064         {
1065             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(dest->gtOp.gtOp1, impCurStmtOffs));
1066         }
1067         else
1068         {
1069             impAppendTree(dest->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1070         }
1071
1072         // set dest to the second thing
1073         dest = dest->gtOp.gtOp2;
1074     }
1075
1076     assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD ||
1077            dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX);
1078
1079     if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR &&
1080         src->gtLclVarCommon.gtLclNum == dest->gtLclVarCommon.gtLclNum)
1081     {
1082         // Make this a NOP
1083         return gtNewNothingNode();
1084     }
1085
1086     // TODO-1stClassStructs: Avoid creating an address if it is not needed,
1087     // or re-creating a Blk node if it is.
1088     GenTreePtr destAddr;
1089
1090     if (dest->gtOper == GT_IND || dest->OperIsBlk())
1091     {
1092         destAddr = dest->gtOp.gtOp1;
1093     }
1094     else
1095     {
1096         destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
1097     }
1098
1099     return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, block));
1100 }
1101
1102 /*****************************************************************************/
1103
1104 GenTreePtr Compiler::impAssignStructPtr(GenTreePtr           destAddr,
1105                                         GenTreePtr           src,
1106                                         CORINFO_CLASS_HANDLE structHnd,
1107                                         unsigned             curLevel,
1108                                         GenTreePtr*          pAfterStmt, /* = NULL */
1109                                         BasicBlock*          block       /* = NULL */
1110                                         )
1111 {
1112     var_types  destType;
1113     GenTreePtr dest      = nullptr;
1114     unsigned   destFlags = 0;
1115
1116 #if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1117     assert(varTypeIsStruct(src) || (src->gtOper == GT_ADDR && src->TypeGet() == TYP_BYREF));
1118     // TODO-ARM-BUG: Does ARM need this?
1119     // TODO-ARM64-BUG: Does ARM64 need this?
1120     assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1121            src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1122            src->gtOper == GT_COMMA || src->gtOper == GT_ADDR ||
1123            (src->TypeGet() != TYP_STRUCT && (GenTree::OperIsSIMD(src->gtOper) || src->gtOper == GT_LCL_FLD)));
1124 #else  // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1125     assert(varTypeIsStruct(src));
1126
1127     assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1128            src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1129            src->gtOper == GT_COMMA ||
1130            (src->TypeGet() != TYP_STRUCT && (GenTree::OperIsSIMD(src->gtOper) || src->gtOper == GT_LCL_FLD)));
1131 #endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1132     if (destAddr->OperGet() == GT_ADDR)
1133     {
1134         GenTree* destNode = destAddr->gtGetOp1();
1135         // If the actual destination is a local (for non-LEGACY_BACKEND), or already a block node, or is a node that
1136         // will be morphed, don't insert an OBJ(ADDR).
1137         if (destNode->gtOper == GT_INDEX || destNode->OperIsBlk()
1138 #ifndef LEGACY_BACKEND
1139             || ((destNode->OperGet() == GT_LCL_VAR) && (destNode->TypeGet() == src->TypeGet()))
1140 #endif // !LEGACY_BACKEND
1141                 )
1142         {
1143             dest = destNode;
1144         }
1145         destType = destNode->TypeGet();
1146     }
1147     else
1148     {
1149         destType = src->TypeGet();
1150     }
1151
1152     var_types asgType = src->TypeGet();
1153
1154     if (src->gtOper == GT_CALL)
1155     {
1156         if (src->AsCall()->TreatAsHasRetBufArg(this))
1157         {
1158             // Case of call returning a struct via hidden retbuf arg
1159
1160             // insert the return value buffer into the argument list as first byref parameter
1161             src->gtCall.gtCallArgs = gtNewListNode(destAddr, src->gtCall.gtCallArgs);
1162
1163             // now returns void, not a struct
1164             src->gtType = TYP_VOID;
1165
1166             // return the morphed call node
1167             return src;
1168         }
1169         else
1170         {
1171             // Case of call returning a struct in one or more registers.
1172
1173             var_types returnType = (var_types)src->gtCall.gtReturnType;
1174
1175             // We won't use a return buffer, so change the type of src->gtType to 'returnType'
1176             src->gtType = genActualType(returnType);
1177
1178             // First we try to change this to "LclVar/LclFld = call"
1179             //
1180             if ((destAddr->gtOper == GT_ADDR) && (destAddr->gtOp.gtOp1->gtOper == GT_LCL_VAR))
1181             {
1182                 // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD.
1183                 // That is, the IR will be of the form lclVar = call for multi-reg return
1184                 //
1185                 GenTreePtr lcl = destAddr->gtOp.gtOp1;
1186                 if (src->AsCall()->HasMultiRegRetVal())
1187                 {
1188                     // Mark the struct LclVar as used in a MultiReg return context
1189                     //  which currently makes it non promotable.
1190                     // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1191                     // handle multireg returns.
1192                     lcl->gtFlags |= GTF_DONT_CSE;
1193                     lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1194                 }
1195                 else // The call result is not a multireg return
1196                 {
1197                     // We change this to a GT_LCL_FLD (from a GT_ADDR of a GT_LCL_VAR)
1198                     lcl->ChangeOper(GT_LCL_FLD);
1199                     fgLclFldAssign(lcl->gtLclVarCommon.gtLclNum);
1200                 }
1201
1202                 lcl->gtType = src->gtType;
1203                 asgType     = src->gtType;
1204                 dest        = lcl;
1205
1206 #if defined(_TARGET_ARM_)
1207                 // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case,
1208                 // but that method has not been updated to include ARM.
1209                 impMarkLclDstNotPromotable(lcl->gtLclVarCommon.gtLclNum, src, structHnd);
1210                 lcl->gtFlags |= GTF_DONT_CSE;
1211 #elif defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1212                 // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
1213                 assert(!src->gtCall.IsVarargs() && "varargs not allowed for System V OSs.");
1214
1215                 // Make the struct non promotable. The eightbytes could contain multiple fields.
1216                 // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1217                 // handle multireg returns.
1218                 // TODO-Cleanup: Why is this needed here? It seems that this will be set even for
1219                 // non-multireg returns.
1220                 lcl->gtFlags |= GTF_DONT_CSE;
1221                 lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1222 #endif
1223             }
1224             else // we don't have a GT_ADDR of a GT_LCL_VAR
1225             {
1226                 // !!! The destination could be on stack. !!!
1227                 // This flag will let us choose the correct write barrier.
1228                 asgType   = returnType;
1229                 destFlags = GTF_IND_TGTANYWHERE;
1230             }
1231         }
1232     }
1233     else if (src->gtOper == GT_RET_EXPR)
1234     {
1235         GenTreePtr call = src->gtRetExpr.gtInlineCandidate;
1236         noway_assert(call->gtOper == GT_CALL);
1237
1238         if (call->AsCall()->HasRetBufArg())
1239         {
1240             // insert the return value buffer into the argument list as first byref parameter
1241             call->gtCall.gtCallArgs = gtNewListNode(destAddr, call->gtCall.gtCallArgs);
1242
1243             // now returns void, not a struct
1244             src->gtType  = TYP_VOID;
1245             call->gtType = TYP_VOID;
1246
1247             // We have already appended the write to 'dest' to the GT_CALL's args.
1248             // So now we just return an empty node (pruning the GT_RET_EXPR)
1249             return src;
1250         }
1251         else
1252         {
1253             // Case of inline method returning a struct in one or more registers.
1254             //
1255             var_types returnType = (var_types)call->gtCall.gtReturnType;
1256
1257             // We won't need a return buffer
1258             asgType      = returnType;
1259             src->gtType  = genActualType(returnType);
1260             call->gtType = src->gtType;
1261
1262             // If we've changed the type, and it no longer matches a local destination,
1263             // we must use an indirection.
1264             if ((dest != nullptr) && (dest->OperGet() == GT_LCL_VAR) && (dest->TypeGet() != asgType))
1265             {
1266                 dest = nullptr;
1267             }
1268
1269             // !!! The destination could be on stack. !!!
1270             // This flag will let us choose the correct write barrier.
1271             destFlags = GTF_IND_TGTANYWHERE;
1272         }
1273     }
1274     else if (src->OperIsBlk())
1275     {
1276         asgType = impNormStructType(structHnd);
1277         if (src->gtOper == GT_OBJ)
1278         {
1279             assert(src->gtObj.gtClass == structHnd);
1280         }
1281     }
1282     else if (src->gtOper == GT_INDEX)
1283     {
1284         asgType = impNormStructType(structHnd);
1285         assert(src->gtIndex.gtStructElemClass == structHnd);
1286     }
1287     else if (src->gtOper == GT_MKREFANY)
1288     {
1289         // Since we are assigning the result of a GT_MKREFANY,
1290         // "destAddr" must point to a refany.
1291
1292         GenTreePtr destAddrClone;
1293         destAddr =
1294             impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment"));
1295
1296         assert(offsetof(CORINFO_RefAny, dataPtr) == 0);
1297         assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF);
1298         GetZeroOffsetFieldMap()->Set(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));
1299         GenTreePtr     ptrSlot         = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr);
1300         GenTreeIntCon* typeFieldOffset = gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL);
1301         typeFieldOffset->gtFieldSeq    = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField());
1302         GenTreePtr typeSlot =
1303             gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset));
1304
1305         // append the assign of the pointer value
1306         GenTreePtr asg = gtNewAssignNode(ptrSlot, src->gtOp.gtOp1);
1307         if (pAfterStmt)
1308         {
1309             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(asg, impCurStmtOffs));
1310         }
1311         else
1312         {
1313             impAppendTree(asg, curLevel, impCurStmtOffs);
1314         }
1315
1316         // return the assign of the type value, to be appended
1317         return gtNewAssignNode(typeSlot, src->gtOp.gtOp2);
1318     }
1319     else if (src->gtOper == GT_COMMA)
1320     {
1321         // The second thing is the struct or its address.
1322         assert(varTypeIsStruct(src->gtOp.gtOp2) || src->gtOp.gtOp2->gtType == TYP_BYREF);
1323         if (pAfterStmt)
1324         {
1325             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(src->gtOp.gtOp1, impCurStmtOffs));
1326         }
1327         else
1328         {
1329             impAppendTree(src->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1330         }
1331
1332         // Evaluate the second thing using recursion.
1333         return impAssignStructPtr(destAddr, src->gtOp.gtOp2, structHnd, curLevel, pAfterStmt, block);
1334     }
1335     else if (src->IsLocal())
1336     {
1337         asgType = src->TypeGet();
1338     }
1339     else if (asgType == TYP_STRUCT)
1340     {
1341         asgType     = impNormStructType(structHnd);
1342         src->gtType = asgType;
1343 #ifdef LEGACY_BACKEND
1344         if (asgType == TYP_STRUCT)
1345         {
1346             GenTree* srcAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, src);
1347             src              = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
1348         }
1349 #endif
1350     }
1351     if (dest == nullptr)
1352     {
1353         // TODO-1stClassStructs: We shouldn't really need a block node as the destination
1354         // if this is a known struct type.
1355         if (asgType == TYP_STRUCT)
1356         {
1357             dest = gtNewObjNode(structHnd, destAddr);
1358             gtSetObjGcInfo(dest->AsObj());
1359             // Although an obj as a call argument was always assumed to be a globRef
1360             // (which is itself overly conservative), that is not true of the operands
1361             // of a block assignment.
1362             dest->gtFlags &= ~GTF_GLOB_REF;
1363             dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF);
1364         }
1365         else if (varTypeIsStruct(asgType))
1366         {
1367             dest = new (this, GT_BLK) GenTreeBlk(GT_BLK, asgType, destAddr, genTypeSize(asgType));
1368         }
1369         else
1370         {
1371             dest = gtNewOperNode(GT_IND, asgType, destAddr);
1372         }
1373     }
1374     else
1375     {
1376         dest->gtType = asgType;
1377     }
1378
1379     dest->gtFlags |= destFlags;
1380     destFlags = dest->gtFlags;
1381
1382     // return an assignment node, to be appended
1383     GenTree* asgNode = gtNewAssignNode(dest, src);
1384     gtBlockOpInit(asgNode, dest, src, false);
1385
1386     // TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs
1387     // of assignments.
1388     if ((destFlags & GTF_DONT_CSE) == 0)
1389     {
1390         dest->gtFlags &= ~(GTF_DONT_CSE);
1391     }
1392     return asgNode;
1393 }
1394
1395 /*****************************************************************************
1396    Given a struct value, and the class handle for that structure, return
1397    the expression for the address for that structure value.
1398
1399    willDeref - does the caller guarantee that it will dereference the returned pointer?
1400 */
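// For illustration: an OBJ that the caller will dereference simply yields its address operand; a
// call, GT_RET_EXPR, mkrefany (or an OBJ that will not be dereferenced) is first spilled to a temp
// and ADDR(temp) is returned; anything else is wrapped directly in ADDR(structVal).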
1401
1402 GenTreePtr Compiler::impGetStructAddr(GenTreePtr           structVal,
1403                                       CORINFO_CLASS_HANDLE structHnd,
1404                                       unsigned             curLevel,
1405                                       bool                 willDeref)
1406 {
1407     assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd));
1408
1409     var_types type = structVal->TypeGet();
1410
1411     genTreeOps oper = structVal->gtOper;
1412
1413     if (oper == GT_OBJ && willDeref)
1414     {
1415         assert(structVal->gtObj.gtClass == structHnd);
1416         return (structVal->gtObj.Addr());
1417     }
1418     else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY)
1419     {
1420         unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1421
1422         impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1423
1424         // The 'return value' is now the temp itself
1425
1426         type            = genActualType(lvaTable[tmpNum].TypeGet());
1427         GenTreePtr temp = gtNewLclvNode(tmpNum, type);
1428         temp            = gtNewOperNode(GT_ADDR, TYP_BYREF, temp);
1429         return temp;
1430     }
1431     else if (oper == GT_COMMA)
1432     {
1433         assert(structVal->gtOp.gtOp2->gtType == type); // Second thing is the struct
1434
1435         GenTreePtr oldTreeLast = impTreeLast;
1436         structVal->gtOp.gtOp2  = impGetStructAddr(structVal->gtOp.gtOp2, structHnd, curLevel, willDeref);
1437         structVal->gtType      = TYP_BYREF;
1438
1439         if (oldTreeLast != impTreeLast)
1440         {
1441             // Some temp assignment statement was placed on the statement list
1442             // for Op2, but that would be out of order with op1, so we need to
1443             // spill op1 onto the statement list after whatever was last
1444             // before we recursed on Op2 (i.e. before whatever Op2 appended).
1445             impInsertTreeBefore(structVal->gtOp.gtOp1, impCurStmtOffs, oldTreeLast->gtNext);
1446             structVal->gtOp.gtOp1 = gtNewNothingNode();
1447         }
1448
1449         return (structVal);
1450     }
1451
1452     return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1453 }
1454
1455 //------------------------------------------------------------------------
1456 // impNormStructType: Given a (known to be) struct class handle structHnd, normalize its type,
1457 //                    and optionally determine the GC layout of the struct.
1458 //
1459 // Arguments:
1460 //    structHnd       - The class handle for the struct type of interest.
1461 //    gcLayout        - (optional, default nullptr) - a BYTE pointer, allocated by the caller,
1462 //                      into which the gcLayout will be written.
1463 //    pNumGCVars      - (optional, default nullptr) - if non-null, a pointer to an unsigned,
1464 //                      which will be set to the number of GC fields in the struct.
1465 //    pSimdBaseType   - (optional, default nullptr) - if non-null, and the struct is a SIMD
1466 //                      type, set to the SIMD base type
1467 //
1468 // Return Value:
1469 //    The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*).
1470 //    The gcLayout will be returned using the pointers provided by the caller, if non-null.
1471 //    It may also modify the compFloatingPointUsed flag if the type is a SIMD type.
1472 //
1473 // Assumptions:
1474 //    The caller must set gcLayout to nullptr OR ensure that it is large enough
1475 //    (see ICorStaticInfo::getClassGClayout in corinfo.h).
1476 //
1477 // Notes:
1478 //    Normalizing the type involves examining the struct type to determine if it should
1479 //    be modified to one that is handled specially by the JIT, possibly being a candidate
1480 //    for full enregistration, e.g. TYP_SIMD16.
1481
1482 var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
1483                                       BYTE*                gcLayout,
1484                                       unsigned*            pNumGCVars,
1485                                       var_types*           pSimdBaseType)
1486 {
1487     assert(structHnd != NO_CLASS_HANDLE);
1488
1489     const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd);
1490     var_types   structType  = TYP_STRUCT;
1491
1492 #ifdef FEATURE_CORECLR
1493     const bool hasGCPtrs = (structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0;
1494 #else
1495     // Desktop CLR won't report FLG_CONTAINS_GC_PTR for RefAnyClass - need to check explicitly.
1496     const bool        isRefAny    = (structHnd == impGetRefAnyClass());
1497     const bool        hasGCPtrs   = isRefAny || ((structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0);
1498 #endif
1499
1500 #ifdef FEATURE_SIMD
1501     // Check to see if this is a SIMD type.
1502     if (featureSIMD && !hasGCPtrs)
1503     {
1504         unsigned originalSize = info.compCompHnd->getClassSize(structHnd);
1505
1506         if ((originalSize >= minSIMDStructBytes()) && (originalSize <= maxSIMDStructBytes()))
1507         {
1508             unsigned int sizeBytes;
1509             var_types    simdBaseType = getBaseTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
1510             if (simdBaseType != TYP_UNKNOWN)
1511             {
1512                 assert(sizeBytes == originalSize);
1513                 structType = getSIMDTypeForSize(sizeBytes);
1514                 if (pSimdBaseType != nullptr)
1515                 {
1516                     *pSimdBaseType = simdBaseType;
1517                 }
1518 #ifdef _TARGET_AMD64_
1519                 // Amd64: also indicate that we use floating point registers
1520                 compFloatingPointUsed = true;
1521 #endif
1522             }
1523         }
1524     }
1525 #endif // FEATURE_SIMD
1526
1527     // Fetch GC layout info if requested
1528     if (gcLayout != nullptr)
1529     {
1530         unsigned numGCVars = info.compCompHnd->getClassGClayout(structHnd, gcLayout);
1531
1532         // Verify that the quick test up above via the class attributes gave a
1533         // safe view of the type's GCness.
1534         //
1535         // Note there are cases where hasGCPtrs is true but getClassGClayout
1536         // does not report any gc fields.
1537         assert(hasGCPtrs || (numGCVars == 0));
1538
1539         if (pNumGCVars != nullptr)
1540         {
1541             *pNumGCVars = numGCVars;
1542         }
1543     }
1544     else
1545     {
1546         // Can't safely ask for number of GC pointers without also
1547         // asking for layout.
1548         assert(pNumGCVars == nullptr);
1549     }
1550
1551     return structType;
1552 }
1553
1554 //****************************************************************************
1555 //  Given TYP_STRUCT value 'structVal', make sure it is 'canonical', that is
1556 //  it is either an OBJ or a MKREFANY node, or a node (e.g. GT_INDEX) that will be morphed.
1557 //
1558 GenTreePtr Compiler::impNormStructVal(GenTreePtr           structVal,
1559                                       CORINFO_CLASS_HANDLE structHnd,
1560                                       unsigned             curLevel,
1561                                       bool                 forceNormalization /*=false*/)
1562 {
1563     assert(forceNormalization || varTypeIsStruct(structVal));
1564     assert(structHnd != NO_CLASS_HANDLE);
1565     var_types structType = structVal->TypeGet();
1566     bool      makeTemp   = false;
1567     if (structType == TYP_STRUCT)
1568     {
1569         structType = impNormStructType(structHnd);
1570     }
1571     bool                 alreadyNormalized = false;
1572     GenTreeLclVarCommon* structLcl         = nullptr;
1573
1574     genTreeOps oper = structVal->OperGet();
1575     switch (oper)
1576     {
1577         // GT_RETURN and GT_MKREFANY don't capture the handle.
1578         case GT_RETURN:
1579             break;
1580         case GT_MKREFANY:
1581             alreadyNormalized = true;
1582             break;
1583
1584         case GT_CALL:
1585             structVal->gtCall.gtRetClsHnd = structHnd;
1586             makeTemp                      = true;
1587             break;
1588
1589         case GT_RET_EXPR:
1590             structVal->gtRetExpr.gtRetClsHnd = structHnd;
1591             makeTemp                         = true;
1592             break;
1593
1594         case GT_ARGPLACE:
1595             structVal->gtArgPlace.gtArgPlaceClsHnd = structHnd;
1596             break;
1597
1598         case GT_INDEX:
1599             // This will be transformed to an OBJ later.
1600             alreadyNormalized                    = true;
1601             structVal->gtIndex.gtStructElemClass = structHnd;
1602             structVal->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(structHnd);
1603             break;
1604
1605         case GT_FIELD:
1606             // Wrap it in a GT_OBJ.
1607             structVal->gtType = structType;
1608             structVal         = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1609             break;
1610
1611         case GT_LCL_VAR:
1612         case GT_LCL_FLD:
1613             structLcl = structVal->AsLclVarCommon();
1614             // Wrap it in a GT_OBJ.
1615             structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1616             __fallthrough;
1617
1618         case GT_OBJ:
1619         case GT_BLK:
1620         case GT_DYN_BLK:
1621         case GT_ASG:
1622             // These should already have the appropriate type.
1623             assert(structVal->gtType == structType);
1624             alreadyNormalized = true;
1625             break;
1626
1627         case GT_IND:
1628             assert(structVal->gtType == structType);
1629             structVal         = gtNewObjNode(structHnd, structVal->gtGetOp1());
1630             alreadyNormalized = true;
1631             break;
1632
1633 #ifdef FEATURE_SIMD
1634         case GT_SIMD:
1635             assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
1636             break;
1637 #endif // FEATURE_SIMD
1638
1639         case GT_COMMA:
1640         {
1641             // The second thing is the block node.
1642             GenTree* blockNode = structVal->gtOp.gtOp2;
1643             assert(blockNode->gtType == structType);
1644             // It had better be a block node - any others should not occur here.
1645             assert(blockNode->OperIsBlk());
1646
1647             // Sink the GT_COMMA below the blockNode addr.
1648             GenTree* blockNodeAddr = blockNode->gtOp.gtOp1;
1649             assert(blockNodeAddr->gtType == TYP_BYREF);
1650             GenTree* commaNode    = structVal;
1651             commaNode->gtType     = TYP_BYREF;
1652             commaNode->gtOp.gtOp2 = blockNodeAddr;
1653             blockNode->gtOp.gtOp1 = commaNode;
1654             structVal             = blockNode;
1655             alreadyNormalized     = true;
1656         }
1657         break;
1658
1659         default:
1660             assert(!"Unexpected node in impNormStructVal()");
1661             break;
1662     }
1663     structVal->gtType  = structType;
1664     GenTree* structObj = structVal;
1665
1666     if (!alreadyNormalized || forceNormalization)
1667     {
1668         if (makeTemp)
1669         {
1670             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1671
1672             impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1673
1674             // The structVal is now the temp itself
1675
1676             structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon();
1677             // TODO-1stClassStructs: Avoid always wrapping in GT_OBJ.
1678             structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structLcl));
1679         }
1680         else if (varTypeIsStruct(structType) && !structVal->OperIsBlk())
1681         {
1682             // Wrap it in a GT_OBJ
1683             structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1684         }
1685     }
1686
1687     if (structLcl != nullptr)
1688     {
1689         // An OBJ on an ADDR(LCL_VAR) can never raise an exception
1690         // so we don't set GTF_EXCEPT here.
1691         if (!lvaIsImplicitByRefLocal(structLcl->gtLclNum))
1692         {
1693             structObj->gtFlags &= ~GTF_GLOB_REF;
1694         }
1695     }
1696     else
1697     {
1698         // In general an OBJ is an indirection and could raise an exception.
1699         structObj->gtFlags |= GTF_EXCEPT;
1700     }
1701     return (structObj);
1702 }
1703
1704 /******************************************************************************/
1705 // Given a type token, generate code that will evaluate to the correct
1706 // handle representation of that token (type handle, field handle, or method handle)
1707 //
1708 // For most cases, the handle is determined at compile-time, and the code
1709 // generated is simply an embedded handle.
1710 //
1711 // Run-time lookup is required if the enclosing method is shared between instantiations
1712 // and the token refers to formal type parameters whose instantiation is not known
1713 // at compile-time.
1714 //
1715 GenTreePtr Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1716                                       BOOL*                   pRuntimeLookup /* = NULL */,
1717                                       BOOL                    mustRestoreHandle /* = FALSE */,
1718                                       BOOL                    importParent /* = FALSE */)
1719 {
1720     assert(!fgGlobalMorph);
1721
1722     CORINFO_GENERICHANDLE_RESULT embedInfo;
1723     info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo);
1724
1725     if (pRuntimeLookup)
1726     {
1727         *pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup;
1728     }
1729
1730     if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup)
1731     {
1732         switch (embedInfo.handleType)
1733         {
1734             case CORINFO_HANDLETYPE_CLASS:
1735                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle);
1736                 break;
1737
1738             case CORINFO_HANDLETYPE_METHOD:
1739                 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle);
1740                 break;
1741
1742             case CORINFO_HANDLETYPE_FIELD:
1743                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
1744                     info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle));
1745                 break;
1746
1747             default:
1748                 break;
1749         }
1750     }
1751
1752     return impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
1753                            embedInfo.compileTimeHandle);
1754 }
1755
1756 GenTreePtr Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1757                                      CORINFO_LOOKUP*         pLookup,
1758                                      unsigned                handleFlags,
1759                                      void*                   compileTimeHandle)
1760 {
1761     if (!pLookup->lookupKind.needsRuntimeLookup)
1762     {
1763         // No runtime lookup is required.
1764         // Access is a direct reference, or a memory-indirect reference to a fixed address
1765
1766         CORINFO_GENERIC_HANDLE handle       = nullptr;
1767         void*                  pIndirection = nullptr;
1768         assert(pLookup->constLookup.accessType != IAT_PPVALUE);
1769
1770         if (pLookup->constLookup.accessType == IAT_VALUE)
1771         {
1772             handle = pLookup->constLookup.handle;
1773         }
1774         else if (pLookup->constLookup.accessType == IAT_PVALUE)
1775         {
1776             pIndirection = pLookup->constLookup.addr;
1777         }
1778         return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, 0, nullptr, compileTimeHandle);
1779     }
1780     else if (compIsForInlining())
1781     {
1782         // Don't import runtime lookups when inlining
1783         // Inlining has to be aborted in such a case
1784         compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1785         return nullptr;
1786     }
1787     else
1788     {
1789         // Need to use dictionary-based access which depends on the typeContext
1790         // which is only available at runtime, not at compile-time.
1791
1792         return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle);
1793     }
1794 }
1795
1796 #ifdef FEATURE_READYTORUN_COMPILER
1797 GenTreePtr Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup,
1798                                                unsigned              handleFlags,
1799                                                void*                 compileTimeHandle)
1800 {
1801     CORINFO_GENERIC_HANDLE handle       = nullptr;
1802     void*                  pIndirection = nullptr;
1803     assert(pLookup->accessType != IAT_PPVALUE);
1804
1805     if (pLookup->accessType == IAT_VALUE)
1806     {
1807         handle = pLookup->handle;
1808     }
1809     else if (pLookup->accessType == IAT_PVALUE)
1810     {
1811         pIndirection = pLookup->addr;
1812     }
1813     return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, 0, nullptr, compileTimeHandle);
1814 }
1815
1816 GenTreePtr Compiler::impReadyToRunHelperToTree(
1817     CORINFO_RESOLVED_TOKEN* pResolvedToken,
1818     CorInfoHelpFunc         helper,
1819     var_types               type,
1820     GenTreeArgList*         args /* =NULL*/,
1821     CORINFO_LOOKUP_KIND*    pGenericLookupKind /* =NULL. Only used with generics */)
1822 {
1823     CORINFO_CONST_LOOKUP lookup;
1824 #if COR_JIT_EE_VERSION > 460
1825     if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup))
1826     {
1827         return nullptr;
1828     }
1829 #else
1830     info.compCompHnd->getReadyToRunHelper(pResolvedToken, helper, &lookup);
1831 #endif
1832
1833     GenTreePtr op1 = gtNewHelperCallNode(helper, type, GTF_EXCEPT, args);
1834
1835     op1->gtCall.setEntryPoint(lookup);
1836
1837     return op1;
1838 }
1839 #endif
1840
1841 GenTreePtr Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
1842 {
1843     GenTreePtr op1 = nullptr;
1844
1845     switch (pCallInfo->kind)
1846     {
1847         case CORINFO_CALL:
1848             op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod);
1849
1850 #ifdef FEATURE_READYTORUN_COMPILER
1851             if (opts.IsReadyToRun())
1852             {
1853                 op1->gtFptrVal.gtEntryPoint          = pCallInfo->codePointerLookup.constLookup;
1854                 op1->gtFptrVal.gtLdftnResolvedToken  = new (this, CMK_Unknown) CORINFO_RESOLVED_TOKEN;
1855                 *op1->gtFptrVal.gtLdftnResolvedToken = *pResolvedToken;
1856             }
1857             else
1858             {
1859                 op1->gtFptrVal.gtEntryPoint.addr = nullptr;
1860             }
1861 #endif
1862             break;
1863
1864         case CORINFO_CALL_CODE_POINTER:
1865             if (compIsForInlining())
1866             {
1867                 // Don't import runtime lookups when inlining
1868                 // Inlining has to be aborted in such a case
1869                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1870                 return nullptr;
1871             }
1872
1873             op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod);
1874             break;
1875
1876         default:
1877             noway_assert(!"unknown call kind");
1878             break;
1879     }
1880
1881     return op1;
1882 }
1883
1884 //------------------------------------------------------------------------
1885 // getRuntimeContextTree: find pointer to context for runtime lookup.
1886 //
1887 // Arguments:
1888 //    kind - lookup kind.
1889 //
1890 // Return Value:
1891 //    Return GenTree pointer to generic shared context.
1892 //
1893 // Notes:
1894 //    Reports that the generic context is used.
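//    For illustration: lvaGenericsContextUsed is set so that the use is reported; for
//    CORINFO_LOOKUP_THISOBJ the context is the vtable pointer loaded from 'this', otherwise the
//    hidden type/method context argument is used directly.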
1895
1896 GenTreePtr Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind)
1897 {
1898     GenTreePtr ctxTree = nullptr;
1899
1900     // Collectible types require that, for shared generic code, any use of the generic context parameter
1901     // is reported. (This is a conservative approach; in some cases, particularly when the context
1902     // parameter is 'this', we could avoid the eager reporting logic.)
1903     lvaGenericsContextUsed = true;
1904
1905     if (kind == CORINFO_LOOKUP_THISOBJ)
1906     {
1907         // this Object
1908         ctxTree = gtNewLclvNode(info.compThisArg, TYP_REF);
1909
1910         // Vtable pointer of this object
1911         ctxTree = gtNewOperNode(GT_IND, TYP_I_IMPL, ctxTree);
1912         ctxTree->gtFlags |= GTF_EXCEPT; // Null-pointer exception
1913         ctxTree->gtFlags |= GTF_IND_INVARIANT;
1914     }
1915     else
1916     {
1917         assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM);
1918
1919         ctxTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); // Exact method descriptor passed in as the last arg
1920     }
1921     return ctxTree;
1922 }
1923
1924 /*****************************************************************************/
1925 /* Import a dictionary lookup to access a handle in code shared between
1926    generic instantiations.
1927    The lookup depends on the typeContext which is only available at
1928    runtime, and not at compile-time.
1929    pLookup->token1 and pLookup->token2 specify the handle that is needed.
1930    The cases are:
1931
1932    1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the
1933       instantiation-specific handle, and the tokens to look up the handle.
1934    2. pLookup->indirections != CORINFO_USEHELPER :
1935       2a. pLookup->testForNull == false : Dereference the instantiation-specific handle
1936           to get the handle.
1937       2b. pLookup->testForNull == true : Dereference the instantiation-specific handle.
1938           If it is non-NULL, it is the handle required. Else, call a helper
1939           to lookup the handle.
1940  */
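// For illustration, case 2b below builds a GT_QMARK tree that tests the dereferenced handle for
// null, calls the helper only when it is null, and spills the result to a temp so that a simple
// LCL_VAR reference can be returned.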
1941
1942 GenTreePtr Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1943                                             CORINFO_LOOKUP*         pLookup,
1944                                             void*                   compileTimeHandle)
1945 {
1946
1947     // This method can only be called from the importer instance of the Compiler.
1948     // In other words, it cannot be called by the Compiler instance created for the inlinee.
1949     assert(!compIsForInlining());
1950
1951     GenTreePtr ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);
1952
1953 #ifdef FEATURE_READYTORUN_COMPILER
1954     if (opts.IsReadyToRun())
1955     {
1956         return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
1957                                          gtNewArgList(ctxTree), &pLookup->lookupKind);
1958     }
1959 #endif
1960
1961     CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;
1962     // It's available only via the run-time helper function
1963     if (pRuntimeLookup->indirections == CORINFO_USEHELPER)
1964     {
1965         GenTreeArgList* helperArgs =
1966             gtNewArgList(ctxTree, gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, 0,
1967                                                       nullptr, compileTimeHandle));
1968
1969         return gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, GTF_EXCEPT, helperArgs);
1970     }
1971
1972     // Slot pointer
1973     GenTreePtr slotPtrTree = ctxTree;
1974
1975     if (pRuntimeLookup->testForNull)
1976     {
1977         slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
1978                                    nullptr DEBUGARG("impRuntimeLookup slot"));
1979     }
1980
1981     // Apply the repeated indirections
1982     for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
1983     {
1984         if (i != 0)
1985         {
1986             slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
1987             slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
1988             slotPtrTree->gtFlags |= GTF_IND_INVARIANT;
1989         }
1990         if (pRuntimeLookup->offsets[i] != 0)
1991         {
1992             slotPtrTree =
1993                 gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
1994         }
1995     }
1996
1997     // No null test required
1998     if (!pRuntimeLookup->testForNull)
1999     {
2000         if (pRuntimeLookup->indirections == 0)
2001         {
2002             return slotPtrTree;
2003         }
2004
2005         slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2006         slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2007
2008         if (!pRuntimeLookup->testForFixup)
2009         {
2010             return slotPtrTree;
2011         }
2012
2013         impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0"));
2014
2015         GenTreePtr op1 = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2016                                       nullptr DEBUGARG("impRuntimeLookup test"));
2017         op1 = impImplicitIorI4Cast(op1, TYP_INT); // downcast the pointer to a TYP_INT on 64-bit targets
2018
2019         // Use a GT_AND to check for the lowest bit and indirect if it is set
2020         GenTreePtr testTree = gtNewOperNode(GT_AND, TYP_INT, op1, gtNewIconNode(1));
2021         GenTreePtr relop    = gtNewOperNode(GT_EQ, TYP_INT, testTree, gtNewIconNode(0));
2022         relop->gtFlags |= GTF_RELOP_QMARK;
2023
2024         op1 = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2025                            nullptr DEBUGARG("impRuntimeLookup indir"));
2026         op1 = gtNewOperNode(GT_ADD, TYP_I_IMPL, op1, gtNewIconNode(-1, TYP_I_IMPL)); // subtract 1 from the pointer
2027         GenTreePtr indirTree = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
2028         GenTreePtr colon     = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL, slotPtrTree, indirTree);
2029
2030         GenTreePtr qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2031
2032         unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark0"));
2033         impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2034         return gtNewLclvNode(tmp, TYP_I_IMPL);
2035     }
2036
2037     assert(pRuntimeLookup->indirections != 0);
2038
2039     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1"));
2040
2041     // Extract the handle
2042     GenTreePtr handle = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2043     handle->gtFlags |= GTF_IND_NONFAULTING;
2044
2045     GenTreePtr handleCopy = impCloneExpr(handle, &handle, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2046                                          nullptr DEBUGARG("impRuntimeLookup typehandle"));
2047
2048     // Call to helper
2049     GenTreeArgList* helperArgs =
2050         gtNewArgList(ctxTree, gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, 0, nullptr,
2051                                                   compileTimeHandle));
2052     GenTreePtr helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, GTF_EXCEPT, helperArgs);
2053
2054     // Check for null and possibly call helper
2055     GenTreePtr relop = gtNewOperNode(GT_NE, TYP_INT, handle, gtNewIconNode(0, TYP_I_IMPL));
2056     relop->gtFlags |= GTF_RELOP_QMARK;
2057
2058     GenTreePtr colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL,
2059                                                          gtNewNothingNode(), // do nothing if nonnull
2060                                                          helperCall);
2061
2062     GenTreePtr qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2063
2064     unsigned tmp;
2065     if (handleCopy->IsLocal())
2066     {
2067         tmp = handleCopy->gtLclVarCommon.gtLclNum;
2068     }
2069     else
2070     {
2071         tmp = lvaGrabTemp(true DEBUGARG("spilling QMark1"));
2072     }
2073
2074     impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2075     return gtNewLclvNode(tmp, TYP_I_IMPL);
2076 }
2077
2078 /******************************************************************************
2079  *  Spills the stack at verCurrentState.esStack[level] and replaces it with a temp.
2080  *  If tnum!=BAD_VAR_NUM, the temp var used to replace the tree is tnum,
2081  *     else, grab a new temp.
2082  *  For structs (which can be pushed on the stack using obj, etc),
2083  *  special handling is needed
2084  */
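// For illustration: spilling a stack entry that currently holds a call appends "tmpN = <call>" to
// the statement list and replaces the stack entry with a LCL_VAR reference to tmpN.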
2085
2086 struct RecursiveGuard
2087 {
2088 public:
2089     RecursiveGuard()
2090     {
2091         m_pAddress = nullptr;
2092     }
2093
2094     ~RecursiveGuard()
2095     {
2096         if (m_pAddress)
2097         {
2098             *m_pAddress = false;
2099         }
2100     }
2101
2102     void Init(bool* pAddress, bool bInitialize)
2103     {
2104         assert(pAddress && *pAddress == false && "Recursive guard violation");
2105         m_pAddress = pAddress;
2106
2107         if (bInitialize)
2108         {
2109             *m_pAddress = true;
2110         }
2111     }
2112
2113 protected:
2114     bool* m_pAddress;
2115 };
2116
2117 bool Compiler::impSpillStackEntry(unsigned level,
2118                                   unsigned tnum
2119 #ifdef DEBUG
2120                                   ,
2121                                   bool        bAssertOnRecursion,
2122                                   const char* reason
2123 #endif
2124                                   )
2125 {
2126
2127 #ifdef DEBUG
2128     RecursiveGuard guard;
2129     guard.Init(&impNestedStackSpill, bAssertOnRecursion);
2130 #endif
2131
2132     GenTreePtr tree = verCurrentState.esStack[level].val;
2133
2134     /* Allocate a temp if we haven't been asked to use a particular one */
2135
2136     if (tiVerificationNeeded)
2137     {
2138         // Ignore bad temp requests (they will happen with bad code and will be
2139         // caught when importing the destblock)
2140         if ((tnum != BAD_VAR_NUM && tnum >= lvaCount) && verNeedsVerification())
2141         {
2142             return false;
2143         }
2144     }
2145     else
2146     {
2147         if (tnum != BAD_VAR_NUM && (tnum >= lvaCount))
2148         {
2149             return false;
2150         }
2151     }
2152
2153     if (tnum == BAD_VAR_NUM)
2154     {
2155         tnum = lvaGrabTemp(true DEBUGARG(reason));
2156     }
2157     else if (tiVerificationNeeded && lvaTable[tnum].TypeGet() != TYP_UNDEF)
2158     {
2159         // if verification is needed and tnum's type is incompatible with
2160         // the type on the stack, we grab a new temp. This is safe since
2161         // we will throw a verification exception in the dest block.
2162
2163         var_types valTyp = tree->TypeGet();
2164         var_types dstTyp = lvaTable[tnum].TypeGet();
2165
2166         // if the two types are different, we return. This will only happen with bad code and will
2167         // be caught when importing the destblock. We still allow int/byref and float/double differences.
2168         if ((genActualType(valTyp) != genActualType(dstTyp)) &&
2169             !(
2170 #ifndef _TARGET_64BIT_
2171                 (valTyp == TYP_I_IMPL && dstTyp == TYP_BYREF) || (valTyp == TYP_BYREF && dstTyp == TYP_I_IMPL) ||
2172 #endif // !_TARGET_64BIT_
2173                 (varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp))))
2174         {
2175             if (verNeedsVerification())
2176             {
2177                 return false;
2178             }
2179         }
2180     }
2181
2182     /* Assign the spilled entry to the temp */
2183     impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level);
2184
2185     // The tree type may be modified by impAssignTempGen, so use the type of the lclVar.
2186     var_types  type                    = genActualType(lvaTable[tnum].TypeGet());
2187     GenTreePtr temp                    = gtNewLclvNode(tnum, type);
2188     verCurrentState.esStack[level].val = temp;
2189
2190     return true;
2191 }
2192
2193 /*****************************************************************************
2194  *
2195  *  Ensure that the stack has only spilled values
2196  */
2197
2198 void Compiler::impSpillStackEnsure(bool spillLeaves)
2199 {
2200     assert(!spillLeaves || opts.compDbgCode);
2201
2202     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2203     {
2204         GenTreePtr tree = verCurrentState.esStack[level].val;
2205
2206         if (!spillLeaves && tree->OperIsLeaf())
2207         {
2208             continue;
2209         }
2210
2211         // Temps introduced by the importer itself don't need to be spilled
2212
2213         bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) && (tree->gtLclVarCommon.gtLclNum >= info.compLocalsCount);
2214
2215         if (isTempLcl)
2216         {
2217             continue;
2218         }
2219
2220         impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure"));
2221     }
2222 }
2223
2224 void Compiler::impSpillEvalStack()
2225 {
2226     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2227     {
2228         impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack"));
2229     }
2230 }
2231
2232 /*****************************************************************************
2233  *
2234  *  If the stack contains any trees with side effects in them, assign those
2235  *  trees to temps and append the assignments to the statement list.
2236  *  On return the stack is guaranteed to be empty.
2237  */
2238
2239 inline void Compiler::impEvalSideEffects()
2240 {
2241     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects"));
2242     verCurrentState.esStackDepth = 0;
2243 }
2244
2245 /*****************************************************************************
2246  *
2247  *  If the stack contains any trees with side effects in them, assign those
2248  *  trees to temps and replace them on the stack with refs to their temps.
2249  *  [0..chkLevel) is the portion of the stack which will be checked and spilled.
2250  */
2251
2252 inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason))
2253 {
2254     assert(chkLevel != (unsigned)CHECK_SPILL_NONE);
2255
2256     /* Before we make any appends to the tree list we must spill the
2257      * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */
2258
2259     impSpillSpecialSideEff();
2260
2261     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
2262     {
2263         chkLevel = verCurrentState.esStackDepth;
2264     }
2265
2266     assert(chkLevel <= verCurrentState.esStackDepth);
2267
2268     unsigned spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT;
2269
2270     for (unsigned i = 0; i < chkLevel; i++)
2271     {
2272         GenTreePtr tree = verCurrentState.esStack[i].val;
2273
2274         GenTreePtr lclVarTree;
2275
2276         if ((tree->gtFlags & spillFlags) != 0 ||
2277             (spillGlobEffects &&                        // Only consider the following when  spillGlobEffects == TRUE
2278              !impIsAddressInLocal(tree, &lclVarTree) && // No need to spill the GT_ADDR node on a local.
2279              gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or
2280                                            // lvAddrTaken flag.
2281         {
2282             impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason));
2283         }
2284     }
2285 }
2286
2287 /*****************************************************************************
2288  *
2289  *  If the stack contains any trees with special side effects in them, assign
2290  *  those trees to temps and replace them on the stack with refs to their temps.
2291  */
2292
2293 inline void Compiler::impSpillSpecialSideEff()
2294 {
2295     // Only exception objects need to be carefully handled
2296
2297     if (!compCurBB->bbCatchTyp)
2298     {
2299         return;
2300     }
2301
2302     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2303     {
2304         GenTreePtr tree = verCurrentState.esStack[level].val;
2305         // If there is an exception object anywhere in the sub tree, spill this stack entry.
2306         if (gtHasCatchArg(tree))
2307         {
2308             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff"));
2309         }
2310     }
2311 }
2312
2313 /*****************************************************************************
2314  *
2315  *  Spill all stack references to value classes (TYP_STRUCT nodes)
2316  */
2317
2318 void Compiler::impSpillValueClasses()
2319 {
2320     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2321     {
2322         GenTreePtr tree = verCurrentState.esStack[level].val;
2323
2324         if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT)
2325         {
2326             // Tree walk was aborted, which means that we found a
2327             // value class on the stack.  Need to spill that
2328             // stack entry.
2329
2330             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses"));
2331         }
2332     }
2333 }
2334
2335 /*****************************************************************************
2336  *
2337  *  Callback that checks if a tree node is TYP_STRUCT
2338  */
2339
2340 Compiler::fgWalkResult Compiler::impFindValueClasses(GenTreePtr* pTree, fgWalkData* data)
2341 {
2342     fgWalkResult walkResult = WALK_CONTINUE;
2343
2344     if ((*pTree)->gtType == TYP_STRUCT)
2345     {
2346         // Abort the walk and indicate that we found a value class
2347
2348         walkResult = WALK_ABORT;
2349     }
2350
2351     return walkResult;
2352 }
2353
2354 /*****************************************************************************
2355  *
2356  *  If the stack contains any trees with references to local #lclNum, assign
2357  *  those trees to temps and replace their place on the stack with refs to
2358  *  their temps.
2359  */
2360
2361 void Compiler::impSpillLclRefs(ssize_t lclNum)
2362 {
2363     /* Before we make any appends to the tree list we must spill the
2364      * "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */
2365
2366     impSpillSpecialSideEff();
2367
2368     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2369     {
2370         GenTreePtr tree = verCurrentState.esStack[level].val;
2371
2372         /* If the tree may throw an exception, and the block has a handler,
2373            then we need to spill assignments to the local if the local is
2374            live on entry to the handler.
2375            Just spill 'em all without considering the liveness */
2376
2377         bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT));
2378
2379         /* Skip the tree if it doesn't have an affected reference,
2380            unless xcptnCaught */
2381
2382         if (xcptnCaught || gtHasRef(tree, lclNum, false))
2383         {
2384             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs"));
2385         }
2386     }
2387 }
2388
2389 /*****************************************************************************
2390  *
2391  *  Push catch arg onto the stack.
2392  *  If there are jumps to the beginning of the handler, insert basic block
2393  *  and spill catch arg to a temp. Update the handler block if necessary.
2394  *
2395  *  Returns the basic block of the actual handler.
2396  */
2397
2398 BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd)
2399 {
2400     // Do not inject the basic block twice on reimport. This should be
2401     // hit only under JIT stress. See if the block is the one we injected.
2402     // Note that EH canonicalization can inject internal blocks here. We might
2403     // be able to re-use such a block (but we don't, right now).
2404     if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET)) ==
2405         (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET))
2406     {
2407         GenTreePtr tree = hndBlk->bbTreeList;
2408
2409         if (tree != nullptr && tree->gtOper == GT_STMT)
2410         {
2411             tree = tree->gtStmt.gtStmtExpr;
2412             assert(tree != nullptr);
2413
2414             if ((tree->gtOper == GT_ASG) && (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
2415                 (tree->gtOp.gtOp2->gtOper == GT_CATCH_ARG))
2416             {
2417                 tree = gtNewLclvNode(tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum, TYP_REF);
2418
2419                 impPushOnStack(tree, typeInfo(TI_REF, clsHnd));
2420
2421                 return hndBlk->bbNext;
2422             }
2423         }
2424
2425         // If we get here, it must have been some other kind of internal block. It's possible that
2426         // someone prepended something to our injected block, but that's unlikely.
2427     }
2428
2429     /* Push the exception address value on the stack */
2430     GenTreePtr arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF);
2431
2432     /* Mark the node as having a side-effect - i.e. cannot be
2433      * moved around since it is tied to a fixed location (EAX) */
2434     arg->gtFlags |= GTF_ORDER_SIDEEFF;
2435
2436     /* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */
2437     if (hndBlk->bbRefs > 1 || compStressCompile(STRESS_CATCH_ARG, 5))
2438     {
2439         if (hndBlk->bbRefs == 1)
2440         {
2441             hndBlk->bbRefs++;
2442         }
2443
2444         /* Create extra basic block for the spill */
2445         BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true);
2446         newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET;
2447         newBlk->setBBWeight(hndBlk->bbWeight);
2448         newBlk->bbCodeOffs = hndBlk->bbCodeOffs;
2449
2450         /* Account for the new link we are about to create */
2451         hndBlk->bbRefs++;
2452
2453         /* Spill into a temp */
2454         unsigned tempNum         = lvaGrabTemp(false DEBUGARG("SpillCatchArg"));
2455         lvaTable[tempNum].lvType = TYP_REF;
2456         arg                      = gtNewTempAssign(tempNum, arg);
2457
2458         hndBlk->bbStkTempsIn = tempNum;
2459
2460         /* Report the debug info. impImportBlockCode won't treat
2461          * the actual handler as an exception block and thus won't do it for us. */
2462         if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
2463         {
2464             impCurStmtOffs = newBlk->bbCodeOffs | IL_OFFSETX_STKBIT;
2465             arg            = gtNewStmt(arg, impCurStmtOffs);
2466         }
2467
2468         fgInsertStmtAtEnd(newBlk, arg);
2469
2470         arg = gtNewLclvNode(tempNum, TYP_REF);
2471     }
2472
2473     impPushOnStack(arg, typeInfo(TI_REF, clsHnd));
2474
2475     return hndBlk;
2476 }
2477
2478 /*****************************************************************************
2479  *
2480  *  Given a tree, clone it. *pClone is set to the cloned tree.
2481  *  Returns the original tree if the cloning was easy,
2482  *   else returns the temp to which the tree had to be spilled.
2483  *  If the tree has side-effects, it will be spilled to a temp.
2484  */
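//  For illustration: when the tree has no global side effects and gtClone succeeds, the original
//  tree is returned and *pClone is the duplicate; otherwise "tmp = tree" is appended and both the
//  return value and *pClone become LCL_VAR references to tmp.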
2485
2486 GenTreePtr Compiler::impCloneExpr(GenTreePtr           tree,
2487                                   GenTreePtr*          pClone,
2488                                   CORINFO_CLASS_HANDLE structHnd,
2489                                   unsigned             curLevel,
2490                                   GenTreePtr* pAfterStmt DEBUGARG(const char* reason))
2491 {
2492     if (!(tree->gtFlags & GTF_GLOB_EFFECT))
2493     {
2494         GenTreePtr clone = gtClone(tree, true);
2495
2496         if (clone)
2497         {
2498             *pClone = clone;
2499             return tree;
2500         }
2501     }
2502
2503     /* Store the operand in a temp and return the temp */
2504
2505     unsigned temp = lvaGrabTemp(true DEBUGARG(reason));
2506
2507     // impAssignTempGen() may change tree->gtType to TYP_VOID for calls which
2508     // return a struct type. It also may modify the struct type to a more
2509     // specialized type (e.g. a SIMD type).  So we will get the type from
2510     // the lclVar AFTER calling impAssignTempGen().
2511
2512     impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtOffs);
2513     var_types type = genActualType(lvaTable[temp].TypeGet());
2514
2515     *pClone = gtNewLclvNode(temp, type);
2516     return gtNewLclvNode(temp, type);
2517 }
2518
2519 /*****************************************************************************
2520  * Remember the IL offset (including stack-empty info) for the trees we will
2521  * generate now.
2522  */
2523
2524 inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs)
2525 {
2526     if (compIsForInlining())
2527     {
2528         GenTreePtr callStmt = impInlineInfo->iciStmt;
2529         assert(callStmt->gtOper == GT_STMT);
2530         impCurStmtOffs = callStmt->gtStmt.gtStmtILoffsx;
2531     }
2532     else
2533     {
2534         assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2535         IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2536         impCurStmtOffs    = offs | stkBit;
2537     }
2538 }
2539
2540 /*****************************************************************************
2541  * Returns current IL offset with stack-empty and call-instruction info incorporated
2542  */
2543 inline IL_OFFSETX Compiler::impCurILOffset(IL_OFFSET offs, bool callInstruction)
2544 {
2545     if (compIsForInlining())
2546     {
2547         return BAD_IL_OFFSET;
2548     }
2549     else
2550     {
2551         assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2552         IL_OFFSETX stkBit             = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2553         IL_OFFSETX callInstructionBit = callInstruction ? IL_OFFSETX_CALLINSTRUCTIONBIT : 0;
2554         return offs | stkBit | callInstructionBit;
2555     }
2556 }
2557
2558 /*****************************************************************************
2559  *
2560  *  Remember the instr offset for the statements
2561  *
2562  *  When we do impAppendTree(tree), we can't set tree->gtStmtLastILoffs to
2563  *  impCurOpcOffs, if the append was done because of a partial stack spill,
2564  *  as some of the trees corresponding to code up to impCurOpcOffs might
2565  *  still be sitting on the stack.
2566  *  So we delay marking of gtStmtLastILoffs until impNoteLastILoffs().
2567  *  This should be called when an opcode finally/explicitly causes
2568  *  impAppendTree(tree) to be called (as opposed to being called because of
2569  *  a spill caused by the opcode)
2570  */
2571
2572 #ifdef DEBUG
2573
2574 void Compiler::impNoteLastILoffs()
2575 {
2576     if (impLastILoffsStmt == nullptr)
2577     {
2578         // We should have added a statement for the current basic block
2579         // Is this assert correct ?
2580
2581         assert(impTreeLast);
2582         assert(impTreeLast->gtOper == GT_STMT);
2583
2584         impTreeLast->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2585     }
2586     else
2587     {
2588         impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2589         impLastILoffsStmt                          = nullptr;
2590     }
2591 }
2592
2593 #endif // DEBUG
2594
2595 /*****************************************************************************
2596  * We don't create any GenTree (excluding spills) for a branch.
2597  * For debugging info, we need a placeholder so that we can note
2598  * the IL offset in gtStmt.gtStmtOffs. So append an empty statement.
2599  */
2600
2601 void Compiler::impNoteBranchOffs()
2602 {
2603     if (opts.compDbgCode)
2604     {
2605         impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
2606     }
2607 }
2608
2609 /*****************************************************************************
2610  * Locate the next stmt boundary for which we need to record info.
2611  * We will have to spill the stack at such boundaries if it is not
2612  * already empty.
2613  * Returns the next stmt boundary (after the start of the block)
2614  */
2615
2616 unsigned Compiler::impInitBlockLineInfo()
2617 {
2618     /* Assume the block does not correspond with any IL offset. This prevents
2619        us from reporting extra offsets. Extra mappings can cause confusing
2620        stepping, especially if the extra mapping is a jump-target, and the
2621        debugger does not ignore extra mappings, but instead rewinds to the
2622        nearest known offset */
2623
2624     impCurStmtOffsSet(BAD_IL_OFFSET);
2625
2626     if (compIsForInlining())
2627     {
2628         return ~0;
2629     }
2630
2631     IL_OFFSET blockOffs = compCurBB->bbCodeOffs;
2632
2633     if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES))
2634     {
2635         impCurStmtOffsSet(blockOffs);
2636     }
2637
2638     if (false && (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES))
2639     {
2640         impCurStmtOffsSet(blockOffs);
2641     }
2642
2643     /* Always report IL offset 0 or some tests get confused.
2644        Probably a good idea anyway */
2645
2646     if (blockOffs == 0)
2647     {
2648         impCurStmtOffsSet(blockOffs);
2649     }
2650
2651     if (!info.compStmtOffsetsCount)
2652     {
2653         return ~0;
2654     }
2655
2656     /* Find the lowest explicit stmt boundary within the block */
2657
2658     /* Start looking at an entry that is based on our instr offset */
2659
2660     unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize;
2661
2662     if (index >= info.compStmtOffsetsCount)
2663     {
2664         index = info.compStmtOffsetsCount - 1;
2665     }
2666
2667     /* If we've guessed too far, back up */
2668
2669     while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs)
2670     {
2671         index--;
2672     }
2673
2674     /* If we guessed short, advance ahead */
2675
2676     while (info.compStmtOffsets[index] < blockOffs)
2677     {
2678         index++;
2679
2680         if (index == info.compStmtOffsetsCount)
2681         {
2682             return info.compStmtOffsetsCount;
2683         }
2684     }
2685
2686     assert(index < info.compStmtOffsetsCount);
2687
2688     if (info.compStmtOffsets[index] == blockOffs)
2689     {
2690         /* There is an explicit boundary for the start of this basic block.
2691            So we will start with bbCodeOffs. Else we will wait until we
2692            get to the next explicit boundary */
2693
2694         impCurStmtOffsSet(blockOffs);
2695
2696         index++;
2697     }
2698
2699     return index;
2700 }
2701
2702 /*****************************************************************************/
2703
2704 static inline bool impOpcodeIsCallOpcode(OPCODE opcode)
2705 {
2706     switch (opcode)
2707     {
2708         case CEE_CALL:
2709         case CEE_CALLI:
2710         case CEE_CALLVIRT:
2711             return true;
2712
2713         default:
2714             return false;
2715     }
2716 }
2717
2718 /*****************************************************************************/
2719
2720 static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode)
2721 {
2722     switch (opcode)
2723     {
2724         case CEE_CALL:
2725         case CEE_CALLI:
2726         case CEE_CALLVIRT:
2727         case CEE_JMP:
2728         case CEE_NEWOBJ:
2729         case CEE_NEWARR:
2730             return true;
2731
2732         default:
2733             return false;
2734     }
2735 }
2736
2737 /*****************************************************************************/
2738
2739 // One might think it is worth caching these values, but results indicate
2740 // that it isn't.
2741 // In addition, caching them causes SuperPMI to be unable to completely
2742 // encapsulate an individual method context.
2743 CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass()
2744 {
2745     CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
2746     assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr);
2747     return refAnyClass;
2748 }
2749
2750 CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass()
2751 {
2752     CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE);
2753     assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr);
2754     return typeHandleClass;
2755 }
2756
2757 CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle()
2758 {
2759     CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE);
2760     assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr);
2761     return argIteratorClass;
2762 }
2763
2764 CORINFO_CLASS_HANDLE Compiler::impGetStringClass()
2765 {
2766     CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING);
2767     assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr);
2768     return stringClass;
2769 }
2770
2771 CORINFO_CLASS_HANDLE Compiler::impGetObjectClass()
2772 {
2773     CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT);
2774     assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr);
2775     return objectClass;
2776 }
2777
2778 /*****************************************************************************
2779  *  "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we
2780  *  set its type to TYP_BYREF when we create it. We only know whether it can be
2781  *  changed to TYP_I_IMPL at the point where we use it
2782  */
2783
2784 /* static */
2785 void Compiler::impBashVarAddrsToI(GenTreePtr tree1, GenTreePtr tree2)
2786 {
2787     if (tree1->IsVarAddr())
2788     {
2789         tree1->gtType = TYP_I_IMPL;
2790     }
2791
2792     if (tree2 && tree2->IsVarAddr())
2793     {
2794         tree2->gtType = TYP_I_IMPL;
2795     }
2796 }
2797
2798 /*****************************************************************************
2799  *  TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want
2800  *  to make that an explicit cast in our trees, so any implicit casts that
2801  *  exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are
2802  *  turned into explicit casts here.
2803  *  We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0)
2804  */
2805
2806 GenTreePtr Compiler::impImplicitIorI4Cast(GenTreePtr tree, var_types dstTyp)
2807 {
2808     var_types currType   = genActualType(tree->gtType);
2809     var_types wantedType = genActualType(dstTyp);
2810
2811     if (wantedType != currType)
2812     {
2813         // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
2814         if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp))
2815         {
2816             if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->gtIntCon.gtIconVal == 0)))
2817             {
2818                 tree->gtType = TYP_I_IMPL;
2819             }
2820         }
2821 #ifdef _TARGET_64BIT_
2822         else if (varTypeIsI(wantedType) && (currType == TYP_INT))
2823         {
2824             // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF
2825             tree = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
2826         }
2827         else if ((wantedType == TYP_INT) && varTypeIsI(currType))
2828         {
2829             // Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT
2830             tree = gtNewCastNode(TYP_INT, tree, TYP_INT);
2831         }
2832 #endif // _TARGET_64BIT_
2833     }
2834
2835     return tree;
2836 }
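
// A minimal illustration (not from this file) of the cases handled above: storing a
// TYP_INT value into a native-int (TYP_I_IMPL) slot would, roughly, do
//
//     GenTreePtr value = gtNewIconNode(1);                        // TYP_INT constant
//     value            = impImplicitIorI4Cast(value, TYP_I_IMPL);
//
// For a GT_CNS_INT the node is simply retyped; for other TYP_INT trees on 64-bit
// targets an explicit CAST(TYP_I_IMPL <- TYP_INT) node is inserted.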
2837
2838 /*****************************************************************************
2839  *  TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases,
2840  *  but we want to make that an explicit cast in our trees, so any implicit casts
2841  *  that exist in the IL are turned into explicit casts here.
2842  */
2843
2844 GenTreePtr Compiler::impImplicitR4orR8Cast(GenTreePtr tree, var_types dstTyp)
2845 {
2846 #ifndef LEGACY_BACKEND
2847     if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType))
2848     {
2849         tree = gtNewCastNode(dstTyp, tree, dstTyp);
2850     }
2851 #endif // !LEGACY_BACKEND
2852
2853     return tree;
2854 }
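
// For example (illustrative): when a TYP_FLOAT value feeds a TYP_DOUBLE use, calling
// impImplicitR4orR8Cast(tree, TYP_DOUBLE) wraps the tree in an explicit
// CAST(double <- float) node (non-legacy backend only), instead of leaving the
// float/double mismatch implicit as it is in the IL.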
2855
2856 //------------------------------------------------------------------------
2857 // impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray
2858 //    with a GT_COPYBLK node.
2859 //
2860 // Arguments:
2861 //    sig - The InitializeArray signature.
2862 //
2863 // Return Value:
2864 //    A pointer to the newly created GT_COPYBLK node if the replacement succeeds or
2865 //    nullptr otherwise.
2866 //
2867 // Notes:
2868 //    The function recognizes the following IL pattern:
2869 //      ldc <length> or a list of ldc <lower bound>/<length>
2870 //      newarr or newobj
2871 //      dup
2872 //      ldtoken <field handle>
2873 //      call InitializeArray
2874 //    The lower bounds need not be constant except when the array rank is 1.
2875 //    The function recognizes all kinds of arrays thus enabling a small runtime
2876 //    such as CoreRT to skip providing an implementation for InitializeArray.
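//
//    For example (illustrative IL, not taken from this file), the C# initializer
//    "new int[] { 1, 2, 3 }" typically compiles to a sequence of this shape:
//      ldc.i4.3
//      newarr    [mscorlib]System.Int32
//      dup
//      ldtoken   field valuetype '<PrivateImplementationDetails>'/'...' '...'
//      call      void System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(
//                    class System.Array, valuetype System.RuntimeFieldHandle)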
2877
2878 GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
2879 {
2880     assert(sig->numArgs == 2);
2881
2882     GenTreePtr fieldTokenNode = impStackTop(0).val;
2883     GenTreePtr arrayLocalNode = impStackTop(1).val;
2884
2885     //
2886     // Verify that the field token is known and valid.  Note that it's also
2887     // possible for the token to come from reflection, in which case we cannot do
2888     // the optimization and must therefore revert to calling the helper.  You can
2889     // see an example of this in bvt\DynIL\initarray2.exe (in Main).
2890     //
2891
2892     // Check whether what we have here is the ldtoken helper call.
2893     if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->gtCall.gtCallType != CT_HELPER) ||
2894         (fieldTokenNode->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
2895     {
2896         return nullptr;
2897     }
2898
2899     // Strip helper call away
2900     fieldTokenNode = fieldTokenNode->gtCall.gtCallArgs->Current();
2901
2902     if (fieldTokenNode->gtOper == GT_IND)
2903     {
2904         fieldTokenNode = fieldTokenNode->gtOp.gtOp1;
2905     }
2906
2907     // Check for constant
2908     if (fieldTokenNode->gtOper != GT_CNS_INT)
2909     {
2910         return nullptr;
2911     }
2912
2913     CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->gtIntCon.gtCompileTimeHandle;
2914     if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
2915     {
2916         return nullptr;
2917     }
2918
2919     //
2920     // We need to get the number of elements in the array and the size of each element.
2921     // We verify that the newarr statement is exactly what we expect it to be.
2922     // If it's not, then we just return nullptr and don't optimize this call.
2923     //
2924
2925     //
2926     // It is possible that we don't have any statements in the block yet.
2927     //
2928     if (impTreeLast->gtOper != GT_STMT)
2929     {
2930         assert(impTreeLast->gtOper == GT_BEG_STMTS);
2931         return nullptr;
2932     }
2933
2934     //
2935     // We start by looking at the last statement, making sure it's an assignment, and
2936     // that the target of the assignment is the array passed to InitializeArray.
2937     //
2938     GenTreePtr arrayAssignment = impTreeLast->gtStmt.gtStmtExpr;
2939     if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->gtOp.gtOp1->gtOper != GT_LCL_VAR) ||
2940         (arrayLocalNode->gtOper != GT_LCL_VAR) ||
2941         (arrayAssignment->gtOp.gtOp1->gtLclVarCommon.gtLclNum != arrayLocalNode->gtLclVarCommon.gtLclNum))
2942     {
2943         return nullptr;
2944     }
2945
2946     //
2947     // Make sure that the object being assigned is a helper call.
2948     //
2949
2950     GenTreePtr newArrayCall = arrayAssignment->gtOp.gtOp2;
2951     if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->gtCall.gtCallType != CT_HELPER))
2952     {
2953         return nullptr;
2954     }
2955
2956     //
2957     // Verify that it is one of the new array helpers.
2958     //
2959
2960     bool isMDArray = false;
2961
2962     if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) &&
2963         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) &&
2964         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) &&
2965         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8)
2966 #ifdef FEATURE_READYTORUN_COMPILER
2967         && newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)
2968 #endif
2969             )
2970     {
2971 #if COR_JIT_EE_VERSION > 460
2972         if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR_NONVARARG))
2973         {
2974             return nullptr;
2975         }
2976
2977         isMDArray = true;
2978 #endif
2979     }
2980
2981     CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->gtCall.compileTimeHelperArgumentHandle;
2982
2983     //
2984     // Make sure we found a compile time handle to the array
2985     //
2986
2987     if (!arrayClsHnd)
2988     {
2989         return nullptr;
2990     }
2991
2992     unsigned rank = 0;
2993     S_UINT32 numElements;
2994
2995     if (isMDArray)
2996     {
2997         rank = info.compCompHnd->getArrayRank(arrayClsHnd);
2998
2999         if (rank == 0)
3000         {
3001             return nullptr;
3002         }
3003
3004         GenTreeArgList* tokenArg = newArrayCall->gtCall.gtCallArgs;
3005         assert(tokenArg != nullptr);
3006         GenTreeArgList* numArgsArg = tokenArg->Rest();
3007         assert(numArgsArg != nullptr);
3008         GenTreeArgList* argsArg = numArgsArg->Rest();
3009         assert(argsArg != nullptr);
3010
3011         //
3012         // The number of arguments should be a constant between 1 and 64. The rank can't be 0,
3013         // so at least one length must be present, and the rank can't exceed 32, so there can
3014         // be at most 64 arguments: 32 lengths and 32 lower bounds.
3015         //
3016
3017         if ((!numArgsArg->Current()->IsCnsIntOrI()) || (numArgsArg->Current()->AsIntCon()->IconValue() < 1) ||
3018             (numArgsArg->Current()->AsIntCon()->IconValue() > 64))
3019         {
3020             return nullptr;
3021         }
3022
3023         unsigned numArgs = static_cast<unsigned>(numArgsArg->Current()->AsIntCon()->IconValue());
3024         bool     lowerBoundsSpecified;
3025
3026         if (numArgs == rank * 2)
3027         {
3028             lowerBoundsSpecified = true;
3029         }
3030         else if (numArgs == rank)
3031         {
3032             lowerBoundsSpecified = false;
3033
3034             //
3035             // If the rank is 1 and a lower bound isn't specified, then the runtime creates
3036             // an SDArray. Note that even if a lower bound is specified it can be 0, and then
3037             // we get an SDArray as well; see the for loop below.
3038             //
3039
3040             if (rank == 1)
3041             {
3042                 isMDArray = false;
3043             }
3044         }
3045         else
3046         {
3047             return nullptr;
3048         }
3049
3050         //
3051         // The rank is known to be at least 1 so we can start with numElements being 1
3052         // to avoid the need to special case the first dimension.
3053         //
3054
3055         numElements = S_UINT32(1);
3056
3057         struct Match
3058         {
3059             static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3060             {
3061                 return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) &&
3062                        IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3063             }
3064
3065             static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3066             {
3067                 return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) &&
3068                        (tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) &&
3069                        IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3070             }
3071
3072             static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs)
3073             {
3074                 return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) &&
3075                        (tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs);
3076             }
3077
3078             static bool IsComma(GenTree* tree)
3079             {
3080                 return (tree != nullptr) && (tree->OperGet() == GT_COMMA);
3081             }
3082         };
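
        // The trees matched above have (roughly) the following shape for each argument
        // slot of the lvaNewObjArrayArgs temp (illustrative sketch, not actual dump output):
        //
        //     COMMA
        //       ASG
        //         IND(ADD(ADDR(LCL_VAR lvaNewObjArrayArgs), sizeof(INT32) * index))
        //         <length or lower bound>
        //       <next COMMA or ADDR(LCL_VAR lvaNewObjArrayArgs)>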
3083
3084         unsigned argIndex = 0;
3085         GenTree* comma;
3086
3087         for (comma = argsArg->Current(); Match::IsComma(comma); comma = comma->gtGetOp2())
3088         {
3089             if (lowerBoundsSpecified)
3090             {
3091                 //
3092                 // In general lower bounds can be ignored because they're not needed to
3093                 // calculate the total number of elements. But for single-dimensional arrays
3094                 // we need to know if the lower bound is 0 because in this case the runtime
3095                 // creates an SDArray, and this affects the way the array data offset is calculated.
3096                 //
3097
3098                 if (rank == 1)
3099                 {
3100                     GenTree* lowerBoundAssign = comma->gtGetOp1();
3101                     assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs));
3102                     GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2();
3103
3104                     if (lowerBoundNode->IsIntegralConst(0))
3105                     {
3106                         isMDArray = false;
3107                     }
3108                 }
3109
3110                 comma = comma->gtGetOp2();
3111                 argIndex++;
3112             }
3113
3114             GenTree* lengthNodeAssign = comma->gtGetOp1();
3115             assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs));
3116             GenTree* lengthNode = lengthNodeAssign->gtGetOp2();
3117
3118             if (!lengthNode->IsCnsIntOrI())
3119             {
3120                 return nullptr;
3121             }
3122
3123             numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue());
3124             argIndex++;
3125         }
3126
3127         assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs));
3128
3129         if (argIndex != numArgs)
3130         {
3131             return nullptr;
3132         }
3133     }
3134     else
3135     {
3136         //
3137         // Make sure there are exactly two arguments:  the array class and
3138         // the number of elements.
3139         //
3140
3141         GenTreePtr arrayLengthNode;
3142
3143         GenTreeArgList* args = newArrayCall->gtCall.gtCallArgs;
3144 #ifdef FEATURE_READYTORUN_COMPILER
3145         if (newArrayCall->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1))
3146         {
3147             // Array length is 1st argument for readytorun helper
3148             arrayLengthNode = args->Current();
3149         }
3150         else
3151 #endif
3152         {
3153             // Array length is 2nd argument for regular helper
3154             arrayLengthNode = args->Rest()->Current();
3155         }
3156
3157         //
3158         // Make sure that the number of elements looks valid.
3159         //
3160         if (arrayLengthNode->gtOper != GT_CNS_INT)
3161         {
3162             return nullptr;
3163         }
3164
3165         numElements = S_SIZE_T(arrayLengthNode->gtIntCon.gtIconVal);
3166
3167         if (!info.compCompHnd->isSDArray(arrayClsHnd))
3168         {
3169             return nullptr;
3170         }
3171     }
3172
3173     CORINFO_CLASS_HANDLE elemClsHnd;
3174     var_types            elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd));
3175
3176     //
3177     // Note that genTypeSize will return zero for non primitive types, which is exactly
3178     // Note that genTypeSize will return zero for non-primitive types, which is exactly
3179     // Note that we don't expect this to fail for valid binaries, so we assert in the
3180     // non-verification case (the verification case should not assert but rather correctly
3181     // handle bad binaries).  This assert is not guarding any specific invariant, but rather
3182     // saying that we don't expect this to happen, and if it is hit, we need to investigate
3183     // why.
3184     //
3185
3186     S_UINT32 elemSize(genTypeSize(elementType));
3187     S_UINT32 size = elemSize * S_UINT32(numElements);
3188
3189     if (size.IsOverflow())
3190     {
3191         return nullptr;
3192     }
3193
3194     if ((size.Value() == 0) || (varTypeIsGC(elementType)))
3195     {
3196         assert(verNeedsVerification());
3197         return nullptr;
3198     }
3199
3200     void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value());
3201     if (!initData)
3202     {
3203         return nullptr;
3204     }
3205
3206     //
3207     // At this point we are ready to commit to implementing the InitializeArray
3208     // intrinsic using a struct assignment.  Pop the arguments from the stack and
3209     // return the struct assignment node.
3210     //
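    // The resulting tree is, roughly (an illustrative sketch, not a real dump):
    //
    //     copyBlk(BLK(dst = <address of the first array element>, blkSize),
    //             IND(GT_CNS_INT handle to the initialization data),
    //             blkSize)
    //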
3211
3212     impPopStack();
3213     impPopStack();
3214
3215     const unsigned blkSize = size.Value();
3216     GenTreePtr     dst;
3217
3218     if (isMDArray)
3219     {
3220         unsigned dataOffset = eeGetMDArrayDataOffset(elementType, rank);
3221
3222         dst = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
3223     }
3224     else
3225     {
3226         dst = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewIndexRef(elementType, arrayLocalNode, gtNewIconNode(0)));
3227     }
3228     GenTreePtr blk     = gtNewBlockVal(dst, blkSize);
3229     GenTreePtr srcAddr = gtNewIconHandleNode((size_t)initData, GTF_ICON_STATIC_HDL);
3230     GenTreePtr src     = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
3231
3232     return gtNewBlkOpNode(blk,     // dst
3233                           src,     // src
3234                           blkSize, // size
3235                           false,   // volatile
3236                           true);   // copyBlock
3237 }
3238
3239 /*****************************************************************************/
3240 // Returns the GenTree that should be used to do the intrinsic instead of the call.
3241 // Returns NULL if an intrinsic cannot be used
3242
3243 GenTreePtr Compiler::impIntrinsic(CORINFO_CLASS_HANDLE  clsHnd,
3244                                   CORINFO_METHOD_HANDLE method,
3245                                   CORINFO_SIG_INFO*     sig,
3246                                   int                   memberRef,
3247                                   bool                  readonlyCall,
3248                                   bool                  tailCall,
3249                                   CorInfoIntrinsics*    pIntrinsicID)
3250 {
3251     bool mustExpand = false;
3252 #if COR_JIT_EE_VERSION > 460
3253     CorInfoIntrinsics intrinsicID = info.compCompHnd->getIntrinsicID(method, &mustExpand);
3254 #else
3255     CorInfoIntrinsics intrinsicID = info.compCompHnd->getIntrinsicID(method);
3256 #endif
3257     *pIntrinsicID = intrinsicID;
3258
3259 #ifndef _TARGET_ARM_
3260     genTreeOps interlockedOperator;
3261 #endif
3262
3263     if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContext)
3264     {
3265         // must be done regardless of DbgCode and MinOpts
3266         return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL);
3267     }
3268 #ifdef _TARGET_64BIT_
3269     if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr)
3270     {
3271         // must be done regardless of DbgCode and MinOpts
3272         return gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL));
3273     }
3274 #else
3275     assert(intrinsicID != CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr);
3276 #endif
3277
3278     GenTreePtr retNode = nullptr;
3279
3280     //
3281     // We disable the inlining of intrinsics for MinOpts.
3282     //
3283     if (!mustExpand && (opts.compDbgCode || opts.MinOpts()))
3284     {
3285         *pIntrinsicID = CORINFO_INTRINSIC_Illegal;
3286         return retNode;
3287     }
3288
3289     // Currently we don't have CORINFO_INTRINSIC_Exp because it does not
3290     // seem to work properly for Infinity values, and we don't do
3291     // CORINFO_INTRINSIC_Pow because it needs a helper which we currently don't have.
3292
3293     var_types callType = JITtype2varType(sig->retType);
3294
3295     /* First do the intrinsics which are always smaller than a call */
3296
3297     switch (intrinsicID)
3298     {
3299         GenTreePtr op1, op2;
3300
3301         case CORINFO_INTRINSIC_Sin:
3302         case CORINFO_INTRINSIC_Sqrt:
3303         case CORINFO_INTRINSIC_Abs:
3304         case CORINFO_INTRINSIC_Cos:
3305         case CORINFO_INTRINSIC_Round:
3306         case CORINFO_INTRINSIC_Cosh:
3307         case CORINFO_INTRINSIC_Sinh:
3308         case CORINFO_INTRINSIC_Tan:
3309         case CORINFO_INTRINSIC_Tanh:
3310         case CORINFO_INTRINSIC_Asin:
3311         case CORINFO_INTRINSIC_Acos:
3312         case CORINFO_INTRINSIC_Atan:
3313         case CORINFO_INTRINSIC_Atan2:
3314         case CORINFO_INTRINSIC_Log10:
3315         case CORINFO_INTRINSIC_Pow:
3316         case CORINFO_INTRINSIC_Exp:
3317         case CORINFO_INTRINSIC_Ceiling:
3318         case CORINFO_INTRINSIC_Floor:
3319
3320             // These are math intrinsics
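            //
            // For illustration (hypothetical, not code from this function): a call to
            // Math.Sqrt(d) with its operand on the stack is imported below roughly as
            //
            //     op1 = impPopStack().val;
            //     op1 = new (this, GT_INTRINSIC)
            //         GenTreeIntrinsic(genActualType(callType), op1, CORINFO_INTRINSIC_Sqrt, method);
            //
            // i.e. a GT_INTRINSIC node replaces the call when the target can implement it directly.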
3321
3322             assert(callType != TYP_STRUCT);
3323
3324             op1 = nullptr;
3325
3326 #if defined(LEGACY_BACKEND)
3327             if (IsTargetIntrinsic(intrinsicID))
3328 #elif !defined(_TARGET_X86_)
3329             // Intrinsics that are not implemented directly by target instructions will
3330             // be re-materialized as user calls in the rationalizer. For prefixed tail calls,
3331             // don't do this optimization, because
3332             //  a) for backward-compatibility reasons with desktop .NET 4.6 / 4.6.1, and
3333             //  b) it would be a non-trivial task, or too late, to re-materialize a surviving
3334             //     tail-prefixed GT_INTRINSIC as a tail call in the rationalizer.
3335             if (!IsIntrinsicImplementedByUserCall(intrinsicID) || !tailCall)
3336 #else
3337             // On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation
3338             // of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad
3339             // code generation for certain EH constructs.
3340             if (!IsIntrinsicImplementedByUserCall(intrinsicID))
3341 #endif
3342             {
3343                 switch (sig->numArgs)
3344                 {
3345                     case 1:
3346                         op1 = impPopStack().val;
3347
3348 #if FEATURE_X87_DOUBLES
3349
3350                         // The X87 stack doesn't differentiate between float/double,
3351                         // so it doesn't need a cast, but everybody else does.
3352                         // Just double-check that it is at least an FP type.
3353                         noway_assert(varTypeIsFloating(op1));
3354
3355 #else // FEATURE_X87_DOUBLES
3356
3357                         if (op1->TypeGet() != callType)
3358                         {
3359                             op1 = gtNewCastNode(callType, op1, callType);
3360                         }
3361
3362 #endif // FEATURE_X87_DOUBLES
3363
3364                         op1 = new (this, GT_INTRINSIC)
3365                             GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3366                         break;
3367
3368                     case 2:
3369                         op2 = impPopStack().val;
3370                         op1 = impPopStack().val;
3371
3372 #if FEATURE_X87_DOUBLES
3373
3374                         // The X87 stack doesn't differentiate between float/double,
3375                         // so it doesn't need a cast, but everybody else does.
3376                         // Just double-check that it is at least an FP type.
3377                         noway_assert(varTypeIsFloating(op2));
3378                         noway_assert(varTypeIsFloating(op1));
3379
3380 #else // FEATURE_X87_DOUBLES
3381
3382                         if (op2->TypeGet() != callType)
3383                         {
3384                             op2 = gtNewCastNode(callType, op2, callType);
3385                         }
3386                         if (op1->TypeGet() != callType)
3387                         {
3388                             op1 = gtNewCastNode(callType, op1, callType);
3389                         }
3390
3391 #endif // FEATURE_X87_DOUBLES
3392
3393                         op1 = new (this, GT_INTRINSIC)
3394                             GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicID, method);
3395                         break;
3396
3397                     default:
3398                         NO_WAY("Unsupported number of args for Math Intrinsic");
3399                 }
3400
3401 #ifndef LEGACY_BACKEND
3402                 if (IsIntrinsicImplementedByUserCall(intrinsicID))
3403                 {
3404                     op1->gtFlags |= GTF_CALL;
3405                 }
3406 #endif
3407             }
3408
3409             retNode = op1;
3410             break;
3411
3412 #ifdef _TARGET_XARCH_
3413         // TODO-ARM-CQ: reenable treating Interlocked operation as intrinsic
3414         case CORINFO_INTRINSIC_InterlockedAdd32:
3415             interlockedOperator = GT_LOCKADD;
3416             goto InterlockedBinOpCommon;
3417         case CORINFO_INTRINSIC_InterlockedXAdd32:
3418             interlockedOperator = GT_XADD;
3419             goto InterlockedBinOpCommon;
3420         case CORINFO_INTRINSIC_InterlockedXchg32:
3421             interlockedOperator = GT_XCHG;
3422             goto InterlockedBinOpCommon;
3423
3424 #ifdef _TARGET_AMD64_
3425         case CORINFO_INTRINSIC_InterlockedAdd64:
3426             interlockedOperator = GT_LOCKADD;
3427             goto InterlockedBinOpCommon;
3428         case CORINFO_INTRINSIC_InterlockedXAdd64:
3429             interlockedOperator = GT_XADD;
3430             goto InterlockedBinOpCommon;
3431         case CORINFO_INTRINSIC_InterlockedXchg64:
3432             interlockedOperator = GT_XCHG;
3433             goto InterlockedBinOpCommon;
3434 #endif // _TARGET_AMD64_
3435
3436         InterlockedBinOpCommon:
3437             assert(callType != TYP_STRUCT);
3438             assert(sig->numArgs == 2);
3439
3440             op2 = impPopStack().val;
3441             op1 = impPopStack().val;
3442
3443             // This creates:
3444             //   val
3445             // XAdd
3446             //   addr
3447             //     field (for example)
3448             //
3449             // In the case where the first argument is the address of a local, we might
3450             // want to make this *not* make the var address-taken -- but atomic instructions
3451             // on a local are probably pretty useless anyway, so we probably don't care.
3452
3453             op1 = gtNewOperNode(interlockedOperator, genActualType(callType), op1, op2);
3454             op1->gtFlags |= GTF_GLOB_EFFECT;
3455             retNode = op1;
3456             break;
3457 #endif // _TARGET_XARCH_
3458
3459         case CORINFO_INTRINSIC_MemoryBarrier:
3460
3461             assert(sig->numArgs == 0);
3462
3463             op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
3464             op1->gtFlags |= GTF_GLOB_EFFECT;
3465             retNode = op1;
3466             break;
3467
3468 #ifdef _TARGET_XARCH_
3469         // TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic
3470         case CORINFO_INTRINSIC_InterlockedCmpXchg32:
3471 #ifdef _TARGET_AMD64_
3472         case CORINFO_INTRINSIC_InterlockedCmpXchg64:
3473 #endif
3474         {
3475             assert(callType != TYP_STRUCT);
3476             assert(sig->numArgs == 3);
3477             GenTreePtr op3;
3478
3479             op3 = impPopStack().val; // comparand
3480             op2 = impPopStack().val; // value
3481             op1 = impPopStack().val; // location
3482
3483             GenTreePtr node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3);
3484
3485             node->gtCmpXchg.gtOpLocation->gtFlags |= GTF_DONT_CSE;
3486             retNode = node;
3487             break;
3488         }
3489 #endif
3490
3491         case CORINFO_INTRINSIC_StringLength:
3492             op1 = impPopStack().val;
3493             if (!opts.MinOpts() && !opts.compDbgCode)
3494             {
3495                 GenTreeArrLen* arrLen =
3496                     new (this, GT_ARR_LENGTH) GenTreeArrLen(TYP_INT, op1, offsetof(CORINFO_String, stringLen));
3497                 op1 = arrLen;
3498             }
3499             else
3500             {
3501                 /* Create the expression "*(str_addr + stringLengthOffset)" */
3502                 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
3503                                     gtNewIconNode(offsetof(CORINFO_String, stringLen), TYP_I_IMPL));
3504                 op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
3505             }
3506             retNode = op1;
3507             break;
3508
3509         case CORINFO_INTRINSIC_StringGetChar:
3510             op2 = impPopStack().val;
3511             op1 = impPopStack().val;
3512             op1 = gtNewIndexRef(TYP_CHAR, op1, op2);
3513             op1->gtFlags |= GTF_INX_STRING_LAYOUT;
3514             retNode = op1;
3515             break;
3516
3517         case CORINFO_INTRINSIC_InitializeArray:
3518             retNode = impInitializeArrayIntrinsic(sig);
3519             break;
3520
3521         case CORINFO_INTRINSIC_Array_Address:
3522         case CORINFO_INTRINSIC_Array_Get:
3523         case CORINFO_INTRINSIC_Array_Set:
3524             retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, intrinsicID);
3525             break;
3526
3527         case CORINFO_INTRINSIC_GetTypeFromHandle:
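            // Illustrative transformation (mirrors the RTH_GetValueInternal case below):
            //
            //   Old tree: GetTypeFromHandle( call Helper-TypeHandleToRuntimeType(handleTree) )
            //   New tree: call Helper-TypeHandleToRuntimeType(handleTree), retyped to TYP_REF
            //
            // i.e. when the stack top is the type-handle-to-RuntimeType helper call, the
            // GetTypeFromHandle call is dropped and the helper call's result is used directly.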
3528             op1 = impStackTop(0).val;
3529             if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3530                 gtIsTypeHandleToRuntimeTypeHelper(op1))
3531             {
3532                 op1 = impPopStack().val;
3533                 // Change call to return RuntimeType directly.
3534                 op1->gtType = TYP_REF;
3535                 retNode     = op1;
3536             }
3537             // Call the regular function.
3538             break;
3539
3540         case CORINFO_INTRINSIC_RTH_GetValueInternal:
3541             op1 = impStackTop(0).val;
3542             if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3543                 gtIsTypeHandleToRuntimeTypeHelper(op1))
3544             {
3545                 // Old tree
3546                 // Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle
3547                 //
3548                 // New tree
3549                 // TreeToGetNativeTypeHandle
3550
3551                 // Remove call to helper and return the native TypeHandle pointer that was the parameter
3552                 // to that helper.
3553
3554                 op1 = impPopStack().val;
3555
3556                 // Get native TypeHandle argument to old helper
3557                 op1 = op1->gtCall.gtCallArgs;
3558                 assert(op1->OperIsList());
3559                 assert(op1->gtOp.gtOp2 == nullptr);
3560                 op1     = op1->gtOp.gtOp1;
3561                 retNode = op1;
3562             }
3563             // Call the regular function.
3564             break;
3565
3566 #ifndef LEGACY_BACKEND
3567         case CORINFO_INTRINSIC_Object_GetType:
3568
3569             op1 = impPopStack().val;
3570             op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3571
3572             // Set the CALL flag to indicate that the operator is implemented by a call.
3573             // Set also the EXCEPTION flag because the native implementation of
3574             // CORINFO_INTRINSIC_Object_GetType intrinsic can throw NullReferenceException.
3575             op1->gtFlags |= (GTF_CALL | GTF_EXCEPT);
3576             retNode = op1;
3577             break;
3578 #endif
3579
3580         default:
3581             /* Unknown intrinsic */
3582             break;
3583     }
3584
3585     if (mustExpand)
3586     {
3587         if (retNode == nullptr)
3588         {
3589             NO_WAY("JIT must expand the intrinsic!");
3590         }
3591     }
3592
3593     return retNode;
3594 }
3595
3596 /*****************************************************************************/
3597
3598 GenTreePtr Compiler::impArrayAccessIntrinsic(
3599     CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, CorInfoIntrinsics intrinsicID)
3600 {
3601     /* If we are generating SMALL_CODE, we don't want to use intrinsics for
3602        the following, as they generate fatter code.
3603     */
3604
3605     if (compCodeOpt() == SMALL_CODE)
3606     {
3607         return nullptr;
3608     }
3609
3610     /* These intrinsics generate fatter (but faster) code and are only
3611        done if we don't need SMALL_CODE */
3612
3613     unsigned rank = (intrinsicID == CORINFO_INTRINSIC_Array_Set) ? (sig->numArgs - 1) : sig->numArgs;
3614
3615     // The rank 1 case is special because it has to handle two array formats;
3616     // we will simply not do that case.
3617     if (rank > GT_ARR_MAX_RANK || rank <= 1)
3618     {
3619         return nullptr;
3620     }
3621
3622     CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr;
3623     var_types            elemType      = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd));
3624
3625     // For the ref case, we will only be able to inline if the types match
3626     // (the verifier checks for this; we don't care about the non-verified case)
3627     // and the type is final (so we don't need to do the cast).
3628     if ((intrinsicID != CORINFO_INTRINSIC_Array_Get) && !readonlyCall && varTypeIsGC(elemType))
3629     {
3630         // Get the call site signature
3631         CORINFO_SIG_INFO LocalSig;
3632         eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig);
3633         assert(LocalSig.hasThis());
3634
3635         CORINFO_CLASS_HANDLE actualElemClsHnd;
3636
3637         if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3638         {
3639             // Fetch the last argument, the one that indicates the type we are setting.
3640             CORINFO_ARG_LIST_HANDLE argType = LocalSig.args;
3641             for (unsigned r = 0; r < rank; r++)
3642             {
3643                 argType = info.compCompHnd->getArgNext(argType);
3644             }
3645
3646             typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType);
3647             actualElemClsHnd = argInfo.GetClassHandle();
3648         }
3649         else
3650         {
3651             assert(intrinsicID == CORINFO_INTRINSIC_Array_Address);
3652
3653             // Fetch the return type
3654             typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass);
3655             assert(retInfo.IsByRef());
3656             actualElemClsHnd = retInfo.GetClassHandle();
3657         }
3658
3659         // if it's not final, we can't do the optimization
3660         if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL))
3661         {
3662             return nullptr;
3663         }
3664     }
3665
3666     unsigned arrayElemSize;
3667     if (elemType == TYP_STRUCT)
3668     {
3669         assert(arrElemClsHnd);
3670
3671         arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd);
3672     }
3673     else
3674     {
3675         arrayElemSize = genTypeSize(elemType);
3676     }
3677
3678     if ((unsigned char)arrayElemSize != arrayElemSize)
3679     {
3680         // arrayElemSize would be truncated as an unsigned char.
3681         // This means the array element is too large. Don't do the optimization.
3682         return nullptr;
3683     }
3684
3685     GenTreePtr val = nullptr;
3686
3687     if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3688     {
3689         // Assignment of a struct is more work, and there are more gets than sets.
3690         if (elemType == TYP_STRUCT)
3691         {
3692             return nullptr;
3693         }
3694
3695         val = impPopStack().val;
3696         assert(genActualType(elemType) == genActualType(val->gtType) ||
3697                (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) ||
3698                (elemType == TYP_INT && val->gtType == TYP_BYREF) ||
3699                (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT));
3700     }
3701
3702     noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK);
3703
3704     GenTreePtr inds[GT_ARR_MAX_RANK];
3705     for (unsigned k = rank; k > 0; k--)
3706     {
3707         inds[k - 1] = impPopStack().val;
3708     }
3709
3710     GenTreePtr arr = impPopStack().val;
3711     assert(arr->gtType == TYP_REF);
3712
3713     GenTreePtr arrElem =
3714         new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank),
3715                                                static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]);
3716
3717     if (intrinsicID != CORINFO_INTRINSIC_Array_Address)
3718     {
3719         arrElem = gtNewOperNode(GT_IND, elemType, arrElem);
3720     }
3721
3722     if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3723     {
3724         assert(val != nullptr);
3725         return gtNewAssignNode(arrElem, val);
3726     }
3727     else
3728     {
3729         return arrElem;
3730     }
3731 }
3732
3733 BOOL Compiler::verMergeEntryStates(BasicBlock* block, bool* changed)
3734 {
3735     unsigned i;
3736
3737     // do some basic checks first
3738     if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth)
3739     {
3740         return FALSE;
3741     }
3742
3743     if (verCurrentState.esStackDepth > 0)
3744     {
3745         // merge stack types
3746         StackEntry* parentStack = block->bbStackOnEntry();
3747         StackEntry* childStack  = verCurrentState.esStack;
3748
3749         for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++)
3750         {
3751             if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == FALSE)
3752             {
3753                 return FALSE;
3754             }
3755         }
3756     }
3757
3758     // merge initialization status of this ptr
3759
3760     if (verTrackObjCtorInitState)
3761     {
3762         // If we're tracking the CtorInitState, then it must not be unknown in the current state.
3763         assert(verCurrentState.thisInitialized != TIS_Bottom);
3764
3765         // If the successor block's thisInit state is unknown, copy it from the current state.
3766         if (block->bbThisOnEntry() == TIS_Bottom)
3767         {
3768             *changed = true;
3769             verSetThisInit(block, verCurrentState.thisInitialized);
3770         }
3771         else if (verCurrentState.thisInitialized != block->bbThisOnEntry())
3772         {
3773             if (block->bbThisOnEntry() != TIS_Top)
3774             {
3775                 *changed = true;
3776                 verSetThisInit(block, TIS_Top);
3777
3778                 if (block->bbFlags & BBF_FAILED_VERIFICATION)
3779                 {
3780                     // The block is bad. Control can flow through the block to any handler that catches the
3781                     // verification exception, but the importer ignores bad blocks and therefore won't model
3782                     // this flow in the normal way. To complete the merge into the bad block, the new state
3783                     // needs to be manually pushed to the handlers that may be reached after the verification
3784                     // exception occurs.
3785                     //
3786                     // Usually, the new state was already propagated to the relevant handlers while processing
3787                     // the predecessors of the bad block. The exception is when the bad block is at the start
3788                     // of a try region, meaning it is protected by additional handlers that do not protect its
3789                     // predecessors.
3790                     //
3791                     if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0))
3792                     {
3793                         // Push TIS_Top to the handlers that protect the bad block. Note that this can cause
3794                         // recursive calls back into this code path (if successors of the current bad block are
3795                         // also bad blocks).
3796                         //
3797                         ThisInitState origTIS           = verCurrentState.thisInitialized;
3798                         verCurrentState.thisInitialized = TIS_Top;
3799                         impVerifyEHBlock(block, true);
3800                         verCurrentState.thisInitialized = origTIS;
3801                     }
3802                 }
3803             }
3804         }
3805     }
3806     else
3807     {
3808         assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom);
3809     }
3810
3811     return TRUE;
3812 }
3813
3814 /*****************************************************************************
3815  * 'logMsg' is true if a log message needs to be logged, false if the caller has
3816  *   already logged it (presumably in a more detailed fashion than done here)
3817  * 'bVerificationException' is true for a verification exception, false for a
3818  *   "call unauthorized by host" exception.
3819  */
3820
3821 void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg))
3822 {
3823     block->bbJumpKind = BBJ_THROW;
3824     block->bbFlags |= BBF_FAILED_VERIFICATION;
3825
3826     impCurStmtOffsSet(block->bbCodeOffs);
3827
3828 #ifdef DEBUG
3829     // we need this since BeginTreeList asserts otherwise
3830     impTreeList = impTreeLast = nullptr;
3831     block->bbFlags &= ~BBF_IMPORTED;
3832
3833     if (logMsg)
3834     {
3835         JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName,
3836                 block->bbCodeOffs, block->bbCodeOffsEnd));
3837         if (verbose)
3838         {
3839             printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs);
3840         }
3841     }
3842
3843     if (JitConfig.DebugBreakOnVerificationFailure())
3844     {
3845         DebugBreak();
3846     }
3847 #endif
3848
3849     impBeginTreeList();
3850
3851     // if the stack is non-empty evaluate all the side-effects
3852     if (verCurrentState.esStackDepth > 0)
3853     {
3854         impEvalSideEffects();
3855     }
3856     assert(verCurrentState.esStackDepth == 0);
3857
3858     GenTreePtr op1 = gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, GTF_EXCEPT,
3859                                          gtNewArgList(gtNewIconNode(block->bbCodeOffs)));
3860     // verCurrentState.esStackDepth = 0;
3861     impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
3862
3863     // The inliner is not able to handle methods that require a throw block, so
3864     // make sure this method never gets inlined.
3865     info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE);
3866 }
3867
3868 /*****************************************************************************
3869  *
3870  */
3871 void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg))
3872
3873 {
3874     // In AMD64, for historical reasons involving design limitations of JIT64, the VM has a
3875     // slightly different mechanism in which it calls the JIT to perform IL verification:
3876     // in the case of transparent methods the VM calls for a predicate IsVerifiable()
3877     // that consists of calling the JIT with the IMPORT_ONLY flag and with the IL verify flag on.
3878     // If the JIT determines the method is not verifiable, it should raise the exception to the VM and let
3879     // it bubble up until reported by the runtime.  Currently in RyuJIT, this method doesn't bubble
3880     // up the exception; instead it embeds a throw inside the offending basic block and lets it
3881     // fail at runtime when the jitted method executes.
3882     //
3883     // For AMD64 we don't want this behavior when the JIT has been called only for verification (i.e.
3884     // with the IMPORT_ONLY and IL Verification flag set) because this won't actually generate code,
3885     // just try to find out whether to fail this method before even actually jitting it.  So, in case
3886     // we detect these two conditions, instead of generating a throw statement inside the offending
3887     // basic block, we immediately fail to JIT and notify the VM to make the IsVerifiable() predicate
3888     // to return false and make RyuJIT behave the same way JIT64 does.
3889     //
3890     // The rationale behind this workaround is to avoid modifying the VM and maintain compatibility between JIT64 and
3891     // RyuJIT for the time being until we completely replace JIT64.
3892     // TODO-ARM64-Cleanup:  We probably want to actually modify the VM in the future to avoid the unnecessary two passes.
3893
3894     // In AMD64 we must make sure we're behaving the same way as JIT64, meaning we should only raise the verification
3895     // exception if we are only importing and verifying.  The method verNeedsVerification() can also modify the
3896     // tiVerificationNeeded flag in the case it determines it can 'skip verification' during importation and defer it
3897     // to a runtime check. That's why we must assert one or the other (since the flag tiVerificationNeeded can
3898     // be turned off during importation).
3899     CLANG_FORMAT_COMMENT_ANCHOR;
3900
3901 #ifdef _TARGET_64BIT_
3902
3903 #ifdef DEBUG
3904     bool canSkipVerificationResult =
3905         info.compCompHnd->canSkipMethodVerification(info.compMethodHnd) != CORINFO_VERIFICATION_CANNOT_SKIP;
3906     assert(tiVerificationNeeded || canSkipVerificationResult);
3907 #endif // DEBUG
3908
3909     // Add the non verifiable flag to the compiler
3910     if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
3911     {
3912         tiIsVerifiableCode = FALSE;
3913     }
3914 #endif //_TARGET_64BIT_
3915     verResetCurrentState(block, &verCurrentState);
3916     verConvertBBToThrowVerificationException(block DEBUGARG(logMsg));
3917
3918 #ifdef DEBUG
3919     impNoteLastILoffs(); // Remember at which BC offset the tree was finished
3920 #endif                   // DEBUG
3921 }
3922
3923 /******************************************************************************/
3924 typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd)
3925 {
3926     assert(ciType < CORINFO_TYPE_COUNT);
3927
3928     typeInfo tiResult;
3929     switch (ciType)
3930     {
3931         case CORINFO_TYPE_STRING:
3932         case CORINFO_TYPE_CLASS:
3933             tiResult = verMakeTypeInfo(clsHnd);
3934             if (!tiResult.IsType(TI_REF))
3935             { // type must be consistent with element type
3936                 return typeInfo();
3937             }
3938             break;
3939
3940 #ifdef _TARGET_64BIT_
3941         case CORINFO_TYPE_NATIVEINT:
3942         case CORINFO_TYPE_NATIVEUINT:
3943             if (clsHnd)
3944             {
3945                 // If we have more precise information, use it
3946                 return verMakeTypeInfo(clsHnd);
3947             }
3948             else
3949             {
3950                 return typeInfo::nativeInt();
3951             }
3952             break;
3953 #endif // _TARGET_64BIT_
3954
3955         case CORINFO_TYPE_VALUECLASS:
3956         case CORINFO_TYPE_REFANY:
3957             tiResult = verMakeTypeInfo(clsHnd);
3958             // type must be consistent with element type
3959             if (!tiResult.IsValueClass())
3960             {
3961                 return typeInfo();
3962             }
3963             break;
3964         case CORINFO_TYPE_VAR:
3965             return verMakeTypeInfo(clsHnd);
3966
3967         case CORINFO_TYPE_PTR: // for now, pointers are treated as an error
3968         case CORINFO_TYPE_VOID:
3969             return typeInfo();
3970             break;
3971
3972         case CORINFO_TYPE_BYREF:
3973         {
3974             CORINFO_CLASS_HANDLE childClassHandle;
3975             CorInfoType          childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle);
3976             return ByRef(verMakeTypeInfo(childType, childClassHandle));
3977         }
3978         break;
3979
3980         default:
3981             if (clsHnd)
3982             { // If we have more precise information, use it
3983                 return typeInfo(TI_STRUCT, clsHnd);
3984             }
3985             else
3986             {
3987                 return typeInfo(JITtype2tiType(ciType));
3988             }
3989     }
3990     return tiResult;
3991 }
3992
3993 /******************************************************************************/
3994
3995 typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */)
3996 {
3997     if (clsHnd == nullptr)
3998     {
3999         return typeInfo();
4000     }
4001
4002     // Byrefs should only occur in method and local signatures, which are accessed
4003     // using ICorClassInfo and ICorClassInfo.getChildType.
4004     // So findClass() and getClassAttribs() should not be called for byrefs
4005
4006     if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF)
4007     {
4008         assert(!"Did findClass() return a Byref?");
4009         return typeInfo();
4010     }
4011
4012     unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd);
4013
4014     if (attribs & CORINFO_FLG_VALUECLASS)
4015     {
4016         CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd);
4017
4018         // Meta-data validation should ensure that CORINFO_TYPE_BYREF does
4019         // not occur here, so we may want to change this to an assert instead.
4020         if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR)
4021         {
4022             return typeInfo();
4023         }
4024
4025 #ifdef _TARGET_64BIT_
4026         if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT)
4027         {
4028             return typeInfo::nativeInt();
4029         }
4030 #endif // _TARGET_64BIT_
4031
4032         if (t != CORINFO_TYPE_UNDEF)
4033         {
4034             return (typeInfo(JITtype2tiType(t)));
4035         }
4036         else if (bashStructToRef)
4037         {
4038             return (typeInfo(TI_REF, clsHnd));
4039         }
4040         else
4041         {
4042             return (typeInfo(TI_STRUCT, clsHnd));
4043         }
4044     }
4045     else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE)
4046     {
4047         // See comment in _typeInfo.h for why we do it this way.
4048         return (typeInfo(TI_REF, clsHnd, true));
4049     }
4050     else
4051     {
4052         return (typeInfo(TI_REF, clsHnd));
4053     }
4054 }
4055
4056 /******************************************************************************/
4057 BOOL Compiler::verIsSDArray(typeInfo ti)
4058 {
4059     if (ti.IsNullObjRef())
4060     { // nulls are SD arrays
4061         return TRUE;
4062     }
4063
4064     if (!ti.IsType(TI_REF))
4065     {
4066         return FALSE;
4067     }
4068
4069     if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef()))
4070     {
4071         return FALSE;
4072     }
4073     return TRUE;
4074 }
4075
4076 /******************************************************************************/
4077 /* Given 'arrayObjectType' which is an array type, fetch the element type. */
4078 /* Returns an error type if anything goes wrong */
4079
4080 typeInfo Compiler::verGetArrayElemType(typeInfo arrayObjectType)
4081 {
4082     assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case
4083
4084     if (!verIsSDArray(arrayObjectType))
4085     {
4086         return typeInfo();
4087     }
4088
4089     CORINFO_CLASS_HANDLE childClassHandle = nullptr;
4090     CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle);
4091
4092     return verMakeTypeInfo(ciType, childClassHandle);
4093 }
4094
4095 /*****************************************************************************
4096  */
4097 typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args)
4098 {
4099     CORINFO_CLASS_HANDLE classHandle;
4100     CorInfoType          ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle));
4101
4102     var_types type = JITtype2varType(ciType);
4103     if (varTypeIsGC(type))
4104     {
4105         // For efficiency, getArgType only returns something in classHandle for
4106         // value types.  For other types that have additional type info, you
4107         // have to call back explicitly
4108         classHandle = info.compCompHnd->getArgClass(sig, args);
4109         if (!classHandle)
4110         {
4111             NO_WAY("Could not figure out Class specified in argument or local signature");
4112         }
4113     }
4114
4115     return verMakeTypeInfo(ciType, classHandle);
4116 }
4117
4118 /*****************************************************************************/
4119
4120 // This does the expensive check to figure out whether the method
4121 // needs to be verified. It is called only when we fail verification,
4122 // just before throwing the verification exception.
4123
4124 BOOL Compiler::verNeedsVerification()
4125 {
4126     // If we have previously determined that verification is NOT needed
4127     // (for example in Compiler::compCompile), that means verification is really not needed.
4128     // Return the same decision we made before.
4129     // (Note: This literally means that tiVerificationNeeded can never go from 0 to 1.)
4130
4131     if (!tiVerificationNeeded)
4132     {
4133         return tiVerificationNeeded;
4134     }
4135
4136     assert(tiVerificationNeeded);
4137
4138     // Ok, we haven't concluded that verification is NOT needed. Consult the EE now to
4139     // obtain the answer.
4140     CorInfoCanSkipVerificationResult canSkipVerificationResult =
4141         info.compCompHnd->canSkipMethodVerification(info.compMethodHnd);
4142
4143     // canSkipVerification will return one of the following three values:
4144     //    CORINFO_VERIFICATION_CANNOT_SKIP = 0,       // Cannot skip verification during jit time.
4145     //    CORINFO_VERIFICATION_CAN_SKIP = 1,          // Can skip verification during jit time.
4146     //    CORINFO_VERIFICATION_RUNTIME_CHECK = 2,     // Skip verification during jit time,
4147     //     but need to insert a callout to the VM to ask during runtime
4148     //     whether to skip verification or not.
4149
4150     // Set tiRuntimeCalloutNeeded if canSkipVerification() instructs us to insert a callout for runtime check
4151     if (canSkipVerificationResult == CORINFO_VERIFICATION_RUNTIME_CHECK)
4152     {
4153         tiRuntimeCalloutNeeded = true;
4154     }
4155
4156     if (canSkipVerificationResult == CORINFO_VERIFICATION_DONT_JIT)
4157     {
4158         // Dev10 706080 - Testers don't like the assert, so just silence it
4159         // by not using the macros that invoke debugAssert.
4160         badCode();
4161     }
4162
4163     // When tiVerificationNeeded is true, JIT will do the verification during JIT time.
4164     // The following line means we will NOT do jit time verification if canSkipVerification
4165     // returns CORINFO_VERIFICATION_CAN_SKIP or CORINFO_VERIFICATION_RUNTIME_CHECK.
4166     tiVerificationNeeded = (canSkipVerificationResult == CORINFO_VERIFICATION_CANNOT_SKIP);
4167     return tiVerificationNeeded;
4168 }
4169
4170 BOOL Compiler::verIsByRefLike(const typeInfo& ti)
4171 {
4172     if (ti.IsByRef())
4173     {
4174         return TRUE;
4175     }
4176     if (!ti.IsType(TI_STRUCT))
4177     {
4178         return FALSE;
4179     }
4180     return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR;
4181 }
4182
4183 BOOL Compiler::verIsSafeToReturnByRef(const typeInfo& ti)
4184 {
4185     if (ti.IsPermanentHomeByRef())
4186     {
4187         return TRUE;
4188     }
4189     else
4190     {
4191         return FALSE;
4192     }
4193 }
4194
4195 BOOL Compiler::verIsBoxable(const typeInfo& ti)
4196 {
4197     return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables
4198             || ti.IsUnboxedGenericTypeVar() ||
4199             (ti.IsType(TI_STRUCT) &&
4200              // exclude byreflike structs
4201              !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR)));
4202 }
4203
4204 // Is it a boxed value type?
4205 bool Compiler::verIsBoxedValueType(typeInfo ti)
4206 {
4207     if (ti.GetType() == TI_REF)
4208     {
4209         CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef();
4210         return !!eeIsValueClass(clsHnd);
4211     }
4212     else
4213     {
4214         return false;
4215     }
4216 }
4217
4218 /*****************************************************************************
4219  *
4220  *  Check if a TailCall is legal.
4221  */
4222
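// For example (an illustrative C# sketch, not taken from this file): a method such as
//     static int M(ref int x) { return Callee(ref x); }   // with a "tail." prefix on the call
// must be rejected, because the byref argument may point into M's own frame, which no longer
// exists once the tail call replaces it; the byref/pointer checks below enforce this.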
4223 bool Compiler::verCheckTailCallConstraint(
4224     OPCODE                  opcode,
4225     CORINFO_RESOLVED_TOKEN* pResolvedToken,
4226     CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter?
4227     bool                    speculative                // If true, won't throw if verification fails. Instead it will
4228                                                        // return false to the caller.
4229                                                        // If false, it will throw.
4230     )
4231 {
4232     DWORD            mflags;
4233     CORINFO_SIG_INFO sig;
4234     unsigned int     popCount = 0; // we can't pop the stack since impImportCall needs it, so
4235                                    // this counter is used to keep track of how many items have been
4236                                    // virtually popped
4237
4238     CORINFO_METHOD_HANDLE methodHnd       = nullptr;
4239     CORINFO_CLASS_HANDLE  methodClassHnd  = nullptr;
4240     unsigned              methodClassFlgs = 0;
4241
4242     assert(impOpcodeIsCallOpcode(opcode));
4243
4244     if (compIsForInlining())
4245     {
4246         return false;
4247     }
4248
4249     // for calli, we don't know the target method, so just get the call-site signature
4250     if (opcode == CEE_CALLI)
4251     {
4252         /* Get the call sig */
4253         eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4254
4255         // We don't know the target method, so we have to infer the flags, or
4256         // assume the worst-case.
4257         mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
4258     }
4259     else
4260     {
4261         methodHnd = pResolvedToken->hMethod;
4262
4263         mflags = info.compCompHnd->getMethodAttribs(methodHnd);
4264
4265         // When verifying generic code we pair the method handle with its
4266         // owning class to get the exact method signature.
4267         methodClassHnd = pResolvedToken->hClass;
4268         assert(methodClassHnd);
4269
4270         eeGetMethodSig(methodHnd, &sig, methodClassHnd);
4271
4272         // opcode specific check
4273         methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd);
4274     }
4275
4276     // We must have got the methodClassHnd if opcode is not CEE_CALLI
4277     assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI);
4278
4279     if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4280     {
4281         eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4282     }
4283
4284     // check compatibility of the arguments
4285     unsigned int argCount;
4286     argCount = sig.numArgs;
4287     CORINFO_ARG_LIST_HANDLE args;
4288     args = sig.args;
4289     while (argCount--)
4290     {
4291         typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack();
4292
4293         // check that the argument is not a byref for tailcalls
4294         VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative);
4295
4296         // For unsafe code, we might have parameters containing pointer to the stack location.
4297         // Disallow the tailcall for this kind.
4298         CORINFO_CLASS_HANDLE classHandle;
4299         CorInfoType          ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle));
4300         VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative);
4301
4302         args = info.compCompHnd->getArgNext(args);
4303     }
4304
4305     // update popCount
4306     popCount += sig.numArgs;
4307
4308     // check for 'this' which is on non-static methods, not called via NEWOBJ
4309     if (!(mflags & CORINFO_FLG_STATIC))
4310     {
4311         // Always update the popCount.
4312         // This is crucial for the stack calculation to be correct.
4313         typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4314         popCount++;
4315
4316         if (opcode == CEE_CALLI)
4317         {
4318             // For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object
4319             // on the stack.
4320             if (tiThis.IsValueClass())
4321             {
4322                 tiThis.MakeByRef();
4323             }
4324             VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative);
4325         }
4326         else
4327         {
4328             // Check type compatibility of the this argument
4329             typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd);
4330             if (tiDeclaredThis.IsValueClass())
4331             {
4332                 tiDeclaredThis.MakeByRef();
4333             }
4334
4335             VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative);
4336         }
4337     }
4338
4339     // Tail calls on constrained calls should be illegal too:
4340     // when instantiated at a value type, a constrained call may pass the address of a stack allocated value
4341     VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative);
4342
4343     // Get the exact view of the signature for an array method
4344     if (sig.retType != CORINFO_TYPE_VOID)
4345     {
4346         if (methodClassFlgs & CORINFO_FLG_ARRAY)
4347         {
4348             assert(opcode != CEE_CALLI);
4349             eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4350         }
4351     }
4352
4353     typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass);
4354     typeInfo tiCallerRetType =
4355         verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
4356
4357     // void return type gets morphed into the error type, so we have to treat them specially here
4358     if (sig.retType == CORINFO_TYPE_VOID)
4359     {
4360         VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch",
4361                                   speculative);
4362     }
4363     else
4364     {
4365         VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType),
4366                                                    NormaliseForStack(tiCallerRetType), true),
4367                                   "tailcall return mismatch", speculative);
4368     }
4369
4370     // for tailcall, stack must be empty
4371     VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative);
4372
4373     return true; // Yes, tailcall is legal
4374 }
4375
4376 /*****************************************************************************
4377  *
4378  *  Checks the IL verification rules for the call
4379  */
4380
4381 void Compiler::verVerifyCall(OPCODE                  opcode,
4382                              CORINFO_RESOLVED_TOKEN* pResolvedToken,
4383                              CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
4384                              bool                    tailCall,
4385                              bool                    readonlyCall,
4386                              const BYTE*             delegateCreateStart,
4387                              const BYTE*             codeAddr,
4388                              CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName))
4389 {
4390     DWORD             mflags;
4391     CORINFO_SIG_INFO* sig      = nullptr;
4392     unsigned int      popCount = 0; // we can't pop the stack since impImportCall needs it, so
4393                                     // this counter is used to keep track of how many items have been
4394                                     // virtually popped
4395
4396     // calli is not verifiable, so reject it outright
4397     if (opcode == CEE_CALLI)
4398     {
4399         Verify(false, "Calli not verifiable");
4400         return;
4401     }
4402
4403     //<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item.
4404     mflags = callInfo->verMethodFlags;
4405
4406     sig = &callInfo->verSig;
4407
4408     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4409     {
4410         eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
4411     }
4412
4413     // opcode specific check
4414     unsigned methodClassFlgs = callInfo->classFlags;
4415     switch (opcode)
4416     {
4417         case CEE_CALLVIRT:
4418             // cannot do callvirt on valuetypes
4419             VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class");
4420             VerifyOrReturn(sig->hasThis(), "CallVirt on static method");
4421             break;
4422
4423         case CEE_NEWOBJ:
4424         {
4425             assert(!tailCall); // Importer should not allow this
4426             VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC),
4427                            "newobj must be on instance");
4428
4429             if (methodClassFlgs & CORINFO_FLG_DELEGATE)
4430             {
4431                 VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor");
4432                 typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
4433                 typeInfo tiDeclaredFtn =
4434                     verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack();
4435                 VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type");
4436
4437                 assert(popCount == 0);
4438                 typeInfo tiActualObj = impStackTop(1).seTypeInfo;
4439                 typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
4440
4441                 VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg");
4442                 VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch");
4443                 VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF),
4444                                "delegate object type mismatch");
4445
4446                 CORINFO_CLASS_HANDLE objTypeHandle =
4447                     tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef();
4448
4449                 // the method signature must be compatible with the delegate's invoke method
4450
4451                 // check that for virtual functions, the type of the object used to get the
4452                 // ftn ptr is the same as the type of the object passed to the delegate ctor.
4453                 // since this is a bit of work to determine in general, we pattern match stylized
4454                 // code sequences
4455
4456                 // the delegate creation code check, which used to be done later, is now done here
4457                 // so we can read delegateMethodRef directly
4458                 // from the preceding LDFTN or CEE_LDVIRTFTN instruction sequence;
4459                 // we then use it in our call to isCompatibleDelegate().
4460
4461                 mdMemberRef delegateMethodRef = mdMemberRefNil;
4462                 VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef),
4463                                "must create delegates with certain IL");
4464
4465                 CORINFO_RESOLVED_TOKEN delegateResolvedToken;
4466                 delegateResolvedToken.tokenContext = impTokenLookupContextHandle;
4467                 delegateResolvedToken.tokenScope   = info.compScopeHnd;
4468                 delegateResolvedToken.token        = delegateMethodRef;
4469                 delegateResolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
4470                 info.compCompHnd->resolveToken(&delegateResolvedToken);
4471
4472                 CORINFO_CALL_INFO delegateCallInfo;
4473                 eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */,
4474                               addVerifyFlag(CORINFO_CALLINFO_SECURITYCHECKS), &delegateCallInfo);
4475
4476                 BOOL isOpenDelegate = FALSE;
4477                 VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass,
4478                                                                       tiActualFtn.GetMethod(), pResolvedToken->hClass,
4479                                                                       &isOpenDelegate),
4480                                "function incompatible with delegate");
4481
4482                 // check the constraints on the target method
4483                 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass),
4484                                "delegate target has unsatisfied class constraints");
4485                 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass,
4486                                                                             tiActualFtn.GetMethod()),
4487                                "delegate target has unsatisfied method constraints");
4488
4489                 // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch)
4490                 // for additional verification rules for delegates
4491                 CORINFO_METHOD_HANDLE actualMethodHandle  = tiActualFtn.GetMethod();
4492                 DWORD                 actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle);
4493                 if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
4494                 {
4495
4496                     if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0)
4497 #ifdef DEBUG
4498                         && StrictCheckForNonVirtualCallToVirtualMethod()
4499 #endif
4500                             )
4501                     {
4502                         if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
4503                         {
4504                             VerifyOrReturn(tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly() ||
4505                                                verIsBoxedValueType(tiActualObj),
4506                                            "The 'this' parameter to the call must be either the calling method's "
4507                                            "'this' parameter or "
4508                                            "a boxed value type.");
4509                         }
4510                     }
4511                 }
4512
4513                 if (actualMethodAttribs & CORINFO_FLG_PROTECTED)
4514                 {
4515                     BOOL targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC;
4516
4517                     Verify(targetIsStatic || !isOpenDelegate,
4518                            "Unverifiable creation of an open instance delegate for a protected member.");
4519
4520                     CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic)
4521                                                                 ? info.compClassHnd
4522                                                                 : tiActualObj.GetClassHandleForObjRef();
4523
4524                     // In the case of protected methods, it is a requirement that the 'this'
4525                     // pointer be a subclass of the current context.  Perform this check.
4526                     Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
4527                            "Accessing protected method through wrong type.");
4528                 }
4529                 goto DONE_ARGS;
4530             }
4531         }
4532         // fall thru to default checks
4533         default:
4534             VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract");
4535     }
4536     VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)),
4537                    "can only newobj a delegate constructor");
4538
4539     // check compatibility of the arguments
4540     unsigned int argCount;
4541     argCount = sig->numArgs;
4542     CORINFO_ARG_LIST_HANDLE args;
4543     args = sig->args;
4544     while (argCount--)
4545     {
4546         typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo;
4547
4548         typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack();
4549         VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch");
4550
4551         args = info.compCompHnd->getArgNext(args);
4552     }
4553
4554 DONE_ARGS:
4555
4556     // update popCount
4557     popCount += sig->numArgs;
4558
4559     // check for 'this' which is on non-static methods, not called via NEWOBJ
4560     CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
4561     if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ))
4562     {
4563         typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4564         popCount++;
4565
4566         // If it is null, we assume we can access it (since it will AV shortly)
4567         // If it is anything but a reference class, there is no hierarchy, so
4568         // again, we don't need the precise instance class to compute 'protected' access
4569         if (tiThis.IsType(TI_REF))
4570         {
4571             instanceClassHnd = tiThis.GetClassHandleForObjRef();
4572         }
4573
4574         // Check type compatibility of the this argument
4575         typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass);
4576         if (tiDeclaredThis.IsValueClass())
4577         {
4578             tiDeclaredThis.MakeByRef();
4579         }
4580
4581         // If this is a call to the base class .ctor, set thisPtr Init for
4582         // this block.
4583         if (mflags & CORINFO_FLG_CONSTRUCTOR)
4584         {
4585             if (verTrackObjCtorInitState && tiThis.IsThisPtr() &&
4586                 verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass))
4587             {
4588                 assert(verCurrentState.thisInitialized !=
4589                        TIS_Bottom); // This should never be the case just from the logic of the verifier.
4590                 VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit,
4591                                "Call to base class constructor when 'this' is possibly initialized");
4592                 // Otherwise, 'this' is now initialized.
4593                 verCurrentState.thisInitialized = TIS_Init;
4594                 tiThis.SetInitialisedObjRef();
4595             }
4596             else
4597             {
4598                 // We allow direct calls to value type constructors
4599                 // NB: we have to check that the contents of tiThis is a value type, otherwise we could use a
4600                 // constrained callvirt to illegally re-enter a .ctor on a value of reference type.
4601                 VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(),
4602                                "Bad call to a constructor");
4603             }
4604         }
4605
4606         if (pConstrainedResolvedToken != nullptr)
4607         {
4608             VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call");
4609
4610             typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass);
4611
4612             // We just dereference this and test for equality
4613             tiThis.DereferenceByRef();
4614             VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint),
4615                            "this type mismatch with constrained type operand");
4616
4617             // Now pretend the this type is the boxed constrained type, for the sake of subsequent checks
4618             tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass);
4619         }
4620
4621         // To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too
4622         if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef())
4623         {
4624             tiDeclaredThis.SetIsReadonlyByRef();
4625         }
4626
4627         VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch");
4628
4629         if (tiThis.IsByRef())
4630         {
4631             // Find the actual type where the method exists (as opposed to what is declared
4632             // in the metadata). This is to prevent passing a byref as the "this" argument
4633             // while calling methods like System.ValueType.GetHashCode() which expect boxed objects.
4634
4635             CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod);
4636             VerifyOrReturn(eeIsValueClass(actualClassHnd),
4637                            "Call to base type of valuetype (which is never a valuetype)");
4638         }
4639
4640         // Rules for non-virtual call to a non-final virtual method:
4641
4642         // Define:
4643         // The "this" pointer is considered to be "possibly written" if
4644         //   1. Its address have been taken (LDARGA 0) anywhere in the method.
4645         //   (or)
4646         //   2. It has been stored to (STARG.0) anywhere in the method.
4647
4648         // A non-virtual call to a non-final virtual method is only allowed if
4649         //   1. The this pointer passed to the callee is an instance of a boxed value type.
4650         //   (or)
4651         //   2. The this pointer passed to the callee is the current method's this pointer.
4652         //      (and) The current method's this pointer is not "possibly written".
4653
4654         // Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to
4655         // virtual methods.  (Luckily this does not affect .ctors, since they are not virtual).
4656         // This is stronger than is strictly needed, but implementing a laxer rule is significantly
4657         // harder and more error prone.
4658
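        // For example (illustrative only): C# "base.Foo()" compiles to a non-virtual
        // "call Base::Foo"; under the rules above it stays verifiable only when 'this' is the
        // caller's own, never-written 'this' pointer (or a boxed value type).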
4659         if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0)
4660 #ifdef DEBUG
4661             && StrictCheckForNonVirtualCallToVirtualMethod()
4662 #endif
4663                 )
4664         {
4665             if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
4666             {
4667                 VerifyOrReturn(
4668                     tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly() || verIsBoxedValueType(tiThis),
4669                     "The 'this' parameter to the call must be either the calling method's 'this' parameter or "
4670                     "a boxed value type.");
4671             }
4672         }
4673     }
4674
4675     // check any constraints on the callee's class and type parameters
4676     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass),
4677                    "method has unsatisfied class constraints");
4678     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod),
4679                    "method has unsatisfied method constraints");
4680
4681     if (mflags & CORINFO_FLG_PROTECTED)
4682     {
4683         VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
4684                        "Can't access protected method");
4685     }
4686
4687     // Get the exact view of the signature for an array method
4688     if (sig->retType != CORINFO_TYPE_VOID)
4689     {
4690         eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass);
4691     }
4692
4693     // "readonly." prefixed calls only allowed for the Address operation on arrays.
4694     // The methods supported by array types are under the control of the EE
4695     // so we can trust that only the Address operation returns a byref.
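    // (For example, and only as an illustration: the C# compiler emits "readonly." before a call
    //  to an array type's Address method when the element address is taken solely for reading,
    //  which lets the covariant-array type check be skipped; any other use of the prefix is
    //  rejected by the check below.)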
4696     if (readonlyCall)
4697     {
4698         typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass);
4699         VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(),
4700                        "unexpected use of readonly prefix");
4701     }
4702
4703     // Verify the tailcall
4704     if (tailCall)
4705     {
4706         verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false);
4707     }
4708 }
4709
4710 /*****************************************************************************
4711  *  Checks that a delegate creation is done using the following pattern:
4712  *     dup
4713  *     ldvirtftn targetMemberRef
4714  *  OR
4715  *     ldftn targetMemberRef
4716  *
4717  * 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if
4718  *  not in this basic block)
4719  *
4720  *  targetMemberRef is read from the code sequence.
4721  *  targetMemberRef is validated iff verificationNeeded.
4722  */
4723
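// For example (an illustrative sketch, not taken from this file): creating a delegate over a
// virtual method generally looks like
//     dup
//     ldvirtftn SomeVirtualMethod
//     newobj    SomeDelegate::.ctor
// whereas a non-virtual target uses a plain "ldftn"; the member token is read straight out of
// the ldftn/ldvirtftn instruction below.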
4724 BOOL Compiler::verCheckDelegateCreation(const BYTE*  delegateCreateStart,
4725                                         const BYTE*  codeAddr,
4726                                         mdMemberRef& targetMemberRef)
4727 {
4728     if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
4729     {
4730         targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]);
4731         return TRUE;
4732     }
4733     else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr))
4734     {
4735         targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]);
4736         return TRUE;
4737     }
4738
4739     return FALSE;
4740 }
4741
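// Verify an indirect store (st*ind): the destination must be a writable (non-readonly) byref
// whose pointed-to type matches the instruction, and the value being stored must be
// stack-compatible with that type.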
4742 typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType)
4743 {
4744     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
4745     typeInfo ptrVal     = verVerifyLDIND(tiTo, instrType);
4746     typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack();
4747     if (!tiCompatibleWith(value, normPtrVal, true))
4748     {
4749         Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch");
4750         compUnsafeCastUsed = true;
4751     }
4752     return ptrVal;
4753 }
4754
4755 typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType)
4756 {
4757     assert(!instrType.IsStruct());
4758
4759     typeInfo ptrVal;
4760     if (ptr.IsByRef())
4761     {
4762         ptrVal = DereferenceByRef(ptr);
4763         if (instrType.IsObjRef() && !ptrVal.IsObjRef())
4764         {
4765             Verify(false, "bad pointer");
4766             compUnsafeCastUsed = true;
4767         }
4768         else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal))
4769         {
4770             Verify(false, "pointer not consistent with instr");
4771             compUnsafeCastUsed = true;
4772         }
4773     }
4774     else
4775     {
4776         Verify(false, "pointer not byref");
4777         compUnsafeCastUsed = true;
4778     }
4779
4780     return ptrVal;
4781 }
4782
4783 // Verify that the field is used properly.  'tiThis' is NULL for statics,
4784 // 'fieldFlags' is the field's attributes, and 'mutator' is TRUE if it is a
4785 // ld*flda or a st*fld.
4786 // 'enclosingClass' is given if we are accessing a field in some specific type.
4787
4788 void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN*   pResolvedToken,
4789                               const CORINFO_FIELD_INFO& fieldInfo,
4790                               const typeInfo*           tiThis,
4791                               BOOL                      mutator,
4792                               BOOL                      allowPlainStructAsThis)
4793 {
4794     CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass;
4795     unsigned             fieldFlags     = fieldInfo.fieldFlags;
4796     CORINFO_CLASS_HANDLE instanceClass =
4797         info.compClassHnd; // for statics, we imagine the instance is the current class.
4798
4799     bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0);
4800     if (mutator)
4801     {
4802         Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA based static");
4803         if ((fieldFlags & CORINFO_FLG_FIELD_FINAL))
4804         {
4805             Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd &&
4806                        info.compIsStatic == isStaticField,
4807                    "bad use of initonly field (set or address taken)");
4808         }
4809     }
4810
4811     if (tiThis == nullptr)
4812     {
4813         Verify(isStaticField, "used static opcode with non-static field");
4814     }
4815     else
4816     {
4817         typeInfo tThis = *tiThis;
4818
4819         if (allowPlainStructAsThis && tThis.IsValueClass())
4820         {
4821             tThis.MakeByRef();
4822         }
4823
4824         // If it is null, we assume we can access it (since it will AV shortly)
4825         // If it is anything but a reference class, there is no hierarchy, so
4826         // again, we don't need the precise instance class to compute 'protected' access
4827         if (tiThis->IsType(TI_REF))
4828         {
4829             instanceClass = tiThis->GetClassHandleForObjRef();
4830         }
4831
4832         // Note that even if the field is static, we require that the this pointer
4833         // satisfy the same constraints as a non-static field.  This happens to
4834         // be simpler and seems reasonable.
4835         typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass);
4836         if (tiDeclaredThis.IsValueClass())
4837         {
4838             tiDeclaredThis.MakeByRef();
4839
4840             // we allow read-only tThis, on any field access (even stores!), because if the
4841             // class implementor wants to prohibit stores he should make the field private.
4842             // we do this by setting the read-only bit on the type we compare tThis to.
4843             tiDeclaredThis.SetIsReadonlyByRef();
4844         }
4845         else if (verTrackObjCtorInitState && tThis.IsThisPtr())
4846         {
4847             // Any field access is legal on "uninitialized" this pointers.
4848             // The easiest way to implement this is to simply set the
4849             // initialized bit for the duration of the type check on the
4850             // field access only.  It does not change the state of the "this"
4851             // for the function as a whole. Note that the "tThis" is a copy
4852             // of the original "this" type (*tiThis) passed in.
4853             tThis.SetInitialisedObjRef();
4854         }
4855
4856         Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch");
4857     }
4858
4859     // Presently the JIT does not check that we don't store or take the address of init-only fields
4860     // since we cannot guarantee their immutability and it is not a security issue.
4861
4862     // check any constraints on the field's class --- accessing the field might cause a class constructor to run.
4863     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass),
4864                    "field has unsatisfied class constraints");
4865     if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED)
4866     {
4867         Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass),
4868                "Accessing protected method through wrong type.");
4869     }
4870 }
4871
4872 void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode)
4873 {
4874     if (tiOp1.IsNumberType())
4875     {
4876 #ifdef _TARGET_64BIT_
4877         Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch");
4878 #else  // _TARGET_64BIT
4879         // [10/17/2013] Consider changing this: to put on my verification lawyer hat,
4880         // this is non-conforming to the ECMA Spec: types don't have to be equivalent,
4881         // but compatible, since we can coalesce native int with int32 (see section III.1.5).
4882         Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch");
4883 #endif // !_TARGET_64BIT_
4884     }
4885     else if (tiOp1.IsObjRef())
4886     {
4887         switch (opcode)
4888         {
4889             case CEE_BEQ_S:
4890             case CEE_BEQ:
4891             case CEE_BNE_UN_S:
4892             case CEE_BNE_UN:
4893             case CEE_CEQ:
4894             case CEE_CGT_UN:
4895                 break;
4896             default:
4897                 Verify(FALSE, "Cond not allowed on object types");
4898         }
4899         Verify(tiOp2.IsObjRef(), "Cond type mismatch");
4900     }
4901     else if (tiOp1.IsByRef())
4902     {
4903         Verify(tiOp2.IsByRef(), "Cond type mismatch");
4904     }
4905     else
4906     {
4907         Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch");
4908     }
4909 }
4910
4911 void Compiler::verVerifyThisPtrInitialised()
4912 {
4913     if (verTrackObjCtorInitState)
4914     {
4915         Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized");
4916     }
4917 }
4918
4919 BOOL Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target)
4920 {
4921     // Either target == context, in which case we are calling an alternate .ctor,
4922     // or target is the immediate parent of context.
4923
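    // For example (illustrative only): inside C's .ctor, a "this(...)" chain resolves to
    // target == C, while a "base(...)" chain resolves to target == getParentType(C).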
4924     return ((target == context) || (target == info.compCompHnd->getParentType(context)));
4925 }
4926
4927 GenTreePtr Compiler::impImportLdvirtftn(GenTreePtr              thisPtr,
4928                                         CORINFO_RESOLVED_TOKEN* pResolvedToken,
4929                                         CORINFO_CALL_INFO*      pCallInfo)
4930 {
4931     if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE))
4932     {
4933         NO_WAY("Virtual call to a function added via EnC is not supported");
4934     }
4935
4936 #ifdef FEATURE_READYTORUN_COMPILER
4937     if (opts.IsReadyToRun())
4938     {
4939         if (!pCallInfo->exactContextNeedsRuntimeLookup)
4940         {
4941             GenTreeCall* call = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, GTF_EXCEPT,
4942                                                     gtNewArgList(thisPtr));
4943
4944             call->setEntryPoint(pCallInfo->codePointerLookup.constLookup);
4945
4946             return call;
4947         }
4948
4949         // We need a runtime lookup. CoreRT has a ReadyToRun helper for that too.
4950         if (IsTargetAbi(CORINFO_CORERT_ABI))
4951         {
4952             GenTreePtr ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind);
4953
4954             return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
4955                                              gtNewArgList(ctxTree), &pCallInfo->codePointerLookup.lookupKind);
4956         }
4957     }
4958 #endif
4959
4960     // Get the exact descriptor for the static callsite
4961     GenTreePtr exactTypeDesc = impParentClassTokenToHandle(pResolvedToken);
4962     if (exactTypeDesc == nullptr)
4963     { // compDonotInline()
4964         return nullptr;
4965     }
4966
4967     GenTreePtr exactMethodDesc = impTokenToHandle(pResolvedToken);
4968     if (exactMethodDesc == nullptr)
4969     { // compDonotInline()
4970         return nullptr;
4971     }
4972
4973     GenTreeArgList* helpArgs = gtNewArgList(exactMethodDesc);
4974
4975     helpArgs = gtNewListNode(exactTypeDesc, helpArgs);
4976
4977     helpArgs = gtNewListNode(thisPtr, helpArgs);
4978
4979     // Call helper function.  This gets the target address of the final destination callsite.
4980
4981     return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, GTF_EXCEPT, helpArgs);
4982 }
4983
4984 /*****************************************************************************
4985  *
4986  *  Build and import a box node
4987  */
4988
4989 void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
4990 {
4991     // Get the tree for the type handle for the boxed object.  In the case
4992     // of shared generic code or ngen'd code this might be an embedded
4993     // computation.
4994     // Note we can only do this if the class constructor has been called.
4995     // We can always do it on primitive types.
4996
4997     GenTreePtr op1 = nullptr;
4998     GenTreePtr op2 = nullptr;
4999     var_types  lclTyp;
5000
5001     impSpillSpecialSideEff();
5002
5003     // Now get the expression to box from the stack.
5004     CORINFO_CLASS_HANDLE operCls;
5005     GenTreePtr           exprToBox = impPopStack(operCls).val;
5006
5007     CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
5008     if (boxHelper == CORINFO_HELP_BOX)
5009     {
5010         // we are doing 'normal' boxing.  This means that we can inline the box operation
5011         // Box(expr) gets morphed into
5012         // temp = new(clsHnd)
5013         // cpobj(temp+4, expr, clsHnd)
5014         // push temp
5015         // The code paths differ slightly below for structs and primitives because
5016         // "cpobj" differs in these cases.  In one case you get
5017         //    impAssignStructPtr(temp+4, expr, clsHnd)
5018         // and the other you get
5019         //    *(temp+4) = expr
5020
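        // (In the sketch above, "temp+4" stands for the offset of the boxed payload just past
        //  the method table pointer; the code below actually adds sizeof(void*), so the offset
        //  is pointer-sized rather than a literal 4.)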
5021         if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM)
5022         {
5023             impBoxTemp = lvaGrabTemp(true DEBUGARG("Box Helper"));
5024         }
5025
5026         // needs to stay in use until this box expression is appended to
5027         // some other node.  We approximate this by keeping it alive until
5028         // the opcode stack becomes empty
5029         impBoxTempInUse = true;
5030
5031 #ifdef FEATURE_READYTORUN_COMPILER
5032         bool usingReadyToRunHelper = false;
5033
5034         if (opts.IsReadyToRun())
5035         {
5036             op1                   = impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
5037             usingReadyToRunHelper = (op1 != nullptr);
5038         }
5039
5040         if (!usingReadyToRunHelper)
5041 #endif
5042         {
5043             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
5044             // and the newfast call with a single call to a dynamic R2R cell that will:
5045             //      1) Load the context
5046             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
5047             //      3) Allocate and return the new object for boxing
5048             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
5049
5050             // Ensure that the value class is restored
5051             op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5052             if (op2 == nullptr)
5053             { // compDonotInline()
5054                 return;
5055             }
5056
5057             op1 = gtNewHelperCallNode(info.compCompHnd->getNewHelper(pResolvedToken, info.compMethodHnd), TYP_REF, 0,
5058                                       gtNewArgList(op2));
5059         }
5060
5061         /* Remember that this basic block contains 'new' of an object */
5062         compCurBB->bbFlags |= BBF_HAS_NEWOBJ;
5063
5064         GenTreePtr asg = gtNewTempAssign(impBoxTemp, op1);
5065
5066         GenTreePtr asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
5067
5068         op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
5069         op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
5070         op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2);
5071
5072         if (varTypeIsStruct(exprToBox))
5073         {
5074             assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls));
5075             op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL);
5076         }
5077         else
5078         {
5079             lclTyp = exprToBox->TypeGet();
5080             if (lclTyp == TYP_BYREF)
5081             {
5082                 lclTyp = TYP_I_IMPL;
5083             }
5084             CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass);
5085             if (impIsPrimitive(jitType))
5086             {
5087                 lclTyp = JITtype2varType(jitType);
5088             }
5089             assert(genActualType(exprToBox->TypeGet()) == genActualType(lclTyp) ||
5090                    varTypeIsFloating(lclTyp) == varTypeIsFloating(exprToBox->TypeGet()));
5091             var_types srcTyp = exprToBox->TypeGet();
5092             var_types dstTyp = lclTyp;
5093
5094             if (srcTyp != dstTyp)
5095             {
5096                 assert((varTypeIsFloating(srcTyp) && varTypeIsFloating(dstTyp)) ||
5097                        (varTypeIsIntegral(srcTyp) && varTypeIsIntegral(dstTyp)));
5098                 exprToBox = gtNewCastNode(dstTyp, exprToBox, dstTyp);
5099             }
5100             op1 = gtNewAssignNode(gtNewOperNode(GT_IND, lclTyp, op1), exprToBox);
5101         }
5102
5103         op2 = gtNewLclvNode(impBoxTemp, TYP_REF);
5104         op1 = gtNewOperNode(GT_COMMA, TYP_REF, op1, op2);
5105
5106         // Record that this is a "box" node.
5107         op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt);
5108
5109         // If it is a value class, mark the "box" node.  We can use this information
5110         // to optimise several cases:
5111         //    "box(x) == null" --> false
5112         //    "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod"
5113         //    "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod"
5114
5115         op1->gtFlags |= GTF_BOX_VALUE;
5116         assert(op1->IsBoxedValue());
5117         assert(asg->gtOper == GT_ASG);
5118     }
5119     else
5120     {
5121         // Don't optimize, just call the helper and be done with it
5122
5123         // Ensure that the value class is restored
5124         op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5125         if (op2 == nullptr)
5126         { // compDonotInline()
5127             return;
5128         }
5129
5130         GenTreeArgList* args = gtNewArgList(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true));
5131         op1                  = gtNewHelperCallNode(boxHelper, TYP_REF, GTF_EXCEPT, args);
5132     }
5133
5134     /* Push the result back on the stack, */
5135     /* even if clsHnd is a value class we want the TI_REF */
5136     typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass));
5137     impPushOnStack(op1, tiRetVal);
5138 }
5139
5140 //------------------------------------------------------------------------
5141 // impImportNewObjArray: Build and import `new` of multi-dimensional array
5142 //
5143 // Arguments:
5144 //    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5145 //                     by a call to CEEInfo::resolveToken().
5146 //    pCallInfo - The CORINFO_CALL_INFO that has been initialized
5147 //                by a call to CEEInfo::getCallInfo().
5148 //
5149 // Assumptions:
5150 //    The multi-dimensional array constructor arguments (array dimensions) are
5151 //    pushed on the IL stack on entry to this method.
5152 //
5153 // Notes:
5154 //    Multi-dimensional array constructors are imported as calls to a JIT
5155 //    helper, not as regular calls.
5156
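// For example (an illustrative C# sketch): "new int[2, 3]" pushes the dimensions 2 and 3 on the
// IL stack; on the non-varargs path below this imports roughly as
//     CORINFO_HELP_NEW_MDARR_NONVARARG(arrayClsHnd, 2, &lvaNewObjArrayArgs)
// where the shared lvaNewObjArrayArgs block holds the int32 dimensions { 2, 3 }.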
5157 void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
5158 {
5159     GenTreePtr classHandle = impParentClassTokenToHandle(pResolvedToken);
5160     if (classHandle == nullptr)
5161     { // compDonotInline()
5162         return;
5163     }
5164
5165     assert(pCallInfo->sig.numArgs);
5166
5167     GenTreePtr      node;
5168     GenTreeArgList* args;
5169
5170     //
5171     // There are two different JIT helpers that can be used to allocate
5172     // multi-dimensional arrays:
5173     //
5174     // - CORINFO_HELP_NEW_MDARR - takes the array dimensions as varargs.
5175     //      This variant is deprecated. It should be eventually removed.
5176     //
5177     // - CORINFO_HELP_NEW_MDARR_NONVARARG - takes the array dimensions as
5178     //      pointer to block of int32s. This variant is more portable.
5179     //
5180     // The non-varargs helper is enabled for CoreRT only for now. Enabling this
5181     // unconditionally would require ReadyToRun version bump.
5182     //
5183     CLANG_FORMAT_COMMENT_ANCHOR;
5184
5185 #if COR_JIT_EE_VERSION > 460
5186     if (!opts.IsReadyToRun() || IsTargetAbi(CORINFO_CORERT_ABI))
5187     {
5188         LclVarDsc* newObjArrayArgsVar;
5189
5190         // Reuse the temp used to pass the array dimensions to avoid bloating
5191         // the stack frame in case there are multiple calls to multi-dim array
5192         // constructors within a single method.
5193         if (lvaNewObjArrayArgs == BAD_VAR_NUM)
5194         {
5195             lvaNewObjArrayArgs                       = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs"));
5196             lvaTable[lvaNewObjArrayArgs].lvType      = TYP_BLK;
5197             lvaTable[lvaNewObjArrayArgs].lvExactSize = 0;
5198         }
5199
5200         // Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers
5201         // for our call to CORINFO_HELP_NEW_MDARR_NONVARARG.
5202         lvaTable[lvaNewObjArrayArgs].lvExactSize =
5203             max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32));
5204
5205         // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects
5206         // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments
5207         // to one allocation at a time.
5208         impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray"));
5209
5210         //
5211         // The arguments of the CORINFO_HELP_NEW_MDARR_NONVARARG helper are:
5212         //  - Array class handle
5213         //  - Number of dimension arguments
5214         //  - Pointer to block of int32 dimensions - address  of lvaNewObjArrayArgs temp.
5215         //
5216
5217         node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5218         node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node);
5219
5220         // Pop the dimension arguments from the stack one at a time and store them
5221         // into the lvaNewObjArrayArgs temp.
5222         for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--)
5223         {
5224             GenTreePtr arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT);
5225
5226             GenTreePtr dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5227             dest            = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest);
5228             dest            = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest,
5229                                  new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i));
5230             dest = gtNewOperNode(GT_IND, TYP_INT, dest);
5231
5232             node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node);
5233         }
5234
5235         args = gtNewArgList(node);
5236
5237         // pass number of arguments to the helper
5238         args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5239
5240         args = gtNewListNode(classHandle, args);
5241
5242         node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR_NONVARARG, TYP_REF, 0, args);
5243     }
5244     else
5245 #endif
5246     {
5247         //
5248         // The varargs helper needs the type and method handles as last
5249         // and  last-1 param (this is a cdecl call, so args will be
5250         // pushed in reverse order on the CPU stack)
5251         //
5252
5253         args = gtNewArgList(classHandle);
5254
5255         // pass number of arguments to the helper
5256         args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5257
5258         unsigned argFlags = 0;
5259         args              = impPopList(pCallInfo->sig.numArgs, &argFlags, &pCallInfo->sig, args);
5260
5261         node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, 0, args);
5262
5263         // varargs, so we pop the arguments
5264         node->gtFlags |= GTF_CALL_POP_ARGS;
5265
5266 #ifdef DEBUG
5267         // At the present time we don't track Caller pop arguments
5268         // that have GC references in them
5269         for (GenTreeArgList* temp = args; temp; temp = temp->Rest())
5270         {
5271             assert(temp->Current()->gtType != TYP_REF);
5272         }
5273 #endif
5274     }
5275
5276     node->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5277     node->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass;
5278
5279     // Remember that this basic block contains 'new' of a md array
5280     compCurBB->bbFlags |= BBF_HAS_NEWARRAY;
5281
5282     impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass));
5283 }
5284
5285 GenTreePtr Compiler::impTransformThis(GenTreePtr              thisPtr,
5286                                       CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
5287                                       CORINFO_THIS_TRANSFORM  transform)
5288 {
5289     switch (transform)
5290     {
5291         case CORINFO_DEREF_THIS:
5292         {
5293             GenTreePtr obj = thisPtr;
5294
5295             // This does a LDIND on the obj, which should be a byref pointing to a ref
5296             impBashVarAddrsToI(obj);
5297             assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF);
5298             CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5299
5300             obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj);
5301             // ldind could point anywhere, example a boxed class static int
5302             obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
5303
5304             return obj;
5305         }
5306
5307         case CORINFO_BOX_THIS:
5308         {
5309             // Constraint calls where there might be no
5310             // unboxed entry point require us to implement the call via helper.
5311             // These only occur when a possible target of the call
5312             // may have inherited an implementation of an interface
5313             // method from System.Object or System.ValueType.  The EE does not provide us with
5314             // "unboxed" versions of these methods.
5315
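            // For example (illustrative only): "constrained. MyStruct callvirt Object::ToString()"
            // where MyStruct does not override ToString lands here; the byref 'this' is
            // dereferenced, boxed via impImportAndPushBox below, and the boxed object becomes the
            // 'this' for the virtual call.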
5316             GenTreePtr obj = thisPtr;
5317
5318             assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL);
5319             obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj);
5320             obj->gtFlags |= GTF_EXCEPT;
5321
5322             CorInfoType jitTyp  = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5323             var_types   objType = JITtype2varType(jitTyp);
5324             if (impIsPrimitive(jitTyp))
5325             {
5326                 if (obj->OperIsBlk())
5327                 {
5328                     obj->ChangeOperUnchecked(GT_IND);
5329
5330                     // Obj could point anywhere, example a boxed class static int
5331                     obj->gtFlags |= GTF_IND_TGTANYWHERE;
5332                     obj->gtOp.gtOp2 = nullptr; // must be zero for tree walkers
5333                 }
5334
5335                 obj->gtType = JITtype2varType(jitTyp);
5336                 assert(varTypeIsArithmetic(obj->gtType));
5337             }
5338
5339             // This pushes on the dereferenced byref
5340             // This is then used immediately to box.
5341             impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack());
5342
5343             // This pops off the byref-to-a-value-type remaining on the stack and
5344             // replaces it with a boxed object.
5345             // This is then used as the object to the virtual call immediately below.
5346             impImportAndPushBox(pConstrainedResolvedToken);
5347             if (compDonotInline())
5348             {
5349                 return nullptr;
5350             }
5351
5352             obj = impPopStack().val;
5353             return obj;
5354         }
5355         case CORINFO_NO_THIS_TRANSFORM:
5356         default:
5357             return thisPtr;
5358     }
5359 }
5360
5361 //------------------------------------------------------------------------
5362 // impCanPInvokeInline: examine information from a call to see if the call
5363 // qualifies as an inline pinvoke.
5364 //
5365 // Arguments:
5366 //    block      - block containing the call, or for inlinees, block
5367 //                 containing the call being inlined
5368 //
5369 // Return Value:
5370 //    true if this call qualifies as an inline pinvoke, false otherwise
5371 //
5372 // Notes:
5373 //    Checks basic legality and then a number of ambient conditions
5374 //    where we could pinvoke but choose not to
5375
5376 bool Compiler::impCanPInvokeInline(BasicBlock* block)
5377 {
5378     return impCanPInvokeInlineCallSite(block) && getInlinePInvokeEnabled() && (!opts.compDbgCode) &&
5379            (compCodeOpt() != SMALL_CODE) && (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
5380         ;
5381 }
5382
5383 //------------------------------------------------------------------------
5384 // impCanPInvokeInlineCallSite: basic legality checks using information
5385 // from a call to see if the call qualifies as an inline pinvoke.
5386 //
5387 // Arguments:
5388 //    block      - block containing the call, or for inlinees, block
5389 //                 containing the call being inlined
5390 //
5391 // Return Value:
5392 //    true if this call can legally qualify as an inline pinvoke, false otherwise
5393 //
5394 // Notes:
5395 //    For runtimes that support exception handling interop there are
5396 //    restrictions on using inline pinvoke in handler regions.
5397 //
5398 //    * We have to disable pinvoke inlining inside of filters because
5399 //    in case the main execution (i.e. in the try block) is inside
5400 //    unmanaged code, we cannot reuse the inlined stub (we still need
5401 //    the original state until we are in the catch handler)
5402 //
5403 //    * We disable pinvoke inlining inside handlers since the GSCookie
5404 //    is in the inlined Frame (see
5405 //    CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but
5406 //    this would not protect framelets/return-address of handlers.
5407 //
5408 //    These restrictions are currently also in place for CoreCLR but
5409 //    can be relaxed when coreclr/#8459 is addressed.
5410
5411 bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block)
5412 {
5413 #ifdef _TARGET_AMD64_
5414     // On x64, we disable pinvoke inlining inside of try regions.
5415     // Here is the comment from JIT64 explaining why:
5416     //
5417     //   [VSWhidbey: 611015] - because the jitted code links in the
5418     //   Frame (instead of the stub) we rely on the Frame not being
5419     //   'active' until inside the stub.  This normally happens by the
5420     //   stub setting the return address pointer in the Frame object
5421     //   inside the stub.  On a normal return, the return address
5422     //   pointer is zeroed out so the Frame can be safely re-used, but
5423     //   if an exception occurs, nobody zeros out the return address
5424     //   pointer.  Thus if we re-used the Frame object, it would go
5425     //   'active' as soon as we link it into the Frame chain.
5426     //
5427     //   Technically we only need to disable PInvoke inlining if we're
5428     //   in a handler or if we're in a try body with a catch or
5429     //   filter/except where other non-handler code in this method
5430     //   might run and try to re-use the dirty Frame object.
5431     //
5432     //   A desktop test case where this seems to matter is
5433     //   jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe
5434     const bool inX64Try = block->hasTryIndex();
5435 #else
5436     const bool inX64Try = false;
5437 #endif // _TARGET_AMD64_
5438
5439     return !inX64Try && !block->hasHndIndex();
5440 }
5441
5442 //------------------------------------------------------------------------
5443 // impCheckForPInvokeCall: examine a call to see if it is a pinvoke and, if so,
5444 // whether it can be expressed as an inline pinvoke.
5445 //
5446 // Arguments:
5447 //    call       - tree for the call
5448 //    methHnd    - handle for the method being called (may be null)
5449 //    sig        - signature of the method being called
5450 //    mflags     - method flags for the method being called
5451 //    block      - block containing the call, or for inlinees, block
5452 //                 containing the call being inlined
5453 //
5454 // Notes:
5455 //   Sets GTF_CALL_M_PINVOKE on the call for pinvokes.
5456 //
5457 //   Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the
5458 //   call passes a combination of legality and profitability checks.
5459 //
5460 //   If GTF_CALL_UNMANAGED is set, increments info.compCallUnmanaged
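//
//   Rough illustration (hypothetical C# declaration, not from this codebase):
//   a call to the import below reaches this method with CORINFO_FLG_PINVOKE set;
//   if the legality/profitability checks pass (and the VM reports that no
//   marshaling is required), the call is marked GTF_CALL_UNMANAGED, and since
//   the convention is cdecl it also gets GTF_CALL_POP_ARGS:
//
//       [DllImport("msvcrt.dll", CallingConvention = CallingConvention.Cdecl)]
//       static extern double cos(double x);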
5461
5462 void Compiler::impCheckForPInvokeCall(
5463     GenTreePtr call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block)
5464 {
5465     CorInfoUnmanagedCallConv unmanagedCallConv;
5466
5467     // If VM flagged it as Pinvoke, flag the call node accordingly
5468     if ((mflags & CORINFO_FLG_PINVOKE) != 0)
5469     {
5470         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_PINVOKE;
5471     }
5472
5473     if (methHnd)
5474     {
5475         if ((mflags & CORINFO_FLG_PINVOKE) == 0 || (mflags & CORINFO_FLG_NOSECURITYWRAP) == 0)
5476         {
5477             return;
5478         }
5479
5480         unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd);
5481     }
5482     else
5483     {
5484         CorInfoCallConv callConv = CorInfoCallConv(sig->callConv & CORINFO_CALLCONV_MASK);
5485         if (callConv == CORINFO_CALLCONV_NATIVEVARARG)
5486         {
5487             // Used by the IL Stubs.
5488             callConv = CORINFO_CALLCONV_C;
5489         }
5490         static_assert_no_msg((unsigned)CORINFO_CALLCONV_C == (unsigned)CORINFO_UNMANAGED_CALLCONV_C);
5491         static_assert_no_msg((unsigned)CORINFO_CALLCONV_STDCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_STDCALL);
5492         static_assert_no_msg((unsigned)CORINFO_CALLCONV_THISCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_THISCALL);
5493         unmanagedCallConv = CorInfoUnmanagedCallConv(callConv);
5494
5495         assert(!call->gtCall.gtCallCookie);
5496     }
5497
5498     if (unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_C && unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_STDCALL &&
5499         unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_THISCALL)
5500     {
5501         return;
5502     }
5503     optNativeCallCount++;
5504
5505     if (opts.compMustInlinePInvokeCalli && methHnd == nullptr)
5506     {
5507         // Always inline pinvoke.
5508     }
5509     else
5510     {
5511         // Check legality and profitability.
5512         if (!impCanPInvokeInline(block))
5513         {
5514             return;
5515         }
5516
5517         if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig))
5518         {
5519             return;
5520         }
5521
5522         // Size-speed tradeoff: don't use inline pinvoke at rarely
5523         // executed call sites.  The non-inline version is more
5524         // compact.
5525         if (block->isRunRarely())
5526         {
5527             return;
5528         }
5529     }
5530
5531     JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s", info.compFullName));
5532
5533     call->gtFlags |= GTF_CALL_UNMANAGED;
5534     info.compCallUnmanaged++;
5535
5536     // AMD64 convention is same for native and managed
5537     if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_C)
5538     {
5539         call->gtFlags |= GTF_CALL_POP_ARGS;
5540     }
5541
5542     if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_THISCALL)
5543     {
5544         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL;
5545     }
5546 }
5547
5548 GenTreePtr Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, IL_OFFSETX ilOffset)
5549 {
5550     var_types callRetTyp = JITtype2varType(sig->retType);
5551
5552     /* The function pointer is on top of the stack - It may be a
5553      * complex expression. As it is evaluated after the args,
5554      * it may cause registered args to be spilled. Simply spill it.
5555      */
5556
5557     // Ignore this trivial case.
5558     if (impStackTop().val->gtOper != GT_LCL_VAR)
5559     {
5560         impSpillStackEntry(verCurrentState.esStackDepth - 1,
5561                            BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall"));
5562     }
5563
5564     /* Get the function pointer */
5565
5566     GenTreePtr fptr = impPopStack().val;
5567     assert(genActualType(fptr->gtType) == TYP_I_IMPL);
5568
5569 #ifdef DEBUG
5570     // This temporary must never be converted to a double in stress mode,
5571     // because that can introduce a call to the cast helper after the
5572     // arguments have already been evaluated.
5573
5574     if (fptr->OperGet() == GT_LCL_VAR)
5575     {
5576         lvaTable[fptr->gtLclVarCommon.gtLclNum].lvKeepType = 1;
5577     }
5578 #endif
5579
5580     /* Create the call node */
5581
5582     GenTreePtr call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
5583
5584     call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
5585
5586     return call;
5587 }
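
// For reference, the IL shape handled by impImportIndirectCall is roughly the
// following (illustrative only; identifiers are made up):
//
//     ldarg.0                                  // push the managed args first...
//     ldsfld  native int SomeClass::s_fnPtr    // ...then the function pointer,
//     calli   int32(int32)                     //    which therefore sits on top of the stack
//
// Because the pointer is evaluated after the args, a non-trivial pointer
// expression is spilled to a temp above so that evaluating it cannot disturb
// already-evaluated (registered) args.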
5588
5589 /*****************************************************************************/
5590
5591 void Compiler::impPopArgsForUnmanagedCall(GenTreePtr call, CORINFO_SIG_INFO* sig)
5592 {
5593     assert(call->gtFlags & GTF_CALL_UNMANAGED);
5594
5595     /* Since we push the arguments in reverse order (i.e. right -> left)
5596      * spill any side effects from the stack
5597      *
5598      * Note: if there is only one side effect we do not need to spill it;
5599      *       thus we have to spill all side effects except the last one
5600      */
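    // Illustrative example (hypothetical pinvoke, made-up names): for
    //
    //     NativeAdd(GetA(), GetB());
    //
    // IL evaluates GetA() then GetB(), but on x86 the unmanaged args are
    // consumed right to left, so GetA()'s side effect is spilled to a temp to
    // keep the observable order left to right; GetB(), being the last side
    // effect among the reversed args, can stay on the stack.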
5601
5602     unsigned lastLevelWithSideEffects = UINT_MAX;
5603
5604     unsigned argsToReverse = sig->numArgs;
5605
5606     // For "thiscall", the first argument goes in a register. Since its
5607     // order does not need to be changed, we do not need to spill it
5608
5609     if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
5610     {
5611         assert(argsToReverse);
5612         argsToReverse--;
5613     }
5614
5615 #ifndef _TARGET_X86_
5616     // Don't reverse args on ARM or x64 - first four args always placed in regs in order
5617     argsToReverse = 0;
5618 #endif
5619
5620     for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++)
5621     {
5622         if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF)
5623         {
5624             assert(lastLevelWithSideEffects == UINT_MAX);
5625
5626             impSpillStackEntry(level,
5627                                BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect"));
5628         }
5629         else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT)
5630         {
5631             if (lastLevelWithSideEffects != UINT_MAX)
5632             {
5633                 /* We had a previous side effect - must spill it */
5634                 impSpillStackEntry(lastLevelWithSideEffects,
5635                                    BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect"));
5636
5637                 /* Record the level for the current side effect in case we will spill it */
5638                 lastLevelWithSideEffects = level;
5639             }
5640             else
5641             {
5642                 /* This is the first side effect encountered - record its level */
5643
5644                 lastLevelWithSideEffects = level;
5645             }
5646         }
5647     }
5648
5649     /* The argument list is now "clean" - no out-of-order side effects
5650      * Pop the argument list in reverse order */
5651
5652     unsigned   argFlags = 0;
5653     GenTreePtr args     = call->gtCall.gtCallArgs =
5654         impPopRevList(sig->numArgs, &argFlags, sig, sig->numArgs - argsToReverse);
5655
5656     if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
5657     {
5658         GenTreePtr thisPtr = args->Current();
5659         impBashVarAddrsToI(thisPtr);
5660         assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF);
5661     }
5662
5663     if (args)
5664     {
5665         call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5666     }
5667 }
5668
5669 //------------------------------------------------------------------------
5670 // impInitClass: Build a node to initialize the class before accessing the
5671 //               field if necessary
5672 //
5673 // Arguments:
5674 //    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5675 //                     by a call to CEEInfo::resolveToken().
5676 //
5677 // Return Value: If needed, a pointer to the node that will perform the class
5678 //               initialization.  Otherwise, nullptr.
5679 //
5680
5681 GenTreePtr Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken)
5682 {
5683     CorInfoInitClassResult initClassResult =
5684         info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle);
5685
5686     if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0)
5687     {
5688         return nullptr;
5689     }
5690     BOOL runtimeLookup;
5691
5692     GenTreePtr node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup);
5693
5694     if (node == nullptr)
5695     {
5696         assert(compDonotInline());
5697         return nullptr;
5698     }
5699
5700     if (runtimeLookup)
5701     {
5702         node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, 0, gtNewArgList(node));
5703     }
5704     else
5705     {
5706             // Call the shared non-GC static helper, as it's the fastest
5707         node = fgGetSharedCCtor(pResolvedToken->hClass);
5708     }
5709
5710     return node;
5711 }
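
// Illustrative only (made-up scenario): for the first access to a static field of
// a class whose cctor has not provably run yet, the node returned here is either
//
//     CALL help CORINFO_HELP_INITCLASS(<class handle>)   // runtime-lookup case
//
// or the cheaper shared cctor helper from fgGetSharedCCtor; the caller prepends
// it to the actual field access.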
5712
5713 GenTreePtr Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp)
5714 {
5715     GenTreePtr op1 = nullptr;
5716
5717     switch (lclTyp)
5718     {
5719         int     ival;
5720         __int64 lval;
5721         double  dval;
5722
5723         case TYP_BOOL:
5724             ival = *((bool*)fldAddr);
5725             goto IVAL_COMMON;
5726
5727         case TYP_BYTE:
5728             ival = *((signed char*)fldAddr);
5729             goto IVAL_COMMON;
5730
5731         case TYP_UBYTE:
5732             ival = *((unsigned char*)fldAddr);
5733             goto IVAL_COMMON;
5734
5735         case TYP_SHORT:
5736             ival = *((short*)fldAddr);
5737             goto IVAL_COMMON;
5738
5739         case TYP_CHAR:
5740         case TYP_USHORT:
5741             ival = *((unsigned short*)fldAddr);
5742             goto IVAL_COMMON;
5743
5744         case TYP_UINT:
5745         case TYP_INT:
5746             ival = *((int*)fldAddr);
5747         IVAL_COMMON:
5748             op1 = gtNewIconNode(ival);
5749             break;
5750
5751         case TYP_LONG:
5752         case TYP_ULONG:
5753             lval = *((__int64*)fldAddr);
5754             op1  = gtNewLconNode(lval);
5755             break;
5756
5757         case TYP_FLOAT:
5758             dval = *((float*)fldAddr);
5759             op1  = gtNewDconNode(dval);
5760 #if !FEATURE_X87_DOUBLES
5761             // X87 stack doesn't differentiate between float/double
5762             // so R4 is treated as R8, but everybody else does
5763             op1->gtType = TYP_FLOAT;
5764 #endif // FEATURE_X87_DOUBLES
5765             break;
5766
5767         case TYP_DOUBLE:
5768             dval = *((double*)fldAddr);
5769             op1  = gtNewDconNode(dval);
5770             break;
5771
5772         default:
5773             assert(!"Unexpected lclTyp");
5774             break;
5775     }
5776
5777     return op1;
5778 }
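
// For example (illustrative values only): if lclTyp is TYP_INT and the readonly
// static's jit-time value at fldAddr is 42, this returns gtNewIconNode(42); for
// TYP_DOUBLE with value 2.5 it returns gtNewDconNode(2.5).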
5779
5780 GenTreePtr Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
5781                                                 CORINFO_ACCESS_FLAGS    access,
5782                                                 CORINFO_FIELD_INFO*     pFieldInfo,
5783                                                 var_types               lclTyp)
5784 {
5785     GenTreePtr op1;
5786
5787     switch (pFieldInfo->fieldAccessor)
5788     {
5789         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
5790         {
5791             assert(!compIsForInlining());
5792
5793             // We first call a special helper to get the statics base pointer
5794             op1 = impParentClassTokenToHandle(pResolvedToken);
5795
5796             // compIsForInlining() is false so we should never get NULL here
5797             assert(op1 != nullptr);
5798
5799             var_types type = TYP_BYREF;
5800
5801             switch (pFieldInfo->helper)
5802             {
5803                 case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
5804                     type = TYP_I_IMPL;
5805                     break;
5806                 case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
5807                 case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
5808                 case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
5809                     break;
5810                 default:
5811                     assert(!"unknown generic statics helper");
5812                     break;
5813             }
5814
5815             op1 = gtNewHelperCallNode(pFieldInfo->helper, type, 0, gtNewArgList(op1));
5816
5817             FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5818             op1              = gtNewOperNode(GT_ADD, type, op1,
5819                                 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
5820         }
5821         break;
5822
5823         case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
5824         {
5825 #ifdef FEATURE_READYTORUN_COMPILER
5826             if (opts.IsReadyToRun())
5827             {
5828                 unsigned callFlags = 0;
5829
5830                 if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
5831                 {
5832                     callFlags |= GTF_CALL_HOISTABLE;
5833                 }
5834
5835                 op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF, callFlags);
5836
5837                 op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
5838             }
5839             else
5840 #endif
5841             {
5842                 op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper);
5843             }
5844
5845             {
5846                 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5847                 op1              = gtNewOperNode(GT_ADD, op1->TypeGet(), op1,
5848                                     new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, pFieldInfo->offset, fs));
5849             }
5850             break;
5851         }
5852 #if COR_JIT_EE_VERSION > 460
5853         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
5854         {
5855 #ifdef FEATURE_READYTORUN_COMPILER
5856             noway_assert(opts.IsReadyToRun());
5857             CORINFO_LOOKUP_KIND kind = info.compCompHnd->getLocationOfThisType(info.compMethodHnd);
5858             assert(kind.needsRuntimeLookup);
5859
5860             GenTreePtr      ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);
5861             GenTreeArgList* args    = gtNewArgList(ctxTree);
5862
5863             unsigned callFlags = 0;
5864
5865             if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
5866             {
5867                 callFlags |= GTF_CALL_HOISTABLE;
5868             }
5869             var_types type = TYP_BYREF;
5870             op1            = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, callFlags, args);
5871
5872             op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
5873             FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5874             op1              = gtNewOperNode(GT_ADD, type, op1,
5875                                 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
5876 #else
5877             unreached();
5878 #endif // FEATURE_READYTORUN_COMPILER
5879         }
5880         break;
5881 #endif // COR_JIT_EE_VERSION > 460
5882         default:
5883         {
5884             if (!(access & CORINFO_ACCESS_ADDRESS))
5885             {
5886                 // In future, it may be better to just create the right tree here instead of folding it later.
5887                 op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField);
5888
5889                 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
5890                 {
5891                     op1->gtType = TYP_REF; // points at boxed object
5892                     FieldSeqNode* firstElemFldSeq =
5893                         GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
5894                     op1 =
5895                         gtNewOperNode(GT_ADD, TYP_BYREF, op1,
5896                                       new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), firstElemFldSeq));
5897
5898                     if (varTypeIsStruct(lclTyp))
5899                     {
5900                         // Constructor adds GTF_GLOB_REF.  Note that this is *not* GTF_EXCEPT.
5901                         op1 = gtNewObjNode(pFieldInfo->structType, op1);
5902                     }
5903                     else
5904                     {
5905                         op1 = gtNewOperNode(GT_IND, lclTyp, op1);
5906                         op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING;
5907                     }
5908                 }
5909
5910                 return op1;
5911             }
5912             else
5913             {
5914                 void** pFldAddr = nullptr;
5915                 void*  fldAddr  = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr);
5916
5917                 FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5918
5919                 /* Create the data member node */
5920                 if (pFldAddr == nullptr)
5921                 {
5922                     op1 = gtNewIconHandleNode((size_t)fldAddr, GTF_ICON_STATIC_HDL, fldSeq);
5923                 }
5924                 else
5925                 {
5926                     op1 = gtNewIconHandleNode((size_t)pFldAddr, GTF_ICON_STATIC_HDL, fldSeq);
5927
5928                     // There are two cases here: either the static is RVA-based,
5929                     // in which case the type of the FIELD node is not a GC type
5930                     // and the handle to the RVA is a TYP_I_IMPL; or the FIELD node is
5931                     // a GC type and the handle to it is a TYP_BYREF into the GC heap,
5932                     // because handles to statics now go into the large object heap.
5933
5934                     var_types handleTyp = (var_types)(varTypeIsGC(lclTyp) ? TYP_BYREF : TYP_I_IMPL);
5935                     op1                 = gtNewOperNode(GT_IND, handleTyp, op1);
5936                     op1->gtFlags |= GTF_IND_INVARIANT | GTF_IND_NONFAULTING;
5937                 }
5938             }
5939             break;
5940         }
5941     }
5942
5943     if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
5944     {
5945         op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
5946
5947         FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
5948
5949         op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
5950                             new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), fldSeq));
5951     }
5952
5953     if (!(access & CORINFO_ACCESS_ADDRESS))
5954     {
5955         op1 = gtNewOperNode(GT_IND, lclTyp, op1);
5956         op1->gtFlags |= GTF_GLOB_REF;
5957     }
5958
5959     return op1;
5960 }
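
// Note on CORINFO_FLG_FIELD_STATIC_IN_HEAP (illustrative shape only): a static
// field of struct type lives inside a boxed object on the GC heap, so the access
// built above has the form
//
//     IND/OBJ( ADD( <ref to boxed object>, sizeof(void*) ) )
//
// i.e. the payload starts one pointer past the method table pointer of the box.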
5961
5962 // In general try to call this before most of the verification work.  Most people expect the access
5963 // exceptions before the verification exceptions.  If you do this after, that usually doesn't happen.  Turns
5964 // out if you can't access something we also think that you're unverifiable for other reasons.
5965 void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
5966 {
5967     if (result != CORINFO_ACCESS_ALLOWED)
5968     {
5969         impHandleAccessAllowedInternal(result, helperCall);
5970     }
5971 }
5972
5973 void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
5974 {
5975     switch (result)
5976     {
5977         case CORINFO_ACCESS_ALLOWED:
5978             break;
5979         case CORINFO_ACCESS_ILLEGAL:
5980             // if we're verifying, then we need to reject the illegal access to ensure that we don't think the
5981             // method is verifiable.  Otherwise, delay the exception to runtime.
5982             if (compIsForImportOnly())
5983             {
5984                 info.compCompHnd->ThrowExceptionForHelper(helperCall);
5985             }
5986             else
5987             {
5988                 impInsertHelperCall(helperCall);
5989             }
5990             break;
5991         case CORINFO_ACCESS_RUNTIME_CHECK:
5992             impInsertHelperCall(helperCall);
5993             break;
5994     }
5995 }
5996
5997 void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo)
5998 {
5999     // Construct the argument list
6000     GenTreeArgList* args = nullptr;
6001     assert(helperInfo->helperNum != CORINFO_HELP_UNDEF);
6002     for (unsigned i = helperInfo->numArgs; i > 0; --i)
6003     {
6004         const CORINFO_HELPER_ARG& helperArg  = helperInfo->args[i - 1];
6005         GenTreePtr                currentArg = nullptr;
6006         switch (helperArg.argType)
6007         {
6008             case CORINFO_HELPER_ARG_TYPE_Field:
6009                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
6010                     info.compCompHnd->getFieldClass(helperArg.fieldHandle));
6011                 currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle);
6012                 break;
6013             case CORINFO_HELPER_ARG_TYPE_Method:
6014                 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle);
6015                 currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle);
6016                 break;
6017             case CORINFO_HELPER_ARG_TYPE_Class:
6018                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle);
6019                 currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle);
6020                 break;
6021             case CORINFO_HELPER_ARG_TYPE_Module:
6022                 currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle);
6023                 break;
6024             case CORINFO_HELPER_ARG_TYPE_Const:
6025                 currentArg = gtNewIconNode(helperArg.constant);
6026                 break;
6027             default:
6028                 NO_WAY("Illegal helper arg type");
6029         }
6030         args = (currentArg == nullptr) ? gtNewArgList(currentArg) : gtNewListNode(currentArg, args);
6031     }
6032
6033     /* TODO-Review:
6034      * Mark as CSE'able, and hoistable.  Consider marking hoistable unless you're in the inlinee.
6035      * Also, consider sticking this in the first basic block.
6036      */
6037     GenTreePtr callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, GTF_EXCEPT, args);
6038     impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6039 }
6040
6041 void Compiler::impInsertCalloutForDelegate(CORINFO_METHOD_HANDLE callerMethodHnd,
6042                                            CORINFO_METHOD_HANDLE calleeMethodHnd,
6043                                            CORINFO_CLASS_HANDLE  delegateTypeHnd)
6044 {
6045 #ifdef FEATURE_CORECLR
6046     if (!info.compCompHnd->isDelegateCreationAllowed(delegateTypeHnd, calleeMethodHnd))
6047     {
6048         // Call the JIT_DelegateSecurityCheck helper before calling the actual function.
6049         // This helper throws an exception if the CLR host disallows the call.
6050
6051         GenTreePtr helper = gtNewHelperCallNode(CORINFO_HELP_DELEGATE_SECURITY_CHECK, TYP_VOID, GTF_EXCEPT,
6052                                                 gtNewArgList(gtNewIconEmbClsHndNode(delegateTypeHnd),
6053                                                              gtNewIconEmbMethHndNode(calleeMethodHnd)));
6054         // Append the callout statement
6055         impAppendTree(helper, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6056     }
6057 #endif // FEATURE_CORECLR
6058 }
6059
6060 // Checks whether the return types of caller and callee are compatible
6061 // so that the callee can be tail called. Note that here we don't check
6062 // compatibility in the IL Verifier sense, but rather whether the return type
6063 // sizes are equal and the values are returned in the same return register.
6064 bool Compiler::impTailCallRetTypeCompatible(var_types            callerRetType,
6065                                             CORINFO_CLASS_HANDLE callerRetTypeClass,
6066                                             var_types            calleeRetType,
6067                                             CORINFO_CLASS_HANDLE calleeRetTypeClass)
6068 {
6069     // Note that we cannot relax this condition with genActualType() as the
6070     // calling convention dictates that the caller of a function with a small
6071     // typed return value is responsible for normalizing the return val.
6072     if (callerRetType == calleeRetType)
6073     {
6074         return true;
6075     }
6076
6077 #if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
6078     // Jit64 compat:
6079     if (callerRetType == TYP_VOID)
6080     {
6081         // This needs to be allowed to support the following IL pattern that Jit64 allows:
6082         //     tail.call
6083         //     pop
6084         //     ret
6085         //
6086         // Note that the above IL pattern is not valid as per IL verification rules.
6087         // Therefore, only full trust code can take advantage of this pattern.
6088         return true;
6089     }
6090
6091     // These checks return true if the return value type sizes are the same and
6092     // get returned in the same return register i.e. caller doesn't need to normalize
6093     // return value. Some of the tail calls permitted by below checks would have
6094     // been rejected by IL Verifier before we reached here.  Therefore, only full
6095     // trust code can make those tail calls.
6096     unsigned callerRetTypeSize = 0;
6097     unsigned calleeRetTypeSize = 0;
6098     bool     isCallerRetTypMBEnreg =
6099         VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize, true);
6100     bool isCalleeRetTypMBEnreg =
6101         VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize, true);
6102
6103     if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg)
6104     {
6105         return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
6106     }
6107 #endif // _TARGET_AMD64_ || _TARGET_ARM64_
6108
6109     return false;
6110 }
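
// Illustrative outcomes (hypothetical signatures):
//
//     caller returns int,  callee returns int    -> compatible (identical types)
//     caller returns void, callee returns int    -> compatible on AMD64/ARM64 only
//                                                   (Jit64-compat call+pop+ret pattern)
//     caller returns int,  callee returns short  -> not compatible; the caller's caller
//                                                   expects a normalized (widened) result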
6111
6112 // For prefixFlags
6113 enum
6114 {
6115     PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix
6116     PREFIX_TAILCALL_IMPLICIT =
6117         0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix
6118     PREFIX_TAILCALL    = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT),
6119     PREFIX_VOLATILE    = 0x00000100,
6120     PREFIX_UNALIGNED   = 0x00001000,
6121     PREFIX_CONSTRAINED = 0x00010000,
6122     PREFIX_READONLY    = 0x00100000
6123 };
6124
6125 /********************************************************************************
6126  *
6127  * Returns true if the current opcode and the opcodes following it correspond
6128  * to a supported tail call IL pattern.
6129  *
6130  */
6131 bool Compiler::impIsTailCallILPattern(bool        tailPrefixed,
6132                                       OPCODE      curOpcode,
6133                                       const BYTE* codeAddrOfNextOpcode,
6134                                       const BYTE* codeEnd,
6135                                       bool        isRecursive,
6136                                       bool*       isCallPopAndRet /* = nullptr */)
6137 {
6138     // Bail out if the current opcode is not a call.
6139     if (!impOpcodeIsCallOpcode(curOpcode))
6140     {
6141         return false;
6142     }
6143
6144 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6145     // If shared ret tail opt is not enabled, we will enable
6146     // it for recursive methods.
6147     if (isRecursive)
6148 #endif
6149     {
6150         // We can actually handle the ret being in a fallthrough block, as long as that is the only remaining part
6151         // of the sequence. Make sure we don't go past the end of the IL, however.
6152         codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize);
6153     }
6154
6155     // Bail out if there is no next opcode after call
6156     if (codeAddrOfNextOpcode >= codeEnd)
6157     {
6158         return false;
6159     }
6160
6161     // Scan the opcodes to look for the following IL patterns if either
6162     //   i) the call is not tail prefixed (i.e. implicit tail call) or
6163     //  ii) if tail prefixed, IL verification is not needed for the method.
6164     //
6165     // Only in the above two cases we can allow the below tail call patterns
6166     // violating ECMA spec.
6167     //
6168     // Pattern1:
6169     //       call
6170     //       nop*
6171     //       ret
6172     //
6173     // Pattern2:
6174     //       call
6175     //       nop*
6176     //       pop
6177     //       nop*
6178     //       ret
6179     int    cntPop = 0;
6180     OPCODE nextOpcode;
6181
6182 #ifdef _TARGET_AMD64_
6183     do
6184     {
6185         nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6186         codeAddrOfNextOpcode += sizeof(__int8);
6187     } while ((codeAddrOfNextOpcode < codeEnd) &&         // Haven't reached end of method
6188              (!tailPrefixed || !tiVerificationNeeded) && // Not ".tail" prefixed or method requires no IL verification
6189              ((nextOpcode == CEE_NOP) || ((nextOpcode == CEE_POP) && (++cntPop == 1)))); // Next opcode = nop or exactly
6190                                                                                          // one pop seen so far.
6191 #else
6192     nextOpcode          = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6193 #endif
6194
6195     if (isCallPopAndRet)
6196     {
6197         // Allow call+pop+ret to be tail call optimized if caller ret type is void
6198         *isCallPopAndRet = (nextOpcode == CEE_RET) && (cntPop == 1);
6199     }
6200
6201 #ifdef _TARGET_AMD64_
6202     // Jit64 Compat:
6203     // Tail call IL pattern could be either of the following
6204     // 1) call/callvirt/calli + ret
6205     // 2) call/callvirt/calli + pop + ret in a method returning void.
6206     return (nextOpcode == CEE_RET) && ((cntPop == 0) || ((cntPop == 1) && (info.compRetType == TYP_VOID)));
6207 #else //!_TARGET_AMD64_
6208     return (nextOpcode == CEE_RET) && (cntPop == 0);
6209 #endif
6210 }
6211
6212 /*****************************************************************************
6213  *
6214  * Determine whether the call could be converted to an implicit tail call
6215  *
6216  */
6217 bool Compiler::impIsImplicitTailCallCandidate(
6218     OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive)
6219 {
6220
6221 #if FEATURE_TAILCALL_OPT
6222     if (!opts.compTailCallOpt)
6223     {
6224         return false;
6225     }
6226
6227     if (opts.compDbgCode || opts.MinOpts())
6228     {
6229         return false;
6230     }
6231
6232     // must not be tail prefixed
6233     if (prefixFlags & PREFIX_TAILCALL_EXPLICIT)
6234     {
6235         return false;
6236     }
6237
6238 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6239     // The block containing the call must be marked as BBJ_RETURN.
6240     // We allow shared ret tail call optimization on recursive calls even under
6241     // !FEATURE_TAILCALL_OPT_SHARED_RETURN.
6242     if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN))
6243         return false;
6244 #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN
6245
6246     // must be call+ret or call+pop+ret
6247     if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive))
6248     {
6249         return false;
6250     }
6251
6252     return true;
6253 #else
6254     return false;
6255 #endif // FEATURE_TAILCALL_OPT
6256 }
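
// Illustrative only (hypothetical C#): with tail call opt enabled
// (opts.compTailCallOpt) and no "tail." prefix, a call such as
//
//     static int Wrap(int x) { return Helper(x); }   // call immediately followed by ret
//
// is an implicit tail call candidate, provided the containing block is
// BBJ_RETURN (or the call is recursive) and we are not compiling debuggable
// code or under MinOpts.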
6257
6258 //------------------------------------------------------------------------
6259 // impImportCall: import a call-inspiring opcode
6260 //
6261 // Arguments:
6262 //    opcode                    - opcode that inspires the call
6263 //    pResolvedToken            - resolved token for the call target
6264 //    pConstrainedResolvedToken - resolved constraint token (or nullptr)
6265 //    newobjThis                - tree for this pointer or uninitialized newobj temp (or nullptr)
6266 //    prefixFlags               - IL prefix flags for the call
6267 //    callInfo                  - EE supplied info for the call
6268 //    rawILOffset               - IL offset of the opcode
6269 //
6270 // Returns:
6271 //    Type of the call's return value.
6272 //
6273 // Notes:
6274 //    opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ.
6275 //
6276 //    For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated
6277 //    uninitialized object.
6278
6279 #ifdef _PREFAST_
6280 #pragma warning(push)
6281 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
6282 #endif
6283
6284 var_types Compiler::impImportCall(OPCODE                  opcode,
6285                                   CORINFO_RESOLVED_TOKEN* pResolvedToken,
6286                                   CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
6287                                   GenTreePtr              newobjThis,
6288                                   int                     prefixFlags,
6289                                   CORINFO_CALL_INFO*      callInfo,
6290                                   IL_OFFSET               rawILOffset)
6291 {
6292     assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI);
6293
6294     IL_OFFSETX             ilOffset                       = impCurILOffset(rawILOffset, true);
6295     var_types              callRetTyp                     = TYP_COUNT;
6296     CORINFO_SIG_INFO*      sig                            = nullptr;
6297     CORINFO_METHOD_HANDLE  methHnd                        = nullptr;
6298     CORINFO_CLASS_HANDLE   clsHnd                         = nullptr;
6299     unsigned               clsFlags                       = 0;
6300     unsigned               mflags                         = 0;
6301     unsigned               argFlags                       = 0;
6302     GenTreePtr             call                           = nullptr;
6303     GenTreeArgList*        args                           = nullptr;
6304     CORINFO_THIS_TRANSFORM constraintCallThisTransform    = CORINFO_NO_THIS_TRANSFORM;
6305     CORINFO_CONTEXT_HANDLE exactContextHnd                = nullptr;
6306     BOOL                   exactContextNeedsRuntimeLookup = FALSE;
6307     bool                   canTailCall                    = true;
6308     const char*            szCanTailCallFailReason        = nullptr;
6309     int                    tailCall                       = prefixFlags & PREFIX_TAILCALL;
6310     bool                   readonlyCall                   = (prefixFlags & PREFIX_READONLY) != 0;
6311
6312     // Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could
6313     // do that before tailcalls, but that is probably not the intended
6314     // semantic. So just disallow tailcalls from synchronized methods.
6315     // Also, popping arguments in a varargs function is more work and NYI.
6316     // If we have a security object, we have to keep our frame around for callers
6317     // to see any imperative security.
6318     if (info.compFlags & CORINFO_FLG_SYNCH)
6319     {
6320         canTailCall             = false;
6321         szCanTailCallFailReason = "Caller is synchronized";
6322     }
6323 #if !FEATURE_FIXED_OUT_ARGS
6324     else if (info.compIsVarArgs)
6325     {
6326         canTailCall             = false;
6327         szCanTailCallFailReason = "Caller is varargs";
6328     }
6329 #endif // FEATURE_FIXED_OUT_ARGS
6330     else if (opts.compNeedSecurityCheck)
6331     {
6332         canTailCall             = false;
6333         szCanTailCallFailReason = "Caller requires a security check.";
6334     }
6335
6336     // We only need to cast the return value of pinvoke inlined calls that return small types
6337
6338     // TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop
6339     // widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there.
6340     // The existing x64 JIT doesn't bother widening all types to int, so we have to assume for
6341     // the time being that the callee might be compiled by the other JIT and thus the return
6342     // value will need to be widened by us (or not widened at all...)
6343
6344     // ReadyToRun code sticks with default calling convention that does not widen small return types.
6345
6346     bool checkForSmallType  = opts.IsJit64Compat() || opts.IsReadyToRun();
6347     bool bIntrinsicImported = false;
6348
6349     CORINFO_SIG_INFO calliSig;
6350     GenTreeArgList*  extraArg = nullptr;
6351
6352     /*-------------------------------------------------------------------------
6353      * First create the call node
6354      */
6355
6356     if (opcode == CEE_CALLI)
6357     {
6358         /* Get the call site sig */
6359         eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &calliSig);
6360
6361         callRetTyp = JITtype2varType(calliSig.retType);
6362
6363         call = impImportIndirectCall(&calliSig, ilOffset);
6364
6365         // We don't know the target method, so we have to infer the flags, or
6366         // assume the worst-case.
6367         mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
6368
6369 #ifdef DEBUG
6370         if (verbose)
6371         {
6372             unsigned structSize =
6373                 (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0;
6374             printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
6375                    opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
6376         }
6377 #endif
6378         // This should be checked in impImportBlockCode.
6379         assert(!compIsForInlining() || !(impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY));
6380
6381         sig = &calliSig;
6382
6383 #ifdef DEBUG
6384         // We cannot lazily obtain the signature of a CALLI call because it has no method
6385         // handle that we can use, so we need to save its full call signature here.
6386         assert(call->gtCall.callSig == nullptr);
6387         call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
6388         *call->gtCall.callSig = calliSig;
6389 #endif // DEBUG
6390     }
6391     else // (opcode != CEE_CALLI)
6392     {
6393         CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Count;
6394
6395         // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to
6396         // supply the instantiation parameters necessary to make direct calls to underlying
6397         // shared generic code, rather than calling through instantiating stubs.  If the
6398         // returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT
6399         // must indeed pass an instantiation parameter.
6400
6401         methHnd = callInfo->hMethod;
6402
6403         sig        = &(callInfo->sig);
6404         callRetTyp = JITtype2varType(sig->retType);
6405
6406         mflags = callInfo->methodFlags;
6407
6408 #ifdef DEBUG
6409         if (verbose)
6410         {
6411             unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0;
6412             printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
6413                    opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
6414         }
6415 #endif
6416         if (compIsForInlining())
6417         {
6418             /* Does this call site have security boundary restrictions? */
6419
6420             if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
6421             {
6422                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
6423                 return callRetTyp;
6424             }
6425
6426             /* Does the inlinee need a security check token on the frame */
6427
6428             if (mflags & CORINFO_FLG_SECURITYCHECK)
6429             {
6430                 compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
6431                 return callRetTyp;
6432             }
6433
6434             /* Does the inlinee use StackCrawlMark */
6435
6436             if (mflags & CORINFO_FLG_DONT_INLINE_CALLER)
6437             {
6438                 compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK);
6439                 return callRetTyp;
6440             }
6441
6442             /* For now ignore delegate invoke */
6443
6444             if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
6445             {
6446                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_DELEGATE_INVOKE);
6447                 return callRetTyp;
6448             }
6449
6450             /* For now ignore varargs */
6451             if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6452             {
6453                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS);
6454                 return callRetTyp;
6455             }
6456
6457             if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
6458             {
6459                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
6460                 return callRetTyp;
6461             }
6462
6463             if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT))
6464             {
6465                 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL);
6466                 return callRetTyp;
6467             }
6468         }
6469
6470         clsHnd = pResolvedToken->hClass;
6471
6472         clsFlags = callInfo->classFlags;
6473
6474 #ifdef DEBUG
6475         // If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute.
6476
6477         // This recognition should really be done by knowing the methHnd of the relevant Mark method(s).
6478         // These should be in mscorlib.h, and available through a JIT/EE interface call.
6479         const char* modName;
6480         const char* className;
6481         const char* methodName;
6482         if ((className = eeGetClassName(clsHnd)) != nullptr &&
6483             strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 &&
6484             (methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0)
6485         {
6486             return impImportJitTestLabelMark(sig->numArgs);
6487         }
6488 #endif // DEBUG
6489
6490         // <NICE> Factor this into getCallInfo </NICE>
6491         if ((mflags & CORINFO_FLG_INTRINSIC) && !pConstrainedResolvedToken)
6492         {
6493             call = impIntrinsic(clsHnd, methHnd, sig, pResolvedToken->token, readonlyCall,
6494                                 (canTailCall && (tailCall != 0)), &intrinsicID);
6495
6496             if (call != nullptr)
6497             {
6498                 assert(!(mflags & CORINFO_FLG_VIRTUAL) || (mflags & CORINFO_FLG_FINAL) ||
6499                        (clsFlags & CORINFO_FLG_FINAL));
6500
6501 #ifdef FEATURE_READYTORUN_COMPILER
6502                 if (call->OperGet() == GT_INTRINSIC)
6503                 {
6504                     if (opts.IsReadyToRun())
6505                     {
6506                         noway_assert(callInfo->kind == CORINFO_CALL);
6507                         call->gtIntrinsic.gtEntryPoint = callInfo->codePointerLookup.constLookup;
6508                     }
6509                     else
6510                     {
6511                         call->gtIntrinsic.gtEntryPoint.addr = nullptr;
6512                     }
6513                 }
6514 #endif
6515
6516                 bIntrinsicImported = true;
6517                 goto DONE_CALL;
6518             }
6519         }
6520
6521 #ifdef FEATURE_SIMD
6522         if (featureSIMD)
6523         {
6524             call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, pResolvedToken->token);
6525             if (call != nullptr)
6526             {
6527                 bIntrinsicImported = true;
6528                 goto DONE_CALL;
6529             }
6530         }
6531 #endif // FEATURE_SIMD
6532
6533         if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT))
6534         {
6535             NO_WAY("Virtual call to a function added via EnC is not supported");
6536             goto DONE_CALL;
6537         }
6538
6539         if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT &&
6540             (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6541             (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG)
6542         {
6543             BADCODE("Bad calling convention");
6544         }
6545
6546         //-------------------------------------------------------------------------
6547         //  Construct the call node
6548         //
6549         // Work out what sort of call we're making.
6550         // Dispense with virtual calls implemented via LDVIRTFTN immediately.
6551
6552         constraintCallThisTransform = callInfo->thisTransform;
6553
6554         exactContextHnd                = callInfo->contextHandle;
6555         exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup;
6556
6557         // A recursive call is treated as a loop back to the beginning of the method.
6558         if (methHnd == info.compMethodHnd)
6559         {
6560 #ifdef DEBUG
6561             if (verbose)
6562             {
6563                 JITDUMP("\nFound recursive call in the method. Mark BB%02u to BB%02u as having a backward branch.\n",
6564                         fgFirstBB->bbNum, compCurBB->bbNum);
6565             }
6566 #endif
6567             fgMarkBackwardJump(fgFirstBB, compCurBB);
6568         }
6569
6570         switch (callInfo->kind)
6571         {
6572
6573             case CORINFO_VIRTUALCALL_STUB:
6574             {
6575                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6576                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6577                 if (callInfo->stubLookup.lookupKind.needsRuntimeLookup)
6578                 {
6579
6580                     if (compIsForInlining())
6581                     {
6582                         // Don't import runtime lookups when inlining
6583                         // Inlining has to be aborted in such a case
6584                         /* XXX Fri 3/20/2009
6585                          * By the way, this would never succeed.  If the handle lookup is into the generic
6586                          * dictionary for a candidate, you'll generate different dictionary offsets and the
6587                          * inlined code will crash.
6588                          *
6589                          * To anyone code reviewing this, when could this ever succeed in the future?  It'll
6590                          * always have a handle lookup.  These lookups are safe intra-module, but we're just
6591                          * failing here.
6592                          */
6593                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE);
6594                         return callRetTyp;
6595                     }
6596
6597                     GenTreePtr stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd);
6598                     assert(!compDonotInline());
6599
6600                     // This is the rough code to set up an indirect stub call
6601                     assert(stubAddr != nullptr);
6602
6603                     // The stubAddr may be a
6604                     // complex expression. As it is evaluated after the args,
6605                     // it may cause registered args to be spilled. Simply spill it.
6606
6607                     unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup"));
6608                     impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_ALL);
6609                     stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6610
6611                     // Create the actual call node
6612
6613                     assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6614                            (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6615
6616                     call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr);
6617
6618                     call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT);
6619                     call->gtFlags |= GTF_CALL_VIRT_STUB;
6620
6621 #ifdef _TARGET_X86_
6622                     // No tailcalls allowed for these yet...
6623                     canTailCall             = false;
6624                     szCanTailCallFailReason = "VirtualCall with runtime lookup";
6625 #endif
6626                 }
6627                 else
6628                 {
6629                     // OK, the stub is available at compile time.
6630
6631                     call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6632                     call->gtCall.gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr;
6633                     call->gtFlags |= GTF_CALL_VIRT_STUB;
6634                     assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE);
6635                     if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE)
6636                     {
6637                         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
6638                     }
6639                 }
6640
6641 #ifdef FEATURE_READYTORUN_COMPILER
6642                 if (opts.IsReadyToRun())
6643                 {
6644                     // Null check is sometimes needed for ready to run to handle
6645                     // non-virtual <-> virtual changes between versions
6646                     if (callInfo->nullInstanceCheck)
6647                     {
6648                         call->gtFlags |= GTF_CALL_NULLCHECK;
6649                     }
6650                 }
6651 #endif
6652
6653                 break;
6654             }
6655
6656             case CORINFO_VIRTUALCALL_VTABLE:
6657             {
6658                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6659                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6660                 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6661                 call->gtFlags |= GTF_CALL_VIRT_VTABLE;
6662                 break;
6663             }
6664
6665             case CORINFO_VIRTUALCALL_LDVIRTFTN:
6666             {
6667                 if (compIsForInlining())
6668                 {
6669                     compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN);
6670                     return callRetTyp;
6671                 }
6672
6673                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6674                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6675                 // OK, we've been told to call via LDVIRTFTN, so just
6676                 // take the call now....
6677
6678                 args = impPopList(sig->numArgs, &argFlags, sig);
6679
6680                 GenTreePtr thisPtr = impPopStack().val;
6681                 thisPtr            = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
6682                 if (compDonotInline())
6683                 {
6684                     return callRetTyp;
6685                 }
6686
6687                 // Clone the (possibly transformed) "this" pointer
6688                 GenTreePtr thisPtrCopy;
6689                 thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
6690                                        nullptr DEBUGARG("LDVIRTFTN this pointer"));
6691
6692                 GenTreePtr fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
6693                 if (compDonotInline())
6694                 {
6695                     return callRetTyp;
6696                 }
6697
6698                 thisPtr = nullptr; // can't reuse it
6699
6700                 // Now make an indirect call through the function pointer
6701
6702                 unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer"));
6703                 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
6704                 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6705
6706                 // Create the actual call node
6707
6708                 call                    = gtNewIndCallNode(fptr, callRetTyp, args, ilOffset);
6709                 call->gtCall.gtCallObjp = thisPtrCopy;
6710                 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6711
6712 #ifdef FEATURE_READYTORUN_COMPILER
6713                 if (opts.IsReadyToRun())
6714                 {
6715                     // Null check is needed for ready to run to handle
6716                     // non-virtual <-> virtual changes between versions
6717                     call->gtFlags |= GTF_CALL_NULLCHECK;
6718                 }
6719 #endif
6720
6721                 // Since we are jumping over some code, check that it's OK to skip that code.
6722                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6723                        (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6724                 goto DONE;
6725             }
6726
6727             case CORINFO_CALL:
6728             {
6729                 // This is for a non-virtual, non-interface etc. call
6730                 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6731
6732                 // We remove the null check for the GetType call intrinsic.
6733                 // TODO-CQ: JIT64 does not introduce the null check for many more helper calls
6734                 // and intrinsics.
6735                 if (callInfo->nullInstanceCheck &&
6736                     !((mflags & CORINFO_FLG_INTRINSIC) != 0 && (intrinsicID == CORINFO_INTRINSIC_Object_GetType)))
6737                 {
6738                     call->gtFlags |= GTF_CALL_NULLCHECK;
6739                 }
6740
6741 #ifdef FEATURE_READYTORUN_COMPILER
6742                 if (opts.IsReadyToRun())
6743                 {
6744                     call->gtCall.setEntryPoint(callInfo->codePointerLookup.constLookup);
6745                 }
6746 #endif
6747                 break;
6748             }
6749
6750             case CORINFO_CALL_CODE_POINTER:
6751             {
6752                 // The EE has asked us to call by computing a code pointer and then doing an
6753                 // indirect call.  This is because a runtime lookup is required to get the code entry point.
6754
6755                 // These calls always follow a uniform calling convention, i.e. no extra hidden params
6756                 assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0);
6757
6758                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG);
6759                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6760
6761                 GenTreePtr fptr =
6762                     impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod);
6763
6764                 if (compDonotInline())
6765                 {
6766                     return callRetTyp;
6767                 }
6768
6769                 // Now make an indirect call through the function pointer
6770
6771                 unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer"));
6772                 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
6773                 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6774
6775                 call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
6776                 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6777                 if (callInfo->nullInstanceCheck)
6778                 {
6779                     call->gtFlags |= GTF_CALL_NULLCHECK;
6780                 }
6781
6782                 break;
6783             }
6784
6785             default:
6786                 assert(!"unknown call kind");
6787                 break;
6788         }
6789
6790         //-------------------------------------------------------------------------
6791         // Set more flags
6792
6793         PREFIX_ASSUME(call != nullptr);
6794
6795         if (mflags & CORINFO_FLG_NOGCCHECK)
6796         {
6797             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK;
6798         }
6799
6800         // Mark call if it's one of the ones we will maybe treat as an intrinsic
6801         if (intrinsicID == CORINFO_INTRINSIC_Object_GetType || intrinsicID == CORINFO_INTRINSIC_TypeEQ ||
6802             intrinsicID == CORINFO_INTRINSIC_TypeNEQ || intrinsicID == CORINFO_INTRINSIC_GetCurrentManagedThread ||
6803             intrinsicID == CORINFO_INTRINSIC_GetManagedThreadId)
6804         {
6805             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC;
6806         }
6807     }
6808     assert(sig);
6809     assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set.
6810
6811     /* Some sanity checks */
6812
6813     // CALL_VIRT and NEWOBJ must have a THIS pointer
6814     assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS));
6815     // static bit and hasThis are negations of one another
6816     assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0));
6817     assert(call != nullptr);
6818
6819     /*-------------------------------------------------------------------------
6820      * Check special-cases etc
6821      */
6822
6823     /* Special case - Check if it is a call to Delegate.Invoke(). */
6824
6825     if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
6826     {
6827         assert(!compIsForInlining());
6828         assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6829         assert(mflags & CORINFO_FLG_FINAL);
6830
6831         /* Set the delegate flag */
6832         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV;
6833
6834         if (callInfo->secureDelegateInvoke)
6835         {
6836             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SECURE_DELEGATE_INV;
6837         }
6838
6839         if (opcode == CEE_CALLVIRT)
6840         {
6841             assert(mflags & CORINFO_FLG_FINAL);
6842
6843             /* It should have the GTF_CALL_NULLCHECK flag set. Reset it */
6844             assert(call->gtFlags & GTF_CALL_NULLCHECK);
6845             call->gtFlags &= ~GTF_CALL_NULLCHECK;
6846         }
6847     }
6848
6849     CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass;
6850     actualMethodRetTypeSigClass = sig->retTypeSigClass;
6851     if (varTypeIsStruct(callRetTyp))
6852     {
6853         callRetTyp   = impNormStructType(actualMethodRetTypeSigClass);
6854         call->gtType = callRetTyp;
6855     }
6856
6857 #if !FEATURE_VARARG
6858     /* Check for varargs */
6859     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
6860         (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6861     {
6862         BADCODE("Varargs not supported.");
6863     }
6864 #endif // !FEATURE_VARARG
6865
6866     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
6867         (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6868     {
6869         assert(!compIsForInlining());
6870
6871         /* Set the right flags */
6872
6873         call->gtFlags |= GTF_CALL_POP_ARGS;
6874         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VARARGS;
6875
6876         /* Can't allow tailcall for varargs as it is caller-pop. The caller
6877            will be expecting to pop a certain number of arguments, but if we
6878            tailcall to a function with a different number of arguments, we
6879            are hosed. There are ways around this (caller remembers esp value,
6880            varargs is not caller-pop, etc), but not worth it. */
6881         CLANG_FORMAT_COMMENT_ANCHOR;
6882
6883 #ifdef _TARGET_X86_
6884         if (canTailCall)
6885         {
6886             canTailCall             = false;
6887             szCanTailCallFailReason = "Callee is varargs";
6888         }
6889 #endif
6890
6891         /* Get the total number of arguments - this is already correct
6892          * for CALLI - for methods we have to get it from the call site */
6893
6894         if (opcode != CEE_CALLI)
6895         {
6896 #ifdef DEBUG
6897             unsigned numArgsDef = sig->numArgs;
6898 #endif
6899             eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, sig);
6900
6901 #ifdef DEBUG
6902             // We cannot lazily obtain the signature of a vararg call because using its method
6903             // handle will give us only the declared argument list, not the full argument list.
6904             assert(call->gtCall.callSig == nullptr);
6905             call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
6906             *call->gtCall.callSig = *sig;
6907 #endif
6908
6909             // For vararg calls we must be sure to load the return type of the
6910                 // method actually being called, as well as the return types specified
6911                 // in the vararg signature. With type equivalency, these types
6912             // may not be the same.
6913             if (sig->retTypeSigClass != actualMethodRetTypeSigClass)
6914             {
6915                 if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
6916                     sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR &&
6917                     sig->retType != CORINFO_TYPE_VAR)
6918                 {
6919                     // Make sure that all valuetypes (including enums) that we push are loaded.
6920                     // This is to guarantee that if a GC is triggered from the prestub of this method,
6921                     // all valuetypes in the method signature are already loaded.
6922                     // We need to be able to find the size of the valuetypes, but we cannot
6923                     // do a class-load from within GC.
6924                     info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass);
6925                 }
6926             }
6927
6928             assert(numArgsDef <= sig->numArgs);
6929         }
6930
6931         /* We will have "cookie" as the last argument but we cannot push
6932          * it on the operand stack because we may overflow, so we append it
6933          * to the arg list after we pop the other arguments */
6934     }
6935
6936     if (mflags & CORINFO_FLG_SECURITYCHECK)
6937     {
6938         assert(!compIsForInlining());
6939
6940         // Need security prolog/epilog callouts when there is
6941         // imperative security in the method. This is to give security a
6942         // chance to do any setup in the prolog and cleanup in the epilog if needed.
6943
6944         if (compIsForInlining())
6945         {
6946             // Cannot handle this if the method being imported is itself an inlinee,
6947             // because an inlinee method does not have its own frame.
6948
6949             compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
6950             return callRetTyp;
6951         }
6952         else
6953         {
6954             tiSecurityCalloutNeeded = true;
6955
6956             // If the current method calls a method which needs a security check,
6957             // (i.e. the method being compiled has imperative security)
6958             // we need to reserve a slot for the security object in
6959             // the current method's stack frame
6960             opts.compNeedSecurityCheck = true;
6961         }
6962     }
6963
6964     //--------------------------- Inline NDirect ------------------------------
6965
6966     // For inline cases we technically should look at both the current
6967     // block and the call site block (or just the latter if we've
6968     // fused the EH trees). However the block-related checks pertain to
6969     // EH and we currently won't inline a method with EH. So for
6970     // inlinees, just checking the call site block is sufficient.
6971     {
6972         // New lexical block here to avoid compilation errors because of GOTOs.
6973         BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
6974         impCheckForPInvokeCall(call, methHnd, sig, mflags, block);
6975     }
6976
6977     if (call->gtFlags & GTF_CALL_UNMANAGED)
6978     {
6979         // We set up the unmanaged call by linking the frame, disabling GC, etc
6980         // This needs to be cleaned up on return
6981         if (canTailCall)
6982         {
6983             canTailCall             = false;
6984             szCanTailCallFailReason = "Callee is native";
6985         }
6986
6987         checkForSmallType = true;
6988
6989         impPopArgsForUnmanagedCall(call, sig);
6990
6991         goto DONE;
6992     }
6993     else if ((opcode == CEE_CALLI) && (((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_STDCALL) ||
6994                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_C) ||
6995                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_THISCALL) ||
6996                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_FASTCALL)))
6997     {
6998         if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig))
6999         {
7000             // Normally this only happens with inlining.
7001             // However, a generic method (or type) being NGENd into another module
7002             // can run into this issue as well.  There's not an easy fall-back for NGEN
7003             // so instead we fall back to JIT.
7004             if (compIsForInlining())
7005             {
7006                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE);
7007             }
7008             else
7009             {
7010                 IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)");
7011             }
7012
7013             return callRetTyp;
7014         }
7015
7016         GenTreePtr cookie = eeGetPInvokeCookie(sig);
7017
7018         // This cookie is required to be either a simple GT_CNS_INT or
7019         // an indirection of a GT_CNS_INT
7020         //
7021         GenTreePtr cookieConst = cookie;
7022         if (cookie->gtOper == GT_IND)
7023         {
7024             cookieConst = cookie->gtOp.gtOp1;
7025         }
7026         assert(cookieConst->gtOper == GT_CNS_INT);
7027
7028         // Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that
7029         // we won't allow this tree to participate in any CSE logic
7030         //
7031         cookie->gtFlags |= GTF_DONT_CSE;
7032         cookieConst->gtFlags |= GTF_DONT_CSE;
7033
7034         call->gtCall.gtCallCookie = cookie;
7035
7036         if (canTailCall)
7037         {
7038             canTailCall             = false;
7039             szCanTailCallFailReason = "PInvoke calli";
7040         }
7041     }
7042
7043     /*-------------------------------------------------------------------------
7044      * Create the argument list
7045      */
7046
7047     //-------------------------------------------------------------------------
7048     // Special case - for varargs we have an implicit last argument
7049
7050     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
7051     {
7052         assert(!compIsForInlining());
7053
7054         void *varCookie, *pVarCookie;
7055         if (!info.compCompHnd->canGetVarArgsHandle(sig))
7056         {
7057             compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE);
7058             return callRetTyp;
7059         }
7060
7061         varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie);
7062         assert((!varCookie) != (!pVarCookie));
7063         GenTreePtr cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL);
7064
7065         assert(extraArg == nullptr);
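        // The cookie becomes the extra argument (extraArg); it is handed to impPopList
        // below so that it becomes part of the call's argument list rather than staying
        // on the IL operand stack.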
7066         extraArg = gtNewArgList(cookie);
7067     }
7068
7069     //-------------------------------------------------------------------------
7070     // Extra arg for shared generic code and array methods
7071     //
7072     // Extra argument containing instantiation information is passed in the
7073     // following circumstances:
7074     // (a) To the "Address" method on array classes; the extra parameter is
7075     //     the array's type handle (a TypeDesc)
7076     // (b) To shared-code instance methods in generic structs; the extra parameter
7077     //     is the struct's type handle (a vtable ptr)
7078     // (c) To shared-code per-instantiation non-generic static methods in generic
7079     //     classes and structs; the extra parameter is the type handle
7080     // (d) To shared-code generic methods; the extra parameter is an
7081     //     exact-instantiation MethodDesc
7082     //
7083     // We also set the exact type context associated with the call so we can
7084     // inline the call correctly later on.
7085
7086     if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE)
7087     {
7088         assert(call->gtCall.gtCallType == CT_USER_FUNC);
7089         if (clsHnd == nullptr)
7090         {
7091             NO_WAY("CALLI on parameterized type");
7092         }
7093
7094         assert(opcode != CEE_CALLI);
7095
7096         GenTreePtr instParam;
7097         BOOL       runtimeLookup;
7098
7099         // Instantiated generic method
7100         if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD)
7101         {
7102             CORINFO_METHOD_HANDLE exactMethodHandle =
7103                 (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
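                // exactContextHnd packs a handle together with a flag bit (method vs. class
                // context); masking off CORINFO_CONTEXTFLAGS_MASK recovers the raw handle.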
7104
7105             if (!exactContextNeedsRuntimeLookup)
7106             {
7107 #ifdef FEATURE_READYTORUN_COMPILER
7108                 if (opts.IsReadyToRun())
7109                 {
7110                     instParam =
7111                         impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle);
7112                     if (instParam == nullptr)
7113                     {
7114                         return callRetTyp;
7115                     }
7116                 }
7117                 else
7118 #endif
7119                 {
7120                     instParam = gtNewIconEmbMethHndNode(exactMethodHandle);
7121                     info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle);
7122                 }
7123             }
7124             else
7125             {
7126                 instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7127                 if (instParam == nullptr)
7128                 {
7129                     return callRetTyp;
7130                 }
7131             }
7132         }
7133
7134         // otherwise must be an instance method in a generic struct,
7135         // a static method in a generic type, or a runtime-generated array method
7136         else
7137         {
7138             assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS);
7139             CORINFO_CLASS_HANDLE exactClassHandle =
7140                 (CORINFO_CLASS_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7141
7142             if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0)
7143             {
7144                 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD);
7145                 return callRetTyp;
7146             }
7147
7148             if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall)
7149             {
7150                 // We indicate "readonly" to the Address operation by using a null
7151                 // instParam.
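                // (The "null" instParam is materialized as an integer constant 0 typed as TYP_REF.)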
7152                 instParam = gtNewIconNode(0, TYP_REF);
7153             }
7154
7155             if (!exactContextNeedsRuntimeLookup)
7156             {
7157 #ifdef FEATURE_READYTORUN_COMPILER
7158                 if (opts.IsReadyToRun())
7159                 {
7160                     instParam =
7161                         impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle);
7162                     if (instParam == nullptr)
7163                     {
7164                         return callRetTyp;
7165                     }
7166                 }
7167                 else
7168 #endif
7169                 {
7170                     instParam = gtNewIconEmbClsHndNode(exactClassHandle);
7171                     info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle);
7172                 }
7173             }
7174             else
7175             {
7176                 instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7177                 if (instParam == nullptr)
7178                 {
7179                     return callRetTyp;
7180                 }
7181             }
7182         }
7183
7184         assert(extraArg == nullptr);
7185         extraArg = gtNewArgList(instParam);
7186     }
7187
7188     // Inlining may need the exact type context (exactContextHnd) if we're inlining shared generic code, in particular
7189     // to inline 'polytypic' operations such as static field accesses, type tests and method calls which
7190     // rely on the exact context. The exactContextHnd is passed back to the JitInterface at appropriate points.
7191     // exactContextHnd is not currently required when inlining shared generic code into shared
7192     // generic code, since the inliner aborts whenever shared code polytypic operations are encountered
7193     // (e.g. anything marked needsRuntimeLookup)
7194     if (exactContextNeedsRuntimeLookup)
7195     {
7196         exactContextHnd = nullptr;
7197     }
7198
7199     //-------------------------------------------------------------------------
7200     // The main group of arguments
7201
7202     args = call->gtCall.gtCallArgs = impPopList(sig->numArgs, &argFlags, sig, extraArg);
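    // impPopList pops sig->numArgs values from the IL stack; extraArg (the vararg cookie or
    // instantiation parameter, if any) is passed in so that it is included in the call's
    // argument list as well.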
7203
7204     if (args)
7205     {
7206         call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
7207     }
7208
7209     //-------------------------------------------------------------------------
7210     // The "this" pointer
7211
7212     if (!(mflags & CORINFO_FLG_STATIC) && !((opcode == CEE_NEWOBJ) && (newobjThis == nullptr)))
7213     {
7214         GenTreePtr obj;
7215
7216         if (opcode == CEE_NEWOBJ)
7217         {
7218             obj = newobjThis;
7219         }
7220         else
7221         {
7222             obj = impPopStack().val;
7223             obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform);
7224             if (compDonotInline())
7225             {
7226                 return callRetTyp;
7227             }
7228         }
7229
7230         /* Is this a virtual or interface call? */
7231
7232         if ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT)
7233         {
7234             /* only true object pointers can be virtual */
7235
7236             assert(obj->gtType == TYP_REF);
7237         }
7238         else
7239         {
7240             if (impIsThis(obj))
7241             {
7242                 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS;
7243             }
7244         }
7245
7246         /* Store the "this" value in the call */
7247
7248         call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT;
7249         call->gtCall.gtCallObjp = obj;
7250     }
7251
7252     //-------------------------------------------------------------------------
7253     // The "this" pointer for "newobj"
7254
7255     if (opcode == CEE_NEWOBJ)
7256     {
7257         if (clsFlags & CORINFO_FLG_VAROBJSIZE)
7258         {
7259             assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately
7260             // This is a 'new' of a variable sized object, where
7261             // the constructor is to return the object.  In this case
7262             // the constructor claims to return VOID but we know it
7263             // actually returns the new object
7264             assert(callRetTyp == TYP_VOID);
7265             callRetTyp   = TYP_REF;
7266             call->gtType = TYP_REF;
7267             impSpillSpecialSideEff();
7268
7269             impPushOnStack(call, typeInfo(TI_REF, clsHnd));
7270         }
7271         else
7272         {
7273             if (clsFlags & CORINFO_FLG_DELEGATE)
7274             {
7275                 // The new inliner morphs it here in impImportCall.
7276                 // This will allow us to inline the call to the delegate constructor.
7277                 call = fgOptimizeDelegateConstructor(call, &exactContextHnd);
7278             }
7279
7280             if (!bIntrinsicImported)
7281             {
7282
7283 #if defined(DEBUG) || defined(INLINE_DATA)
7284
7285                 // Keep track of the raw IL offset of the call
7286                 call->gtCall.gtRawILOffset = rawILOffset;
7287
7288 #endif // defined(DEBUG) || defined(INLINE_DATA)
7289
7290                 // Is it an inline candidate?
7291                 impMarkInlineCandidate(call, exactContextHnd, callInfo);
7292             }
7293
7294             // append the call node.
7295             impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7296
7297             // Now push the value of the 'new' onto the stack
7298
7299             // This is a 'new' of a non-variable sized object.
7300             // Append the new node (op1) to the statement list,
7301             // and then push the local holding the value of this
7302             // new instruction on the stack.
7303
7304             if (clsFlags & CORINFO_FLG_VALUECLASS)
7305             {
7306                 assert(newobjThis->gtOper == GT_ADDR && newobjThis->gtOp.gtOp1->gtOper == GT_LCL_VAR);
7307
7308                 unsigned tmp = newobjThis->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
7309                 impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack());
7310             }
7311             else
7312             {
7313                 if (newobjThis->gtOper == GT_COMMA)
7314                 {
7315                     // In coreclr the callout can be inserted even if verification is disabled
7316                     // so we cannot rely on tiVerificationNeeded alone
7317
7318                     // We must have inserted the callout. Get the real newobj.
7319                     newobjThis = newobjThis->gtOp.gtOp2;
7320                 }
7321
7322                 assert(newobjThis->gtOper == GT_LCL_VAR);
7323                 impPushOnStack(gtNewLclvNode(newobjThis->gtLclVarCommon.gtLclNum, TYP_REF), typeInfo(TI_REF, clsHnd));
7324             }
7325         }
7326         return callRetTyp;
7327     }
7328
7329 DONE:
7330
7331     if (tailCall)
7332     {
7333         // This check cannot be performed for implicit tail calls for the reason
7334         // that impIsImplicitTailCallCandidate() is not checking whether return
7335         // types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT.
7336         // As a result it is possible that in the following case, we find that
7337         // the type stack is non-empty if Callee() is considered for implicit
7338         // tail calling.
7339         //      int Caller(..) { .... void Callee(); ret val; ... }
7340         //
7341         // Note that we cannot check return type compatibility before ImpImportCall()
7342         // as we don't have the required info, or we would need to duplicate some of the logic of
7343         // ImpImportCall().
7344         //
7345         // For implicit tail calls, we perform this check after return types are
7346         // known to be compatible.
7347         if ((tailCall & PREFIX_TAILCALL_EXPLICIT) && (verCurrentState.esStackDepth != 0))
7348         {
7349             BADCODE("Stack should be empty after tailcall");
7350         }
7351
7352         // Note that we can not relax this condition with genActualType() as
7353         // the calling convention dictates that the caller of a function with
7354         // a small-typed return value is responsible for normalizing the return val
7355
7356         if (canTailCall &&
7357             !impTailCallRetTypeCompatible(info.compRetType, info.compMethodInfo->args.retTypeClass, callRetTyp,
7358                                           callInfo->sig.retTypeClass))
7359         {
7360             canTailCall             = false;
7361             szCanTailCallFailReason = "Return types are not tail call compatible";
7362         }
7363
7364         // Stack empty check for implicit tail calls.
7365         if (canTailCall && (tailCall & PREFIX_TAILCALL_IMPLICIT) && (verCurrentState.esStackDepth != 0))
7366         {
7367 #ifdef _TARGET_AMD64_
7368             // JIT64 Compatibility:  Opportunistic tail call stack mismatch throws a VerificationException
7369             // in JIT64, not an InvalidProgramException.
7370             Verify(false, "Stack should be empty after tailcall");
7371 #else  // _TARGET_64BIT_
7372             BADCODE("Stack should be empty after tailcall");
7373 #endif //!_TARGET_64BIT_
7374         }
7375
7376         // assert(compCurBB is not a catch, finally or filter block);
7377         // assert(compCurBB is not a try block protected by a finally block);
7378
7379         // Check for permission to tailcall
7380         bool explicitTailCall = (tailCall & PREFIX_TAILCALL_EXPLICIT) != 0;
7381
7382         assert(!explicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN);
7383
7384         if (canTailCall)
7385         {
7386             // True virtual or indirect calls shouldn't pass in a callee handle.
7387             CORINFO_METHOD_HANDLE exactCalleeHnd = ((call->gtCall.gtCallType != CT_USER_FUNC) ||
7388                                                     ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT))
7389                                                        ? nullptr
7390                                                        : methHnd;
7391             GenTreePtr thisArg = call->gtCall.gtCallObjp;
7392
7393             if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, explicitTailCall))
7394             {
7395                 canTailCall = true;
7396                 if (explicitTailCall)
7397                 {
7398                     // In case of explicit tail calls, mark it so that it is not considered
7399                     // for in-lining.
7400                     call->gtCall.gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL;
7401 #ifdef DEBUG
7402                     if (verbose)
7403                     {
7404                         printf("\nGTF_CALL_M_EXPLICIT_TAILCALL bit set for call ");
7405                         printTreeID(call);
7406                         printf("\n");
7407                     }
7408 #endif
7409                 }
7410                 else
7411                 {
7412 #if FEATURE_TAILCALL_OPT
7413                     // Must be an implicit tail call.
7414                     assert((tailCall & PREFIX_TAILCALL_IMPLICIT) != 0);
7415
7416                     // It is possible that a call node is both an inline candidate and marked
7417                     // for opportunistic tail calling.  In-lining happens before morphing of
7418                     // trees.  If in-lining of an in-line candidate gets aborted for whatever
7419                     // reason, it will survive to the morphing stage at which point it will be
7420                     // transformed into a tail call after performing additional checks.
7421
7422                     call->gtCall.gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL;
7423 #ifdef DEBUG
7424                     if (verbose)
7425                     {
7426                         printf("\nGTF_CALL_M_IMPLICIT_TAILCALL bit set for call ");
7427                         printTreeID(call);
7428                         printf("\n");
7429                     }
7430 #endif
7431
7432 #else //! FEATURE_TAILCALL_OPT
7433                     NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls");
7434
7435 #endif // FEATURE_TAILCALL_OPT
7436                 }
7437
7438                 // we can't report success just yet...
7439             }
7440             else
7441             {
7442                 canTailCall = false;
7443 // canTailCall reported its reasons already
7444 #ifdef DEBUG
7445                 if (verbose)
7446                 {
7447                     printf("\ninfo.compCompHnd->canTailCall returned false for call ");
7448                     printTreeID(call);
7449                     printf("\n");
7450                 }
7451 #endif
7452             }
7453         }
7454         else
7455         {
7456             // If this assert fires it means that canTailCall was set to false without setting a reason!
7457             assert(szCanTailCallFailReason != nullptr);
7458
7459 #ifdef DEBUG
7460             if (verbose)
7461             {
7462                 printf("\nRejecting %splicit tail call for call ", explicitTailCall ? "ex" : "im");
7463                 printTreeID(call);
7464                 printf(": %s\n", szCanTailCallFailReason);
7465             }
7466 #endif
7467             info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, explicitTailCall, TAILCALL_FAIL,
7468                                                      szCanTailCallFailReason);
7469         }
7470     }
7471
7472 // Note: we assume that small return types are already normalized by the managed callee
7473 // or by the pinvoke stub for calls to unmanaged code.
7474
7475 DONE_CALL:
7476
7477     if (!bIntrinsicImported)
7478     {
7479         //
7480         // Things needed to be checked when bIntrinsicImported is false.
7481         //
7482
7483         assert(call->gtOper == GT_CALL);
7484         assert(sig != nullptr);
7485
7486         // Tail calls require us to save the call site's sig info so we can obtain an argument
7487         // copying thunk from the EE later on.
7488         if (call->gtCall.callSig == nullptr)
7489         {
7490             call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7491             *call->gtCall.callSig = *sig;
7492         }
7493
7494         if (compIsForInlining() && opcode == CEE_CALLVIRT)
7495         {
7496             GenTreePtr callObj = call->gtCall.gtCallObjp;
7497             assert(callObj != nullptr);
7498
7499             unsigned callKind = call->gtFlags & GTF_CALL_VIRT_KIND_MASK;
7500
7501             if (((callKind != GTF_CALL_NONVIRT) || (call->gtFlags & GTF_CALL_NULLCHECK)) &&
7502                 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(call->gtCall.gtCallArgs, callObj,
7503                                                                    impInlineInfo->inlArgInfo))
7504             {
7505                 impInlineInfo->thisDereferencedFirst = true;
7506             }
7507         }
7508
7509 #if defined(DEBUG) || defined(INLINE_DATA)
7510
7511         // Keep track of the raw IL offset of the call
7512         call->gtCall.gtRawILOffset = rawILOffset;
7513
7514 #endif // defined(DEBUG) || defined(INLINE_DATA)
7515
7516         // Is it an inline candidate?
7517         impMarkInlineCandidate(call, exactContextHnd, callInfo);
7518     }
7519
7520     // Push or append the result of the call
7521     if (callRetTyp == TYP_VOID)
7522     {
7523         if (opcode == CEE_NEWOBJ)
7524         {
7525             // we actually did push something, so don't spill the thing we just pushed.
7526             assert(verCurrentState.esStackDepth > 0);
7527             impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtOffs);
7528         }
7529         else
7530         {
7531             impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7532         }
7533     }
7534     else
7535     {
7536         impSpillSpecialSideEff();
7537
7538         if (clsFlags & CORINFO_FLG_ARRAY)
7539         {
7540             eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
7541         }
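        // For runtime-generated array methods, re-read the exact signature from the
        // call-site token before the return type is interpreted below.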
7542
7543         // Find the return type used for verification by interpreting the method signature.
7544         // NB: we are clobbering the already established sig.
7545         if (tiVerificationNeeded)
7546         {
7547             // Actually, we never get the sig for the original method.
7548             sig = &(callInfo->verSig);
7549         }
7550
7551         typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass);
7552         tiRetVal.NormaliseForStack();
7553
7554         // The CEE_READONLY prefix modifies the verification semantics of an Address
7555         // operation on an array type.
7556         if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall && tiRetVal.IsByRef())
7557         {
7558             tiRetVal.SetIsReadonlyByRef();
7559         }
7560
7561         if (tiVerificationNeeded)
7562         {
7563             // We assume all calls return permanent home byrefs. If they
7564             // didn't they wouldn't be verifiable. This is also covering
7565             // the Address() helper for multidimensional arrays.
7566             if (tiRetVal.IsByRef())
7567             {
7568                 tiRetVal.SetIsPermanentHomeByRef();
7569             }
7570         }
7571
7572         if (call->gtOper == GT_CALL)
7573         {
7574             // Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call)
7575             if (varTypeIsStruct(callRetTyp))
7576             {
7577                 call = impFixupCallStructReturn(call, sig->retTypeClass);
7578             }
7579
7580             if ((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0)
7581             {
7582                 assert(opts.OptEnabled(CLFLG_INLINING));
7583
7584                 // Make the call its own tree (spill the stack if needed).
7585                 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7586
7587                 // TODO: Still using the widened type.
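                // The GT_RET_EXPR node created here is a placeholder for the call's value on the
                // stack; if inlining succeeds it is later replaced by the inlinee's return expression.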
7588                 call = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp));
7589             }
7590             else
7591             {
7592                 // For non-candidates we must also spill, since we
7593                 // might have locals live on the eval stack that this
7594                 // call can modify.
7595                 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call"));
7596             }
7597         }
7598
7599         if (!bIntrinsicImported)
7600         {
7601             //-------------------------------------------------------------------------
7602             //
7603             /* If the call is of a small type and the callee is managed, the callee will normalize the result
7604                 before returning.
7605                 However, we need to normalize small type values returned by unmanaged
7606                 functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here
7607                 if we use the shorter inlined pinvoke stub. */
7608
7609             if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT))
7610             {
7611                 call = gtNewCastNode(genActualType(callRetTyp), call, callRetTyp);
7612             }
7613         }
7614
7615         impPushOnStack(call, tiRetVal);
7616     }
7617
7618     // VSD functions get a new call target each time we getCallInfo, so clear the cache.
7619     // Also, the call info cache for CALLI instructions is largely incomplete, so clear it out.
7620     // if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB))
7621     //  callInfoCache.uncacheCallInfo();
7622
7623     return callRetTyp;
7624 }
7625 #ifdef _PREFAST_
7626 #pragma warning(pop)
7627 #endif
7628
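//------------------------------------------------------------------------
// impMethodInfo_hasRetBuffArg: Returns true when the method described by
// methInfo returns a struct via a hidden return buffer argument, i.e. when
// getReturnTypeForStruct reports SPK_ByReference for its return type.
//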
7629 bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo)
7630 {
7631     CorInfoType corType = methInfo->args.retType;
7632
7633     if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY))
7634     {
7635         // We have some kind of STRUCT being returned
7636
7637         structPassingKind howToReturnStruct = SPK_Unknown;
7638
7639         var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, &howToReturnStruct);
7640
7641         if (howToReturnStruct == SPK_ByReference)
7642         {
7643             return true;
7644         }
7645     }
7646
7647     return false;
7648 }
7649
7650 #ifdef DEBUG
7651 //
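// impImportJitTestLabelMark: pops the JitTestLabel arguments (the label and, for the
// three-argument form, a numeric payload) plus the expression being annotated, records
// the annotation in the node test data map, and pushes the expression back on the stack.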
7652 var_types Compiler::impImportJitTestLabelMark(int numArgs)
7653 {
7654     TestLabelAndNum tlAndN;
7655     if (numArgs == 2)
7656     {
7657         tlAndN.m_num  = 0;
7658         StackEntry se = impPopStack();
7659         assert(se.seTypeInfo.GetType() == TI_INT);
7660         GenTreePtr val = se.val;
7661         assert(val->IsCnsIntOrI());
7662         tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
7663     }
7664     else if (numArgs == 3)
7665     {
7666         StackEntry se = impPopStack();
7667         assert(se.seTypeInfo.GetType() == TI_INT);
7668         GenTreePtr val = se.val;
7669         assert(val->IsCnsIntOrI());
7670         tlAndN.m_num = val->AsIntConCommon()->IconValue();
7671         se           = impPopStack();
7672         assert(se.seTypeInfo.GetType() == TI_INT);
7673         val = se.val;
7674         assert(val->IsCnsIntOrI());
7675         tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
7676     }
7677     else
7678     {
7679         assert(false);
7680     }
7681
7682     StackEntry expSe = impPopStack();
7683     GenTreePtr node  = expSe.val;
7684
7685     // There are a small number of special cases, where we actually put the annotation on a subnode.
7686     if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100)
7687     {
7688         // A loop hoist annotation with value >= 100 means that the expression should be a static field access,
7689         // a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some
7690         // offset within the static field block whose address is returned by the helper call.
7691         // The annotation is saying that this address calculation, but not the entire access, should be hoisted.
7692         GenTreePtr helperCall = nullptr;
7693         assert(node->OperGet() == GT_IND);
7694         tlAndN.m_num -= 100;
7695         GetNodeTestData()->Set(node->gtOp.gtOp1, tlAndN);
7696         GetNodeTestData()->Remove(node);
7697     }
7698     else
7699     {
7700         GetNodeTestData()->Set(node, tlAndN);
7701     }
7702
7703     impPushOnStack(node, expSe.seTypeInfo);
7704     return node->TypeGet();
7705 }
7706 #endif // DEBUG
7707
7708 //-----------------------------------------------------------------------------------
7709 //  impFixupCallStructReturn: For a call node that returns a struct type either
7710 //  adjust the return type to an enregisterable type, or set the flag to indicate
7711 //  struct return via retbuf arg.
7712 //
7713 //  Arguments:
7714 //    call       -  GT_CALL GenTree node
7715 //    retClsHnd  -  Class handle of return type of the call
7716 //
7717 //  Return Value:
7718 //    Returns new GenTree node after fixing struct return of call node
7719 //
7720 GenTreePtr Compiler::impFixupCallStructReturn(GenTreePtr call, CORINFO_CLASS_HANDLE retClsHnd)
7721 {
7722     assert(call->gtOper == GT_CALL);
7723
7724     if (!varTypeIsStruct(call))
7725     {
7726         return call;
7727     }
7728
7729     call->gtCall.gtRetClsHnd = retClsHnd;
7730
7731     GenTreeCall* callNode = call->AsCall();
7732
7733 #if FEATURE_MULTIREG_RET
7734     // Initialize Return type descriptor of call node
7735     ReturnTypeDesc* retTypeDesc = callNode->GetReturnTypeDesc();
7736     retTypeDesc->InitializeStructReturnType(this, retClsHnd);
7737 #endif // FEATURE_MULTIREG_RET
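    // The ReturnTypeDesc computed above drives the choice below between a single-register
    // return, a multi-register return (forced into a temp unless the call is a tail call
    // or inline candidate), and a return via a hidden retbuf argument.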
7738
7739 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
7740
7741     // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
7742     assert(!callNode->IsVarargs() && "varargs not allowed for System V OSs.");
7743
7744     // The return type will remain as the incoming struct type unless normalized to a
7745     // single eightbyte return type below.
7746     callNode->gtReturnType = call->gtType;
7747
7748     unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7749     if (retRegCount != 0)
7750     {
7751         if (retRegCount == 1)
7752         {
7753             // struct returned in a single register
7754             callNode->gtReturnType = retTypeDesc->GetReturnRegType(0);
7755         }
7756         else
7757         {
7758             // must be a struct returned in two registers
7759             assert(retRegCount == 2);
7760
7761             if ((!callNode->CanTailCall()) && (!callNode->IsInlineCandidate()))
7762             {
7763                 // Force a call returning multi-reg struct to be always of the IR form
7764                 //   tmp = call
7765                 //
7766                 // No need to assign a multi-reg struct to a local var if:
7767                 //  - It is a tail call or
7768                 //  - The call is marked for in-lining later
7769                 return impAssignMultiRegTypeToVar(call, retClsHnd);
7770             }
7771         }
7772     }
7773     else
7774     {
7775         // struct not returned in registers, i.e. returned via hidden retbuf arg.
7776         callNode->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
7777     }
7778
7779 #else // not FEATURE_UNIX_AMD64_STRUCT_PASSING
7780
7781 #if FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
7782     // There is no fixup necessary if the return type is an HFA struct.
7783     // HFA structs are returned in registers for ARM32 and ARM64
7784     //
7785     if (!call->gtCall.IsVarargs() && IsHfa(retClsHnd))
7786     {
7787         if (call->gtCall.CanTailCall())
7788         {
7789             if (info.compIsVarArgs)
7790             {
7791                 // We cannot tail call because control needs to return to fixup the calling
7792                 // convention for result return.
7793                 call->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
7794             }
7795             else
7796             {
7797                 // If we can tail call returning HFA, then don't assign it to
7798                 // a variable back and forth.
7799                 return call;
7800             }
7801         }
7802
7803         if (call->gtFlags & GTF_CALL_INLINE_CANDIDATE)
7804         {
7805             return call;
7806         }
7807
7808         unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7809         if (retRegCount >= 2)
7810         {
7811             return impAssignMultiRegTypeToVar(call, retClsHnd);
7812         }
7813     }
7814 #endif // _TARGET_ARM_
7815
7816     // Check for TYP_STRUCT type that wraps a primitive type
7817     // Such structs are returned using a single register
7818     // and we change the return type on those calls here.
7819     //
7820     structPassingKind howToReturnStruct;
7821     var_types         returnType = getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
7822
7823     if (howToReturnStruct == SPK_ByReference)
7824     {
7825         assert(returnType == TYP_UNKNOWN);
7826         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
7827     }
7828     else
7829     {
7830         assert(returnType != TYP_UNKNOWN);
7831         call->gtCall.gtReturnType = returnType;
7832
7833         // ToDo: Refactor this common code sequence into its own method as it is used 4+ times
7834         if ((returnType == TYP_LONG) && (compLongUsed == false))
7835         {
7836             compLongUsed = true;
7837         }
7838         else if (((returnType == TYP_FLOAT) || (returnType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
7839         {
7840             compFloatingPointUsed = true;
7841         }
7842
7843 #if FEATURE_MULTIREG_RET
7844         unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7845         assert(retRegCount != 0);
7846
7847         if (retRegCount >= 2)
7848         {
7849             if ((!callNode->CanTailCall()) && (!callNode->IsInlineCandidate()))
7850             {
7851                 // Force a call returning multi-reg struct to be always of the IR form
7852                 //   tmp = call
7853                 //
7854                 // No need to assign a multi-reg struct to a local var if:
7855                 //  - It is a tail call or
7856                 //  - The call is marked for in-lining later
7857                 return impAssignMultiRegTypeToVar(call, retClsHnd);
7858             }
7859         }
7860 #endif // FEATURE_MULTIREG_RET
7861     }
7862
7863 #endif // not FEATURE_UNIX_AMD64_STRUCT_PASSING
7864
7865     return call;
7866 }
7867
7868 /*****************************************************************************
7869    For struct return values, re-type the operand in the case where the ABI
7870    does not use a struct return buffer
7871    Note that this method is only called for !_TARGET_X86_
7872  */
7873
7874 GenTreePtr Compiler::impFixupStructReturnType(GenTreePtr op, CORINFO_CLASS_HANDLE retClsHnd)
7875 {
7876     assert(varTypeIsStruct(info.compRetType));
7877     assert(info.compRetBuffArg == BAD_VAR_NUM);
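    // The target-specific blocks below return early for multi-register cases; otherwise
    // control reaches REDO_RETURN_NODE, which retypes the operand to info.compRetNativeType.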
7878
7879 #if defined(_TARGET_XARCH_)
7880
7881 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
7882     // No VarArgs for CoreCLR on x64 Unix
7883     assert(!info.compIsVarArgs);
7884
7885     // Is method returning a multi-reg struct?
7886     if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd))
7887     {
7888         // In case of multi-reg struct return, we force IR to be one of the following:
7889         // GT_RETURN(lclvar) or GT_RETURN(call).  If op is anything other than a
7890         // lclvar or call, it is assigned to a temp to create: temp = op and GT_RETURN(tmp).
7891
7892         if (op->gtOper == GT_LCL_VAR)
7893         {
7894             // Make sure that this struct stays in memory and doesn't get promoted.
7895             unsigned lclNum                  = op->gtLclVarCommon.gtLclNum;
7896             lvaTable[lclNum].lvIsMultiRegRet = true;
7897
7898             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
7899             op->gtFlags |= GTF_DONT_CSE;
7900
7901             return op;
7902         }
7903
7904         if (op->gtOper == GT_CALL)
7905         {
7906             return op;
7907         }
7908
7909         return impAssignMultiRegTypeToVar(op, retClsHnd);
7910     }
7911 #else  // !FEATURE_UNIX_AMD64_STRUCT_PASSING
7912     assert(info.compRetNativeType != TYP_STRUCT);
7913 #endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
7914
7915 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
7916
7917     if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd))
7918     {
7919         if (op->gtOper == GT_LCL_VAR)
7920         {
7921             // This LCL_VAR is an HFA return value; it stays as a TYP_STRUCT
7922             unsigned lclNum = op->gtLclVarCommon.gtLclNum;
7923             // Make sure this struct type stays as struct so that we can return it as an HFA
7924             lvaTable[lclNum].lvIsMultiRegRet = true;
7925
7926             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
7927             op->gtFlags |= GTF_DONT_CSE;
7928
7929             return op;
7930         }
7931
7932         if (op->gtOper == GT_CALL)
7933         {
7934             if (op->gtCall.IsVarargs())
7935             {
7936                 // We cannot tail call because control needs to return to fixup the calling
7937                 // convention for result return.
7938                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
7939                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
7940             }
7941             else
7942             {
7943                 return op;
7944             }
7945         }
7946         return impAssignMultiRegTypeToVar(op, retClsHnd);
7947     }
7948
7949 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM64_)
7950
7951     // Is method returning a multi-reg struct?
7952     if (IsMultiRegReturnedType(retClsHnd))
7953     {
7954         if (op->gtOper == GT_LCL_VAR)
7955         {
7956             // This LCL_VAR stays as a TYP_STRUCT
7957             unsigned lclNum = op->gtLclVarCommon.gtLclNum;
7958
7959             // Make sure this struct type is not struct promoted
7960             lvaTable[lclNum].lvIsMultiRegRet = true;
7961
7962             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
7963             op->gtFlags |= GTF_DONT_CSE;
7964
7965             return op;
7966         }
7967
7968         if (op->gtOper == GT_CALL)
7969         {
7970             if (op->gtCall.IsVarargs())
7971             {
7972                 // We cannot tail call because control needs to return to fixup the calling
7973                 // convention for result return.
7974                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
7975                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
7976             }
7977             else
7978             {
7979                 return op;
7980             }
7981         }
7982         return impAssignMultiRegTypeToVar(op, retClsHnd);
7983     }
7984
7985 #endif //  FEATURE_MULTIREG_RET && FEATURE_HFA
7986
7987 REDO_RETURN_NODE:
7988     // adjust the type away from struct to integral
7989     // and no normalizing
7990     if (op->gtOper == GT_LCL_VAR)
7991     {
7992         op->ChangeOper(GT_LCL_FLD);
7993     }
7994     else if (op->gtOper == GT_OBJ)
7995     {
7996         GenTreePtr op1 = op->AsObj()->Addr();
7997
7998         // We will fold away OBJ/ADDR
7999         // except for OBJ/ADDR/INDEX
8000         //     as the array type influences the array element's offset
8001         //     Later in this method we change op->gtType to info.compRetNativeType
8002         //     This is not correct when op is a GT_INDEX as the starting offset
8003         //     for the array elements 'elemOffs' is different for an array of
8004         //     TYP_REF than an array of TYP_STRUCT (which simply wraps a TYP_REF)
8005         //     Also refer to the GTF_INX_REFARR_LAYOUT flag
8006         //
8007         if ((op1->gtOper == GT_ADDR) && (op1->gtOp.gtOp1->gtOper != GT_INDEX))
8008         {
8009             // Change '*(&X)' to 'X' and see if we can do better
8010             op = op1->gtOp.gtOp1;
8011             goto REDO_RETURN_NODE;
8012         }
8013         op->gtObj.gtClass = NO_CLASS_HANDLE;
8014         op->ChangeOperUnchecked(GT_IND);
8015         op->gtFlags |= GTF_IND_TGTANYWHERE;
8016     }
8017     else if (op->gtOper == GT_CALL)
8018     {
8019         if (op->AsCall()->TreatAsHasRetBufArg(this))
8020         {
8021             // This must be one of those 'special' helpers that don't
8022             // really have a return buffer, but instead use it as a way
8023             // to keep the trees cleaner with fewer address-taken temps.
8024             //
8025             // Well, now we have to materialize the return buffer as
8026             // an address-taken temp. Then we can return the temp.
8027             //
8028             // NOTE: this code assumes that since the call directly
8029             // feeds the return, then the call must be returning the
8030             // same structure/class/type.
8031             //
8032             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer"));
8033
8034             // No need to spill anything as we're about to return.
8035             impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE);
8036
8037             // Don't create both a GT_ADDR & GT_OBJ just to undo all of that; instead,
8038             // jump directly to a GT_LCL_FLD.
8039             op = gtNewLclvNode(tmpNum, info.compRetNativeType);
8040             op->ChangeOper(GT_LCL_FLD);
8041         }
8042         else
8043         {
8044             assert(info.compRetNativeType == op->gtCall.gtReturnType);
8045
8046             // Don't change the gtType of the node just yet, it will get changed later.
8047             return op;
8048         }
8049     }
8050     else if (op->gtOper == GT_COMMA)
8051     {
8052         op->gtOp.gtOp2 = impFixupStructReturnType(op->gtOp.gtOp2, retClsHnd);
8053     }
8054
8055     op->gtType = info.compRetNativeType;
8056
8057     return op;
8058 }
8059
8060 /*****************************************************************************
8061    CEE_LEAVE may be jumping out of a protected block, viz, a catch or a
8062    finally-protected try. We find the finally blocks protecting the current
8063    offset (in order) by walking over the complete exception table and
8064    finding enclosing clauses. This assumes that the table is sorted.
8065    This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS.
8066
8067    If we are leaving a catch handler, we need to attach the
8068    CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks.
8069
8070    After this function, the BBJ_LEAVE block has been converted to a different type.
8071  */
8072
8073 #if !FEATURE_EH_FUNCLETS
8074
8075 void Compiler::impImportLeave(BasicBlock* block)
8076 {
8077 #ifdef DEBUG
8078     if (verbose)
8079     {
8080         printf("\nBefore import CEE_LEAVE:\n");
8081         fgDispBasicBlocks();
8082         fgDispHandlerTab();
8083     }
8084 #endif // DEBUG
8085
8086     bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8087     unsigned    blkAddr         = block->bbCodeOffs;
8088     BasicBlock* leaveTarget     = block->bbJumpDest;
8089     unsigned    jmpAddr         = leaveTarget->bbCodeOffs;
8090
8091     // LEAVE clears the stack: spill side effects and set the stack depth to 0
8092
8093     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8094     verCurrentState.esStackDepth = 0;
8095
8096     assert(block->bbJumpKind == BBJ_LEAVE);
8097     assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary
8098
8099     BasicBlock* step         = DUMMY_INIT(NULL);
8100     unsigned    encFinallies = 0; // Number of enclosing finallies.
8101     GenTreePtr  endCatches   = NULL;
8102     GenTreePtr  endLFin      = NULL; // The statement tree to indicate the end of locally-invoked finally.
8103
8104     unsigned  XTnum;
8105     EHblkDsc* HBtab;
8106
8107     for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8108     {
8109         // Grab the handler offsets
8110
8111         IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8112         IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8113         IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8114         IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8115
8116         /* Is this a catch-handler we are CEE_LEAVEing out of?
8117          * If so, we need to call CORINFO_HELP_ENDCATCH.
8118          */
8119
8120         if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8121         {
8122             // Can't CEE_LEAVE out of a finally/fault handler
8123             if (HBtab->HasFinallyOrFaultHandler())
8124                 BADCODE("leave out of fault/finally block");
8125
8126             // Create the call to CORINFO_HELP_ENDCATCH
8127             GenTreePtr endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID);
8128
8129             // Make a list of all the currently pending endCatches
8130             if (endCatches)
8131                 endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch);
8132             else
8133                 endCatches = endCatch;
8134
8135 #ifdef DEBUG
8136             if (verbose)
8137             {
8138                 printf("impImportLeave - BB%02u jumping out of catch handler EH#%u, adding call to "
8139                        "CORINFO_HELP_ENDCATCH\n",
8140                        block->bbNum, XTnum);
8141             }
8142 #endif
8143         }
8144         else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8145                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8146         {
8147             /* This is a finally-protected try we are jumping out of */
8148
8149             /* If there are any pending endCatches, and we have already
8150                jumped out of a finally-protected try, then the endCatches
8151                have to be put in a block in an outer try for async
8152                exceptions to work correctly.
8153                Else, just append them to the original block */
8154
8155             BasicBlock* callBlock;
8156
8157             assert(!encFinallies == !endLFin); // if we have finallies, we better have an endLFin tree, and vice-versa
8158
8159             if (encFinallies == 0)
8160             {
8161                 assert(step == DUMMY_INIT(NULL));
8162                 callBlock             = block;
8163                 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8164
8165                 if (endCatches)
8166                     impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8167
8168 #ifdef DEBUG
8169                 if (verbose)
8170                 {
8171                     printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY "
8172                            "block BB%02u [%08p]\n",
8173                            callBlock->bbNum, dspPtr(callBlock));
8174                 }
8175 #endif
8176             }
8177             else
8178             {
8179                 assert(step != DUMMY_INIT(NULL));
8180
8181                 /* Calling the finally block */
8182                 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step);
8183                 assert(step->bbJumpKind == BBJ_ALWAYS);
8184                 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8185                                               // finally in the chain)
8186                 step->bbJumpDest->bbRefs++;
8187
8188                 /* The new block will inherit this block's weight */
8189                 callBlock->setBBWeight(block->bbWeight);
8190                 callBlock->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8191
8192 #ifdef DEBUG
8193                 if (verbose)
8194                 {
8195                     printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block BB%02u "
8196                            "[%08p]\n",
8197                            callBlock->bbNum, dspPtr(callBlock));
8198                 }
8199 #endif
8200
8201                 GenTreePtr lastStmt;
8202
8203                 if (endCatches)
8204                 {
8205                     lastStmt         = gtNewStmt(endCatches);
8206                     endLFin->gtNext  = lastStmt;
8207                     lastStmt->gtPrev = endLFin;
8208                 }
8209                 else
8210                 {
8211                     lastStmt = endLFin;
8212                 }
8213
8214                 // note that this sets BBF_IMPORTED on the block
8215                 impEndTreeList(callBlock, endLFin, lastStmt);
8216             }
8217
8218             step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8219             /* The new block will inherit this block's weight */
8220             step->setBBWeight(block->bbWeight);
8221             step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8222
8223 #ifdef DEBUG
8224             if (verbose)
8225             {
8226                 printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block "
8227                        "BB%02u [%08p]\n",
8228                        step->bbNum, dspPtr(step));
8229             }
8230 #endif
8231
8232             unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel;
8233             assert(finallyNesting <= compHndBBtabCount);
8234
8235             callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8236             endLFin               = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting);
8237             endLFin               = gtNewStmt(endLFin);
8238             endCatches            = NULL;
8239
8240             encFinallies++;
8241
8242             invalidatePreds = true;
8243         }
8244     }
8245
8246     /* Append any remaining endCatches, if any */
8247
8248     assert(!encFinallies == !endLFin);
8249
8250     if (encFinallies == 0)
8251     {
8252         assert(step == DUMMY_INIT(NULL));
8253         block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8254
8255         if (endCatches)
8256             impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8257
8258 #ifdef DEBUG
8259         if (verbose)
8260         {
8261             printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS "
8262                    "block BB%02u [%08p]\n",
8263                    block->bbNum, dspPtr(block));
8264         }
8265 #endif
8266     }
8267     else
8268     {
8269         // If leaveTarget is the start of another try block, we want to make sure that
8270         // we do not insert finalStep into that try block. Hence, we find the enclosing
8271         // try block.
8272         unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget);
8273
8274         // Insert a new BB either in the try region indicated by tryIndex or
8275         // the handler region indicated by leaveTarget->bbHndIndex,
8276         // depending on which is the inner region.
8277         BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step);
8278         finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS;
8279         step->bbJumpDest = finalStep;
8280
8281         /* The new block will inherit this block's weight */
8282         finalStep->setBBWeight(block->bbWeight);
8283         finalStep->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8284
8285 #ifdef DEBUG
8286         if (verbose)
8287         {
8288             printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block BB%02u [%08p]\n",
8289                    encFinallies, finalStep->bbNum, dspPtr(finalStep));
8290         }
8291 #endif
8292
8293         GenTreePtr lastStmt;
8294
8295         if (endCatches)
8296         {
8297             lastStmt         = gtNewStmt(endCatches);
8298             endLFin->gtNext  = lastStmt;
8299             lastStmt->gtPrev = endLFin;
8300         }
8301         else
8302         {
8303             lastStmt = endLFin;
8304         }
8305
8306         impEndTreeList(finalStep, endLFin, lastStmt);
8307
8308         finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
8309
8310         // Queue up the jump target for importing
8311
8312         impImportBlockPending(leaveTarget);
8313
8314         invalidatePreds = true;
8315     }
8316
8317     if (invalidatePreds && fgComputePredsDone)
8318     {
8319         JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
8320         fgRemovePreds();
8321     }
8322
8323 #ifdef DEBUG
8324     fgVerifyHandlerTab();
8325
8326     if (verbose)
8327     {
8328         printf("\nAfter import CEE_LEAVE:\n");
8329         fgDispBasicBlocks();
8330         fgDispHandlerTab();
8331     }
8332 #endif // DEBUG
8333 }
8334
8335 #else // FEATURE_EH_FUNCLETS
8336
8337 void Compiler::impImportLeave(BasicBlock* block)
8338 {
8339 #ifdef DEBUG
8340     if (verbose)
8341     {
8342         printf("\nBefore import CEE_LEAVE in BB%02u (targeting BB%02u):\n", block->bbNum, block->bbJumpDest->bbNum);
8343         fgDispBasicBlocks();
8344         fgDispHandlerTab();
8345     }
8346 #endif // DEBUG
8347
8348     bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8349     unsigned    blkAddr         = block->bbCodeOffs;
8350     BasicBlock* leaveTarget     = block->bbJumpDest;
8351     unsigned    jmpAddr         = leaveTarget->bbCodeOffs;
8352
8353     // LEAVE clears the stack: spill any side effects and set the stack depth to 0
8354
8355     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8356     verCurrentState.esStackDepth = 0;
8357
8358     assert(block->bbJumpKind == BBJ_LEAVE);
8359     assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary
8360
8361     BasicBlock* step = nullptr;
8362
8363     enum StepType
8364     {
8365         // No step type; step == NULL.
8366         ST_None,
8367
8368         // Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair?
8369         // That is, is step->bbJumpDest where a finally will return to?
8370         ST_FinallyReturn,
8371
8372         // The step block is a catch return.
8373         ST_Catch,
8374
8375         // The step block is in a "try", created as the target for a finally return or the target for a catch return.
8376         ST_Try
8377     };
8378     StepType stepType = ST_None;
8379
8380     unsigned  XTnum;
8381     EHblkDsc* HBtab;
8382
8383     for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8384     {
8385         // Grab the handler offsets
8386
8387         IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8388         IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8389         IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8390         IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8391
8392         /* Is this a catch-handler we are CEE_LEAVEing out of?
8393          */
8394
8395         if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8396         {
8397             // Can't CEE_LEAVE out of a finally/fault handler
8398             if (HBtab->HasFinallyOrFaultHandler())
8399             {
8400                 BADCODE("leave out of fault/finally block");
8401             }
8402
8403             /* We are jumping out of a catch */
8404
8405             if (step == nullptr)
8406             {
8407                 step             = block;
8408                 step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET
8409                 stepType         = ST_Catch;
8410
8411 #ifdef DEBUG
8412                 if (verbose)
8413                 {
8414                     printf("impImportLeave - jumping out of a catch (EH#%u), convert block BB%02u to BBJ_EHCATCHRET "
8415                            "block\n",
8416                            XTnum, step->bbNum);
8417                 }
8418 #endif
8419             }
8420             else
8421             {
8422                 BasicBlock* exitBlock;
8423
8424                 /* Create a new catch exit block in the catch region for the existing step block to jump to in this
8425                  * scope */
8426                 exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step);
8427
8428                 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
8429                 step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch
8430                                               // exit) returns to this block
8431                 step->bbJumpDest->bbRefs++;
8432
8433 #if defined(_TARGET_ARM_)
8434                 if (stepType == ST_FinallyReturn)
8435                 {
8436                     assert(step->bbJumpKind == BBJ_ALWAYS);
8437                     // Mark the target of a finally return
8438                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8439                 }
8440 #endif // defined(_TARGET_ARM_)
8441
8442                 /* The new block will inherit this block's weight */
8443                 exitBlock->setBBWeight(block->bbWeight);
8444                 exitBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8445
8446                 /* This exit block is the new step */
8447                 step     = exitBlock;
8448                 stepType = ST_Catch;
8449
8450                 invalidatePreds = true;
8451
8452 #ifdef DEBUG
8453                 if (verbose)
8454                 {
8455                     printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block BB%02u\n", XTnum,
8456                            exitBlock->bbNum);
8457                 }
8458 #endif
8459             }
8460         }
8461         else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8462                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8463         {
8464             /* We are jumping out of a finally-protected try */
8465
8466             BasicBlock* callBlock;
8467
8468             if (step == nullptr)
8469             {
8470 #if FEATURE_EH_CALLFINALLY_THUNKS
8471
8472                 // Put the call to the finally in the enclosing region.
8473                 unsigned callFinallyTryIndex =
8474                     (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
8475                 unsigned callFinallyHndIndex =
8476                     (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
8477                 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block);
8478
8479                 // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because
8480                 // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE,
8481                 // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the
8482                 // next block, and flow optimizations will remove it.
8483                 block->bbJumpKind = BBJ_ALWAYS;
8484                 block->bbJumpDest = callBlock;
8485                 block->bbJumpDest->bbRefs++;
8486
8487                 /* The new block will inherit this block's weight */
8488                 callBlock->setBBWeight(block->bbWeight);
8489                 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8490
8491 #ifdef DEBUG
8492                 if (verbose)
8493                 {
8494                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
8495                            "BBJ_ALWAYS, add BBJ_CALLFINALLY block BB%02u\n",
8496                            XTnum, block->bbNum, callBlock->bbNum);
8497                 }
8498 #endif
8499
8500 #else // !FEATURE_EH_CALLFINALLY_THUNKS
8501
8502                 callBlock             = block;
8503                 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8504
8505 #ifdef DEBUG
8506                 if (verbose)
8507                 {
8508                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
8509                            "BBJ_CALLFINALLY block\n",
8510                            XTnum, callBlock->bbNum);
8511                 }
8512 #endif
8513
8514 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
8515             }
8516             else
8517             {
8518                 // Calling the finally block. We already have a step block that is either the call-to-finally from a
8519                 // more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by
8520                 // a 'finally'), or the step block is the return from a catch.
8521                 //
8522                 // Due to ThreadAbortException, we can't have the catch return target the call-to-finally block
8523                 // directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will
8524                 // automatically re-raise the exception, using the return address of the catch (that is, the target
8525                 // block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will
8526                 // refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64,
8527                 // we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a
8528                 // finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a
8529                 // BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly
8530                 // within the 'try' region protected by the finally, since we generate code in such a way that execution
8531                 // never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on
8532                 // stack walks.)
8533
8534                 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
8535
8536 #if FEATURE_EH_CALLFINALLY_THUNKS
8537                 if (step->bbJumpKind == BBJ_EHCATCHRET)
8538                 {
8539                     // Need to create another step block in the 'try' region that will actually branch to the
8540                     // call-to-finally thunk.
8541                     BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
8542                     step->bbJumpDest  = step2;
8543                     step->bbJumpDest->bbRefs++;
8544                     step2->setBBWeight(block->bbWeight);
8545                     step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8546
8547 #ifdef DEBUG
8548                     if (verbose)
8549                     {
8550                         printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is "
8551                                "BBJ_EHCATCHRET (BB%02u), new BBJ_ALWAYS step-step block BB%02u\n",
8552                                XTnum, step->bbNum, step2->bbNum);
8553                     }
8554 #endif
8555
8556                     step = step2;
8557                     assert(stepType == ST_Catch); // Leave it as catch type for now.
8558                 }
8559 #endif // FEATURE_EH_CALLFINALLY_THUNKS
8560
8561 #if FEATURE_EH_CALLFINALLY_THUNKS
8562                 unsigned callFinallyTryIndex =
8563                     (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
8564                 unsigned callFinallyHndIndex =
8565                     (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
8566 #else  // !FEATURE_EH_CALLFINALLY_THUNKS
8567                 unsigned callFinallyTryIndex = XTnum + 1;
8568                 unsigned callFinallyHndIndex = 0; // don't care
8569 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
8570
8571                 callBlock        = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step);
8572                 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8573                                               // finally in the chain)
8574                 step->bbJumpDest->bbRefs++;
8575
8576 #if defined(_TARGET_ARM_)
8577                 if (stepType == ST_FinallyReturn)
8578                 {
8579                     assert(step->bbJumpKind == BBJ_ALWAYS);
8580                     // Mark the target of a finally return
8581                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8582                 }
8583 #endif // defined(_TARGET_ARM_)
8584
8585                 /* The new block will inherit this block's weight */
8586                 callBlock->setBBWeight(block->bbWeight);
8587                 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8588
8589 #ifdef DEBUG
8590                 if (verbose)
8591                 {
8592                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY block "
8593                            "BB%02u\n",
8594                            XTnum, callBlock->bbNum);
8595                 }
8596 #endif
8597             }
8598
8599             step     = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8600             stepType = ST_FinallyReturn;
8601
8602             /* The new block will inherit this block's weight */
8603             step->setBBWeight(block->bbWeight);
8604             step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8605
8606 #ifdef DEBUG
8607             if (verbose)
8608             {
8609                 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) "
8610                        "block BB%02u\n",
8611                        XTnum, step->bbNum);
8612             }
8613 #endif
8614
8615             callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8616
8617             invalidatePreds = true;
8618         }
8619         else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8620                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8621         {
8622             // We are jumping out of a catch-protected try.
8623             //
8624             // If we are returning from a call to a finally, then we must have a step block within a try
8625             // that is protected by a catch. This is so when unwinding from that finally (e.g., if code within the
8626             // finally raises an exception), the VM will find this step block, notice that it is in a protected region,
8627             // and invoke the appropriate catch.
8628             //
8629             // We also need to handle a special case with the handling of ThreadAbortException. If a try/catch
8630             // catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception),
8631             // and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM,
8632             // the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target
8633             // address of the catch return as the new exception address. That is, the re-raised exception appears to
8634             // occur at the catch return address. If this exception return address skips an enclosing try/catch that
8635             // catches ThreadAbortException, then the enclosing try/catch will not catch the exception, as it should.
8636             // For example:
8637             //
8638             // try {
8639             //    try {
8640             //       // something here raises ThreadAbortException
8641             //       LEAVE LABEL_1; // no need to stop at LABEL_2
8642             //    } catch (Exception) {
8643             //       // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so
8644             //       // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode.
8645             //       // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised
8646             //       // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only
8647             //       // need to do this transformation if the current EH block is a try/catch that catches
8648             //       // ThreadAbortException (or one of its parents), however we might not be able to find that
8649             //       // information, so currently we do it for all catch types.
8650             //       LEAVE LABEL_1; // Convert this to LEAVE LABEL_2;
8651             //    }
8652             //    LABEL_2: LEAVE LABEL_1; // inserted by this step creation code
8653             // } catch (ThreadAbortException) {
8654             // }
8655             // LABEL_1:
8656             //
8657             // Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C#
8658             // compiler.
8659
8660             if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch))
8661             {
8662                 BasicBlock* catchStep;
8663
8664                 assert(step);
8665
8666                 if (stepType == ST_FinallyReturn)
8667                 {
8668                     assert(step->bbJumpKind == BBJ_ALWAYS);
8669                 }
8670                 else
8671                 {
8672                     assert(stepType == ST_Catch);
8673                     assert(step->bbJumpKind == BBJ_EHCATCHRET);
8674                 }
8675
8676                 /* Create a new exit block in the try region for the existing step block to jump to in this scope */
8677                 catchStep        = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
8678                 step->bbJumpDest = catchStep;
8679                 step->bbJumpDest->bbRefs++;
8680
8681 #if defined(_TARGET_ARM_)
8682                 if (stepType == ST_FinallyReturn)
8683                 {
8684                     // Mark the target of a finally return
8685                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8686                 }
8687 #endif // defined(_TARGET_ARM_)
8688
8689                 /* The new block will inherit this block's weight */
8690                 catchStep->setBBWeight(block->bbWeight);
8691                 catchStep->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8692
8693 #ifdef DEBUG
8694                 if (verbose)
8695                 {
8696                     if (stepType == ST_FinallyReturn)
8697                     {
8698                         printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new "
8699                                "BBJ_ALWAYS block BB%02u\n",
8700                                XTnum, catchStep->bbNum);
8701                     }
8702                     else
8703                     {
8704                         assert(stepType == ST_Catch);
8705                         printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new "
8706                                "BBJ_ALWAYS block BB%02u\n",
8707                                XTnum, catchStep->bbNum);
8708                     }
8709                 }
8710 #endif // DEBUG
8711
8712                 /* This block is the new step */
8713                 step     = catchStep;
8714                 stepType = ST_Try;
8715
8716                 invalidatePreds = true;
8717             }
8718         }
8719     }
8720
8721     if (step == nullptr)
8722     {
8723         block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8724
8725 #ifdef DEBUG
8726         if (verbose)
8727         {
8728             printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE "
8729                    "block BB%02u to BBJ_ALWAYS\n",
8730                    block->bbNum);
8731         }
8732 #endif
8733     }
8734     else
8735     {
8736         step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
8737
8738 #if defined(_TARGET_ARM_)
8739         if (stepType == ST_FinallyReturn)
8740         {
8741             assert(step->bbJumpKind == BBJ_ALWAYS);
8742             // Mark the target of a finally return
8743             step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8744         }
8745 #endif // defined(_TARGET_ARM_)
8746
8747 #ifdef DEBUG
8748         if (verbose)
8749         {
8750             printf("impImportLeave - final destination of step blocks set to BB%02u\n", leaveTarget->bbNum);
8751         }
8752 #endif
8753
8754         // Queue up the jump target for importing
8755
8756         impImportBlockPending(leaveTarget);
8757     }
8758
8759     if (invalidatePreds && fgComputePredsDone)
8760     {
8761         JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
8762         fgRemovePreds();
8763     }
8764
8765 #ifdef DEBUG
8766     fgVerifyHandlerTab();
8767
8768     if (verbose)
8769     {
8770         printf("\nAfter import CEE_LEAVE:\n");
8771         fgDispBasicBlocks();
8772         fgDispHandlerTab();
8773     }
8774 #endif // DEBUG
8775 }
8776
8777 #endif // FEATURE_EH_FUNCLETS
8778
8779 /*****************************************************************************/
8780 // This is called when reimporting a leave block. It resets the JumpKind,
8781 // JumpDest, and bbNext to the original values
8782
8783 void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr)
8784 {
8785 #if FEATURE_EH_FUNCLETS
8786     // With EH Funclets, while importing leave opcode we create another block ending with BBJ_ALWAYS (call it B1)
8787     // and the block containing leave (say B0) is marked as BBJ_CALLFINALLY.   Say for some reason we reimport B0,
8788     // it is reset (in this routine) by marking as ending with BBJ_LEAVE and further down when B0 is reimported, we
8789     // create another BBJ_ALWAYS (call it B2). In this process B1 gets orphaned and any blocks to which B1 is the
8790     // only predecessor are also considered orphans and attempted to be deleted.
8791     //
8792     //  try  {
8793     //     ....
8794     //     try
8795     //     {
8796     //         ....
8797     //         leave OUTSIDE;  // B0 is the block containing this leave, following this would be B1
8798     //     } finally { }
8799     //  } finally { }
8800     //  OUTSIDE:
8801     //
8802     // In the above nested try-finally example, we create a step block (call it Bstep) which branches to a block
8803     // where a finally would branch to (and such block is marked as finally target).  Block B1 branches to step block.
8804     // Because of re-import of B0, Bstep is also orphaned. Since Bstep is a finally target it cannot be removed.  To
8805     // work around this we will duplicate B0 (call it B0Dup) before resetting. B0Dup is marked as BBJ_CALLFINALLY and
8806     // only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block deletion B0Dup and B1
8807     // will be treated as pair and handled correctly.
8808     if (block->bbJumpKind == BBJ_CALLFINALLY)
8809     {
8810         BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind);
8811         dupBlock->bbFlags    = block->bbFlags;
8812         dupBlock->bbJumpDest = block->bbJumpDest;
8813         dupBlock->copyEHRegion(block);
8814         dupBlock->bbCatchTyp = block->bbCatchTyp;
8815
8816         // Mark this block as
8817         //  a) not referenced by any other block to make sure that it gets deleted
8818         //  b) weight zero
8819         //  c) not to be imported
8820         //  d) as internal
8821         //  e) as rarely run
8822         dupBlock->bbRefs   = 0;
8823         dupBlock->bbWeight = 0;
8824         dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY;
8825
8826         // Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS
8827         // will be next to each other.
8828         fgInsertBBafter(block, dupBlock);
8829
8830 #ifdef DEBUG
8831         if (verbose)
8832         {
8833             printf("New Basic Block BB%02u duplicate of BB%02u created.\n", dupBlock->bbNum, block->bbNum);
8834         }
8835 #endif
8836     }
8837 #endif // FEATURE_EH_FUNCLETS
8838
8839     block->bbJumpKind = BBJ_LEAVE;
8840     fgInitBBLookup();
8841     block->bbJumpDest = fgLookupBB(jmpAddr);
8842
8843     // We will leave the BBJ_ALWAYS block we introduced. When it's reimported,
8844     // the BBJ_ALWAYS block will be unreachable and will be removed afterwards. The
8845     // reason we don't want to remove the block at this point is that if we call
8846     // fgInitBBLookup() again we will get it wrong, as the BBJ_ALWAYS block won't be
8847     // added and the linked list length will differ from fgBBcount.
8848 }
8849
8850 /*****************************************************************************/
8851 // Get the first non-prefix opcode. Used for verification of valid combinations
8852 // of prefixes and actual opcodes.
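// For example (editorial note), given the IL byte sequence for "volatile. unaligned. 1 ldind.i4",
// this returns CEE_LDIND_I4, stepping over the prefix opcodes (and the unaligned. alignment operand).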
8853
8854 static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp)
8855 {
8856     while (codeAddr < codeEndp)
8857     {
8858         OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
8859         codeAddr += sizeof(__int8);
8860
8861         if (opcode == CEE_PREFIX1)
8862         {
8863             if (codeAddr >= codeEndp)
8864             {
8865                 break;
8866             }
8867             opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
8868             codeAddr += sizeof(__int8);
8869         }
8870
8871         switch (opcode)
8872         {
8873             case CEE_UNALIGNED:
8874             case CEE_VOLATILE:
8875             case CEE_TAILCALL:
8876             case CEE_CONSTRAINED:
8877             case CEE_READONLY:
8878                 break;
8879             default:
8880                 return opcode;
8881         }
8882
8883         codeAddr += opcodeSizes[opcode];
8884     }
8885
8886     return CEE_ILLEGAL;
8887 }
8888
8889 /*****************************************************************************/
8890 // Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes
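// For example (editorial note), "volatile. ldsfld" and "unaligned. 1 ldind.i4" pass the check below,
// whereas either prefix on a non-memory-access opcode such as "add" raises BADCODE.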
8891
8892 static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix)
8893 {
8894     OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
8895
8896     if (!(
8897             // The opcodes for all the ldind and stind variants happen to be contiguous, except stind.i.
8898             ((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) ||
8899             (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) ||
8900             (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) ||
8901             // volatile. prefix is allowed with the ldsfld and stsfld
8902             (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD)))))
8903     {
8904         BADCODE("Invalid opcode for unaligned. or volatile. prefix");
8905     }
8906 }
8907
8908 /*****************************************************************************/
8909
8910 #ifdef DEBUG
8911
8912 #undef RETURN // undef contracts RETURN macro
8913
8914 enum controlFlow_t
8915 {
8916     NEXT,
8917     CALL,
8918     RETURN,
8919     THROW,
8920     BRANCH,
8921     COND_BRANCH,
8922     BREAK,
8923     PHI,
8924     META,
8925 };
8926
8927 const static controlFlow_t controlFlow[] = {
8928 #define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow,
8929 #include "opcode.def"
8930 #undef OPDEF
8931 };
8932
8933 #endif // DEBUG
8934
8935 /*****************************************************************************
8936  *  Determine the result type of an arithmetic operation
8937  *  On 64-bit targets, inserts upcasts when native int is mixed with int32
8938  */
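// Editorial quick reference (illustrative), summarizing the cases handled below:
//
//      byref  - byref              => native int (TYP_I_IMPL)
//      [native] int - byref        => native int
//      byref +/- [native] int      => byref
//      int32 mixed with native int => native int (an explicit upcast of the int32 operand is
//                                     inserted on 64-bit targets)
//      float mixed with double     => double
//      otherwise                   => the (actual) type of the operands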
8939 var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTreePtr* pOp1, GenTreePtr* pOp2)
8940 {
8941     var_types  type = TYP_UNDEF;
8942     GenTreePtr op1 = *pOp1, op2 = *pOp2;
8943
8944     // Arithmetic operations are generally only allowed with
8945     // primitive types, but certain operations are allowed
8946     // with byrefs
8947
8948     if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
8949     {
8950         if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF))
8951         {
8952             // byref1-byref2 => gives a native int
8953             type = TYP_I_IMPL;
8954         }
8955         else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF))
8956         {
8957             // [native] int - byref => gives a native int
8958
8959             //
8960             // The reason is that it is possible, in managed C++,
8961             // to have a tree like this:
8962             //
8963             //              -
8964             //             / \
8965             //            /   \
8966             //           /     \
8967             //          /       \
8968             // const(h) int     addr byref
8969             //
8970             // <BUGNUM> VSW 318822 </BUGNUM>
8971             //
8972             // So here we decide to make the resulting type to be a native int.
8973             CLANG_FORMAT_COMMENT_ANCHOR;
8974
8975 #ifdef _TARGET_64BIT_
8976             if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
8977             {
8978                 // insert an explicit upcast
8979                 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
8980             }
8981 #endif // _TARGET_64BIT_
8982
8983             type = TYP_I_IMPL;
8984         }
8985         else
8986         {
8987             // byref - [native] int => gives a byref
8988             assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet()));
8989
8990 #ifdef _TARGET_64BIT_
8991             if ((genActualType(op2->TypeGet()) != TYP_I_IMPL))
8992             {
8993                 // insert an explicit upcast
8994                 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
8995             }
8996 #endif // _TARGET_64BIT_
8997
8998             type = TYP_BYREF;
8999         }
9000     }
9001     else if ((oper == GT_ADD) &&
9002              (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9003     {
9004         // byref + [native] int => gives a byref
9005         // (or)
9006         // [native] int + byref => gives a byref
9007
9008         // only one can be a byref : byref op byref not allowed
9009         assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF);
9010         assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet()));
9011
9012 #ifdef _TARGET_64BIT_
9013         if (genActualType(op2->TypeGet()) == TYP_BYREF)
9014         {
9015             if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9016             {
9017                 // insert an explicit upcast
9018                 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9019             }
9020         }
9021         else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9022         {
9023             // insert an explicit upcast
9024             op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9025         }
9026 #endif // _TARGET_64BIT_
9027
9028         type = TYP_BYREF;
9029     }
9030 #ifdef _TARGET_64BIT_
9031     else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL)
9032     {
9033         assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9034
9035         // int + long => gives long
9036         // long + int => gives long
9037         // we get this because in the IL the long isn't Int64, it's just IntPtr
9038
9039         if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9040         {
9041             // insert an explicit upcast
9042             op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9043         }
9044         else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9045         {
9046             // insert an explicit upcast
9047             op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9048         }
9049
9050         type = TYP_I_IMPL;
9051     }
9052 #else  // 32-bit TARGET
9053     else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG)
9054     {
9055         assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9056
9057         // int + long => gives long
9058         // long + int => gives long
9059
9060         type = TYP_LONG;
9061     }
9062 #endif // _TARGET_64BIT_
9063     else
9064     {
9065         // int + int => gives an int
9066         assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF);
9067
9068         assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
9069                varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
9070
9071         type = genActualType(op1->gtType);
9072
9073 #if FEATURE_X87_DOUBLES
9074
9075         // For x87, since we only have 1 size of registers, prefer double
9076         // For everybody else, be more precise
9077         if (type == TYP_FLOAT)
9078             type = TYP_DOUBLE;
9079
9080 #else // !FEATURE_X87_DOUBLES
9081
9082         // If both operands are TYP_FLOAT, then leave it as TYP_FLOAT.
9083         // Otherwise, turn floats into doubles
9084         if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT))
9085         {
9086             assert(genActualType(op2->gtType) == TYP_DOUBLE);
9087             type = TYP_DOUBLE;
9088         }
9089
9090 #endif // FEATURE_X87_DOUBLES
9091     }
9092
9093 #if FEATURE_X87_DOUBLES
9094     assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_LONG || type == TYP_INT);
9095 #else  // FEATURE_X87_DOUBLES
9096     assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT);
9097 #endif // FEATURE_X87_DOUBLES
9098
9099     return type;
9100 }
9101
9102 /*****************************************************************************
9103  * Casting Helper Function to service both CEE_CASTCLASS and CEE_ISINST
9104  *
9105  * typeRef contains the token, op1 contains the value being cast,
9106  * and op2 contains the code that creates the type handle corresponding to typeRef
9107  * isCastClass = true means CEE_CASTCLASS, false means CEE_ISINST
9108  */
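// Editorial sketch (illustrative): when expanded inline, the resulting tree behaves roughly like
//
//      result = (op1 == null) ? op1
//             : (methodTableOf(op1) != op2) ? (isCastClass ? CORINFO_HELP_CHKCASTCLASS_SPECIAL(op2, op1) : null)
//                                           : op1;
//
// built from two nested QMARK/COLON trees and spilled to a temp (see the diagrams in the body below).
// "methodTableOf" here is shorthand for the GT_IND of the object pointer.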
9109 GenTreePtr Compiler::impCastClassOrIsInstToTree(GenTreePtr              op1,
9110                                                 GenTreePtr              op2,
9111                                                 CORINFO_RESOLVED_TOKEN* pResolvedToken,
9112                                                 bool                    isCastClass)
9113 {
9114     bool expandInline;
9115
9116     assert(op1->TypeGet() == TYP_REF);
9117
9118     CorInfoHelpFunc helper = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass);
9119
9120     if (isCastClass)
9121     {
9122         // We only want to expand inline the normal CHKCASTCLASS helper;
9123         expandInline = (helper == CORINFO_HELP_CHKCASTCLASS);
9124     }
9125     else
9126     {
9127         if (helper == CORINFO_HELP_ISINSTANCEOFCLASS)
9128         {
9129             // Get the class handle and class attributes for the type we are casting to
9130             //
9131             DWORD flags = info.compCompHnd->getClassAttribs(pResolvedToken->hClass);
9132
9133             //
9134             // If the class handle is marked as final we can also expand the IsInst check inline
9135             //
9136             expandInline = ((flags & CORINFO_FLG_FINAL) != 0);
9137
9138             //
9139             // But don't expand inline these two cases
9140             //
9141             if (flags & CORINFO_FLG_MARSHAL_BYREF)
9142             {
9143                 expandInline = false;
9144             }
9145             else if (flags & CORINFO_FLG_CONTEXTFUL)
9146             {
9147                 expandInline = false;
9148             }
9149         }
9150         else
9151         {
9152             //
9153             // We can't expand inline any other helpers
9154             //
9155             expandInline = false;
9156         }
9157     }
9158
9159     if (expandInline)
9160     {
9161         if (compCurBB->isRunRarely())
9162         {
9163             expandInline = false; // not worth the code expansion in a rarely run block
9164         }
9165
9166         if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals())
9167         {
9168             expandInline = false; // not worth creating an untracked local variable
9169         }
9170     }
9171
9172     if (!expandInline)
9173     {
9174         // If we CSE this class handle we prevent assertionProp from making SubType assertions
9175         // so instead we force the CSE logic to not consider CSE-ing this class handle.
9176         //
9177         op2->gtFlags |= GTF_DONT_CSE;
9178
9179         return gtNewHelperCallNode(helper, TYP_REF, 0, gtNewArgList(op2, op1));
9180     }
9181
9182     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2"));
9183
9184     GenTreePtr temp;
9185     GenTreePtr condMT;
9186     //
9187     // expand the methodtable match:
9188     //
9189     //  condMT ==>   GT_NE
9190     //               /    \
9191     //           GT_IND   op2 (typically CNS_INT)
9192     //              |
9193     //           op1Copy
9194     //
9195
9196     // This can replace op1 with a GT_COMMA that evaluates op1 into a local
9197     //
9198     op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1"));
9199     //
9200     // op1 is now known to be a non-complex tree
9201     // thus we can use gtClone(op1) from now on
9202     //
9203
9204     GenTreePtr op2Var = op2;
9205     if (isCastClass)
9206     {
9207         op2Var                                                  = fgInsertCommaFormTemp(&op2);
9208         lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true;
9209     }
9210     temp = gtNewOperNode(GT_IND, TYP_I_IMPL, temp);
9211     temp->gtFlags |= GTF_EXCEPT;
9212     condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2);
9213
9214     GenTreePtr condNull;
9215     //
9216     // expand the null check:
9217     //
9218     //  condNull ==>   GT_EQ
9219     //                 /    \
9220     //             op1Copy CNS_INT
9221     //                      null
9222     //
9223     condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF));
9224
9225     //
9226     // expand the true and false trees for the condMT
9227     //
9228     GenTreePtr condFalse = gtClone(op1);
9229     GenTreePtr condTrue;
9230     if (isCastClass)
9231     {
9232         //
9233         // use the special helper that skips the cases checked by our inlined cast
9234         //
9235         helper = CORINFO_HELP_CHKCASTCLASS_SPECIAL;
9236
9237         condTrue = gtNewHelperCallNode(helper, TYP_REF, 0, gtNewArgList(op2Var, gtClone(op1)));
9238     }
9239     else
9240     {
9241         condTrue = gtNewIconNode(0, TYP_REF);
9242     }
9243
9244 #define USE_QMARK_TREES
9245
9246 #ifdef USE_QMARK_TREES
9247     GenTreePtr qmarkMT;
9248     //
9249     // Generate first QMARK - COLON tree
9250     //
9251     //  qmarkMT ==>   GT_QMARK
9252     //                 /     \
9253     //            condMT   GT_COLON
9254     //                      /     \
9255     //                condFalse  condTrue
9256     //
9257     temp    = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse);
9258     qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp);
9259     condMT->gtFlags |= GTF_RELOP_QMARK;
9260
9261     GenTreePtr qmarkNull;
9262     //
9263     // Generate second QMARK - COLON tree
9264     //
9265     //  qmarkNull ==>  GT_QMARK
9266     //                 /     \
9267     //           condNull  GT_COLON
9268     //                      /     \
9269     //                qmarkMT   op1Copy
9270     //
9271     temp      = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT);
9272     qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp);
9273     qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF;
9274     condNull->gtFlags |= GTF_RELOP_QMARK;
9275
9276     // Make QMark node a top level node by spilling it.
9277     unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2"));
9278     impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE);
9279     return gtNewLclvNode(tmp, TYP_REF);
9280 #endif
9281 }
9282
9283 #ifndef DEBUG
9284 #define assertImp(cond) ((void)0)
9285 #else
9286 #define assertImp(cond)                                                                                                \
9287     do                                                                                                                 \
9288     {                                                                                                                  \
9289         if (!(cond))                                                                                                   \
9290         {                                                                                                              \
9291             const int cchAssertImpBuf = 600;                                                                           \
9292             char*     assertImpBuf    = (char*)alloca(cchAssertImpBuf);                                                \
9293             _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1,                                            \
9294                         "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond,         \
9295                         impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL",                      \
9296                         op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth);                     \
9297             assertAbort(assertImpBuf, __FILE__, __LINE__);                                                             \
9298         }                                                                                                              \
9299     } while (0)
9300 #endif // DEBUG
9301
9302 #ifdef _PREFAST_
9303 #pragma warning(push)
9304 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
9305 #endif
9306 /*****************************************************************************
9307  *  Import the instr for the given basic block
9308  */
9309 void Compiler::impImportBlockCode(BasicBlock* block)
9310 {
9311 #define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind)
9312
9313 #ifdef DEBUG
9314
9315     if (verbose)
9316     {
9317         printf("\nImporting BB%02u (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName);
9318     }
9319 #endif
9320
9321     unsigned  nxtStmtIndex = impInitBlockLineInfo();
9322     IL_OFFSET nxtStmtOffs;
9323
9324     GenTreePtr                   arrayNodeFrom, arrayNodeTo, arrayNodeToIndex;
9325     bool                         expandInline;
9326     CorInfoHelpFunc              helper;
9327     CorInfoIsAccessAllowedResult accessAllowedResult;
9328     CORINFO_HELPER_DESC          calloutHelper;
9329     const BYTE*                  lastLoadToken = nullptr;
9330
9331     // reject cyclic constraints
9332     if (tiVerificationNeeded)
9333     {
9334         Verify(!info.hasCircularClassConstraints, "Method parent has circular class type parameter constraints.");
9335         Verify(!info.hasCircularMethodConstraints, "Method has circular method type parameter constraints.");
9336     }
9337
9338     /* Get the tree list started */
9339
9340     impBeginTreeList();
9341
9342     /* Walk the opcodes that comprise the basic block */
9343
9344     const BYTE* codeAddr = info.compCode + block->bbCodeOffs;
9345     const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd;
9346
9347     IL_OFFSET opcodeOffs    = block->bbCodeOffs;
9348     IL_OFFSET lastSpillOffs = opcodeOffs;
9349
9350     signed jmpDist;
9351
9352     /* remember the start of the delegate creation sequence (used for verification) */
9353     const BYTE* delegateCreateStart = nullptr;
9354
9355     int  prefixFlags = 0;
9356     bool explicitTailCall, constraintCall, readonlyCall;
9357
9358     bool     insertLdloc = false; // set by CEE_DUP and cleared by following store
9359     typeInfo tiRetVal;
9360
9361     unsigned numArgs = info.compArgsCount;
9362
9363     /* Now process all the opcodes in the block */
9364
9365     var_types callTyp    = TYP_COUNT;
9366     OPCODE    prevOpcode = CEE_ILLEGAL;
9367
9368     if (block->bbCatchTyp)
9369     {
9370         if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
9371         {
9372             impCurStmtOffsSet(block->bbCodeOffs);
9373         }
9374
9375         // We will spill the GT_CATCH_ARG and the input of the BB_QMARK block
9376         // to a temp. This is a trade off for code simplicity
9377         impSpillSpecialSideEff();
9378     }
9379
9380     while (codeAddr < codeEndp)
9381     {
9382         bool                   usingReadyToRunHelper = false;
9383         CORINFO_RESOLVED_TOKEN resolvedToken;
9384         CORINFO_RESOLVED_TOKEN constrainedResolvedToken;
9385         CORINFO_CALL_INFO      callInfo;
9386         CORINFO_FIELD_INFO     fieldInfo;
9387
9388         tiRetVal = typeInfo(); // Default type info
9389
9390         //---------------------------------------------------------------------
9391
9392         /* We need to restrict the max tree depth as many of the Compiler
9393            functions are recursive. We do this by spilling the stack */
9394
9395         if (verCurrentState.esStackDepth)
9396         {
9397             /* Has it been a while since we last saw a non-empty stack (which
9398                guarantees that the tree depth isn't accumulating)? */
9399
9400             if ((opcodeOffs - lastSpillOffs) > 200)
9401             {
9402                 impSpillStackEnsure();
9403                 lastSpillOffs = opcodeOffs;
9404             }
9405         }
9406         else
9407         {
9408             lastSpillOffs   = opcodeOffs;
9409             impBoxTempInUse = false; // nothing on the stack, box temp OK to use again
9410         }
9411
9412         /* Compute the current instr offset */
9413
9414         opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
9415
9416 #ifndef DEBUG
9417         if (opts.compDbgInfo)
9418 #endif
9419         {
9420             if (!compIsForInlining())
9421             {
9422                 nxtStmtOffs =
9423                     (nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET;
9424
9425                 /* Have we reached the next stmt boundary ? */
9426
9427                 if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs)
9428                 {
9429                     assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]);
9430
9431                     if (verCurrentState.esStackDepth != 0 && opts.compDbgCode)
9432                     {
9433                         /* We need to provide accurate IP-mapping at this point.
9434                            So spill anything on the stack so that it will form
9435                            gtStmts with the correct stmt offset noted */
9436
9437                         impSpillStackEnsure(true);
9438                     }
9439
9440                     // Has impCurStmtOffs been reported in any tree?
9441
9442                     if (impCurStmtOffs != BAD_IL_OFFSET && opts.compDbgCode)
9443                     {
9444                         GenTreePtr placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
9445                         impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9446
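                        // Appending the placeholder reports the pending statement offset and
                        // clears impCurStmtOffs, which is what the assert below verifies.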
9447                         assert(impCurStmtOffs == BAD_IL_OFFSET);
9448                     }
9449
9450                     if (impCurStmtOffs == BAD_IL_OFFSET)
9451                     {
9452                         /* Make sure that nxtStmtIndex is in sync with opcodeOffs.
9453                            If opcodeOffs has gone past nxtStmtIndex, catch up */
9454
9455                         while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount &&
9456                                info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs)
9457                         {
9458                             nxtStmtIndex++;
9459                         }
9460
9461                         /* Go to the new stmt */
9462
9463                         impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]);
9464
9465                         /* Update the stmt boundary index */
9466
9467                         nxtStmtIndex++;
9468                         assert(nxtStmtIndex <= info.compStmtOffsetsCount);
9469
9470                         /* Are there any more line# entries after this one? */
9471
9472                         if (nxtStmtIndex < info.compStmtOffsetsCount)
9473                         {
9474                             /* Remember where the next line# starts */
9475
9476                             nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex];
9477                         }
9478                         else
9479                         {
9480                             /* No more line# entries */
9481
9482                             nxtStmtOffs = BAD_IL_OFFSET;
9483                         }
9484                     }
9485                 }
9486                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) &&
9487                          (verCurrentState.esStackDepth == 0))
9488                 {
9489                     /* At stack-empty locations, we have already added the tree to
9490                        the stmt list with the last offset. We just need to update
9491                        impCurStmtOffs
9492                      */
9493
9494                     impCurStmtOffsSet(opcodeOffs);
9495                 }
9496                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) &&
9497                          impOpcodeIsCallSiteBoundary(prevOpcode))
9498                 {
9499                     /* Make sure we have a type cached */
9500                     assert(callTyp != TYP_COUNT);
9501
9502                     if (callTyp == TYP_VOID)
9503                     {
9504                         impCurStmtOffsSet(opcodeOffs);
9505                     }
9506                     else if (opts.compDbgCode)
9507                     {
9508                         impSpillStackEnsure(true);
9509                         impCurStmtOffsSet(opcodeOffs);
9510                     }
9511                 }
9512                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP))
9513                 {
9514                     if (opts.compDbgCode)
9515                     {
9516                         impSpillStackEnsure(true);
9517                     }
9518
9519                     impCurStmtOffsSet(opcodeOffs);
9520                 }
9521
9522                 assert(impCurStmtOffs == BAD_IL_OFFSET || nxtStmtOffs == BAD_IL_OFFSET ||
9523                        jitGetILoffs(impCurStmtOffs) <= nxtStmtOffs);
9524             }
9525         }
9526
9527         CORINFO_CLASS_HANDLE clsHnd       = DUMMY_INIT(NULL);
9528         CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL);
9529         CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL);
9530
9531         var_types       lclTyp, ovflType = TYP_UNKNOWN;
9532         GenTreePtr      op1           = DUMMY_INIT(NULL);
9533         GenTreePtr      op2           = DUMMY_INIT(NULL);
9534         GenTreeArgList* args          = nullptr; // What good do these "DUMMY_INIT"s do?
9535         GenTreePtr      newObjThisPtr = DUMMY_INIT(NULL);
9536         bool            uns           = DUMMY_INIT(false);
9537
9538         /* Get the next opcode and the size of its parameters */
9539
9540         OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
9541         codeAddr += sizeof(__int8);
9542
9543 #ifdef DEBUG
9544         impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
9545         JITDUMP("\n    [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs);
9546 #endif
9547
9548     DECODE_OPCODE:
9549
9550         // Return if any previous code has caused inline to fail.
9551         if (compDonotInline())
9552         {
9553             return;
9554         }
9555
9556         /* Get the size of additional parameters */
9557
9558         signed int sz = opcodeSizes[opcode];
9559
9560 #ifdef DEBUG
9561         clsHnd  = NO_CLASS_HANDLE;
9562         lclTyp  = TYP_COUNT;
9563         callTyp = TYP_COUNT;
9564
9565         impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
9566         impCurOpcName = opcodeNames[opcode];
9567
9568         if (verbose && (opcode != CEE_PREFIX1))
9569         {
9570             printf("%s", impCurOpcName);
9571         }
9572
9573         /* Use assertImp() to display the opcode */
9574
9575         op1 = op2 = nullptr;
9576 #endif
9577
9578         /* See what kind of an opcode we have, then */
9579
9580         unsigned mflags   = 0;
9581         unsigned clsFlags = 0;
9582
9583         switch (opcode)
9584         {
9585             unsigned  lclNum;
9586             var_types type;
9587
9588             GenTreePtr op3;
9589             genTreeOps oper;
9590             unsigned   size;
9591
9592             int val;
9593
9594             CORINFO_SIG_INFO     sig;
9595             unsigned             flags;
9596             IL_OFFSET            jmpAddr;
9597             bool                 ovfl, unordered, callNode;
9598             bool                 ldstruct;
9599             CORINFO_CLASS_HANDLE tokenType;
9600
9601             union {
9602                 int     intVal;
9603                 float   fltVal;
9604                 __int64 lngVal;
9605                 double  dblVal;
9606             } cval;
9607
9608             case CEE_PREFIX1:
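                // Two-byte IL opcodes are encoded as a prefix byte followed by a second
                // opcode byte; map that second byte into the 256+ range of the OPCODE
                // enumeration and re-dispatch.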
9609                 opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
9610                 codeAddr += sizeof(__int8);
9611                 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
9612                 goto DECODE_OPCODE;
9613
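            // SPILL_APPEND appends 'op1' to the statement list after spilling the entire
            // evaluation stack (CHECK_SPILL_ALL), while APPEND appends without spilling
            // (CHECK_SPILL_NONE); both then jump to DONE_APPEND, which records the last
            // IL offset in DEBUG builds.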
9614             SPILL_APPEND:
9615
9616                 // We need to call impSpillLclRefs() for a struct type lclVar.
9617                 // This is done for non-block assignments in the handling of stloc.
9618                 if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtOp.gtOp1) &&
9619                     (op1->gtOp.gtOp1->gtOper == GT_LCL_VAR))
9620                 {
9621                     impSpillLclRefs(op1->gtOp.gtOp1->AsLclVarCommon()->gtLclNum);
9622                 }
9623
9624                 /* Append 'op1' to the list of statements */
9625                 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
9626                 goto DONE_APPEND;
9627
9628             APPEND:
9629
9630                 /* Append 'op1' to the list of statements */
9631
9632                 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9633                 goto DONE_APPEND;
9634
9635             DONE_APPEND:
9636
9637 #ifdef DEBUG
9638                 // Remember at which BC offset the tree was finished
9639                 impNoteLastILoffs();
9640 #endif
9641                 break;
9642
9643             case CEE_LDNULL:
9644                 impPushNullObjRefOnStack();
9645                 break;
9646
9647             case CEE_LDC_I4_M1:
9648             case CEE_LDC_I4_0:
9649             case CEE_LDC_I4_1:
9650             case CEE_LDC_I4_2:
9651             case CEE_LDC_I4_3:
9652             case CEE_LDC_I4_4:
9653             case CEE_LDC_I4_5:
9654             case CEE_LDC_I4_6:
9655             case CEE_LDC_I4_7:
9656             case CEE_LDC_I4_8:
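                // The short-form ldc.i4.<n> opcodes are contiguous, so the constant is the
                // distance from CEE_LDC_I4_0; CEE_LDC_I4_M1 sits one slot below it and thus
                // yields -1 (see the assert below).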
9657                 cval.intVal = (opcode - CEE_LDC_I4_0);
9658                 assert(-1 <= cval.intVal && cval.intVal <= 8);
9659                 goto PUSH_I4CON;
9660
9661             case CEE_LDC_I4_S:
9662                 cval.intVal = getI1LittleEndian(codeAddr);
9663                 goto PUSH_I4CON;
9664             case CEE_LDC_I4:
9665                 cval.intVal = getI4LittleEndian(codeAddr);
9666                 goto PUSH_I4CON;
9667             PUSH_I4CON:
9668                 JITDUMP(" %d", cval.intVal);
9669                 impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT));
9670                 break;
9671
9672             case CEE_LDC_I8:
9673                 cval.lngVal = getI8LittleEndian(codeAddr);
9674                 JITDUMP(" 0x%016llx", cval.lngVal);
9675                 impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG));
9676                 break;
9677
9678             case CEE_LDC_R8:
9679                 cval.dblVal = getR8LittleEndian(codeAddr);
9680                 JITDUMP(" %#.17g", cval.dblVal);
9681                 impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE));
9682                 break;
9683
9684             case CEE_LDC_R4:
9685                 cval.dblVal = getR4LittleEndian(codeAddr);
9686                 JITDUMP(" %#.17g", cval.dblVal);
9687                 {
9688                     GenTreePtr cnsOp = gtNewDconNode(cval.dblVal);
9689 #if !FEATURE_X87_DOUBLES
9690                     // The x87 FP stack doesn't differentiate between float and double, so
9691                     // with FEATURE_X87_DOUBLES R4 is treated as R8; others keep TYP_FLOAT.
9692                     cnsOp->gtType = TYP_FLOAT;
9693 #endif // FEATURE_X87_DOUBLES
9694                     impPushOnStack(cnsOp, typeInfo(TI_DOUBLE));
9695                 }
9696                 break;
9697
9698             case CEE_LDSTR:
9699
9700                 if (compIsForInlining())
9701                 {
9702                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_NO_CALLEE_LDSTR)
9703                     {
9704                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_LDSTR_RESTRICTION);
9705                         return;
9706                     }
9707                 }
9708
9709                 val = getU4LittleEndian(codeAddr);
9710                 JITDUMP(" %08X", val);
9711                 if (tiVerificationNeeded)
9712                 {
9713                     Verify(info.compCompHnd->isValidStringRef(info.compScopeHnd, val), "bad string");
9714                     tiRetVal = typeInfo(TI_REF, impGetStringClass());
9715                 }
9716                 impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal);
9717
9718                 break;
9719
9720             case CEE_LDARG:
9721                 lclNum = getU2LittleEndian(codeAddr);
9722                 JITDUMP(" %u", lclNum);
9723                 impLoadArg(lclNum, opcodeOffs + sz + 1);
9724                 break;
9725
9726             case CEE_LDARG_S:
9727                 lclNum = getU1LittleEndian(codeAddr);
9728                 JITDUMP(" %u", lclNum);
9729                 impLoadArg(lclNum, opcodeOffs + sz + 1);
9730                 break;
9731
9732             case CEE_LDARG_0:
9733             case CEE_LDARG_1:
9734             case CEE_LDARG_2:
9735             case CEE_LDARG_3:
9736                 lclNum = (opcode - CEE_LDARG_0);
9737                 assert(lclNum >= 0 && lclNum < 4);
9738                 impLoadArg(lclNum, opcodeOffs + sz + 1);
9739                 break;
9740
9741             case CEE_LDLOC:
9742                 lclNum = getU2LittleEndian(codeAddr);
9743                 JITDUMP(" %u", lclNum);
9744                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9745                 break;
9746
9747             case CEE_LDLOC_S:
9748                 lclNum = getU1LittleEndian(codeAddr);
9749                 JITDUMP(" %u", lclNum);
9750                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9751                 break;
9752
9753             case CEE_LDLOC_0:
9754             case CEE_LDLOC_1:
9755             case CEE_LDLOC_2:
9756             case CEE_LDLOC_3:
9757                 lclNum = (opcode - CEE_LDLOC_0);
9758                 assert(lclNum >= 0 && lclNum < 4);
9759                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9760                 break;
9761
9762             case CEE_STARG:
9763                 lclNum = getU2LittleEndian(codeAddr);
9764                 goto STARG;
9765
9766             case CEE_STARG_S:
9767                 lclNum = getU1LittleEndian(codeAddr);
9768             STARG:
9769                 JITDUMP(" %u", lclNum);
9770
9771                 if (tiVerificationNeeded)
9772                 {
9773                     Verify(lclNum < info.compILargsCount, "bad arg num");
9774                 }
9775
9776                 if (compIsForInlining())
9777                 {
9778                     op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
9779                     noway_assert(op1->gtOper == GT_LCL_VAR);
9780                     lclNum = op1->AsLclVar()->gtLclNum;
9781
9782                     goto VAR_ST_VALID;
9783                 }
9784
9785                 lclNum = compMapILargNum(lclNum); // account for possible hidden param
9786                 assertImp(lclNum < numArgs);
9787
9788                 if (lclNum == info.compThisArg)
9789                 {
9790                     lclNum = lvaArg0Var;
9791                 }
9792                 lvaTable[lclNum].lvArgWrite = 1;
9793
9794                 if (tiVerificationNeeded)
9795                 {
9796                     typeInfo& tiLclVar = lvaTable[lclNum].lvVerTypeInfo;
9797                     Verify(tiCompatibleWith(impStackTop().seTypeInfo, NormaliseForStack(tiLclVar), true),
9798                            "type mismatch");
9799
9800                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
9801                     {
9802                         Verify(!tiLclVar.IsThisPtr(), "storing to uninit this ptr");
9803                     }
9804                 }
9805
9806                 goto VAR_ST;
9807
9808             case CEE_STLOC:
9809                 lclNum = getU2LittleEndian(codeAddr);
9810                 JITDUMP(" %u", lclNum);
9811                 goto LOC_ST;
9812
9813             case CEE_STLOC_S:
9814                 lclNum = getU1LittleEndian(codeAddr);
9815                 JITDUMP(" %u", lclNum);
9816                 goto LOC_ST;
9817
9818             case CEE_STLOC_0:
9819             case CEE_STLOC_1:
9820             case CEE_STLOC_2:
9821             case CEE_STLOC_3:
9822                 lclNum = (opcode - CEE_STLOC_0);
9823                 assert(lclNum >= 0 && lclNum < 4);
9824
9825             LOC_ST:
9826                 if (tiVerificationNeeded)
9827                 {
9828                     Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
9829                     Verify(tiCompatibleWith(impStackTop().seTypeInfo,
9830                                             NormaliseForStack(lvaTable[lclNum + numArgs].lvVerTypeInfo), true),
9831                            "type mismatch");
9832                 }
9833
9834                 if (compIsForInlining())
9835                 {
9836                     lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
9837
9838                     /* Have we allocated a temp for this local? */
9839
9840                     lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp"));
9841
9842                     goto _PopValue;
9843                 }
9844
9845                 lclNum += numArgs;
9846
9847             VAR_ST:
9848
9849                 if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var)
9850                 {
9851                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
9852                     BADCODE("Bad IL");
9853                 }
9854
9855             VAR_ST_VALID:
9856
9857                 /* if it is a struct assignment, make certain we don't overflow the buffer */
9858                 assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd));
9859
9860                 if (lvaTable[lclNum].lvNormalizeOnLoad())
9861                 {
9862                     lclTyp = lvaGetRealType(lclNum);
9863                 }
9864                 else
9865                 {
9866                     lclTyp = lvaGetActualType(lclNum);
9867                 }
9868
9869             _PopValue:
9870                 /* Pop the value being assigned */
9871
9872                 {
9873                     StackEntry se = impPopStack(clsHnd);
9874                     op1           = se.val;
9875                     tiRetVal      = se.seTypeInfo;
9876                 }
9877
9878 #ifdef FEATURE_SIMD
9879                 if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet()))
9880                 {
9881                     assert(op1->TypeGet() == TYP_STRUCT);
9882                     op1->gtType = lclTyp;
9883                 }
9884 #endif // FEATURE_SIMD
9885
9886                 op1 = impImplicitIorI4Cast(op1, lclTyp);
9887
9888 #ifdef _TARGET_64BIT_
9889                 // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
9890                 if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT))
9891                 {
9892                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
9893                     op1 = gtNewCastNode(TYP_INT, op1, TYP_INT);
9894                 }
9895 #endif // _TARGET_64BIT_
9896
9897                 // We had better assign it a value of the correct type
9898                 assertImp(
9899                     genActualType(lclTyp) == genActualType(op1->gtType) ||
9900                     genActualType(lclTyp) == TYP_I_IMPL && op1->IsVarAddr() ||
9901                     (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) ||
9902                     (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) ||
9903                     (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) ||
9904                     ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF));
9905
9906                 /* If op1 is "&var" then its type is the transient "*" and it can
9907                    be used either as TYP_BYREF or TYP_I_IMPL */
9908
9909                 if (op1->IsVarAddr())
9910                 {
9911                     assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF);
9912
9913                     /* When "&var" is created, we assume it is a byref. If it is
9914                        being assigned to a TYP_I_IMPL var, change the type to
9915                        prevent unnecessary GC info */
9916
9917                     if (genActualType(lclTyp) == TYP_I_IMPL)
9918                     {
9919                         op1->gtType = TYP_I_IMPL;
9920                     }
9921                 }
9922
9923                 /* Filter out simple assignments to itself */
9924
9925                 if (op1->gtOper == GT_LCL_VAR && lclNum == op1->gtLclVarCommon.gtLclNum)
9926                 {
9927                     if (insertLdloc)
9928                     {
9929                         // This is a sequence of (ldloc, dup, stloc).  Can simplify
9930                         // to (ldloc, stloc). Call impLoadVar below to reconstruct the ldloc node.
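                        // For example, IL of the form "ldloc.0; dup; stloc.0" just stores a
                        // local back into itself; instead of materializing the dup we drop the
                        // store and simply reload the local.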
9931                         CLANG_FORMAT_COMMENT_ANCHOR;
9932
9933 #ifdef DEBUG
9934                         if (tiVerificationNeeded)
9935                         {
9936                             assert(
9937                                 typeInfo::AreEquivalent(tiRetVal, NormaliseForStack(lvaTable[lclNum].lvVerTypeInfo)));
9938                         }
9939 #endif
9940
9941                         op1         = nullptr;
9942                         insertLdloc = false;
9943
9944                         impLoadVar(lclNum, opcodeOffs + sz + 1);
9945                         break;
9946                     }
9947                     else if (opts.compDbgCode)
9948                     {
9949                         op1 = gtNewNothingNode();
9950                         goto SPILL_APPEND;
9951                     }
9952                     else
9953                     {
9954                         break;
9955                     }
9956                 }
9957
9958                 /* Create the assignment node */
9959
9960                 op2 = gtNewLclvNode(lclNum, lclTyp, opcodeOffs + sz + 1);
9961
9962                 /* If the local is aliased, we need to spill calls and
9963                    indirections from the stack. */
9964
9965                 if ((lvaTable[lclNum].lvAddrExposed || lvaTable[lclNum].lvHasLdAddrOp) &&
9966                     verCurrentState.esStackDepth > 0)
9967                 {
9968                     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased"));
9969                 }
9970
9971                 /* Spill any refs to the local from the stack */
9972
9973                 impSpillLclRefs(lclNum);
9974
9975 #if !FEATURE_X87_DOUBLES
9976                 // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
9977                 // We insert a cast to the dest 'op2' type
9978                 //
9979                 if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
9980                     varTypeIsFloating(op2->gtType))
9981                 {
9982                     op1 = gtNewCastNode(op2->TypeGet(), op1, op2->TypeGet());
9983                 }
9984 #endif // !FEATURE_X87_DOUBLES
9985
9986                 if (varTypeIsStruct(lclTyp))
9987                 {
9988                     op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL);
9989                 }
9990                 else
9991                 {
9992                     // The code generator generates GC tracking information
9993                     // based on the RHS of the assignment.  Later the LHS (which
9994                     // is a BYREF) gets used and the emitter checks that that variable
9995                     // is being tracked.  It is not (since the RHS was an int and did
9996                     // not need tracking).  To keep this assert happy, we change the RHS
9997                     if (lclTyp == TYP_BYREF && !varTypeIsGC(op1->gtType))
9998                     {
9999                         op1->gtType = TYP_BYREF;
10000                     }
10001                     op1 = gtNewAssignNode(op2, op1);
10002                 }
10003
10004                 /* If insertLdloc is true, then we need to insert a ldloc following the
10005                    stloc.  This is done when converting a (dup, stloc) sequence into
10006                    a (stloc, ldloc) sequence. */
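                /* For example, "dup; stloc.1" leaves the stored value on the stack; rather than
                   duplicating the tree we emit the store first and then reload local 1, which
                   produces the same stack state. */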
10007
10008                 if (insertLdloc)
10009                 {
10010                     // From SPILL_APPEND
10011                     impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
10012
10013 #ifdef DEBUG
10014                     // From DONE_APPEND
10015                     impNoteLastILoffs();
10016 #endif
10017                     op1         = nullptr;
10018                     insertLdloc = false;
10019
10020                     impLoadVar(lclNum, opcodeOffs + sz + 1, tiRetVal);
10021                     break;
10022                 }
10023
10024                 goto SPILL_APPEND;
10025
10026             case CEE_LDLOCA:
10027                 lclNum = getU2LittleEndian(codeAddr);
10028                 goto LDLOCA;
10029
10030             case CEE_LDLOCA_S:
10031                 lclNum = getU1LittleEndian(codeAddr);
10032             LDLOCA:
10033                 JITDUMP(" %u", lclNum);
10034                 if (tiVerificationNeeded)
10035                 {
10036                     Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
10037                     Verify(info.compInitMem, "initLocals not set");
10038                 }
10039
10040                 if (compIsForInlining())
10041                 {
10042                     // Get the local type
10043                     lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
10044
10045                     /* Have we allocated a temp for this local? */
10046
10047                     lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp"));
10048
10049                     op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum));
10050
10051                     goto _PUSH_ADRVAR;
10052                 }
10053
10054                 lclNum += numArgs;
10055                 assertImp(lclNum < info.compLocalsCount);
10056                 goto ADRVAR;
10057
10058             case CEE_LDARGA:
10059                 lclNum = getU2LittleEndian(codeAddr);
10060                 goto LDARGA;
10061
10062             case CEE_LDARGA_S:
10063                 lclNum = getU1LittleEndian(codeAddr);
10064             LDARGA:
10065                 JITDUMP(" %u", lclNum);
10066                 Verify(lclNum < info.compILargsCount, "bad arg num");
10067
10068                 if (compIsForInlining())
10069                 {
10070                     // In IL, LDARGA(_S) is used to load the byref managed pointer of a struct argument,
10071                     // followed by a ldfld to load the field.
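                    // e.g. "ldarga.s 0; ldfld <token>" reads a field of a struct argument
                    // without first copying the whole struct onto the evaluation stack.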
10072
10073                     op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
10074                     if (op1->gtOper != GT_LCL_VAR)
10075                     {
10076                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR);
10077                         return;
10078                     }
10079
10080                     assert(op1->gtOper == GT_LCL_VAR);
10081
10082                     goto _PUSH_ADRVAR;
10083                 }
10084
10085                 lclNum = compMapILargNum(lclNum); // account for possible hidden param
10086                 assertImp(lclNum < numArgs);
10087
10088                 if (lclNum == info.compThisArg)
10089                 {
10090                     lclNum = lvaArg0Var;
10091                 }
10092
10093                 goto ADRVAR;
10094
10095             ADRVAR:
10096
10097                 op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum), opcodeOffs + sz + 1);
10098
10099             _PUSH_ADRVAR:
10100                 assert(op1->gtOper == GT_LCL_VAR);
10101
10102                 /* Note that this is supposed to create the transient type "*"
10103                    which may be used as a TYP_I_IMPL. However we catch places
10104                    where it is used as a TYP_I_IMPL and change the node if needed.
10105                    Thus we are pessimistic and may report byrefs in the GC info
10106                    where it was not absolutely needed, but it is safer this way.
10107                  */
10108                 op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10109
10110                 // &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does
10111                 assert((op1->gtFlags & GTF_GLOB_REF) == 0);
10112
10113                 tiRetVal = lvaTable[lclNum].lvVerTypeInfo;
10114                 if (tiVerificationNeeded)
10115                 {
10116                     // Don't allow taking address of uninit this ptr.
10117                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
10118                     {
10119                         Verify(!tiRetVal.IsThisPtr(), "address of uninit this ptr");
10120                     }
10121
10122                     if (!tiRetVal.IsByRef())
10123                     {
10124                         tiRetVal.MakeByRef();
10125                     }
10126                     else
10127                     {
10128                         Verify(false, "byref to byref");
10129                     }
10130                 }
10131
10132                 impPushOnStack(op1, tiRetVal);
10133                 break;
10134
10135             case CEE_ARGLIST:
10136
10137                 if (!info.compIsVarArgs)
10138                 {
10139                     BADCODE("arglist in non-vararg method");
10140                 }
10141
10142                 if (tiVerificationNeeded)
10143                 {
10144                     tiRetVal = typeInfo(TI_STRUCT, impGetRuntimeArgumentHandle());
10145                 }
10146                 assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG);
10147
10148                 /* The ARGLIST cookie is a hidden 'last' parameter; we have already
10149                    adjusted the arg count because this is like fetching the last param */
10150                 assertImp(0 < numArgs);
10151                 assert(lvaTable[lvaVarargsHandleArg].lvAddrExposed);
10152                 lclNum = lvaVarargsHandleArg;
10153                 op1    = gtNewLclvNode(lclNum, TYP_I_IMPL, opcodeOffs + sz + 1);
10154                 op1    = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10155                 impPushOnStack(op1, tiRetVal);
10156                 break;
10157
10158             case CEE_ENDFINALLY:
10159
10160                 if (compIsForInlining())
10161                 {
10162                     assert(!"Shouldn't have exception handlers in the inliner!");
10163                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY);
10164                     return;
10165                 }
10166
10167                 if (verCurrentState.esStackDepth > 0)
10168                 {
10169                     impEvalSideEffects();
10170                 }
10171
10172                 if (info.compXcptnsCount == 0)
10173                 {
10174                     BADCODE("endfinally outside finally");
10175                 }
10176
10177                 assert(verCurrentState.esStackDepth == 0);
10178
10179                 op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr);
10180                 goto APPEND;
10181
10182             case CEE_ENDFILTER:
10183
10184                 if (compIsForInlining())
10185                 {
10186                     assert(!"Shouldn't have exception handlers in the inliner!");
10187                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER);
10188                     return;
10189                 }
10190
10191                 block->bbSetRunRarely(); // filters are rare
10192
10193                 if (info.compXcptnsCount == 0)
10194                 {
10195                     BADCODE("endfilter outside filter");
10196                 }
10197
10198                 if (tiVerificationNeeded)
10199                 {
10200                     Verify(impStackTop().seTypeInfo.IsType(TI_INT), "bad endfilt arg");
10201                 }
10202
10203                 op1 = impPopStack().val;
10204                 assertImp(op1->gtType == TYP_INT);
10205                 if (!bbInFilterILRange(block))
10206                 {
10207                     BADCODE("EndFilter outside a filter handler");
10208                 }
10209
10210                 /* Mark current bb as end of filter */
10211
10212                 assert(compCurBB->bbFlags & BBF_DONT_REMOVE);
10213                 assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET);
10214
10215                 /* Mark catch handler as successor */
10216
10217                 op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1);
10218                 if (verCurrentState.esStackDepth != 0)
10219                 {
10220                     verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__)
10221                                                 DEBUGARG(__LINE__));
10222                 }
10223                 goto APPEND;
10224
10225             case CEE_RET:
10226                 prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it
10227             RET:
10228                 if (!impReturnInstruction(block, prefixFlags, opcode))
10229                 {
10230                     return; // abort
10231                 }
10232                 else
10233                 {
10234                     break;
10235                 }
10236
10237             case CEE_JMP:
10238
10239                 assert(!compIsForInlining());
10240
10241                 if (tiVerificationNeeded)
10242                 {
10243                     Verify(false, "Invalid opcode: CEE_JMP");
10244                 }
10245
10246                 if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex())
10247                 {
10248                     /* CEE_JMP does not make sense in some "protected" regions. */
10249
10250                     BADCODE("Jmp not allowed in protected region");
10251                 }
10252
10253                 if (verCurrentState.esStackDepth != 0)
10254                 {
10255                     BADCODE("Stack must be empty after CEE_JMPs");
10256                 }
10257
10258                 _impResolveToken(CORINFO_TOKENKIND_Method);
10259
10260                 JITDUMP(" %08X", resolvedToken.token);
10261
10262                 /* The signature of the target has to be identical to ours.
10263                    At least check that argCnt and returnType match */
10264
10265                 eeGetMethodSig(resolvedToken.hMethod, &sig);
10266                 if (sig.numArgs != info.compMethodInfo->args.numArgs ||
10267                     sig.retType != info.compMethodInfo->args.retType ||
10268                     sig.callConv != info.compMethodInfo->args.callConv)
10269                 {
10270                     BADCODE("Incompatible target for CEE_JMPs");
10271                 }
10272
10273 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARMARCH_)
10274
10275                 op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod);
10276
10277                 /* Mark the basic block as being a JUMP instead of RETURN */
10278
10279                 block->bbFlags |= BBF_HAS_JMP;
10280
10281                 /* Set this flag to make sure register arguments have a location assigned
10282                  * even if we don't use them inside the method */
10283
10284                 compJmpOpUsed = true;
10285
10286                 fgNoStructPromotion = true;
10287
10288                 goto APPEND;
10289
10290 #else // !_TARGET_XARCH_ && !_TARGET_ARMARCH_
10291
10292                 // Import this just like a series of LDARGs + tail. + call + ret
10293
10294                 if (info.compIsVarArgs)
10295                 {
10296                     // For now we don't implement true tail calls, so this breaks varargs.
10297                     // So warn the user instead of generating bad code.
10298                     // This is a semi-temporary workaround for DevDiv 173860, until we can properly
10299                     // implement true tail calls.
10300                     IMPL_LIMITATION("varargs + CEE_JMP doesn't work yet");
10301                 }
10302
10303                 // First load up the arguments (0 - N)
10304                 for (unsigned argNum = 0; argNum < info.compILargsCount; argNum++)
10305                 {
10306                     impLoadArg(argNum, opcodeOffs + sz + 1);
10307                 }
10308
10309                 // Now generate the tail call
10310                 noway_assert(prefixFlags == 0);
10311                 prefixFlags = PREFIX_TAILCALL_EXPLICIT;
10312                 opcode      = CEE_CALL;
10313
10314                 eeGetCallInfo(&resolvedToken, NULL,
10315                               combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS), &callInfo);
10316
10317                 // All calls and delegates need a security callout.
10318                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
10319
10320                 callTyp = impImportCall(CEE_CALL, &resolvedToken, NULL, NULL, PREFIX_TAILCALL_EXPLICIT, &callInfo,
10321                                         opcodeOffs);
10322
10323                 // And finish with the ret
10324                 goto RET;
10325
10326 #endif // _TARGET_XARCH_ || _TARGET_ARMARCH_
10327
10328             case CEE_LDELEMA:
10329                 assertImp(sz == sizeof(unsigned));
10330
10331                 _impResolveToken(CORINFO_TOKENKIND_Class);
10332
10333                 JITDUMP(" %08X", resolvedToken.token);
10334
10335                 ldelemClsHnd = resolvedToken.hClass;
10336
10337                 if (tiVerificationNeeded)
10338                 {
10339                     typeInfo tiArray = impStackTop(1).seTypeInfo;
10340                     typeInfo tiIndex = impStackTop().seTypeInfo;
10341
10342                     // As per ECMA, the specified 'index' can be either int32 or native int.
10343                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10344
10345                     typeInfo arrayElemType = verMakeTypeInfo(ldelemClsHnd);
10346                     Verify(tiArray.IsNullObjRef() ||
10347                                typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElemType),
10348                            "bad array");
10349
10350                     tiRetVal = arrayElemType;
10351                     tiRetVal.MakeByRef();
10352                     if (prefixFlags & PREFIX_READONLY)
10353                     {
10354                         tiRetVal.SetIsReadonlyByRef();
10355                     }
10356
10357                     // an array interior pointer is always in the heap
10358                     tiRetVal.SetIsPermanentHomeByRef();
10359                 }
10360
10361                 // If it's a value class array we just do a simple address-of
10362                 if (eeIsValueClass(ldelemClsHnd))
10363                 {
10364                     CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd);
10365                     if (cit == CORINFO_TYPE_UNDEF)
10366                     {
10367                         lclTyp = TYP_STRUCT;
10368                     }
10369                     else
10370                     {
10371                         lclTyp = JITtype2varType(cit);
10372                     }
10373                     goto ARR_LD_POST_VERIFY;
10374                 }
10375
10376                 // Similarly, if it's a readonly access, we can do a simple address-of
10377                 // without doing a runtime type-check
10378                 if (prefixFlags & PREFIX_READONLY)
10379                 {
10380                     lclTyp = TYP_REF;
10381                     goto ARR_LD_POST_VERIFY;
10382                 }
10383
10384                 // Otherwise we need the full helper function with run-time type check
10385                 op1 = impTokenToHandle(&resolvedToken);
10386                 if (op1 == nullptr)
10387                 { // compDonotInline()
10388                     return;
10389                 }
10390
10391                 args = gtNewArgList(op1);                      // Type
10392                 args = gtNewListNode(impPopStack().val, args); // index
10393                 args = gtNewListNode(impPopStack().val, args); // array
10394                 op1  = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, GTF_EXCEPT, args);
10395
10396                 impPushOnStack(op1, tiRetVal);
10397                 break;
10398
10399             // ldelem for reference and value types
10400             case CEE_LDELEM:
10401                 assertImp(sz == sizeof(unsigned));
10402
10403                 _impResolveToken(CORINFO_TOKENKIND_Class);
10404
10405                 JITDUMP(" %08X", resolvedToken.token);
10406
10407                 ldelemClsHnd = resolvedToken.hClass;
10408
10409                 if (tiVerificationNeeded)
10410                 {
10411                     typeInfo tiArray = impStackTop(1).seTypeInfo;
10412                     typeInfo tiIndex = impStackTop().seTypeInfo;
10413
10414                     // As per ECMA, the specified 'index' can be either int32 or native int.
10415                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10416                     tiRetVal = verMakeTypeInfo(ldelemClsHnd);
10417
10418                     Verify(tiArray.IsNullObjRef() || tiCompatibleWith(verGetArrayElemType(tiArray), tiRetVal, false),
10419                            "type of array incompatible with type operand");
10420                     tiRetVal.NormaliseForStack();
10421                 }
10422
10423                 // If it's a reference type or generic variable type
10424                 // then just generate code as though it's a ldelem.ref instruction
10425                 if (!eeIsValueClass(ldelemClsHnd))
10426                 {
10427                     lclTyp = TYP_REF;
10428                     opcode = CEE_LDELEM_REF;
10429                 }
10430                 else
10431                 {
10432                     CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd);
10433                     lclTyp             = JITtype2varType(jitTyp);
10434                     tiRetVal           = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct
10435                     tiRetVal.NormaliseForStack();
10436                 }
10437                 goto ARR_LD_POST_VERIFY;
10438
10439             case CEE_LDELEM_I1:
10440                 lclTyp = TYP_BYTE;
10441                 goto ARR_LD;
10442             case CEE_LDELEM_I2:
10443                 lclTyp = TYP_SHORT;
10444                 goto ARR_LD;
10445             case CEE_LDELEM_I:
10446                 lclTyp = TYP_I_IMPL;
10447                 goto ARR_LD;
10448
10449             // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter
10450             // and treating it as TYP_INT avoids other asserts.
10451             case CEE_LDELEM_U4:
10452                 lclTyp = TYP_INT;
10453                 goto ARR_LD;
10454
10455             case CEE_LDELEM_I4:
10456                 lclTyp = TYP_INT;
10457                 goto ARR_LD;
10458             case CEE_LDELEM_I8:
10459                 lclTyp = TYP_LONG;
10460                 goto ARR_LD;
10461             case CEE_LDELEM_REF:
10462                 lclTyp = TYP_REF;
10463                 goto ARR_LD;
10464             case CEE_LDELEM_R4:
10465                 lclTyp = TYP_FLOAT;
10466                 goto ARR_LD;
10467             case CEE_LDELEM_R8:
10468                 lclTyp = TYP_DOUBLE;
10469                 goto ARR_LD;
10470             case CEE_LDELEM_U1:
10471                 lclTyp = TYP_UBYTE;
10472                 goto ARR_LD;
10473             case CEE_LDELEM_U2:
10474                 lclTyp = TYP_CHAR;
10475                 goto ARR_LD;
10476
10477             ARR_LD:
10478
10479                 if (tiVerificationNeeded)
10480                 {
10481                     typeInfo tiArray = impStackTop(1).seTypeInfo;
10482                     typeInfo tiIndex = impStackTop().seTypeInfo;
10483
10484                     // As per ECMA, the specified 'index' can be either int32 or native int.
10485                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10486                     if (tiArray.IsNullObjRef())
10487                     {
10488                         if (lclTyp == TYP_REF)
10489                         { // we will say a deref of a null array yields a null ref
10490                             tiRetVal = typeInfo(TI_NULL);
10491                         }
10492                         else
10493                         {
10494                             tiRetVal = typeInfo(lclTyp);
10495                         }
10496                     }
10497                     else
10498                     {
10499                         tiRetVal             = verGetArrayElemType(tiArray);
10500                         typeInfo arrayElemTi = typeInfo(lclTyp);
10501 #ifdef _TARGET_64BIT_
10502                         if (opcode == CEE_LDELEM_I)
10503                         {
10504                             arrayElemTi = typeInfo::nativeInt();
10505                         }
10506
10507                         if (lclTyp != TYP_REF && lclTyp != TYP_STRUCT)
10508                         {
10509                             Verify(typeInfo::AreEquivalent(tiRetVal, arrayElemTi), "bad array");
10510                         }
10511                         else
10512 #endif // _TARGET_64BIT_
10513                         {
10514                             Verify(tiRetVal.IsType(arrayElemTi.GetType()), "bad array");
10515                         }
10516                     }
10517                     tiRetVal.NormaliseForStack();
10518                 }
10519             ARR_LD_POST_VERIFY:
10520
10521                 /* Pull the index value and array address */
10522                 op2 = impPopStack().val;
10523                 op1 = impPopStack().val;
10524                 assertImp(op1->gtType == TYP_REF);
10525
10526                 /* Check for null pointer - in the inliner case we simply abort */
10527
10528                 if (compIsForInlining())
10529                 {
10530                     if (op1->gtOper == GT_CNS_INT)
10531                     {
10532                         compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM);
10533                         return;
10534                     }
10535                 }
10536
10537                 op1 = impCheckForNullPointer(op1);
10538
10539                 /* Mark the block as containing an index expression */
10540
10541                 if (op1->gtOper == GT_LCL_VAR)
10542                 {
10543                     if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD)
10544                     {
10545                         block->bbFlags |= BBF_HAS_IDX_LEN;
10546                         optMethodFlags |= OMF_HAS_ARRAYREF;
10547                     }
10548                 }
10549
10550                 /* Create the index node and push it on the stack */
10551
10552                 op1 = gtNewIndexRef(lclTyp, op1, op2);
10553
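                // 'ldstruct' is a CEE_LDELEM of a struct-typed element: below we take the
                // address of the indexed element and then wrap it in a GT_OBJ of the element
                // class to produce the struct value.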
10554                 ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT);
10555
10556                 if ((opcode == CEE_LDELEMA) || ldstruct ||
10557                     (ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd)))
10558                 {
10559                     assert(ldelemClsHnd != DUMMY_INIT(NULL));
10560
10561                     // remember the element size
10562                     if (lclTyp == TYP_REF)
10563                     {
10564                         op1->gtIndex.gtIndElemSize = sizeof(void*);
10565                     }
10566                     else
10567                     {
10568                         // If ldElemClass is precisely a primitive type, use that, otherwise, preserve the struct type.
10569                         if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF)
10570                         {
10571                             op1->gtIndex.gtStructElemClass = ldelemClsHnd;
10572                         }
10573                         assert(lclTyp != TYP_STRUCT || op1->gtIndex.gtStructElemClass != nullptr);
10574                         if (lclTyp == TYP_STRUCT)
10575                         {
10576                             size                       = info.compCompHnd->getClassSize(ldelemClsHnd);
10577                             op1->gtIndex.gtIndElemSize = size;
10578                             op1->gtType                = lclTyp;
10579                         }
10580                     }
10581
10582                     if ((opcode == CEE_LDELEMA) || ldstruct)
10583                     {
10584                         // wrap it in a &
10585                         lclTyp = TYP_BYREF;
10586
10587                         op1 = gtNewOperNode(GT_ADDR, lclTyp, op1);
10588                     }
10589                     else
10590                     {
10591                         assert(lclTyp != TYP_STRUCT);
10592                     }
10593                 }
10594
10595                 if (ldstruct)
10596                 {
10597                     // Create an OBJ for the result
10598                     op1 = gtNewObjNode(ldelemClsHnd, op1);
10599                     op1->gtFlags |= GTF_EXCEPT;
10600                 }
10601                 impPushOnStack(op1, tiRetVal);
10602                 break;
10603
10604             // stelem for reference and value types
10605             case CEE_STELEM:
10606
10607                 assertImp(sz == sizeof(unsigned));
10608
10609                 _impResolveToken(CORINFO_TOKENKIND_Class);
10610
10611                 JITDUMP(" %08X", resolvedToken.token);
10612
10613                 stelemClsHnd = resolvedToken.hClass;
10614
10615                 if (tiVerificationNeeded)
10616                 {
10617                     typeInfo tiArray = impStackTop(2).seTypeInfo;
10618                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
10619                     typeInfo tiValue = impStackTop().seTypeInfo;
10620
10621                     // As per ECMA, the specified 'index' can be either int32 or native int.
10622                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10623                     typeInfo arrayElem = verMakeTypeInfo(stelemClsHnd);
10624
10625                     Verify(tiArray.IsNullObjRef() || tiCompatibleWith(arrayElem, verGetArrayElemType(tiArray), false),
10626                            "type operand incompatible with array element type");
10627                     arrayElem.NormaliseForStack();
10628                     Verify(tiCompatibleWith(tiValue, arrayElem, true), "value incompatible with type operand");
10629                 }
10630
10631                 // If it's a reference type just behave as though it's a stelem.ref instruction
10632                 if (!eeIsValueClass(stelemClsHnd))
10633                 {
10634                     goto STELEM_REF_POST_VERIFY;
10635                 }
10636
10637                 // Otherwise extract the type
10638                 {
10639                     CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd);
10640                     lclTyp             = JITtype2varType(jitTyp);
10641                     goto ARR_ST_POST_VERIFY;
10642                 }
10643
10644             case CEE_STELEM_REF:
10645
10646                 if (tiVerificationNeeded)
10647                 {
10648                     typeInfo tiArray = impStackTop(2).seTypeInfo;
10649                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
10650                     typeInfo tiValue = impStackTop().seTypeInfo;
10651
10652                     // As per ECMA, the specified 'index' can be either int32 or native int.
10653                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10654                     Verify(tiValue.IsObjRef(), "bad value");
10655
10656                     // We only check that it is an object reference; the helper does additional checks.
10657                     Verify(tiArray.IsNullObjRef() || verGetArrayElemType(tiArray).IsType(TI_REF), "bad array");
10658                 }
10659
10660                 arrayNodeTo      = impStackTop(2).val;
10661                 arrayNodeToIndex = impStackTop(1).val;
10662                 arrayNodeFrom    = impStackTop().val;
10663
10664                 //
10665                 // Note that it is not legal to optimize away CORINFO_HELP_ARRADDR_ST in a
10666                 // lot of cases because of covariance, i.e. foo[] can be cast to object[].
10667                 //
10668
10669                 // Check for assignment to the same array, i.e. arrLcl[i] = arrLcl[j]
10670                 // This does not need CORINFO_HELP_ARRADDR_ST
10671
10672                 if (arrayNodeFrom->OperGet() == GT_INDEX && arrayNodeFrom->gtOp.gtOp1->gtOper == GT_LCL_VAR &&
10673                     arrayNodeTo->gtOper == GT_LCL_VAR &&
10674                     arrayNodeTo->gtLclVarCommon.gtLclNum == arrayNodeFrom->gtOp.gtOp1->gtLclVarCommon.gtLclNum &&
10675                     !lvaTable[arrayNodeTo->gtLclVarCommon.gtLclNum].lvAddrExposed)
10676                 {
10677                     lclTyp = TYP_REF;
10678                     goto ARR_ST_POST_VERIFY;
10679                 }
10680
10681                 // Check for assignment of NULL. This does not need CORINFO_HELP_ARRADDR_ST
10682
10683                 if (arrayNodeFrom->OperGet() == GT_CNS_INT)
10684                 {
10685                     assert(arrayNodeFrom->gtType == TYP_REF && arrayNodeFrom->gtIntCon.gtIconVal == 0);
10686
10687                     lclTyp = TYP_REF;
10688                     goto ARR_ST_POST_VERIFY;
10689                 }
10690
10691             STELEM_REF_POST_VERIFY:
10692
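                // The helper performs the run-time array-store (covariance) type check that
                // the special cases above were able to prove unnecessary.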
10693                 /* Call a helper function to do the assignment */
10694                 op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, 0, impPopList(3, &flags, nullptr));
10695
10696                 goto SPILL_APPEND;
10697
10698             case CEE_STELEM_I1:
10699                 lclTyp = TYP_BYTE;
10700                 goto ARR_ST;
10701             case CEE_STELEM_I2:
10702                 lclTyp = TYP_SHORT;
10703                 goto ARR_ST;
10704             case CEE_STELEM_I:
10705                 lclTyp = TYP_I_IMPL;
10706                 goto ARR_ST;
10707             case CEE_STELEM_I4:
10708                 lclTyp = TYP_INT;
10709                 goto ARR_ST;
10710             case CEE_STELEM_I8:
10711                 lclTyp = TYP_LONG;
10712                 goto ARR_ST;
10713             case CEE_STELEM_R4:
10714                 lclTyp = TYP_FLOAT;
10715                 goto ARR_ST;
10716             case CEE_STELEM_R8:
10717                 lclTyp = TYP_DOUBLE;
10718                 goto ARR_ST;
10719
10720             ARR_ST:
10721
10722                 if (tiVerificationNeeded)
10723                 {
10724                     typeInfo tiArray = impStackTop(2).seTypeInfo;
10725                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
10726                     typeInfo tiValue = impStackTop().seTypeInfo;
10727
10728                     // As per ECMA, the specified 'index' can be either int32 or native int.
10729                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10730                     typeInfo arrayElem = typeInfo(lclTyp);
10731 #ifdef _TARGET_64BIT_
10732                     if (opcode == CEE_STELEM_I)
10733                     {
10734                         arrayElem = typeInfo::nativeInt();
10735                     }
10736 #endif // _TARGET_64BIT_
10737                     Verify(tiArray.IsNullObjRef() || typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElem),
10738                            "bad array");
10739
10740                     Verify(tiCompatibleWith(NormaliseForStack(tiValue), arrayElem.NormaliseForStack(), true),
10741                            "bad value");
10742                 }
10743
10744             ARR_ST_POST_VERIFY:
10745                 /* The strict order of evaluation is LHS-operands, RHS-operands,
10746                    range-check, and then assignment. However, codegen currently
10747                    does the range-check before evaluating the RHS-operands. So to
10748                    maintain strict ordering, we spill the stack. */
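                /* For example (sketching in C# terms), for "a[i] = Foo()" where Foo() may
                   throw, ECMA ordering requires Foo() to be evaluated before the range
                   check on a[i]; spilling the stack preserves that order. */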
10749
10750                 if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT)
10751                 {
10752                     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
10753                                                    "Strict ordering of exceptions for Array store"));
10754                 }
10755
10756                 /* Pull the new value from the stack */
10757                 op2 = impPopStack().val;
10758
10759                 /* Pull the index value */
10760                 op1 = impPopStack().val;
10761
10762                 /* Pull the array address */
10763                 op3 = impPopStack().val;
10764
10765                 assertImp(op3->gtType == TYP_REF);
10766                 if (op2->IsVarAddr())
10767                 {
10768                     op2->gtType = TYP_I_IMPL;
10769                 }
10770
10771                 op3 = impCheckForNullPointer(op3);
10772
10773                 // Mark the block as containing an index expression
10774
10775                 if (op3->gtOper == GT_LCL_VAR)
10776                 {
10777                     if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD)
10778                     {
10779                         block->bbFlags |= BBF_HAS_IDX_LEN;
10780                         optMethodFlags |= OMF_HAS_ARRAYREF;
10781                     }
10782                 }
10783
10784                 /* Create the index node */
10785
10786                 op1 = gtNewIndexRef(lclTyp, op3, op1);
10787
10788                 /* Create the assignment node and append it */
10789
10790                 if (lclTyp == TYP_STRUCT)
10791                 {
10792                     assert(stelemClsHnd != DUMMY_INIT(NULL));
10793
10794                     op1->gtIndex.gtStructElemClass = stelemClsHnd;
10795                     op1->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(stelemClsHnd);
10796                 }
10797                 if (varTypeIsStruct(op1))
10798                 {
10799                     op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL);
10800                 }
10801                 else
10802                 {
10803                     op2 = impImplicitR4orR8Cast(op2, op1->TypeGet());
10804                     op1 = gtNewAssignNode(op1, op2);
10805                 }
10806
10807                 /* Mark the expression as containing an assignment */
10808
10809                 op1->gtFlags |= GTF_ASG;
10810
10811                 goto SPILL_APPEND;
10812
10813             case CEE_ADD:
10814                 oper = GT_ADD;
10815                 goto MATH_OP2;
10816
10817             case CEE_ADD_OVF:
10818                 uns = false;
10819                 goto ADD_OVF;
10820             case CEE_ADD_OVF_UN:
10821                 uns = true;
10822                 goto ADD_OVF;
10823
10824             ADD_OVF:
10825                 ovfl     = true;
10826                 callNode = false;
10827                 oper     = GT_ADD;
10828                 goto MATH_OP2_FLAGS;
10829
10830             case CEE_SUB:
10831                 oper = GT_SUB;
10832                 goto MATH_OP2;
10833
10834             case CEE_SUB_OVF:
10835                 uns = false;
10836                 goto SUB_OVF;
10837             case CEE_SUB_OVF_UN:
10838                 uns = true;
10839                 goto SUB_OVF;
10840
10841             SUB_OVF:
10842                 ovfl     = true;
10843                 callNode = false;
10844                 oper     = GT_SUB;
10845                 goto MATH_OP2_FLAGS;
10846
10847             case CEE_MUL:
10848                 oper = GT_MUL;
10849                 goto MATH_MAYBE_CALL_NO_OVF;
10850
10851             case CEE_MUL_OVF:
10852                 uns = false;
10853                 goto MUL_OVF;
10854             case CEE_MUL_OVF_UN:
10855                 uns = true;
10856                 goto MUL_OVF;
10857
10858             MUL_OVF:
10859                 ovfl = true;
10860                 oper = GT_MUL;
10861                 goto MATH_MAYBE_CALL_OVF;
10862
10863             // Other binary math operations
10864
10865             case CEE_DIV:
10866                 oper = GT_DIV;
10867                 goto MATH_MAYBE_CALL_NO_OVF;
10868
10869             case CEE_DIV_UN:
10870                 oper = GT_UDIV;
10871                 goto MATH_MAYBE_CALL_NO_OVF;
10872
10873             case CEE_REM:
10874                 oper = GT_MOD;
10875                 goto MATH_MAYBE_CALL_NO_OVF;
10876
10877             case CEE_REM_UN:
10878                 oper = GT_UMOD;
10879                 goto MATH_MAYBE_CALL_NO_OVF;
10880
10881             MATH_MAYBE_CALL_NO_OVF:
10882                 ovfl = false;
10883             MATH_MAYBE_CALL_OVF:
10884                 // Morpher has some complex logic about when to turn differently
10885                 // typed nodes on different platforms into helper calls. We
10886                 // need to either duplicate that logic here, or just
10887                 // pessimistically make all the nodes large enough to become
10888                 // call nodes.  Since call nodes aren't that much larger and
10889                 // these opcodes are infrequent enough I chose the latter.
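                // For example, a 64-bit multiply or division may later be morphed into a
                // JIT helper call on some 32-bit targets; allocating the node at call size
                // up front lets that rewrite happen in place.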
10890                 callNode = true;
10891                 goto MATH_OP2_FLAGS;
10892
10893             case CEE_AND:
10894                 oper = GT_AND;
10895                 goto MATH_OP2;
10896             case CEE_OR:
10897                 oper = GT_OR;
10898                 goto MATH_OP2;
10899             case CEE_XOR:
10900                 oper = GT_XOR;
10901                 goto MATH_OP2;
10902
10903             MATH_OP2: // For default values of 'ovfl' and 'callNode'
10904
10905                 ovfl     = false;
10906                 callNode = false;
10907
10908             MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set
10909
10910                 /* Pull two values and push back the result */
10911
10912                 if (tiVerificationNeeded)
10913                 {
10914                     const typeInfo& tiOp1 = impStackTop(1).seTypeInfo;
10915                     const typeInfo& tiOp2 = impStackTop().seTypeInfo;
10916
10917                     Verify(tiCompatibleWith(tiOp1, tiOp2, true), "different arg type");
10918                     if (oper == GT_ADD || oper == GT_DIV || oper == GT_SUB || oper == GT_MUL || oper == GT_MOD)
10919                     {
10920                         Verify(tiOp1.IsNumberType(), "not number");
10921                     }
10922                     else
10923                     {
10924                         Verify(tiOp1.IsIntegerType(), "not integer");
10925                     }
10926
10927                     Verify(!ovfl || tiOp1.IsIntegerType(), "not integer");
10928
10929                     tiRetVal = tiOp1;
10930
10931 #ifdef _TARGET_64BIT_
10932                     if (tiOp2.IsNativeIntType())
10933                     {
10934                         tiRetVal = tiOp2;
10935                     }
10936 #endif // _TARGET_64BIT_
10937                 }
10938
10939                 op2 = impPopStack().val;
10940                 op1 = impPopStack().val;
10941
10942 #if !CPU_HAS_FP_SUPPORT
10943                 if (varTypeIsFloating(op1->gtType))
10944                 {
10945                     callNode = true;
10946                 }
10947 #endif
10948                 /* Can't do arithmetic with references */
10949                 assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF);
10950
10951                 // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change it if it's a true byref, only
10952                 // if it is in the stack)
10953                 impBashVarAddrsToI(op1, op2);
10954
10955                 type = impGetByRefResultType(oper, uns, &op1, &op2);
10956
10957                 assert(!ovfl || !varTypeIsFloating(op1->gtType));
10958
10959                 /* Special case: "int+0", "int-0", "int*1", "int/1" */
10960
10961                 if (op2->gtOper == GT_CNS_INT)
10962                 {
10963                     if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) ||
10964                         (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV)))
10965
10966                     {
10967                         impPushOnStack(op1, tiRetVal);
10968                         break;
10969                     }
10970                 }
10971
10972 #if !FEATURE_X87_DOUBLES
10973                 // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand
10974                 //
10975                 if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))
10976                 {
10977                     if (op1->TypeGet() != type)
10978                     {
10979                         // We insert a cast of op1 to 'type'
10980                         op1 = gtNewCastNode(type, op1, type);
10981                     }
10982                     if (op2->TypeGet() != type)
10983                     {
10984                         // We insert a cast of op2 to 'type'
10985                         op2 = gtNewCastNode(type, op2, type);
10986                     }
10987                 }
10988 #endif // !FEATURE_X87_DOUBLES
10989
10990 #if SMALL_TREE_NODES
10991                 if (callNode)
10992                 {
10993                     /* These operators can later be transformed into 'GT_CALL' */
10994
10995                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]);
10996 #ifndef _TARGET_ARM_
10997                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]);
10998                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]);
10999                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]);
11000                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]);
11001 #endif
11002                     // It's tempting to use LargeOpOpcode() here, but this logic is *not* saying
11003                     // that we'll need to transform into a general large node, but rather specifically
11004                     // to a call: by doing it this way, things keep working if there are multiple sizes,
11005                     // and a CALL is no longer the largest.
11006                     // That said, as of now it *is* a large node, so we'll do this with an assert rather
11007                     // than an "if".
11008                     assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE);
11009                     op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true));
11010                 }
11011                 else
11012 #endif // SMALL_TREE_NODES
11013                 {
11014                     op1 = gtNewOperNode(oper, type, op1, op2);
11015                 }
11016
11017                 /* Special case: integer/long division may throw an exception */
11018
11019                 if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow())
11020                 {
11021                     op1->gtFlags |= GTF_EXCEPT;
11022                 }
11023
11024                 if (ovfl)
11025                 {
11026                     assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL);
11027                     if (ovflType != TYP_UNKNOWN)
11028                     {
11029                         op1->gtType = ovflType;
11030                     }
11031                     op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW);
11032                     if (uns)
11033                     {
11034                         op1->gtFlags |= GTF_UNSIGNED;
11035                     }
11036                 }
11037
11038                 impPushOnStack(op1, tiRetVal);
11039                 break;
11040
11041             case CEE_SHL:
11042                 oper = GT_LSH;
11043                 goto CEE_SH_OP2;
11044
11045             case CEE_SHR:
11046                 oper = GT_RSH;
11047                 goto CEE_SH_OP2;
11048             case CEE_SHR_UN:
11049                 oper = GT_RSZ;
11050                 goto CEE_SH_OP2;
11051
11052             CEE_SH_OP2:
11053                 if (tiVerificationNeeded)
11054                 {
11055                     const typeInfo& tiVal   = impStackTop(1).seTypeInfo;
11056                     const typeInfo& tiShift = impStackTop(0).seTypeInfo;
11057                     Verify(tiVal.IsIntegerType() && tiShift.IsType(TI_INT), "Bad shift args");
11058                     tiRetVal = tiVal;
11059                 }
11060                 op2 = impPopStack().val;
11061                 op1 = impPopStack().val; // operand to be shifted
11062                 impBashVarAddrsToI(op1, op2);
11063
11064                 type = genActualType(op1->TypeGet());
11065                 op1  = gtNewOperNode(oper, type, op1, op2);
11066
11067                 impPushOnStack(op1, tiRetVal);
11068                 break;
11069
11070             case CEE_NOT:
11071                 if (tiVerificationNeeded)
11072                 {
11073                     tiRetVal = impStackTop().seTypeInfo;
11074                     Verify(tiRetVal.IsIntegerType(), "bad int value");
11075                 }
11076
11077                 op1 = impPopStack().val;
11078                 impBashVarAddrsToI(op1, nullptr);
11079                 type = genActualType(op1->TypeGet());
11080                 impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal);
11081                 break;
11082
11083             case CEE_CKFINITE:
11084                 if (tiVerificationNeeded)
11085                 {
11086                     tiRetVal = impStackTop().seTypeInfo;
11087                     Verify(tiRetVal.IsType(TI_DOUBLE), "bad R value");
11088                 }
11089                 op1  = impPopStack().val;
11090                 type = op1->TypeGet();
11091                 op1  = gtNewOperNode(GT_CKFINITE, type, op1);
11092                 op1->gtFlags |= GTF_EXCEPT;
11093
11094                 impPushOnStack(op1, tiRetVal);
11095                 break;
11096
11097             case CEE_LEAVE:
11098
11099                 val     = getI4LittleEndian(codeAddr); // jump distance
11100                 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val);
11101                 goto LEAVE;
11102
11103             case CEE_LEAVE_S:
11104                 val     = getI1LittleEndian(codeAddr); // jump distance
11105                 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val);
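            // In both forms the target is computed as the IL offset of the instruction
            // following the leave plus the signed jump distance. E.g., a leave.s at IL
            // offset 0x10 with a distance of +5 branches to IL offset 0x17 (0x10 + 2 + 5).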
11106
11107             LEAVE:
11108
11109                 if (compIsForInlining())
11110                 {
11111                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE);
11112                     return;
11113                 }
11114
11115                 JITDUMP(" %04X", jmpAddr);
11116                 if (block->bbJumpKind != BBJ_LEAVE)
11117                 {
11118                     impResetLeaveBlock(block, jmpAddr);
11119                 }
11120
11121                 assert(jmpAddr == block->bbJumpDest->bbCodeOffs);
11122                 impImportLeave(block);
11123                 impNoteBranchOffs();
11124
11125                 break;
11126
11127             case CEE_BR:
11128             case CEE_BR_S:
11129                 jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
11130
11131                 if (compIsForInlining() && jmpDist == 0)
11132                 {
11133                     break; /* NOP */
11134                 }
11135
11136                 impNoteBranchOffs();
11137                 break;
11138
11139             case CEE_BRTRUE:
11140             case CEE_BRTRUE_S:
11141             case CEE_BRFALSE:
11142             case CEE_BRFALSE_S:
11143
11144                 /* Pop the comparand (now there's a neat term) from the stack */
11145                 if (tiVerificationNeeded)
11146                 {
11147                     typeInfo& tiVal = impStackTop().seTypeInfo;
11148                     Verify(tiVal.IsObjRef() || tiVal.IsByRef() || tiVal.IsIntegerType() || tiVal.IsMethod(),
11149                            "bad value");
11150                 }
11151
11152                 op1  = impPopStack().val;
11153                 type = op1->TypeGet();
11154
11155                 // brfalse and brtrue are only allowed on I4, refs, and byrefs.
11156                 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11157                 {
11158                     block->bbJumpKind = BBJ_NONE;
11159
11160                     if (op1->gtFlags & GTF_GLOB_EFFECT)
11161                     {
11162                         op1 = gtUnusedValNode(op1);
11163                         goto SPILL_APPEND;
11164                     }
11165                     else
11166                     {
11167                         break;
11168                     }
11169                 }
11170
11171                 if (op1->OperIsCompare())
11172                 {
11173                     if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S)
11174                     {
11175                         // Flip the sense of the compare
11176
11177                         op1 = gtReverseCond(op1);
11178                     }
11179                 }
11180                 else
11181                 {
11182                     /* We'll compare against an equally-sized integer 0 */
11183                     /* For small types, we always compare against int   */
11184                     op2 = gtNewZeroConNode(genActualType(op1->gtType));
11185
11186                     /* Create the comparison operator and try to fold it */
11187
11188                     oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? GT_NE : GT_EQ;
11189                     op1  = gtNewOperNode(oper, TYP_INT, op1, op2);
11190                 }
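                // E.g., for "brtrue" on a plain int value 'v', this builds the condition
                // "v != 0"; the GT_JTRUE wrapper is added below once the condition has
                // been folded (if possible).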
11191
11192             // fall through
11193
11194             COND_JUMP:
11195
11196                 /* Fold comparison if we can */
11197
11198                 op1 = gtFoldExpr(op1);
11199
11200                 /* Try to fold the really simple cases like 'iconst *, ifne/ifeq' */
11201                 /* Don't make any blocks unreachable in import only mode */
11202
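                /* E.g., an IL sequence like "ldc.i4.1; brtrue <target>" folds to a
                   constant-true condition, so the conditional block becomes BBJ_ALWAYS;
                   a constant-false condition makes it fall through (BBJ_NONE). */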
11203                 if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly())
11204                 {
11205                     /* gtFoldExpr() should prevent this as we don't want to make any blocks
11206                        unreachable under compDbgCode */
11207                     assert(!opts.compDbgCode);
11208
11209                     BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->gtIntCon.gtIconVal ? BBJ_ALWAYS : BBJ_NONE);
11210                     assertImp((block->bbJumpKind == BBJ_COND)            // normal case
11211                               || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the
11212                                                                          // block for the second time
11213
11214                     block->bbJumpKind = foldedJumpKind;
11215 #ifdef DEBUG
11216                     if (verbose)
11217                     {
11218                         if (op1->gtIntCon.gtIconVal)
11219                         {
11220                             printf("\nThe conditional jump becomes an unconditional jump to BB%02u\n",
11221                                    block->bbJumpDest->bbNum);
11222                         }
11223                         else
11224                         {
11225                             printf("\nThe block falls through into the next BB%02u\n", block->bbNext->bbNum);
11226                         }
11227                     }
11228 #endif
11229                     break;
11230                 }
11231
11232                 op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1);
11233
11234                 /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt'
11235                    in impImportBlock(block). For correct line numbers, spill stack. */
11236
11237                 if (opts.compDbgCode && impCurStmtOffs != BAD_IL_OFFSET)
11238                 {
11239                     impSpillStackEnsure(true);
11240                 }
11241
11242                 goto SPILL_APPEND;
11243
11244             case CEE_CEQ:
11245                 oper = GT_EQ;
11246                 uns  = false;
11247                 goto CMP_2_OPs;
11248             case CEE_CGT_UN:
11249                 oper = GT_GT;
11250                 uns  = true;
11251                 goto CMP_2_OPs;
11252             case CEE_CGT:
11253                 oper = GT_GT;
11254                 uns  = false;
11255                 goto CMP_2_OPs;
11256             case CEE_CLT_UN:
11257                 oper = GT_LT;
11258                 uns  = true;
11259                 goto CMP_2_OPs;
11260             case CEE_CLT:
11261                 oper = GT_LT;
11262                 uns  = false;
11263                 goto CMP_2_OPs;
11264
11265             CMP_2_OPs:
11266                 if (tiVerificationNeeded)
11267                 {
11268                     verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
11269                     tiRetVal = typeInfo(TI_INT);
11270                 }
11271
11272                 op2 = impPopStack().val;
11273                 op1 = impPopStack().val;
11274
11275 #ifdef _TARGET_64BIT_
11276                 if (varTypeIsI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_INT))
11277                 {
11278                     op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11279                 }
11280                 else if (varTypeIsI(op2->TypeGet()) && (genActualType(op1->TypeGet()) == TYP_INT))
11281                 {
11282                     op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11283                 }
11284 #endif // _TARGET_64BIT_
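                // E.g., on a 64-bit target, comparing a pointer-sized value against an
                // int32 operand widens the 32-bit side to TYP_I_IMPL first, so both
                // operands of the relop have the same register-level size.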
11285
11286                 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
11287                           varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
11288                           varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
11289
11290                 /* Create the comparison node */
11291
11292                 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
11293
11294                 /* TODO: setting both flags when only one is appropriate */
11295                 if (opcode == CEE_CGT_UN || opcode == CEE_CLT_UN)
11296                 {
11297                     op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED;
11298                 }
11299
11300                 impPushOnStack(op1, tiRetVal);
11301                 break;
11302
11303             case CEE_BEQ_S:
11304             case CEE_BEQ:
11305                 oper = GT_EQ;
11306                 goto CMP_2_OPs_AND_BR;
11307
11308             case CEE_BGE_S:
11309             case CEE_BGE:
11310                 oper = GT_GE;
11311                 goto CMP_2_OPs_AND_BR;
11312
11313             case CEE_BGE_UN_S:
11314             case CEE_BGE_UN:
11315                 oper = GT_GE;
11316                 goto CMP_2_OPs_AND_BR_UN;
11317
11318             case CEE_BGT_S:
11319             case CEE_BGT:
11320                 oper = GT_GT;
11321                 goto CMP_2_OPs_AND_BR;
11322
11323             case CEE_BGT_UN_S:
11324             case CEE_BGT_UN:
11325                 oper = GT_GT;
11326                 goto CMP_2_OPs_AND_BR_UN;
11327
11328             case CEE_BLE_S:
11329             case CEE_BLE:
11330                 oper = GT_LE;
11331                 goto CMP_2_OPs_AND_BR;
11332
11333             case CEE_BLE_UN_S:
11334             case CEE_BLE_UN:
11335                 oper = GT_LE;
11336                 goto CMP_2_OPs_AND_BR_UN;
11337
11338             case CEE_BLT_S:
11339             case CEE_BLT:
11340                 oper = GT_LT;
11341                 goto CMP_2_OPs_AND_BR;
11342
11343             case CEE_BLT_UN_S:
11344             case CEE_BLT_UN:
11345                 oper = GT_LT;
11346                 goto CMP_2_OPs_AND_BR_UN;
11347
11348             case CEE_BNE_UN_S:
11349             case CEE_BNE_UN:
11350                 oper = GT_NE;
11351                 goto CMP_2_OPs_AND_BR_UN;
11352
11353             CMP_2_OPs_AND_BR_UN:
11354                 uns       = true;
11355                 unordered = true;
11356                 goto CMP_2_OPs_AND_BR_ALL;
11357             CMP_2_OPs_AND_BR:
11358                 uns       = false;
11359                 unordered = false;
11360                 goto CMP_2_OPs_AND_BR_ALL;
11361             CMP_2_OPs_AND_BR_ALL:
11362
11363                 if (tiVerificationNeeded)
11364                 {
11365                     verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
11366                 }
11367
11368                 /* Pull two values */
11369                 op2 = impPopStack().val;
11370                 op1 = impPopStack().val;
11371
11372 #ifdef _TARGET_64BIT_
11373                 if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT))
11374                 {
11375                     op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11376                 }
11377                 else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT))
11378                 {
11379                     op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11380                 }
11381 #endif // _TARGET_64BIT_
11382
11383                 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
11384                           varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
11385                           varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
11386
11387                 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11388                 {
11389                     block->bbJumpKind = BBJ_NONE;
11390
11391                     if (op1->gtFlags & GTF_GLOB_EFFECT)
11392                     {
11393                         impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11394                                                        "Branch to next Optimization, op1 side effect"));
11395                         impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
11396                     }
11397                     if (op2->gtFlags & GTF_GLOB_EFFECT)
11398                     {
11399                         impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11400                                                        "Branch to next Optimization, op2 side effect"));
11401                         impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
11402                     }
11403
11404 #ifdef DEBUG
11405                     if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT)
11406                     {
11407                         impNoteLastILoffs();
11408                     }
11409 #endif
11410                     break;
11411                 }
11412 #if !FEATURE_X87_DOUBLES
11413                 // We can generate a compare of differently sized floating point op1 and op2
11414                 // We insert a cast
11415                 //
11416                 if (varTypeIsFloating(op1->TypeGet()))
11417                 {
11418                     if (op1->TypeGet() != op2->TypeGet())
11419                     {
11420                         assert(varTypeIsFloating(op2->TypeGet()));
11421
11422                         // Say op1=double, op2=float. To avoid loss of precision
11423                         // while comparing, op2 is converted to double and the
11424                         // comparison is done in double.
11425                         if (op1->TypeGet() == TYP_DOUBLE)
11426                         {
11427                             // We insert a cast of op2 to TYP_DOUBLE
11428                             op2 = gtNewCastNode(TYP_DOUBLE, op2, TYP_DOUBLE);
11429                         }
11430                         else if (op2->TypeGet() == TYP_DOUBLE)
11431                         {
11432                             // We insert a cast of op1 to TYP_DOUBLE
11433                             op1 = gtNewCastNode(TYP_DOUBLE, op1, TYP_DOUBLE);
11434                         }
11435                     }
11436                 }
11437 #endif // !FEATURE_X87_DOUBLES
11438
11439                 /* Create and append the operator */
11440
11441                 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
11442
11443                 if (uns)
11444                 {
11445                     op1->gtFlags |= GTF_UNSIGNED;
11446                 }
11447
11448                 if (unordered)
11449                 {
11450                     op1->gtFlags |= GTF_RELOP_NAN_UN;
11451                 }
11452
11453                 goto COND_JUMP;
11454
11455             case CEE_SWITCH:
11456                 assert(!compIsForInlining());
11457
11458                 if (tiVerificationNeeded)
11459                 {
11460                     Verify(impStackTop().seTypeInfo.IsType(TI_INT), "Bad switch val");
11461                 }
11462                 /* Pop the switch value off the stack */
11463                 op1 = impPopStack().val;
11464                 assertImp(genActualTypeIsIntOrI(op1->TypeGet()));
11465
11466 #ifdef _TARGET_64BIT_
11467                 // Widen 'op1' on 64-bit targets
11468                 if (op1->TypeGet() != TYP_I_IMPL)
11469                 {
11470                     if (op1->OperGet() == GT_CNS_INT)
11471                     {
11472                         op1->gtType = TYP_I_IMPL;
11473                     }
11474                     else
11475                     {
11476                         op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
11477                     }
11478                 }
11479 #endif // _TARGET_64BIT_
11480                 assert(genActualType(op1->TypeGet()) == TYP_I_IMPL);
11481
11482                 /* We can create a switch node */
11483
11484                 op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1);
11485
11486                 val = (int)getU4LittleEndian(codeAddr);
11487                 codeAddr += 4 + val * 4; // skip over the switch-table
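                // The switch operand encodes a 4-byte case count N followed by N 4-byte
                // branch offsets, so 4 + N*4 operand bytes are skipped here
                // (e.g., a 3-case switch occupies 4 + 3*4 = 16 operand bytes).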
11488
11489                 goto SPILL_APPEND;
11490
11491             /************************** Casting OPCODES ***************************/
11492
11493             case CEE_CONV_OVF_I1:
11494                 lclTyp = TYP_BYTE;
11495                 goto CONV_OVF;
11496             case CEE_CONV_OVF_I2:
11497                 lclTyp = TYP_SHORT;
11498                 goto CONV_OVF;
11499             case CEE_CONV_OVF_I:
11500                 lclTyp = TYP_I_IMPL;
11501                 goto CONV_OVF;
11502             case CEE_CONV_OVF_I4:
11503                 lclTyp = TYP_INT;
11504                 goto CONV_OVF;
11505             case CEE_CONV_OVF_I8:
11506                 lclTyp = TYP_LONG;
11507                 goto CONV_OVF;
11508
11509             case CEE_CONV_OVF_U1:
11510                 lclTyp = TYP_UBYTE;
11511                 goto CONV_OVF;
11512             case CEE_CONV_OVF_U2:
11513                 lclTyp = TYP_CHAR;
11514                 goto CONV_OVF;
11515             case CEE_CONV_OVF_U:
11516                 lclTyp = TYP_U_IMPL;
11517                 goto CONV_OVF;
11518             case CEE_CONV_OVF_U4:
11519                 lclTyp = TYP_UINT;
11520                 goto CONV_OVF;
11521             case CEE_CONV_OVF_U8:
11522                 lclTyp = TYP_ULONG;
11523                 goto CONV_OVF;
11524
11525             case CEE_CONV_OVF_I1_UN:
11526                 lclTyp = TYP_BYTE;
11527                 goto CONV_OVF_UN;
11528             case CEE_CONV_OVF_I2_UN:
11529                 lclTyp = TYP_SHORT;
11530                 goto CONV_OVF_UN;
11531             case CEE_CONV_OVF_I_UN:
11532                 lclTyp = TYP_I_IMPL;
11533                 goto CONV_OVF_UN;
11534             case CEE_CONV_OVF_I4_UN:
11535                 lclTyp = TYP_INT;
11536                 goto CONV_OVF_UN;
11537             case CEE_CONV_OVF_I8_UN:
11538                 lclTyp = TYP_LONG;
11539                 goto CONV_OVF_UN;
11540
11541             case CEE_CONV_OVF_U1_UN:
11542                 lclTyp = TYP_UBYTE;
11543                 goto CONV_OVF_UN;
11544             case CEE_CONV_OVF_U2_UN:
11545                 lclTyp = TYP_CHAR;
11546                 goto CONV_OVF_UN;
11547             case CEE_CONV_OVF_U_UN:
11548                 lclTyp = TYP_U_IMPL;
11549                 goto CONV_OVF_UN;
11550             case CEE_CONV_OVF_U4_UN:
11551                 lclTyp = TYP_UINT;
11552                 goto CONV_OVF_UN;
11553             case CEE_CONV_OVF_U8_UN:
11554                 lclTyp = TYP_ULONG;
11555                 goto CONV_OVF_UN;
11556
11557             CONV_OVF_UN:
11558                 uns = true;
11559                 goto CONV_OVF_COMMON;
11560             CONV_OVF:
11561                 uns = false;
11562                 goto CONV_OVF_COMMON;
11563
11564             CONV_OVF_COMMON:
11565                 ovfl = true;
11566                 goto _CONV;
11567
11568             case CEE_CONV_I1:
11569                 lclTyp = TYP_BYTE;
11570                 goto CONV;
11571             case CEE_CONV_I2:
11572                 lclTyp = TYP_SHORT;
11573                 goto CONV;
11574             case CEE_CONV_I:
11575                 lclTyp = TYP_I_IMPL;
11576                 goto CONV;
11577             case CEE_CONV_I4:
11578                 lclTyp = TYP_INT;
11579                 goto CONV;
11580             case CEE_CONV_I8:
11581                 lclTyp = TYP_LONG;
11582                 goto CONV;
11583
11584             case CEE_CONV_U1:
11585                 lclTyp = TYP_UBYTE;
11586                 goto CONV;
11587             case CEE_CONV_U2:
11588                 lclTyp = TYP_CHAR;
11589                 goto CONV;
11590 #if (REGSIZE_BYTES == 8)
11591             case CEE_CONV_U:
11592                 lclTyp = TYP_U_IMPL;
11593                 goto CONV_UN;
11594 #else
11595             case CEE_CONV_U:
11596                 lclTyp = TYP_U_IMPL;
11597                 goto CONV;
11598 #endif
11599             case CEE_CONV_U4:
11600                 lclTyp = TYP_UINT;
11601                 goto CONV;
11602             case CEE_CONV_U8:
11603                 lclTyp = TYP_ULONG;
11604                 goto CONV_UN;
11605
11606             case CEE_CONV_R4:
11607                 lclTyp = TYP_FLOAT;
11608                 goto CONV;
11609             case CEE_CONV_R8:
11610                 lclTyp = TYP_DOUBLE;
11611                 goto CONV;
11612
11613             case CEE_CONV_R_UN:
11614                 lclTyp = TYP_DOUBLE;
11615                 goto CONV_UN;
11616
11617             CONV_UN:
11618                 uns  = true;
11619                 ovfl = false;
11620                 goto _CONV;
11621
11622             CONV:
11623                 uns  = false;
11624                 ovfl = false;
11625                 goto _CONV;
11626
11627             _CONV:
11628                 // just check that we have a number on the stack
11629                 if (tiVerificationNeeded)
11630                 {
11631                     const typeInfo& tiVal = impStackTop().seTypeInfo;
11632                     Verify(tiVal.IsNumberType(), "bad arg");
11633
11634 #ifdef _TARGET_64BIT_
11635                     bool isNative = false;
11636
11637                     switch (opcode)
11638                     {
11639                         case CEE_CONV_OVF_I:
11640                         case CEE_CONV_OVF_I_UN:
11641                         case CEE_CONV_I:
11642                         case CEE_CONV_OVF_U:
11643                         case CEE_CONV_OVF_U_UN:
11644                         case CEE_CONV_U:
11645                             isNative = true;
11646                         default:
11647                             // leave 'isNative' = false;
11648                             break;
11649                     }
11650                     if (isNative)
11651                     {
11652                         tiRetVal = typeInfo::nativeInt();
11653                     }
11654                     else
11655 #endif // _TARGET_64BIT_
11656                     {
11657                         tiRetVal = typeInfo(lclTyp).NormaliseForStack();
11658                     }
11659                 }
11660
11661                 // Only conversions from FLOAT or DOUBLE to an integer type,
11662                 // and conversions from ULONG (or LONG on ARM) to DOUBLE, are morphed into helper calls.
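                // E.g., "conv.r8" applied to an unsigned 64-bit value, or "conv.i4" applied
                // to a double, typically ends up as a JIT helper call rather than a simple
                // cast node, which is why a call-sized node is requested below.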
11663
11664                 if (varTypeIsFloating(lclTyp))
11665                 {
11666                     callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl
11667 #ifdef _TARGET_64BIT_
11668                                // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK?
11669                                // TYP_BYREF could be used as TYP_I_IMPL which is long.
11670                                // TODO-CQ: remove this when we lower casts long/ulong --> float/double
11671                                // and generate SSE2 code instead of going through helper calls.
11672                                || (impStackTop().val->TypeGet() == TYP_BYREF)
11673 #endif
11674                         ;
11675                 }
11676                 else
11677                 {
11678                     callNode = varTypeIsFloating(impStackTop().val->TypeGet());
11679                 }
11680
11681                 // At this point uns, ovf, callNode all set
11682
11683                 op1 = impPopStack().val;
11684                 impBashVarAddrsToI(op1);
11685
11686                 if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND)
11687                 {
11688                     op2 = op1->gtOp.gtOp2;
11689
11690                     if (op2->gtOper == GT_CNS_INT)
11691                     {
11692                         ssize_t ival = op2->gtIntCon.gtIconVal;
11693                         ssize_t mask, umask;
11694
11695                         switch (lclTyp)
11696                         {
11697                             case TYP_BYTE:
11698                             case TYP_UBYTE:
11699                                 mask  = 0x00FF;
11700                                 umask = 0x007F;
11701                                 break;
11702                             case TYP_CHAR:
11703                             case TYP_SHORT:
11704                                 mask  = 0xFFFF;
11705                                 umask = 0x7FFF;
11706                                 break;
11707
11708                             default:
11709                                 assert(!"unexpected type");
11710                                 return;
11711                         }
11712
11713                         if (((ival & umask) == ival) || ((ival & mask) == ival && uns))
11714                         {
11715                             /* Toss the cast, it's a waste of time */
11716
11717                             impPushOnStack(op1, tiRetVal);
11718                             break;
11719                         }
11720                         else if (ival == mask)
11721                         {
11722                             /* Toss the masking, it's a waste of time, since
11723                                we sign-extend from the small value anyway */
11724
11725                             op1 = op1->gtOp.gtOp1;
11726                         }
11727                     }
11728                 }
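                // E.g., "(x & 0x7F)" followed by conv.i1: the cast is dropped entirely,
                // since the masked value already fits the small type. For "(x & 0xFF)"
                // followed by conv.i1, only the masking is dropped, since the narrowing
                // cast sign-extends from the low byte anyway.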
11729
11730                 /*  The 'op2' sub-operand of a cast is the 'real' type number,
11731                     since the result of a cast to one of the 'small' integer
11732                     types is an integer.
11733                  */
11734
11735                 type = genActualType(lclTyp);
11736
11737 #if SMALL_TREE_NODES
11738                 if (callNode)
11739                 {
11740                     op1 = gtNewCastNodeL(type, op1, lclTyp);
11741                 }
11742                 else
11743 #endif // SMALL_TREE_NODES
11744                 {
11745                     op1 = gtNewCastNode(type, op1, lclTyp);
11746                 }
11747
11748                 if (ovfl)
11749                 {
11750                     op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT);
11751                 }
11752                 if (uns)
11753                 {
11754                     op1->gtFlags |= GTF_UNSIGNED;
11755                 }
11756                 impPushOnStack(op1, tiRetVal);
11757                 break;
11758
11759             case CEE_NEG:
11760                 if (tiVerificationNeeded)
11761                 {
11762                     tiRetVal = impStackTop().seTypeInfo;
11763                     Verify(tiRetVal.IsNumberType(), "Bad arg");
11764                 }
11765
11766                 op1 = impPopStack().val;
11767                 impBashVarAddrsToI(op1, nullptr);
11768                 impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal);
11769                 break;
11770
11771             case CEE_POP:
11772                 if (tiVerificationNeeded)
11773                 {
11774                     impStackTop(0);
11775                 }
11776
11777                 /* Pull the top value from the stack */
11778
11779                 op1 = impPopStack(clsHnd).val;
11780
11781                 /* Get hold of the type of the value being duplicated */
11782
11783                 lclTyp = genActualType(op1->gtType);
11784
11785                 /* Does the value have any side effects? */
11786
11787                 if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode)
11788                 {
11789                     // Since we are throwing away the value, just normalize
11790                     // it to its address.  This is more efficient.
11791
11792                     if (varTypeIsStruct(op1))
11793                     {
11794 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
11795                         // Non-calls, such as obj or ret_expr, have to go through this.
11796                         // Calls with a large struct return value have to go through this.
11797                         // Helper calls with a small struct return value also have to go
11798                         // through this since they do not follow the Unix calling convention.
11799                         if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd) ||
11800                             op1->AsCall()->gtCallType == CT_HELPER)
11801 #endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
11802                         {
11803                             op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false);
11804                         }
11805                     }
11806
11807                     // If op1 is a non-overflow cast, throw it away since it is useless.
11808                     // Another reason for throwing away the useless cast is in the context of
11809                     // implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)).
11810                     // The cast gets added as part of importing GT_CALL, which gets in the way
11811                     // of fgMorphCall() on the forms of tail call nodes that we assert.
11812                     if ((op1->gtOper == GT_CAST) && !op1->gtOverflow())
11813                     {
11814                         op1 = op1->gtOp.gtOp1;
11815                     }
11816
11817                     // If 'op1' is an expression, create an assignment node.
11818                     // This helps analyses (like CSE) work correctly.
11819
11820                     if (op1->gtOper != GT_CALL)
11821                     {
11822                         op1 = gtUnusedValNode(op1);
11823                     }
11824
11825                     /* Append the value to the tree list */
11826                     goto SPILL_APPEND;
11827                 }
11828
11829                 /* No side effects - just throw the <BEEP> thing away */
11830                 break;
11831
11832             case CEE_DUP:
11833
11834                 if (tiVerificationNeeded)
11835                 {
11836                     // Dup could start the beginning of a delegate creation sequence; remember that
11837                     delegateCreateStart = codeAddr - 1;
11838                     impStackTop(0);
11839                 }
11840
11841                 // Convert a (dup, stloc) sequence into a (stloc, ldloc) sequence in the following cases:
11842                 // - If this is non-debug code - so that CSE will recognize the two as equal.
11843                 //   This helps eliminate a redundant bounds check in cases such as:
11844                 //       ariba[i+3] += some_value;
11845                 // - If the top of the stack is a non-leaf that may be expensive to clone.
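                // E.g., IL "dup; stloc.0" is rewritten as "stloc.0; ldloc.0": the store sees
                // the original value and the reload produces the duplicate, which lets CSE
                // treat the two uses as the same expression.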
11846
11847                 if (codeAddr < codeEndp)
11848                 {
11849                     OPCODE nextOpcode = (OPCODE)getU1LittleEndian(codeAddr);
11850                     if (impIsAnySTLOC(nextOpcode))
11851                     {
11852                         if (!opts.compDbgCode)
11853                         {
11854                             insertLdloc = true;
11855                             break;
11856                         }
11857                         GenTree* stackTop = impStackTop().val;
11858                         if (!stackTop->IsIntegralConst(0) && !stackTop->IsFPZero() && !stackTop->IsLocal())
11859                         {
11860                             insertLdloc = true;
11861                             break;
11862                         }
11863                     }
11864                 }
11865
11866                 /* Pull the top value from the stack */
11867                 op1 = impPopStack(tiRetVal);
11868
11869                 /* Clone the value */
11870                 op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL,
11871                                    nullptr DEBUGARG("DUP instruction"));
11872
11873                 /* Either the tree started with no global effects, or impCloneExpr
11874                    evaluated the tree to a temp and returned two copies of that
11875                    temp. Either way, neither op1 nor op2 should have side effects.
11876                 */
11877                 assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT));
11878
11879                 /* Push the tree/temp back on the stack */
11880                 impPushOnStack(op1, tiRetVal);
11881
11882                 /* Push the copy on the stack */
11883                 impPushOnStack(op2, tiRetVal);
11884
11885                 break;
11886
11887             case CEE_STIND_I1:
11888                 lclTyp = TYP_BYTE;
11889                 goto STIND;
11890             case CEE_STIND_I2:
11891                 lclTyp = TYP_SHORT;
11892                 goto STIND;
11893             case CEE_STIND_I4:
11894                 lclTyp = TYP_INT;
11895                 goto STIND;
11896             case CEE_STIND_I8:
11897                 lclTyp = TYP_LONG;
11898                 goto STIND;
11899             case CEE_STIND_I:
11900                 lclTyp = TYP_I_IMPL;
11901                 goto STIND;
11902             case CEE_STIND_REF:
11903                 lclTyp = TYP_REF;
11904                 goto STIND;
11905             case CEE_STIND_R4:
11906                 lclTyp = TYP_FLOAT;
11907                 goto STIND;
11908             case CEE_STIND_R8:
11909                 lclTyp = TYP_DOUBLE;
11910                 goto STIND;
11911             STIND:
11912
11913                 if (tiVerificationNeeded)
11914                 {
11915                     typeInfo instrType(lclTyp);
11916 #ifdef _TARGET_64BIT_
11917                     if (opcode == CEE_STIND_I)
11918                     {
11919                         instrType = typeInfo::nativeInt();
11920                     }
11921 #endif // _TARGET_64BIT_
11922                     verVerifySTIND(impStackTop(1).seTypeInfo, impStackTop(0).seTypeInfo, instrType);
11923                 }
11924                 else
11925                 {
11926                     compUnsafeCastUsed = true; // Have to go conservative
11927                 }
11928
11929             STIND_POST_VERIFY:
11930
11931                 op2 = impPopStack().val; // value to store
11932                 op1 = impPopStack().val; // address to store to
11933
11934                 // you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF
11935                 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
11936
11937                 impBashVarAddrsToI(op1, op2);
11938
11939                 op2 = impImplicitR4orR8Cast(op2, lclTyp);
11940
11941 #ifdef _TARGET_64BIT_
11942                 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
11943                 if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
11944                 {
11945                     op2->gtType = TYP_I_IMPL;
11946                 }
11947                 else
11948                 {
11949                     // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
11950                     //
11951                     if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
11952                     {
11953                         assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
11954                         op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
11955                     }
11956                     // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
11957                     //
11958                     if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
11959                     {
11960                         assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
11961                         op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
11962                     }
11963                 }
11964 #endif // _TARGET_64BIT_
11965
11966                 if (opcode == CEE_STIND_REF)
11967                 {
11968                     // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF
11969                     assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType));
11970                     lclTyp = genActualType(op2->TypeGet());
11971                 }
11972
11973 // Check target type.
11974 #ifdef DEBUG
11975                 if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF)
11976                 {
11977                     if (op2->gtType == TYP_BYREF)
11978                     {
11979                         assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL);
11980                     }
11981                     else if (lclTyp == TYP_BYREF)
11982                     {
11983                         assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType));
11984                     }
11985                 }
11986                 else
11987                 {
11988                     assertImp(genActualType(op2->gtType) == genActualType(lclTyp) ||
11989                               ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) ||
11990                               (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp)));
11991                 }
11992 #endif
11993
11994                 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
11995
11996                 // stind could point anywhere, e.g., a boxed class static int
11997                 op1->gtFlags |= GTF_IND_TGTANYWHERE;
11998
11999                 if (prefixFlags & PREFIX_VOLATILE)
12000                 {
12001                     assert(op1->OperGet() == GT_IND);
12002                     op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
12003                     op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12004                     op1->gtFlags |= GTF_IND_VOLATILE;
12005                 }
12006
12007                 if (prefixFlags & PREFIX_UNALIGNED)
12008                 {
12009                     assert(op1->OperGet() == GT_IND);
12010                     op1->gtFlags |= GTF_IND_UNALIGNED;
12011                 }
12012
12013                 op1 = gtNewAssignNode(op1, op2);
12014                 op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
12015
12016                 // Spill side-effects AND global-data-accesses
12017                 if (verCurrentState.esStackDepth > 0)
12018                 {
12019                     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND"));
12020                 }
12021
12022                 goto APPEND;
12023
12024             case CEE_LDIND_I1:
12025                 lclTyp = TYP_BYTE;
12026                 goto LDIND;
12027             case CEE_LDIND_I2:
12028                 lclTyp = TYP_SHORT;
12029                 goto LDIND;
12030             case CEE_LDIND_U4:
12031             case CEE_LDIND_I4:
12032                 lclTyp = TYP_INT;
12033                 goto LDIND;
12034             case CEE_LDIND_I8:
12035                 lclTyp = TYP_LONG;
12036                 goto LDIND;
12037             case CEE_LDIND_REF:
12038                 lclTyp = TYP_REF;
12039                 goto LDIND;
12040             case CEE_LDIND_I:
12041                 lclTyp = TYP_I_IMPL;
12042                 goto LDIND;
12043             case CEE_LDIND_R4:
12044                 lclTyp = TYP_FLOAT;
12045                 goto LDIND;
12046             case CEE_LDIND_R8:
12047                 lclTyp = TYP_DOUBLE;
12048                 goto LDIND;
12049             case CEE_LDIND_U1:
12050                 lclTyp = TYP_UBYTE;
12051                 goto LDIND;
12052             case CEE_LDIND_U2:
12053                 lclTyp = TYP_CHAR;
12054                 goto LDIND;
12055             LDIND:
12056
12057                 if (tiVerificationNeeded)
12058                 {
12059                     typeInfo lclTiType(lclTyp);
12060 #ifdef _TARGET_64BIT_
12061                     if (opcode == CEE_LDIND_I)
12062                     {
12063                         lclTiType = typeInfo::nativeInt();
12064                     }
12065 #endif // _TARGET_64BIT_
12066                     tiRetVal = verVerifyLDIND(impStackTop().seTypeInfo, lclTiType);
12067                     tiRetVal.NormaliseForStack();
12068                 }
12069                 else
12070                 {
12071                     compUnsafeCastUsed = true; // Have to go conservative
12072                 }
12073
12074             LDIND_POST_VERIFY:
12075
12076                 op1 = impPopStack().val; // address to load from
12077                 impBashVarAddrsToI(op1);
12078
12079 #ifdef _TARGET_64BIT_
12080                 // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12081                 //
12082                 if (genActualType(op1->gtType) == TYP_INT)
12083                 {
12084                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12085                     op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
12086                 }
12087 #endif
12088
12089                 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12090
12091                 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12092
12093                 // ldind could point anywhere, e.g., a boxed class static int
12094                 op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
12095
12096                 if (prefixFlags & PREFIX_VOLATILE)
12097                 {
12098                     assert(op1->OperGet() == GT_IND);
12099                     op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
12100                     op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12101                     op1->gtFlags |= GTF_IND_VOLATILE;
12102                 }
12103
12104                 if (prefixFlags & PREFIX_UNALIGNED)
12105                 {
12106                     assert(op1->OperGet() == GT_IND);
12107                     op1->gtFlags |= GTF_IND_UNALIGNED;
12108                 }
12109
12110                 impPushOnStack(op1, tiRetVal);
12111
12112                 break;
12113
12114             case CEE_UNALIGNED:
12115
12116                 assert(sz == 1);
12117                 val = getU1LittleEndian(codeAddr);
12118                 ++codeAddr;
12119                 JITDUMP(" %u", val);
12120                 if ((val != 1) && (val != 2) && (val != 4))
12121                 {
12122                     BADCODE("Alignment unaligned. must be 1, 2, or 4");
12123                 }
12124
12125                 Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. prefixes");
12126                 prefixFlags |= PREFIX_UNALIGNED;
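                // For illustration, in IL such as "unaligned. 1  ldind.i4" the prefix records
                // that the following memory access may be only 1-byte aligned; the flag is
                // consumed when the subsequent memory-access opcode is imported, and that
                // opcode is validated just below.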
12127
12128                 impValidateMemoryAccessOpcode(codeAddr, codeEndp, false);
12129
12130             PREFIX:
12131                 opcode = (OPCODE)getU1LittleEndian(codeAddr);
12132                 codeAddr += sizeof(__int8);
12133                 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
12134                 goto DECODE_OPCODE;
12135
12136             case CEE_VOLATILE:
12137
12138                 Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes");
12139                 prefixFlags |= PREFIX_VOLATILE;
12140
12141                 impValidateMemoryAccessOpcode(codeAddr, codeEndp, true);
12142
12143                 assert(sz == 0);
12144                 goto PREFIX;
12145
12146             case CEE_LDFTN:
12147             {
12148                 // Need to do a lookup here so that we perform an access check
12149                 // and do a NOWAY if protections are violated
12150                 _impResolveToken(CORINFO_TOKENKIND_Method);
12151
12152                 JITDUMP(" %08X", resolvedToken.token);
12153
12154                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12155                               addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN)),
12156                               &callInfo);
12157
12158                 // This check really only applies to intrinsic Array.Address methods
12159                 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12160                 {
12161                     NO_WAY("Currently do not support LDFTN of Parameterized functions");
12162                 }
12163
12164                 // Do this before DO_LDFTN since CEE_LDVIRTFTN does it on its own.
12165                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12166
12167                 if (tiVerificationNeeded)
12168                 {
12169                     // LDFTN could be the beginning of a delegate creation sequence; remember that
12170                     delegateCreateStart = codeAddr - 2;
12171
12172                     // check any constraints on the callee's class and type parameters
12173                     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12174                                    "method has unsatisfied class constraints");
12175                     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12176                                                                                 resolvedToken.hMethod),
12177                                    "method has unsatisfied method constraints");
12178
12179                     mflags = callInfo.verMethodFlags;
12180                     Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDFTN on a constructor");
12181                 }
12182
12183             DO_LDFTN:
12184                 op1 = impMethodPointer(&resolvedToken, &callInfo);
12185                 if (compDonotInline())
12186                 {
12187                     return;
12188                 }
12189
12190                 impPushOnStack(op1, typeInfo(resolvedToken.hMethod));
12191
12192                 break;
12193             }
12194
12195             case CEE_LDVIRTFTN:
12196             {
12197                 /* Get the method token */
12198
12199                 _impResolveToken(CORINFO_TOKENKIND_Method);
12200
12201                 JITDUMP(" %08X", resolvedToken.token);
12202
12203                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */,
12204                               addVerifyFlag(combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN),
12205                                                     CORINFO_CALLINFO_CALLVIRT)),
12206                               &callInfo);
12207
12208                 // This check really only applies to intrinsic Array.Address methods
12209                 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12210                 {
12211                     NO_WAY("Currently do not support LDFTN of Parameterized functions");
12212                 }
12213
12214                 mflags = callInfo.methodFlags;
12215
12216                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12217
12218                 if (compIsForInlining())
12219                 {
12220                     if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12221                     {
12222                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL);
12223                         return;
12224                     }
12225                 }
12226
12227                 CORINFO_SIG_INFO& ftnSig = callInfo.sig;
12228
12229                 if (tiVerificationNeeded)
12230                 {
12231
12232                     Verify(ftnSig.hasThis(), "ldvirtftn on a static method");
12233                     Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDVIRTFTN on a constructor");
12234
12235                     // JIT32 verifier rejects verifiable ldvirtftn pattern
12236                     typeInfo declType =
12237                         verMakeTypeInfo(resolvedToken.hClass, true); // Change TI_STRUCT to TI_REF when necessary
12238
12239                     typeInfo arg = impStackTop().seTypeInfo;
12240                     Verify((arg.IsType(TI_REF) || arg.IsType(TI_NULL)) && tiCompatibleWith(arg, declType, true),
12241                            "bad ldvirtftn");
12242
12243                     CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
12244                     if (!(arg.IsType(TI_NULL) || (mflags & CORINFO_FLG_STATIC)))
12245                     {
12246                         instanceClassHnd = arg.GetClassHandleForObjRef();
12247                     }
12248
12249                     // check any constraints on the method's class and type parameters
12250                     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12251                                    "method has unsatisfied class constraints");
12252                     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12253                                                                                 resolvedToken.hMethod),
12254                                    "method has unsatisfied method constraints");
12255
12256                     if (mflags & CORINFO_FLG_PROTECTED)
12257                     {
12258                         Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
12259                                "Accessing protected method through wrong type.");
12260                     }
12261                 }
12262
12263                 /* Get the object-ref */
12264                 op1 = impPopStack().val;
12265                 assertImp(op1->gtType == TYP_REF);
12266
12267                 if (opts.IsReadyToRun())
12268                 {
12269                     if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN)
12270                     {
12271                         if (op1->gtFlags & GTF_SIDE_EFFECT)
12272                         {
12273                             op1 = gtUnusedValNode(op1);
12274                             impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12275                         }
12276                         goto DO_LDFTN;
12277                     }
12278                 }
12279                 else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12280                 {
12281                     if (op1->gtFlags & GTF_SIDE_EFFECT)
12282                     {
12283                         op1 = gtUnusedValNode(op1);
12284                         impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12285                     }
12286                     goto DO_LDFTN;
12287                 }
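                // When the resolved target cannot actually be overridden (final, static, or
                // simply non-virtual), ldvirtftn degenerates into ldftn: the object reference
                // is evaluated only for its side effects above and the method pointer is
                // produced by the shared DO_LDFTN path.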
12288
12289                 GenTreePtr fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo);
12290                 if (compDonotInline())
12291                 {
12292                     return;
12293                 }
12294
12295                 impPushOnStack(fptr, typeInfo(resolvedToken.hMethod));
12296
12297                 break;
12298             }
12299
12300             case CEE_CONSTRAINED:
12301
12302                 assertImp(sz == sizeof(unsigned));
12303                 impResolveToken(codeAddr, &constrainedResolvedToken, CORINFO_TOKENKIND_Constrained);
12304                 codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually
12305                 JITDUMP(" (%08X) ", constrainedResolvedToken.token);
12306
12307                 Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes");
12308                 prefixFlags |= PREFIX_CONSTRAINED;
12309
12310                 {
12311                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12312                     if (actualOpcode != CEE_CALLVIRT)
12313                     {
12314                         BADCODE("constrained. has to be followed by callvirt");
12315                     }
12316                 }
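                // A typical (illustrative) use is IL of the form:
                //     constrained. !!T
                //     callvirt   instance string [mscorlib]System.Object::ToString()
                // which lets the same sequence dispatch correctly whether !!T is
                // instantiated with a value type or a reference type.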
12317
12318                 goto PREFIX;
12319
12320             case CEE_READONLY:
12321                 JITDUMP(" readonly.");
12322
12323                 Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes");
12324                 prefixFlags |= PREFIX_READONLY;
12325
12326                 {
12327                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12328                     if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode))
12329                     {
12330                         BADCODE("readonly. has to be followed by ldelema or call");
12331                     }
12332                 }
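                // A typical (illustrative) use is "readonly. ldelema" emitted for code that
                // only reads through the element address of a T[] (e.g. calling a method on
                // arr[i] where T is a type parameter); the prefix allows the array type check
                // that ldelema would otherwise need to be skipped.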
12333
12334                 assert(sz == 0);
12335                 goto PREFIX;
12336
12337             case CEE_TAILCALL:
12338                 JITDUMP(" tail.");
12339
12340                 Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes");
12341                 prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
12342
12343                 {
12344                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12345                     if (!impOpcodeIsCallOpcode(actualOpcode))
12346                     {
12347                         BADCODE("tailcall. has to be followed by call, callvirt or calli");
12348                     }
12349                 }
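                // A typical (illustrative) shape is "tail. call <target>" immediately
                // followed by "ret"; the prefix only marks the call here, and the actual
                // tail-call transformation is performed when the call itself is imported.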
12350                 assert(sz == 0);
12351                 goto PREFIX;
12352
12353             case CEE_NEWOBJ:
12354
12355                 /* Since we will implicitly insert newObjThisPtr at the start of the
12356                    argument list, spill any GTF_ORDER_SIDEEFF */
12357                 impSpillSpecialSideEff();
12358
12359                 /* NEWOBJ does not respond to TAIL */
12360                 prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT;
12361
12362                 /* NEWOBJ does not respond to CONSTRAINED */
12363                 prefixFlags &= ~PREFIX_CONSTRAINED;
12364
12365 #if COR_JIT_EE_VERSION > 460
12366                 _impResolveToken(CORINFO_TOKENKIND_NewObj);
12367 #else
12368                 _impResolveToken(CORINFO_TOKENKIND_Method);
12369 #endif
12370
12371                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12372                               addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM)),
12373                               &callInfo);
12374
12375                 if (compIsForInlining())
12376                 {
12377                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
12378                     {
12379                         // Check to see if this call violates the boundary.
12380                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
12381                         return;
12382                     }
12383                 }
12384
12385                 mflags = callInfo.methodFlags;
12386
12387                 if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0)
12388                 {
12389                     BADCODE("newobj on static or abstract method");
12390                 }
12391
12392                 // Insert the security callout before any actual code is generated
12393                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12394
12395                 // There are three different cases for new:
12396                 //      1) Object is an array (arrays are treated specially by the EE)
12397                 //      2) Object is some other variable-sized object (e.g. String)
12398                 //      3) Class size can be determined beforehand (normal case)
12399                 // In the first two cases the object size is variable (it depends on the arguments).
12400                 // In the first case, we need to call a NEWOBJ helper (multinewarray).
12401                 // In the second case we call the constructor with a '0' this pointer.
12402                 // In the third case we allocate the memory, then call the constructor.
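                // For illustration (exact IL will vary):
                //      1) newobj instance void int32[0...,0...]::.ctor(int32, int32)    // multi-dim array
                //      2) newobj instance void [mscorlib]System.String::.ctor(char, int32)
                //      3) newobj instance void SomeClass::.ctor()                       // SomeClass is hypothetical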
12403
12404                 clsFlags = callInfo.classFlags;
12405                 if (clsFlags & CORINFO_FLG_ARRAY)
12406                 {
12407                     if (tiVerificationNeeded)
12408                     {
12409                         CORINFO_CLASS_HANDLE elemTypeHnd;
12410                         INDEBUG(CorInfoType corType =)
12411                         info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
12412                         assert(!(elemTypeHnd == nullptr && corType == CORINFO_TYPE_VALUECLASS));
12413                         Verify(elemTypeHnd == nullptr ||
12414                                    !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
12415                                "newarr of byref-like objects");
12416                         verVerifyCall(opcode, &resolvedToken, nullptr, ((prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0),
12417                                       ((prefixFlags & PREFIX_READONLY) != 0), delegateCreateStart, codeAddr - 1,
12418                                       &callInfo DEBUGARG(info.compFullName));
12419                     }
12420                     // Arrays need to call the NEWOBJ helper.
12421                     assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE);
12422
12423                     impImportNewObjArray(&resolvedToken, &callInfo);
12424                     if (compDonotInline())
12425                     {
12426                         return;
12427                     }
12428
12429                     callTyp = TYP_REF;
12430                     break;
12431                 }
12432                 // At present this can only be String
12433                 else if (clsFlags & CORINFO_FLG_VAROBJSIZE)
12434                 {
12435                     if (IsTargetAbi(CORINFO_CORERT_ABI))
12436                     {
12437                         // The dummy argument does not exist in CoreRT
12438                         newObjThisPtr = nullptr;
12439                     }
12440                     else
12441                     {
12442                         // This is the case for variable-sized objects that are not
12443                         // arrays.  In this case, call the constructor with a null 'this'
12444                         // pointer
12445                         newObjThisPtr = gtNewIconNode(0, TYP_REF);
12446                     }
12447
12448                     /* Remember that this basic block contains 'new' of an object */
12449                     block->bbFlags |= BBF_HAS_NEWOBJ;
12450                     optMethodFlags |= OMF_HAS_NEWOBJ;
12451                 }
12452                 else
12453                 {
12454                     // This is the normal case where the size of the object is
12455                     // fixed.  Allocate the memory and call the constructor.
12456
12457                     // Note: We cannot add a peep to avoid use of a temp here
12458                     // because we don't have enough interference info to detect when
12459                     // the sources and destination interfere, for example: s = new S(ref);
12460
12461                     // TODO: Find the correct place to introduce a general
12462                     // reverse copy prop for struct return values from newobj or
12463                     // any function returning structs.
12464
12465                     /* get a temporary for the new object */
12466                     lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp"));
12467
12468                     // In the value class case we only need clsHnd for size calcs.
12469                     //
12470                     // The lookup of the code pointer will be handled by CALL in this case
12471                     if (clsFlags & CORINFO_FLG_VALUECLASS)
12472                     {
12473                         if (compIsForInlining())
12474                         {
12475                             // If value class has GC fields, inform the inliner. It may choose to
12476                             // bail out on the inline.
12477                             DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
12478                             if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
12479                             {
12480                                 compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
12481                                 if (compInlineResult->IsFailure())
12482                                 {
12483                                     return;
12484                                 }
12485
12486                                 // Do further notification in the case where the call site is rare;
12487                                 // some policies do not track the relative hotness of call sites for
12488                                 // "always" inline cases.
12489                                 if (impInlineInfo->iciBlock->isRunRarely())
12490                                 {
12491                                     compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
12492                                     if (compInlineResult->IsFailure())
12493                                     {
12494                                         return;
12495                                     }
12496                                 }
12497                             }
12498                         }
12499
12500                         CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
12501                         unsigned    size   = info.compCompHnd->getClassSize(resolvedToken.hClass);
12502
12503                         if (impIsPrimitive(jitTyp))
12504                         {
12505                             lvaTable[lclNum].lvType = JITtype2varType(jitTyp);
12506                         }
12507                         else
12508                         {
12509                             // The local variable itself is the allocated space.
12510                             // Here we need the unsafe value cls check, since the address of the struct is taken for
12511                             // further use and is potentially exploitable.
12512                             lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */);
12513                         }
12514
12515                         // Append a tree to zero-out the temp
12516                         newObjThisPtr = gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet());
12517
12518                         newObjThisPtr = gtNewBlkOpNode(newObjThisPtr,    // Dest
12519                                                        gtNewIconNode(0), // Value
12520                                                        size,             // Size
12521                                                        false,            // isVolatile
12522                                                        false);           // not copyBlock
12523                         impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
12524
12525                         // Obtain the address of the temp
12526                         newObjThisPtr =
12527                             gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet()));
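                        // Net effect (roughly): a struct temp is zero-initialized and its
                        // address becomes the 'this' pointer for the constructor call that is
                        // imported below at the CALL label; no GC heap allocation happens for
                        // a value-class newobj here.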
12528                     }
12529                     else
12530                     {
12531 #ifdef FEATURE_READYTORUN_COMPILER
12532                         if (opts.IsReadyToRun())
12533                         {
12534                             op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
12535                             usingReadyToRunHelper = (op1 != nullptr);
12536                         }
12537
12538                         if (!usingReadyToRunHelper)
12539 #endif
12540                         {
12541                             op1 = impParentClassTokenToHandle(&resolvedToken, nullptr, TRUE);
12542                             if (op1 == nullptr)
12543                             { // compDonotInline()
12544                                 return;
12545                             }
12546
12547                             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
12548                             // and the newfast call with a single call to a dynamic R2R cell that will:
12549                             //      1) Load the context
12550                             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate
12551                             //      stub
12552                             //      3) Allocate and return the new object
12553                             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
12554
12555                             op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd),
12556                                                     resolvedToken.hClass, TYP_REF, op1);
12557                         }
12558
12559                         // Remember that this basic block contains 'new' of an object
12560                         block->bbFlags |= BBF_HAS_NEWOBJ;
12561                         optMethodFlags |= OMF_HAS_NEWOBJ;
12562
12563                         // Append the assignment to the temp/local. Don't need to spill
12564                         // at all as we are just calling an EE-Jit helper which can only
12565                         // cause an (async) OutOfMemoryException.
12566
12567                         // We assign the newly allocated object (by a GT_ALLOCOBJ node)
12568                         // to a temp. Note that the pattern "temp = allocObj" is required
12569                         // by ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes
12570                         // without exhaustive walk over all expressions.
12571
12572                         impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE);
12573
12574                         newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF);
12575                     }
12576                 }
12577                 goto CALL;
12578
12579             case CEE_CALLI:
12580
12581                 /* CALLI does not respond to CONSTRAINED */
12582                 prefixFlags &= ~PREFIX_CONSTRAINED;
12583
12584                 if (compIsForInlining())
12585                 {
12586                     // CALLI doesn't have a method handle, so assume the worst.
12587                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
12588                     {
12589                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_CALLI);
12590                         return;
12591                     }
12592                 }
12593
12594             // fall through
12595
12596             case CEE_CALLVIRT:
12597             case CEE_CALL:
12598
12599                 // We can't call getCallInfo on the token from a CALLI, but we need it in
12600                 // many other places.  We unfortunately embed that knowledge here.
12601                 if (opcode != CEE_CALLI)
12602                 {
12603                     _impResolveToken(CORINFO_TOKENKIND_Method);
12604
12605                     eeGetCallInfo(&resolvedToken,
12606                                   (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr,
12607                                   // this is how impImportCall invokes getCallInfo
12608                                   addVerifyFlag(
12609                                       combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS),
12610                                               (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT
12611                                                                        : CORINFO_CALLINFO_NONE)),
12612                                   &callInfo);
12613                 }
12614                 else
12615                 {
12616                     // Suppress uninitialized use warning.
12617                     memset(&resolvedToken, 0, sizeof(resolvedToken));
12618                     memset(&callInfo, 0, sizeof(callInfo));
12619
12620                     resolvedToken.token = getU4LittleEndian(codeAddr);
12621                 }
12622
12623             CALL: // memberRef should be set.
12624                 // newObjThisPtr should be set for CEE_NEWOBJ
12625
12626                 JITDUMP(" %08X", resolvedToken.token);
12627                 constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0;
12628
12629                 bool newBBcreatedForTailcallStress;
12630
12631                 newBBcreatedForTailcallStress = false;
12632
12633                 if (compIsForInlining())
12634                 {
12635                     if (compDonotInline())
12636                     {
12637                         return;
12638                     }
12639                     // We rule out inlinees with explicit tail calls in fgMakeBasicBlocks.
12640                     assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0);
12641                 }
12642                 else
12643                 {
12644                     if (compTailCallStress())
12645                     {
12646                         // Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()?
12647                         // Tail call stress only recognizes call+ret patterns and forces them to be
12648                         // explicit tail prefixed calls.  Also fgMakeBasicBlocks() under tail call stress
12649                         // doesn't import the 'ret' opcode following the call into the basic block containing
12650                         // the call; instead it imports it into a new basic block.  Note that fgMakeBasicBlocks()
12651                         // is already checking that there is an opcode following the call and hence it is
12652                         // safe here to read the next opcode without a bounds check.
12653                         newBBcreatedForTailcallStress =
12654                             impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL, (not a CEE_NEWOBJ). So, don't
12655                                                              // make it jump to RET.
12656                             (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET
12657
12658                         if (newBBcreatedForTailcallStress &&
12659                             !(prefixFlags & PREFIX_TAILCALL_EXPLICIT) && // User hasn't set "tail." prefix yet.
12660                             verCheckTailCallConstraint(opcode, &resolvedToken,
12661                                                        constraintCall ? &constrainedResolvedToken : nullptr,
12662                                                        true) // Is it legal to do a tailcall?
12663                             )
12664                         {
12665                             // Stress the tailcall.
12666                             JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)");
12667                             prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
12668                         }
12669                     }
12670
12671                     // Note that when running under tail call stress, a call will be marked as explicit tail prefixed
12672                     // hence will not be considered for implicit tail calling.
12673                     bool isRecursive = (callInfo.hMethod == info.compMethodHnd);
12674                     if (impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive))
12675                     {
12676                         JITDUMP(" (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
12677                         prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
12678                     }
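                    // An implicit tail call candidate is, roughly, a call in tail position
                    // (e.g. "return Helper(x);" in source, i.e. a call immediately followed by
                    // ret in IL) that carries no explicit tail. prefix; the flag lets later
                    // phases turn it into a fast tail call when the ABI and signatures allow.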
12679                 }
12680
12681                 // Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call).
12682                 explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
12683                 readonlyCall     = (prefixFlags & PREFIX_READONLY) != 0;
12684
12685                 if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ)
12686                 {
12687                     // All calls and delegates need a security callout.
12688                     // For delegates, this is the call to the delegate constructor, not the access check on the
12689                     // LD(virt)FTN.
12690                     impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12691
12692 #if 0 // DevDiv 410397 - This breaks too many obfuscated apps to do this in an in-place release
12693      
12694                 // DevDiv 291703 - we need to check for accessibility between the caller of InitializeArray
12695                 // and the field it is reading, thus it is now unverifiable to not immediately precede with
12696                 // ldtoken <field token>, and we now check accessibility
12697                 if ((callInfo.methodFlags & CORINFO_FLG_INTRINSIC) &&
12698                     (info.compCompHnd->getIntrinsicID(callInfo.hMethod) == CORINFO_INTRINSIC_InitializeArray))
12699                 {
12700                     if (prevOpcode != CEE_LDTOKEN)
12701                     {
12702                         Verify(prevOpcode == CEE_LDTOKEN, "Need ldtoken for InitializeArray");
12703                     }
12704                     else
12705                     {
12706                         assert(lastLoadToken != NULL);
12707                         // Now that we know we have a token, verify that it is accessible for loading
12708                         CORINFO_RESOLVED_TOKEN resolvedLoadField;
12709                         impResolveToken(lastLoadToken, &resolvedLoadField, CORINFO_TOKENKIND_Field);
12710                         eeGetFieldInfo(&resolvedLoadField, CORINFO_ACCESS_INIT_ARRAY, &fieldInfo);
12711                         impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
12712                     }
12713                 }
12714
12715 #endif // DevDiv 410397
12716                 }
12717
12718                 if (tiVerificationNeeded)
12719                 {
12720                     verVerifyCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
12721                                   explicitTailCall, readonlyCall, delegateCreateStart, codeAddr - 1,
12722                                   &callInfo DEBUGARG(info.compFullName));
12723                 }
12724
12725                 // Insert delegate callout here.
12726                 if (opcode == CEE_NEWOBJ && (mflags & CORINFO_FLG_CONSTRUCTOR) && (clsFlags & CORINFO_FLG_DELEGATE))
12727                 {
12728 #ifdef DEBUG
12729                     // We should do this only if verification is enabled
12730                     // If verification is disabled, delegateCreateStart will not be initialized correctly
12731                     if (tiVerificationNeeded)
12732                     {
12733                         mdMemberRef delegateMethodRef = mdMemberRefNil;
12734                         // We should get here only for well formed delegate creation.
12735                         assert(verCheckDelegateCreation(delegateCreateStart, codeAddr - 1, delegateMethodRef));
12736                     }
12737 #endif
12738
12739 #ifdef FEATURE_CORECLR
12740                     // In coreclr the delegate transparency rule needs to be enforced even if verification is disabled
12741                     typeInfo              tiActualFtn          = impStackTop(0).seTypeInfo;
12742                     CORINFO_METHOD_HANDLE delegateMethodHandle = tiActualFtn.GetMethod2();
12743
12744                     impInsertCalloutForDelegate(info.compMethodHnd, delegateMethodHandle, resolvedToken.hClass);
12745 #endif // FEATURE_CORECLR
12746                 }
12747
12748                 callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
12749                                         newObjThisPtr, prefixFlags, &callInfo, opcodeOffs);
12750                 if (compDonotInline())
12751                 {
12752                     return;
12753                 }
12754
12755                 if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we
12756                                                                        // have created a new BB after the "call"
12757                 // instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless.
12758                 {
12759                     assert(!compIsForInlining());
12760                     goto RET;
12761                 }
12762
12763                 break;
12764
12765             case CEE_LDFLD:
12766             case CEE_LDSFLD:
12767             case CEE_LDFLDA:
12768             case CEE_LDSFLDA:
12769             {
12770
12771                 BOOL isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA);
12772                 BOOL isLoadStatic  = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA);
12773
12774                 /* Get the CP_Fieldref index */
12775                 assertImp(sz == sizeof(unsigned));
12776
12777                 _impResolveToken(CORINFO_TOKENKIND_Field);
12778
12779                 JITDUMP(" %08X", resolvedToken.token);
12780
12781                 int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET;
12782
12783                 GenTreePtr           obj     = nullptr;
12784                 typeInfo*            tiObj   = nullptr;
12785                 CORINFO_CLASS_HANDLE objType = nullptr; // used for fields
12786
12787                 if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA)
12788                 {
12789                     tiObj = &impStackTop().seTypeInfo;
12790                     obj   = impPopStack(objType).val;
12791
12792                     if (impIsThis(obj))
12793                     {
12794                         aflags |= CORINFO_ACCESS_THIS;
12795
12796                         // An optimization for Contextful classes:
12797                         // we unwrap the proxy when we have a 'this reference'
12798
12799                         if (info.compUnwrapContextful)
12800                         {
12801                             aflags |= CORINFO_ACCESS_UNWRAP;
12802                         }
12803                     }
12804                 }
12805
12806                 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
12807
12808                 // Figure out the type of the member.  We always call canAccessField, so you always need this
12809                 // handle
12810                 CorInfoType ciType = fieldInfo.fieldType;
12811                 clsHnd             = fieldInfo.structType;
12812
12813                 lclTyp = JITtype2varType(ciType);
12814
12815 #ifdef _TARGET_AMD64_
12816                 noway_assert(varTypeIsIntegralOrI(lclTyp) || varTypeIsFloating(lclTyp) || lclTyp == TYP_STRUCT);
12817 #endif // _TARGET_AMD64_
12818
12819                 if (compIsForInlining())
12820                 {
12821                     switch (fieldInfo.fieldAccessor)
12822                     {
12823                         case CORINFO_FIELD_INSTANCE_HELPER:
12824                         case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
12825                         case CORINFO_FIELD_STATIC_ADDR_HELPER:
12826                         case CORINFO_FIELD_STATIC_TLS:
12827
12828                             compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER);
12829                             return;
12830
12831                         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
12832 #if COR_JIT_EE_VERSION > 460
12833                         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
12834 #endif
12835                             /* We may be able to inline the field accessors in specific instantiations of generic
12836                              * methods */
12837                             compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER);
12838                             return;
12839
12840                         default:
12841                             break;
12842                     }
12843
12844                     if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT &&
12845                         clsHnd)
12846                     {
12847                         if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) &&
12848                             !(info.compFlags & CORINFO_FLG_FORCEINLINE))
12849                         {
12850                             // Loading a static valuetype field usually will cause a JitHelper to be called
12851                             // for the static base. This will bloat the code.
12852                             compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS);
12853
12854                             if (compInlineResult->IsFailure())
12855                             {
12856                                 return;
12857                             }
12858                         }
12859                     }
12860                 }
12861
12862                 tiRetVal = verMakeTypeInfo(ciType, clsHnd);
12863                 if (isLoadAddress)
12864                 {
12865                     tiRetVal.MakeByRef();
12866                 }
12867                 else
12868                 {
12869                     tiRetVal.NormaliseForStack();
12870                 }
12871
12872                 // Perform this check always to ensure that we get field access exceptions even with
12873                 // SkipVerification.
12874                 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
12875
12876                 if (tiVerificationNeeded)
12877                 {
12878                     // You can also pass the unboxed struct to LDFLD
12879                     BOOL bAllowPlainValueTypeAsThis = FALSE;
12880                     if (opcode == CEE_LDFLD && impIsValueType(tiObj))
12881                     {
12882                         bAllowPlainValueTypeAsThis = TRUE;
12883                     }
12884
12885                     verVerifyField(&resolvedToken, fieldInfo, tiObj, isLoadAddress, bAllowPlainValueTypeAsThis);
12886
12887                     // If we're doing this on a heap object or from a 'safe' byref
12888                     // then the result is a safe byref too
12889                     if (isLoadAddress) // load address
12890                     {
12891                         if (fieldInfo.fieldFlags &
12892                             CORINFO_FLG_FIELD_STATIC) // statics marked as safe will have permanent home
12893                         {
12894                             if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_SAFESTATIC_BYREF_RETURN)
12895                             {
12896                                 tiRetVal.SetIsPermanentHomeByRef();
12897                             }
12898                         }
12899                         else if (tiObj->IsObjRef() || tiObj->IsPermanentHomeByRef())
12900                         {
12901                             // ldflda of a byref is safe if done on a GC object or on a
12902                             // safe byref
12903                             tiRetVal.SetIsPermanentHomeByRef();
12904                         }
12905                     }
12906                 }
12907                 else
12908                 {
12909                     // tiVerificationNeeded is false.
12910                     // Raise InvalidProgramException if static load accesses non-static field
12911                     if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
12912                     {
12913                         BADCODE("static access on an instance field");
12914                     }
12915                 }
12916
12917                 // We are using ldfld/a on a static field. We allow it, but need to get side-effect from obj.
12918                 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
12919                 {
12920                     if (obj->gtFlags & GTF_SIDE_EFFECT)
12921                     {
12922                         obj = gtUnusedValNode(obj);
12923                         impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12924                     }
12925                     obj = nullptr;
12926                 }
12927
12928                 /* Preserve 'small' int types */
12929                 if (lclTyp > TYP_INT)
12930                 {
12931                     lclTyp = genActualType(lclTyp);
12932                 }
12933
12934                 bool usesHelper = false;
12935
12936                 switch (fieldInfo.fieldAccessor)
12937                 {
12938                     case CORINFO_FIELD_INSTANCE:
12939 #ifdef FEATURE_READYTORUN_COMPILER
12940                     case CORINFO_FIELD_INSTANCE_WITH_BASE:
12941 #endif
12942                     {
12943                         bool nullcheckNeeded = false;
12944
12945                         obj = impCheckForNullPointer(obj);
12946
12947                         if (isLoadAddress && (obj->gtType == TYP_BYREF) && fgAddrCouldBeNull(obj))
12948                         {
12949                             nullcheckNeeded = true;
12950                         }
12951
12952                         // If the object is a struct, what we really want is
12953                         // for the field to operate on the address of the struct.
12954                         if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj))
12955                         {
12956                             assert(opcode == CEE_LDFLD && objType != nullptr);
12957
12958                             obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true);
12959                         }
12960
12961                         /* Create the data member node */
12962                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset, nullcheckNeeded);
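                        // The GT_FIELD node created here is only a placeholder; fgMorphField
                        // later expands it into an address computation plus indirection (or a
                        // helper call), so the flags set below conservatively describe the
                        // eventual access.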
12963
12964 #ifdef FEATURE_READYTORUN_COMPILER
12965                         if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
12966                         {
12967                             op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
12968                         }
12969 #endif
12970
12971                         op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
12972
12973                         if (fgAddrCouldBeNull(obj))
12974                         {
12975                             op1->gtFlags |= GTF_EXCEPT;
12976                         }
12977
12978                         // If gtFldObj is a BYREF then our target is a value class and
12979                         // it could point anywhere, for example a boxed class static int
12980                         if (obj->gtType == TYP_BYREF)
12981                         {
12982                             op1->gtFlags |= GTF_IND_TGTANYWHERE;
12983                         }
12984
12985                         DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
12986                         if (StructHasOverlappingFields(typeFlags))
12987                         {
12988                             op1->gtField.gtFldMayOverlap = true;
12989                         }
12990
12991                         // wrap it in an address-of operator if necessary
12992                         if (isLoadAddress)
12993                         {
12994                             op1 = gtNewOperNode(GT_ADDR,
12995                                                 (var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1);
12996                         }
12997                         else
12998                         {
12999                             if (compIsForInlining() &&
13000                                 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, obj,
13001                                                                                    impInlineInfo->inlArgInfo))
13002                             {
13003                                 impInlineInfo->thisDereferencedFirst = true;
13004                             }
13005                         }
13006                     }
13007                     break;
13008
13009                     case CORINFO_FIELD_STATIC_TLS:
13010 #ifdef _TARGET_X86_
13011                         // Legacy TLS access is implemented as intrinsic on x86 only
13012
13013                         /* Create the data member node */
13014                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
13015                         op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13016
13017                         if (isLoadAddress)
13018                         {
13019                             op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1);
13020                         }
13021                         break;
13022 #else
13023                         fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13024
13025                         __fallthrough;
13026 #endif
13027
13028                     case CORINFO_FIELD_STATIC_ADDR_HELPER:
13029                     case CORINFO_FIELD_INSTANCE_HELPER:
13030                     case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13031                         op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13032                                                clsHnd, nullptr);
13033                         usesHelper = true;
13034                         break;
13035
13036                     case CORINFO_FIELD_STATIC_ADDRESS:
13037                         // Replace static read-only fields with constant if possible
13038                         if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) &&
13039                             !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) &&
13040                             (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp)))
13041                         {
13042                             CorInfoInitClassResult initClassResult =
13043                                 info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd,
13044                                                             impTokenLookupContextHandle);
13045
13046                             if (initClassResult & CORINFO_INITCLASS_INITIALIZED)
13047                             {
13048                                 void** pFldAddr = nullptr;
13049                                 void*  fldAddr =
13050                                     info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr);
13051
13052                                 // We should always be able to access this static's address directly
13053                                 assert(pFldAddr == nullptr);
13054
13055                                 op1 = impImportStaticReadOnlyField(fldAddr, lclTyp);
13056                                 goto FIELD_DONE;
13057                             }
13058                         }
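                        // For illustration: a load of something like "static readonly int MaxSize = 64;"
                        // (hypothetical field) can be folded to the constant 64 here, provided the
                        // owning class is already initialized so the stored value is final.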
13059
13060                         __fallthrough;
13061
13062                     case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13063                     case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13064                     case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13065 #if COR_JIT_EE_VERSION > 460
13066                     case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13067 #endif
13068                         op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13069                                                          lclTyp);
13070                         break;
13071
13072                     case CORINFO_FIELD_INTRINSIC_ZERO:
13073                     {
13074                         assert(aflags & CORINFO_ACCESS_GET);
13075                         op1 = gtNewIconNode(0, lclTyp);
13076                         goto FIELD_DONE;
13077                     }
13078                     break;
13079
13080                     case CORINFO_FIELD_INTRINSIC_EMPTY_STRING:
13081                     {
13082                         assert(aflags & CORINFO_ACCESS_GET);
13083
13084                         LPVOID         pValue;
13085                         InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue);
13086                         op1                = gtNewStringLiteralNode(iat, pValue);
13087                         goto FIELD_DONE;
13088                     }
13089                     break;
13090
13091                     default:
13092                         assert(!"Unexpected fieldAccessor");
13093                 }
13094
13095                 if (!isLoadAddress)
13096                 {
13097
13098                     if (prefixFlags & PREFIX_VOLATILE)
13099                     {
13100                         op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
13101                         op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13102
13103                         if (!usesHelper)
13104                         {
13105                             assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13106                                    (op1->OperGet() == GT_OBJ));
13107                             op1->gtFlags |= GTF_IND_VOLATILE;
13108                         }
13109                     }
13110
13111                     if (prefixFlags & PREFIX_UNALIGNED)
13112                     {
13113                         if (!usesHelper)
13114                         {
13115                             assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13116                                    (op1->OperGet() == GT_OBJ));
13117                             op1->gtFlags |= GTF_IND_UNALIGNED;
13118                         }
13119                     }
13120                 }
13121
13122                 /* Check if the class needs explicit initialization */
13123
13124                 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13125                 {
13126                     GenTreePtr helperNode = impInitClass(&resolvedToken);
13127                     if (compDonotInline())
13128                     {
13129                         return;
13130                     }
13131                     if (helperNode != nullptr)
13132                     {
13133                         op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13134                     }
13135                 }
13136
13137             FIELD_DONE:
13138                 impPushOnStack(op1, tiRetVal);
13139             }
13140             break;
13141
13142             case CEE_STFLD:
13143             case CEE_STSFLD:
13144             {
13145
13146                 BOOL isStoreStatic = (opcode == CEE_STSFLD);
13147
13148                 CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type)
13149
13150                 /* Get the CP_Fieldref index */
13151
13152                 assertImp(sz == sizeof(unsigned));
13153
13154                 _impResolveToken(CORINFO_TOKENKIND_Field);
13155
13156                 JITDUMP(" %08X", resolvedToken.token);
13157
13158                 int        aflags = CORINFO_ACCESS_SET;
13159                 GenTreePtr obj    = nullptr;
13160                 typeInfo*  tiObj  = nullptr;
13161                 typeInfo   tiVal;
13162
13163                 /* Pull the value from the stack */
13164                 op2    = impPopStack(tiVal);
13165                 clsHnd = tiVal.GetClassHandle();
13166
13167                 if (opcode == CEE_STFLD)
13168                 {
13169                     tiObj = &impStackTop().seTypeInfo;
13170                     obj   = impPopStack().val;
13171
13172                     if (impIsThis(obj))
13173                     {
13174                         aflags |= CORINFO_ACCESS_THIS;
13175
13176                         // An optimization for Contextful classes:
13177                         // we unwrap the proxy when we have a 'this reference'
13178
13179                         if (info.compUnwrapContextful)
13180                         {
13181                             aflags |= CORINFO_ACCESS_UNWRAP;
13182                         }
13183                     }
13184                 }
13185
13186                 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
13187
13188                 // Figure out the type of the member.  We always call canAccessField, so you always need this
13189                 // handle
13190                 CorInfoType ciType = fieldInfo.fieldType;
13191                 fieldClsHnd        = fieldInfo.structType;
13192
13193                 lclTyp = JITtype2varType(ciType);
13194
13195                 if (compIsForInlining())
13196                 {
13197                     /* Is this a 'special' (COM) field, a TLS ref static field, a field stored in the GC heap, or
13198                      * a per-inst static? */
13199
13200                     switch (fieldInfo.fieldAccessor)
13201                     {
13202                         case CORINFO_FIELD_INSTANCE_HELPER:
13203                         case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13204                         case CORINFO_FIELD_STATIC_ADDR_HELPER:
13205                         case CORINFO_FIELD_STATIC_TLS:
13206
13207                             compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER);
13208                             return;
13209
13210                         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13211 #if COR_JIT_EE_VERSION > 460
13212                         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13213 #endif
13214
13215                             /* We may be able to inline the field accessors in specific instantiations of generic
13216                              * methods */
13217                             compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER);
13218                             return;
13219
13220                         default:
13221                             break;
13222                     }
13223                 }
13224
13225                 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13226
13227                 if (tiVerificationNeeded)
13228                 {
13229                     verVerifyField(&resolvedToken, fieldInfo, tiObj, TRUE);
13230                     typeInfo fieldType = verMakeTypeInfo(ciType, fieldClsHnd);
13231                     Verify(tiCompatibleWith(tiVal, fieldType.NormaliseForStack(), true), "type mismatch");
13232                 }
13233                 else
13234                 {
13235                     // tiVerificationNeeded is false.
13236                     // Raise InvalidProgramException if static store accesses non-static field
13237                     if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
13238                     {
13239                         BADCODE("static access on an instance field");
13240                     }
13241                 }
13242
13243                 // We are using stfld on a static field.
13244                 // We allow it, but need to eval any side-effects for obj
13245                 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
13246                 {
13247                     if (obj->gtFlags & GTF_SIDE_EFFECT)
13248                     {
13249                         obj = gtUnusedValNode(obj);
13250                         impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13251                     }
13252                     obj = nullptr;
13253                 }
13254
13255                 /* Preserve 'small' int types */
13256                 if (lclTyp > TYP_INT)
13257                 {
13258                     lclTyp = genActualType(lclTyp);
13259                 }
13260
13261                 switch (fieldInfo.fieldAccessor)
13262                 {
13263                     case CORINFO_FIELD_INSTANCE:
13264 #ifdef FEATURE_READYTORUN_COMPILER
13265                     case CORINFO_FIELD_INSTANCE_WITH_BASE:
13266 #endif
13267                     {
13268                         obj = impCheckForNullPointer(obj);
13269
13270                         /* Create the data member node */
13271                         op1             = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
13272                         DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13273                         if (StructHasOverlappingFields(typeFlags))
13274                         {
13275                             op1->gtField.gtFldMayOverlap = true;
13276                         }
13277
13278 #ifdef FEATURE_READYTORUN_COMPILER
13279                         if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
13280                         {
13281                             op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
13282                         }
13283 #endif
13284
13285                         op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
13286
13287                         if (fgAddrCouldBeNull(obj))
13288                         {
13289                             op1->gtFlags |= GTF_EXCEPT;
13290                         }
13291
13292                         // If gtFldObj is a BYREF then our target is a value class and
13293                         // it could point anywhere, for example a boxed class static int
13294                         if (obj->gtType == TYP_BYREF)
13295                         {
13296                             op1->gtFlags |= GTF_IND_TGTANYWHERE;
13297                         }
13298
13299                         if (compIsForInlining() &&
13300                             impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, obj, impInlineInfo->inlArgInfo))
13301                         {
13302                             impInlineInfo->thisDereferencedFirst = true;
13303                         }
13304                     }
13305                     break;
13306
13307                     case CORINFO_FIELD_STATIC_TLS:
13308 #ifdef _TARGET_X86_
13309                         // Legacy TLS access is implemented as an intrinsic on x86 only
13310
13311                         /* Create the data member node */
13312                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
13313                         op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13314
13315                         break;
13316 #else
13317                         fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13318
13319                         __fallthrough;
13320 #endif
13321
13322                     case CORINFO_FIELD_STATIC_ADDR_HELPER:
13323                     case CORINFO_FIELD_INSTANCE_HELPER:
13324                     case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13325                         op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13326                                                clsHnd, op2);
13327                         goto SPILL_APPEND;
13328
13329                     case CORINFO_FIELD_STATIC_ADDRESS:
13330                     case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13331                     case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13332                     case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13333 #if COR_JIT_EE_VERSION > 460
13334                     case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13335 #endif
13336                         op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13337                                                          lclTyp);
13338                         break;
13339
13340                     default:
13341                         assert(!"Unexpected fieldAccessor");
13342                 }
13343
13344                 // Create the member assignment, unless we have a struct.
13345                 // TODO-1stClassStructs: This could be limited to TYP_STRUCT, to avoid extra copies.
13346                 bool deferStructAssign = varTypeIsStruct(lclTyp);
13347
13348                 if (!deferStructAssign)
13349                 {
13350                     if (prefixFlags & PREFIX_VOLATILE)
13351                     {
13352                         assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
13353                         op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
13354                         op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13355                         op1->gtFlags |= GTF_IND_VOLATILE;
13356                     }
13357                     if (prefixFlags & PREFIX_UNALIGNED)
13358                     {
13359                         assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
13360                         op1->gtFlags |= GTF_IND_UNALIGNED;
13361                     }
13362
13363                     /* V4.0 allows assignment of i4 constant values to i8 type vars when the IL verifier is
13364                        bypassed (full trust apps).  The reason this works is that the JIT stores an i4 constant
13365                        in the GenTree union during importation and reads from the union as if it were a long
13366                        during code generation.  Though this can potentially read garbage, one can get lucky to
13367                        have this working correctly.
13368
13369                        This code pattern is generated by the Dev10 MC++ compiler while storing to fields when
13370                        compiled with the /O2 switch (the default when compiling retail configs in Dev10), and a
13371                        customer app has taken a dependency on it.  To be backward compatible, we explicitly add
13372                        an upward cast here so that it always works correctly.
13373
13374                        Note that this is limited to x86 alone, as there is no back compat to be addressed for
13375                        the Arm JIT for V4.0.
13376                     */
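                          // Illustrative sketch only (hypothetical source and names, not taken from this codebase)
                          // of the pattern described above: a Dev10 MC++ /O2 build storing a 32-bit constant into
                          // an __int64 field may emit IL along the lines of
                          //
                          //     ldarg.0
                          //     ldc.i4   0x100              // i4 constant, no conv.i8
                          //     stfld    int64 C::m_val
                          //
                          // The explicit cast added in the x86 block below makes such a store well-defined.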
13381                     CLANG_FORMAT_COMMENT_ANCHOR;
13382
13383 #ifdef _TARGET_X86_
13384                     if (op1->TypeGet() != op2->TypeGet() && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) &&
13385                         varTypeIsLong(op1->TypeGet()))
13386                     {
13387                         op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
13388                     }
13389 #endif
13390
13391 #ifdef _TARGET_64BIT_
13392                     // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
13393                     if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
13394                     {
13395                         op2->gtType = TYP_I_IMPL;
13396                     }
13397                     else
13398                     {
13399                         // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
13400                         //
13401                         if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
13402                         {
13403                             op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
13404                         }
13405                         // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
13406                         //
13407                         if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
13408                         {
13409                             op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
13410                         }
13411                     }
13412 #endif
13413
13414 #if !FEATURE_X87_DOUBLES
13415                     // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
13416                     // We insert a cast to the dest 'op1' type
13417                     //
13418                     if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
13419                         varTypeIsFloating(op2->gtType))
13420                     {
13421                         op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
13422                     }
13423 #endif // !FEATURE_X87_DOUBLES
13424
13425                     op1 = gtNewAssignNode(op1, op2);
13426
13427                     /* Mark the expression as containing an assignment */
13428
13429                     op1->gtFlags |= GTF_ASG;
13430                 }
13431
13432                 /* Check if the class needs explicit initialization */
13433
13434                 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13435                 {
13436                     GenTreePtr helperNode = impInitClass(&resolvedToken);
13437                     if (compDonotInline())
13438                     {
13439                         return;
13440                     }
13441                     if (helperNode != nullptr)
13442                     {
13443                         op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13444                     }
13445                 }
13446
13447                 /* stfld can interfere with value classes (consider the sequence
13448                    ldloc, ldloca, ..., stfld, stloc).  We will be conservative and
13449                    spill all value class references from the stack. */
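                  // A hedged illustration (hypothetical IL, names assumed) of the interference described above:
                  //
                  //     ldloc.0                   // push a copy of a value-type local
                  //     ldloca.0                  // push a byref to the same local
                  //     ...
                  //     stfld  valuetype V::F     // writes the local through the byref
                  //     stloc.0                   // consumes the copy pushed before the stfld
                  //
                  // Spilling the value-class references to temps before appending the store preserves the IL
                  // evaluation order.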
13450
13451                 if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL)))
13452                 {
13453                     assert(tiObj);
13454
13455                     if (impIsValueType(tiObj))
13456                     {
13457                         impSpillEvalStack();
13458                     }
13459                     else
13460                     {
13461                         impSpillValueClasses();
13462                     }
13463                 }
13464
13465                 /* Spill any refs to the same member from the stack */
13466
13467                 impSpillLclRefs((ssize_t)resolvedToken.hField);
13468
13469                 /* stsfld also interferes with indirect accesses (for aliased
13470                    statics) and calls. But don't need to spill other statics
13471                    as we have explicitly spilled this particular static field. */
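                  /* For instance (an assumed scenario): an address of this static obtained earlier via ldsflda
                     may still be sitting on the evaluation stack under an indirection; in IL order that read
                     happened before this store, so spilling side effects here forces it to be evaluated into a
                     temp before the store is appended. */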
13472
13473                 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD"));
13474
13475                 if (deferStructAssign)
13476                 {
13477                     op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL);
13478                 }
13479             }
13480                 goto APPEND;
13481
13482             case CEE_NEWARR:
13483             {
13484
13485                 /* Get the class type index operand */
13486
13487                 _impResolveToken(CORINFO_TOKENKIND_Newarr);
13488
13489                 JITDUMP(" %08X", resolvedToken.token);
13490
13491                 if (!opts.IsReadyToRun())
13492                 {
13493                     // Need to restore array classes before creating array objects on the heap
13494                     op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
13495                     if (op1 == nullptr)
13496                     { // compDonotInline()
13497                         return;
13498                     }
13499                 }
13500
13501                 if (tiVerificationNeeded)
13502                 {
13503                     // As per ECMA, 'numElems' can be either an int32 or a native int.
13504                     Verify(impStackTop().seTypeInfo.IsIntOrNativeIntType(), "bad bound");
13505
13506                     CORINFO_CLASS_HANDLE elemTypeHnd;
13507                     info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
13508                     Verify(elemTypeHnd == nullptr ||
13509                                !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
13510                            "array of byref-like type");
13511                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13512                 }
13513
13514                 accessAllowedResult =
13515                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13516                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13517
13518                 /* Form the arglist: array class handle, size */
13519                 op2 = impPopStack().val;
13520                 assertImp(genActualTypeIsIntOrI(op2->gtType));
13521
13522 #ifdef FEATURE_READYTORUN_COMPILER
13523                 if (opts.IsReadyToRun())
13524                 {
13525                     op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF,
13526                                                     gtNewArgList(op2));
13527                     usingReadyToRunHelper = (op1 != nullptr);
13528
13529                     if (!usingReadyToRunHelper)
13530                     {
13531                         // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13532                         // and the newarr call with a single call to a dynamic R2R cell that will:
13533                         //      1) Load the context
13534                         //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
13535                         //      3) Allocate the new array
13536                         // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13537
13538                         // Need to restore array classes before creating array objects on the heap
13539                         op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
13540                         if (op1 == nullptr)
13541                         { // compDonotInline()
13542                             return;
13543                         }
13544                     }
13545                 }
13546
13547                 if (!usingReadyToRunHelper)
13548 #endif
13549                 {
13550                     args = gtNewArgList(op1, op2);
13551
13552                     /* Create a call to 'new' */
13553
13554                     // Note that this only works for shared generic code because the same helper is used for all
13555                     // reference array types
13556                     op1 =
13557                         gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, 0, args);
13558                 }
13559
13560                 op1->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass;
13561
13562                 /* Remember that this basic block contains 'new' of an sd array */
13563
13564                 block->bbFlags |= BBF_HAS_NEWARRAY;
13565                 optMethodFlags |= OMF_HAS_NEWARRAY;
13566
13567                 /* Push the result of the call on the stack */
13568
13569                 impPushOnStack(op1, tiRetVal);
13570
13571                 callTyp = TYP_REF;
13572             }
13573             break;
13574
13575             case CEE_LOCALLOC:
13576                 assert(!compIsForInlining());
13577
13578                 if (tiVerificationNeeded)
13579                 {
13580                     Verify(false, "bad opcode");
13581                 }
13582
13583                 // We don't allow locallocs inside handlers
13584                 if (block->hasHndIndex())
13585                 {
13586                     BADCODE("Localloc can't be inside handler");
13587                 }
13588
13589                 /* The FP register may not be back to the original value at the end
13590                    of the method, even if the frame size is 0, as localloc may
13591                    have modified it. So we will HAVE to reset it */
13592
13593                 compLocallocUsed = true;
13594                 setNeedsGSSecurityCookie();
13595
13596                 // Get the size to allocate
13597
13598                 op2 = impPopStack().val;
13599                 assertImp(genActualTypeIsIntOrI(op2->gtType));
13600
13601                 if (verCurrentState.esStackDepth != 0)
13602                 {
13603                     BADCODE("Localloc can only be used when the stack is empty");
13604                 }
13605
13606                 op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2);
13607
13608                 // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd.
13609
13610                 op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE);
13611
13612                 impPushOnStack(op1, tiRetVal);
13613                 break;
13614
13615             case CEE_ISINST:
13616
13617                 /* Get the type token */
13618                 assertImp(sz == sizeof(unsigned));
13619
13620                 _impResolveToken(CORINFO_TOKENKIND_Casting);
13621
13622                 JITDUMP(" %08X", resolvedToken.token);
13623
13624                 if (!opts.IsReadyToRun())
13625                 {
13626                     op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
13627                     if (op2 == nullptr)
13628                     { // compDonotInline()
13629                         return;
13630                     }
13631                 }
13632
13633                 if (tiVerificationNeeded)
13634                 {
13635                     Verify(impStackTop().seTypeInfo.IsObjRef(), "obj reference needed");
13636                     // Even if this is a value class, we know it is boxed.
13637                     tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
13638                 }
13639                 accessAllowedResult =
13640                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13641                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13642
13643                 op1 = impPopStack().val;
13644
13645 #ifdef FEATURE_READYTORUN_COMPILER
13646                 if (opts.IsReadyToRun())
13647                 {
13648                     GenTreePtr opLookup =
13649                         impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF,
13650                                                   gtNewArgList(op1));
13651                     usingReadyToRunHelper = (opLookup != nullptr);
13652                     op1                   = (usingReadyToRunHelper ? opLookup : op1);
13653
13654                     if (!usingReadyToRunHelper)
13655                     {
13656                         // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13657                         // and the isinstanceof_any call with a single call to a dynamic R2R cell that will:
13658                         //      1) Load the context
13659                         //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
13660                         //      3) Perform the 'is instance' check on the input object
13661                         // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13662
13663                         op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
13664                         if (op2 == nullptr)
13665                         { // compDonotInline()
13666                             return;
13667                         }
13668                     }
13669                 }
13670
13671                 if (!usingReadyToRunHelper)
13672 #endif
13673                 {
13674                     op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false);
13675                 }
13676                 if (compDonotInline())
13677                 {
13678                     return;
13679                 }
13680
13681                 impPushOnStack(op1, tiRetVal);
13682
13683                 break;
13684
13685             case CEE_REFANYVAL:
13686
13687                 // get the class handle and make a ICON node out of it
13688
13689                 _impResolveToken(CORINFO_TOKENKIND_Class);
13690
13691                 JITDUMP(" %08X", resolvedToken.token);
13692
13693                 op2 = impTokenToHandle(&resolvedToken);
13694                 if (op2 == nullptr)
13695                 { // compDonotInline()
13696                     return;
13697                 }
13698
13699                 if (tiVerificationNeeded)
13700                 {
13701                     Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
13702                            "need refany");
13703                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass).MakeByRef();
13704                 }
13705
13706                 op1 = impPopStack().val;
13707                 // make certain it is normalized;
13708                 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
13709
13710                 // Call helper GETREFANY(classHandle, op1);
13711                 args = gtNewArgList(op2, op1);
13712                 op1  = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, 0, args);
13713
13714                 impPushOnStack(op1, tiRetVal);
13715                 break;
13716
13717             case CEE_REFANYTYPE:
13718
13719                 if (tiVerificationNeeded)
13720                 {
13721                     Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
13722                            "need refany");
13723                 }
13724
13725                 op1 = impPopStack().val;
13726
13727                 // make certain it is normalized;
13728                 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
13729
13730                 if (op1->gtOper == GT_OBJ)
13731                 {
13732                     // Get the address of the refany
13733                     op1 = op1->gtOp.gtOp1;
13734
13735                     // Fetch the type from the correct slot
13736                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
13737                                         gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL));
13738                     op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1);
13739                 }
13740                 else
13741                 {
13742                     assertImp(op1->gtOper == GT_MKREFANY);
13743
13744                     // The pointer may have side-effects
13745                     if (op1->gtOp.gtOp1->gtFlags & GTF_SIDE_EFFECT)
13746                     {
13747                         impAppendTree(op1->gtOp.gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13748 #ifdef DEBUG
13749                         impNoteLastILoffs();
13750 #endif
13751                     }
13752
13753                     // We already have the class handle
13754                     op1 = op1->gtOp.gtOp2;
13755                 }
13756
13757                 // convert native TypeHandle to RuntimeTypeHandle
13758                 {
13759                     GenTreeArgList* helperArgs = gtNewArgList(op1);
13760
13761                     op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL, TYP_STRUCT, GTF_EXCEPT,
13762                                               helperArgs);
13763
13764                     // The handle struct is returned in register
13765                     op1->gtCall.gtReturnType = TYP_REF;
13766
13767                     tiRetVal = typeInfo(TI_STRUCT, impGetTypeHandleClass());
13768                 }
13769
13770                 impPushOnStack(op1, tiRetVal);
13771                 break;
13772
13773             case CEE_LDTOKEN:
13774             {
13775                 /* Get the Class index */
13776                 assertImp(sz == sizeof(unsigned));
13777                 lastLoadToken = codeAddr;
13778                 _impResolveToken(CORINFO_TOKENKIND_Ldtoken);
13779
13780                 tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken);
13781
13782                 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
13783                 if (op1 == nullptr)
13784                 { // compDonotInline()
13785                     return;
13786                 }
13787
13788                 helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE;
13789                 assert(resolvedToken.hClass != nullptr);
13790
13791                 if (resolvedToken.hMethod != nullptr)
13792                 {
13793                     helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD;
13794                 }
13795                 else if (resolvedToken.hField != nullptr)
13796                 {
13797                     helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD;
13798                 }
13799
13800                 GenTreeArgList* helperArgs = gtNewArgList(op1);
13801
13802                 op1 = gtNewHelperCallNode(helper, TYP_STRUCT, GTF_EXCEPT, helperArgs);
13803
13804                 // The handle struct is returned in register
13805                 op1->gtCall.gtReturnType = TYP_REF;
13806
13807                 tiRetVal = verMakeTypeInfo(tokenType);
13808                 impPushOnStack(op1, tiRetVal);
13809             }
13810             break;
13811
13812             case CEE_UNBOX:
13813             case CEE_UNBOX_ANY:
13814             {
13815                 /* Get the Class index */
13816                 assertImp(sz == sizeof(unsigned));
13817
13818                 _impResolveToken(CORINFO_TOKENKIND_Class);
13819
13820                 JITDUMP(" %08X", resolvedToken.token);
13821
13822                 BOOL runtimeLookup;
13823                 op2 = impTokenToHandle(&resolvedToken, &runtimeLookup);
13824                 if (op2 == nullptr)
13825                 { // compDonotInline()
13826                     return;
13827                 }
13828
13829                 // Run this always so we can get access exceptions even with SkipVerification.
13830                 accessAllowedResult =
13831                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13832                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13833
13834                 if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass))
13835                 {
13836                     if (tiVerificationNeeded)
13837                     {
13838                         typeInfo tiUnbox = impStackTop().seTypeInfo;
13839                         Verify(tiUnbox.IsObjRef(), "bad unbox.any arg");
13840                         tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13841                         tiRetVal.NormaliseForStack();
13842                     }
13843                     op1 = impPopStack().val;
13844                     goto CASTCLASS;
13845                 }
13846
13847                 /* Pop the object and create the unbox helper call */
13848                 /* You might think that for UNBOX_ANY we need to push a different */
13849                 /* (non-byref) type, but here we're making the tiRetVal that is used */
13850                 /* for the intermediate pointer which we then transfer onto the OBJ */
13851                 /* instruction.  OBJ then creates the appropriate tiRetVal. */
13852                 if (tiVerificationNeeded)
13853                 {
13854                     typeInfo tiUnbox = impStackTop().seTypeInfo;
13855                     Verify(tiUnbox.IsObjRef(), "Bad unbox arg");
13856
13857                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13858                     Verify(tiRetVal.IsValueClass(), "not value class");
13859                     tiRetVal.MakeByRef();
13860
13861                     // We always come from an objref, so this is safe byref
13862                     tiRetVal.SetIsPermanentHomeByRef();
13863                     tiRetVal.SetIsReadonlyByRef();
13864                 }
13865
13866                 op1 = impPopStack().val;
13867                 assertImp(op1->gtType == TYP_REF);
13868
13869                 helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass);
13870                 assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE);
13871
13872                 // We only want to expand inline the normal UNBOX helper;
13873                 expandInline = (helper == CORINFO_HELP_UNBOX);
13874
13875                 if (expandInline)
13876                 {
13877                     if (compCurBB->isRunRarely())
13878                     {
13879                         expandInline = false; // not worth the code expansion
13880                     }
13881                 }
13882
13883                 if (expandInline)
13884                 {
13885                     // we are doing normal unboxing
13886                     // inline the common case of the unbox helper
13887                     // UNBOX(exp) morphs into
13888                     // clone = pop(exp);
13889                     // ((*clone == typeToken) ? nop : helper(clone, typeToken));
13890                     // push(clone + sizeof(void*))
13891                     //
13892                     GenTreePtr cloneOperand;
13893                     op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
13894                                        nullptr DEBUGARG("inline UNBOX clone1"));
13895                     op1 = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
13896
13897                     GenTreePtr condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2);
13898
13899                     op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
13900                                        nullptr DEBUGARG("inline UNBOX clone2"));
13901                     op2 = impTokenToHandle(&resolvedToken);
13902                     if (op2 == nullptr)
13903                     { // compDonotInline()
13904                         return;
13905                     }
13906                     args = gtNewArgList(op2, op1);
13907                     op1  = gtNewHelperCallNode(helper, TYP_VOID, 0, args);
13908
13909                     op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1);
13910                     op1 = gtNewQmarkNode(TYP_VOID, condBox, op1);
13911                     condBox->gtFlags |= GTF_RELOP_QMARK;
13912
13913                     // QMARK nodes cannot reside on the evaluation stack. Because there
13914                     // may be other trees on the evaluation stack that side-effect the
13915                     // sources of the UNBOX operation we must spill the stack.
13916
13917                     impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13918
13919                     // Create the address-expression to reference past the object header
13920                     // to the beginning of the value-type. Today this means adjusting
13921                     // past the base of the object's vtable field, which is pointer sized.
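                          // Assumed boxed-object layout (the standard CLR layout) behind the adjustment below:
                          //
                          //     objRef --> +0              : method table pointer (pointer sized)
                          //                +sizeof(void*)  : start of the value-type payload
                          //
                          // so the interior pointer pushed for UNBOX is simply objRef + sizeof(void*).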
13922
13923                     op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
13924                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2);
13925                 }
13926                 else
13927                 {
13928                     unsigned callFlags = (helper == CORINFO_HELP_UNBOX) ? 0 : GTF_EXCEPT;
13929
13930                     // Don't optimize, just call the helper and be done with it
13931                     args = gtNewArgList(op2, op1);
13932                     op1  = gtNewHelperCallNode(helper,
13933                                               (var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT),
13934                                               callFlags, args);
13935                 }
13936
13937                 assert(helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF || // Unbox helper returns a byref.
13938                        helper == CORINFO_HELP_UNBOX_NULLABLE &&
13939                            varTypeIsStruct(op1) // UnboxNullable helper returns a struct.
13940                        );
13941
13942                 /*
13943                   ----------------------------------------------------------------------
13944                   | \ helper  |                         |                              |
13945                   |   \       |                         |                              |
13946                   |     \     | CORINFO_HELP_UNBOX      | CORINFO_HELP_UNBOX_NULLABLE  |
13947                   |       \   | (which returns a BYREF) | (which returns a STRUCT)     |
13948                   | opcode  \ |                         |                              |
13949                   |---------------------------------------------------------------------
13950                   | UNBOX     | push the BYREF          | spill the STRUCT to a local, |
13951                   |           |                         | push the BYREF to this local |
13952                   |---------------------------------------------------------------------
13953                   | UNBOX_ANY | push a GT_OBJ of        | push the STRUCT              |
13954                   |           | the BYREF               | For Linux when the           |
13955                   |           |                         |  struct is returned in two   |
13956                   |           |                         |  registers create a temp     |
13957                   |           |                         |  which address is passed to  |
13958                   |           |                         |  the unbox_nullable helper.  |
13959                   |---------------------------------------------------------------------
13960                 */
13961
13962                 if (opcode == CEE_UNBOX)
13963                 {
13964                     if (helper == CORINFO_HELP_UNBOX_NULLABLE)
13965                     {
13966                         // Unbox nullable helper returns a struct type.
13967                         // We need to spill it to a temp so that we can take the address of it.
13968                         // Here we need the unsafe value cls check, since the address of the struct is taken
13969                         // to be used further along and could potentially be exploited.
13970
13971                         unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable"));
13972                         lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
13973
13974                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
13975                         op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
13976                         assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
13977
13978                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
13979                         op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
13980                         op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
13981                     }
13982
13983                     assert(op1->gtType == TYP_BYREF);
13984                     assert(!tiVerificationNeeded || tiRetVal.IsByRef());
13985                 }
13986                 else
13987                 {
13988                     assert(opcode == CEE_UNBOX_ANY);
13989
13990                     if (helper == CORINFO_HELP_UNBOX)
13991                     {
13992                         // Normal unbox helper returns a TYP_BYREF.
13993                         impPushOnStack(op1, tiRetVal);
13994                         oper = GT_OBJ;
13995                         goto OBJ;
13996                     }
13997
13998                     assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!");
13999
14000 #if FEATURE_MULTIREG_RET
14001
14002                     if (varTypeIsStruct(op1) && IsMultiRegReturnedType(resolvedToken.hClass))
14003                     {
14004                         // Unbox nullable helper returns a TYP_STRUCT.
14005                         // For the multi-reg case we need to spill it to a temp so that
14006                         // we can pass the address to the unbox_nullable jit helper.
14007
14008                         unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable"));
14009                         lvaTable[tmp].lvIsMultiRegArg = true;
14010                         lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14011
14012                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14013                         op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14014                         assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14015
14016                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14017                         op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14018                         op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14019
14020                         // In this case the return value of the unbox helper is TYP_BYREF.
14021                         // Make sure the right type is placed on the operand type stack.
14022                         impPushOnStack(op1, tiRetVal);
14023
14024                         // Load the struct.
14025                         oper = GT_OBJ;
14026
14027                         assert(op1->gtType == TYP_BYREF);
14028                         assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14029
14030                         goto OBJ;
14031                     }
14032                     else
14033
14034 #endif // FEATURE_MULTIREG_RET
14035
14036                     {
14037                         // If the struct is not register-passable, we have it materialized in the RetBuf.
14038                         assert(op1->gtType == TYP_STRUCT);
14039                         tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14040                         assert(tiRetVal.IsValueClass());
14041                     }
14042                 }
14043
14044                 impPushOnStack(op1, tiRetVal);
14045             }
14046             break;
14047
14048             case CEE_BOX:
14049             {
14050                 /* Get the Class index */
14051                 assertImp(sz == sizeof(unsigned));
14052
14053                 _impResolveToken(CORINFO_TOKENKIND_Box);
14054
14055                 JITDUMP(" %08X", resolvedToken.token);
14056
14057                 if (tiVerificationNeeded)
14058                 {
14059                     typeInfo tiActual = impStackTop().seTypeInfo;
14060                     typeInfo tiBox    = verMakeTypeInfo(resolvedToken.hClass);
14061
14062                     Verify(verIsBoxable(tiBox), "boxable type expected");
14063
14064                     // check the class constraints of the boxed type in case we are boxing an uninitialized value
14065                     Verify(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
14066                            "boxed type has unsatisfied class constraints");
14067
14068                     Verify(tiCompatibleWith(tiActual, tiBox.NormaliseForStack(), true), "type mismatch");
14069
14070                     // Observation: the following code introduces a boxed value class on the stack, but,
14071                     // according to the ECMA spec, one would simply expect: tiRetVal =
14072                     // typeInfo(TI_REF,impGetObjectClass());
14073
14074                     // Push the result back on the stack,
14075                     // even if clsHnd is a value class we want the TI_REF
14076                     // we call back to the EE to find out what type we should push (for Nullable<T> we push T)
14077                     tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(resolvedToken.hClass));
14078                 }
14079
14080                 accessAllowedResult =
14081                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14082                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14083
14084                 // Note BOX can be used on things that are not value classes, in which
14085                 // case we get a NOP.  However the verifier's view of the type on the
14086                 // stack changes (in generic code a 'T' becomes a 'boxed T')
14087                 if (!eeIsValueClass(resolvedToken.hClass))
14088                 {
14089                     verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal;
14090                     break;
14091                 }
14092
14093                 // Look ahead for unbox.any
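                // Sketch of the peephole below (assumed IL shape, 'MyStruct' is a placeholder name): a box
                // immediately followed by unbox.any of the same non-shared value class,
                //
                //     box        MyStruct
                //     unbox.any  MyStruct
                //
                // leaves the value unchanged, so both instructions are elided and the value stays on the stack.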
14094                 if (codeAddr + (sz + 1 + sizeof(mdToken)) <= codeEndp && codeAddr[sz] == CEE_UNBOX_ANY)
14095                 {
14096                     DWORD classAttribs = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
14097                     if (!(classAttribs & CORINFO_FLG_SHAREDINST))
14098                     {
14099                         CORINFO_RESOLVED_TOKEN unboxResolvedToken;
14100
14101                         impResolveToken(codeAddr + (sz + 1), &unboxResolvedToken, CORINFO_TOKENKIND_Class);
14102
14103                         if (unboxResolvedToken.hClass == resolvedToken.hClass)
14104                         {
14105                             // Skip the next unbox.any instruction
14106                             sz += sizeof(mdToken) + 1;
14107                             break;
14108                         }
14109                     }
14110                 }
14111
14112                 impImportAndPushBox(&resolvedToken);
14113                 if (compDonotInline())
14114                 {
14115                     return;
14116                 }
14117             }
14118             break;
14119
14120             case CEE_SIZEOF:
14121
14122                 /* Get the Class index */
14123                 assertImp(sz == sizeof(unsigned));
14124
14125                 _impResolveToken(CORINFO_TOKENKIND_Class);
14126
14127                 JITDUMP(" %08X", resolvedToken.token);
14128
14129                 if (tiVerificationNeeded)
14130                 {
14131                     tiRetVal = typeInfo(TI_INT);
14132                 }
14133
14134                 op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass));
14135                 impPushOnStack(op1, tiRetVal);
14136                 break;
14137
14138             case CEE_CASTCLASS:
14139
14140                 /* Get the Class index */
14141
14142                 assertImp(sz == sizeof(unsigned));
14143
14144                 _impResolveToken(CORINFO_TOKENKIND_Casting);
14145
14146                 JITDUMP(" %08X", resolvedToken.token);
14147
14148                 if (!opts.IsReadyToRun())
14149                 {
14150                     op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14151                     if (op2 == nullptr)
14152                     { // compDonotInline()
14153                         return;
14154                     }
14155                 }
14156
14157                 if (tiVerificationNeeded)
14158                 {
14159                     Verify(impStackTop().seTypeInfo.IsObjRef(), "object ref expected");
14160                     // box it
14161                     tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
14162                 }
14163
14164                 accessAllowedResult =
14165                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14166                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14167
14168                 op1 = impPopStack().val;
14169
14170             /* Pop the address and create the 'checked cast' helper call */
14171
14172             // At this point we expect typeRef to contain the token, op1 to contain the value being cast,
14173             // and op2 to contain code that creates the type handle corresponding to typeRef
14174             CASTCLASS:
14175
14176 #ifdef FEATURE_READYTORUN_COMPILER
14177                 if (opts.IsReadyToRun())
14178                 {
14179                     GenTreePtr opLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST,
14180                                                                     TYP_REF, gtNewArgList(op1));
14181                     usingReadyToRunHelper = (opLookup != nullptr);
14182                     op1                   = (usingReadyToRunHelper ? opLookup : op1);
14183
14184                     if (!usingReadyToRunHelper)
14185                     {
14186                         // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14187                         // and the chkcastany call with a single call to a dynamic R2R cell that will:
14188                         //      1) Load the context
14189                         //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
14190                         //      3) Check the object on the stack for the type-cast
14191                         // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14192
14193                         op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14194                         if (op2 == nullptr)
14195                         { // compDonotInline()
14196                             return;
14197                         }
14198                     }
14199                 }
14200
14201                 if (!usingReadyToRunHelper)
14202 #endif
14203                 {
14204                     op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true);
14205                 }
14206                 if (compDonotInline())
14207                 {
14208                     return;
14209                 }
14210
14211                 /* Push the result back on the stack */
14212                 impPushOnStack(op1, tiRetVal);
14213                 break;
14214
14215             case CEE_THROW:
14216
14217                 if (compIsForInlining())
14218                 {
14219                     // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
14220                     // TODO: Will this be too strict, given that we will inline many basic blocks?
14221                     // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
14222
14223                     /* Do we have just the exception on the stack ?*/
14224
14225                     if (verCurrentState.esStackDepth != 1)
14226                     {
14227                         /* if not, just don't inline the method */
14228
14229                         compInlineResult->NoteFatal(InlineObservation::CALLEE_THROW_WITH_INVALID_STACK);
14230                         return;
14231                     }
14232                 }
14233
14234                 if (tiVerificationNeeded)
14235                 {
14236                     tiRetVal = impStackTop().seTypeInfo;
14237                     Verify(tiRetVal.IsObjRef(), "object ref expected");
14238                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
14239                     {
14240                         Verify(!tiRetVal.IsThisPtr(), "throw uninitialized this");
14241                     }
14242                 }
14243
14244                 block->bbSetRunRarely(); // any block with a throw is rare
14245                 /* Pop the exception object and create the 'throw' helper call */
14246
14247                 op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, GTF_EXCEPT, gtNewArgList(impPopStack().val));
14248
14249             EVAL_APPEND:
14250                 if (verCurrentState.esStackDepth > 0)
14251                 {
14252                     impEvalSideEffects();
14253                 }
14254
14255                 assert(verCurrentState.esStackDepth == 0);
14256
14257                 goto APPEND;
14258
14259             case CEE_RETHROW:
14260
14261                 assert(!compIsForInlining());
14262
14263                 if (info.compXcptnsCount == 0)
14264                 {
14265                     BADCODE("rethrow outside catch");
14266                 }
14267
14268                 if (tiVerificationNeeded)
14269                 {
14270                     Verify(block->hasHndIndex(), "rethrow outside catch");
14271                     if (block->hasHndIndex())
14272                     {
14273                         EHblkDsc* HBtab = ehGetDsc(block->getHndIndex());
14274                         Verify(!HBtab->HasFinallyOrFaultHandler(), "rethrow in finally or fault");
14275                         if (HBtab->HasFilter())
14276                         {
14277                             // we better be in the handler clause part, not the filter part
14278                             Verify(jitIsBetween(compCurBB->bbCodeOffs, HBtab->ebdHndBegOffs(), HBtab->ebdHndEndOffs()),
14279                                    "rethrow in filter");
14280                         }
14281                     }
14282                 }
14283
14284                 /* Create the 'rethrow' helper call */
14285
14286                 op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID, GTF_EXCEPT);
14287
14288                 goto EVAL_APPEND;
14289
14290             case CEE_INITOBJ:
14291
14292                 assertImp(sz == sizeof(unsigned));
14293
14294                 _impResolveToken(CORINFO_TOKENKIND_Class);
14295
14296                 JITDUMP(" %08X", resolvedToken.token);
14297
14298                 if (tiVerificationNeeded)
14299                 {
14300                     typeInfo tiTo    = impStackTop().seTypeInfo;
14301                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14302
14303                     Verify(tiTo.IsByRef(), "byref expected");
14304                     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
14305
14306                     Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
14307                            "type operand incompatible with type of address");
14308                 }
14309
14310                 size = info.compCompHnd->getClassSize(resolvedToken.hClass); // Size
14311                 op2  = gtNewIconNode(0);                                     // Value
14312                 op1  = impPopStack().val;                                    // Dest
14313                 op1  = gtNewBlockVal(op1, size);
14314                 op1  = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
14315                 goto SPILL_APPEND;
14316
14317             case CEE_INITBLK:
14318
14319                 if (tiVerificationNeeded)
14320                 {
14321                     Verify(false, "bad opcode");
14322                 }
14323
14324                 op3 = impPopStack().val; // Size
14325                 op2 = impPopStack().val; // Value
14326                 op1 = impPopStack().val; // Dest
14327
14328                 if (op3->IsCnsIntOrI())
14329                 {
14330                     size = (unsigned)op3->AsIntConCommon()->IconValue();
14331                     op1  = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
14332                 }
14333                 else
14334                 {
14335                     op1  = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
14336                     size = 0;
14337                 }
14338                 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
14339
14340                 goto SPILL_APPEND;
14341
14342             case CEE_CPBLK:
14343
14344                 if (tiVerificationNeeded)
14345                 {
14346                     Verify(false, "bad opcode");
14347                 }
14348                 op3 = impPopStack().val; // Size
14349                 op2 = impPopStack().val; // Src
14350                 op1 = impPopStack().val; // Dest
14351
14352                 if (op3->IsCnsIntOrI())
14353                 {
14354                     size = (unsigned)op3->AsIntConCommon()->IconValue();
14355                     op1  = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
14356                 }
14357                 else
14358                 {
14359                     op1  = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
14360                     size = 0;
14361                 }
14362                 if (op2->OperGet() == GT_ADDR)
14363                 {
14364                     op2 = op2->gtOp.gtOp1;
14365                 }
14366                 else
14367                 {
14368                     op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2);
14369                 }
14370
14371                 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, true);
14372                 goto SPILL_APPEND;
14373
14374             case CEE_CPOBJ:
14375
14376                 assertImp(sz == sizeof(unsigned));
14377
14378                 _impResolveToken(CORINFO_TOKENKIND_Class);
14379
14380                 JITDUMP(" %08X", resolvedToken.token);
14381
14382                 if (tiVerificationNeeded)
14383                 {
14384                     typeInfo tiFrom  = impStackTop().seTypeInfo;
14385                     typeInfo tiTo    = impStackTop(1).seTypeInfo;
14386                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14387
14388                     Verify(tiFrom.IsByRef(), "expected byref source");
14389                     Verify(tiTo.IsByRef(), "expected byref destination");
14390
14391                     Verify(tiCompatibleWith(tiFrom.DereferenceByRef(), tiInstr, false),
14392                            "type of source address incompatible with type operand");
14393                     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
14394                     Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
14395                            "type operand incompatible with type of destination address");
14396                 }
14397
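                // For a GC ref type, cpobj is just a load of the object reference through the source
                // address followed by a stind.ref through the destination; only value classes take the
                // gtNewCpObjNode (struct copy) path below.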
14398                 if (!eeIsValueClass(resolvedToken.hClass))
14399                 {
14400                     op1 = impPopStack().val; // address to load from
14401
14402                     impBashVarAddrsToI(op1);
14403
14404                     assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
14405
14406                     op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
14407                     op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
14408
14409                     impPushOnStackNoType(op1);
14410                     opcode = CEE_STIND_REF;
14411                     lclTyp = TYP_REF;
14412                     goto STIND_POST_VERIFY;
14413                 }
14414
14415                 op2 = impPopStack().val; // Src
14416                 op1 = impPopStack().val; // Dest
14417                 op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0));
14418                 goto SPILL_APPEND;
14419
14420             case CEE_STOBJ:
14421             {
14422                 assertImp(sz == sizeof(unsigned));
14423
14424                 _impResolveToken(CORINFO_TOKENKIND_Class);
14425
14426                 JITDUMP(" %08X", resolvedToken.token);
14427
14428                 if (eeIsValueClass(resolvedToken.hClass))
14429                 {
14430                     lclTyp = TYP_STRUCT;
14431                 }
14432                 else
14433                 {
14434                     lclTyp = TYP_REF;
14435                 }
14436
14437                 if (tiVerificationNeeded)
14438                 {
14439
14440                     typeInfo tiPtr = impStackTop(1).seTypeInfo;
14441
14442                     // Make sure we have a good looking byref
14443                     Verify(tiPtr.IsByRef(), "pointer not byref");
14444                     Verify(!tiPtr.IsReadonlyByRef(), "write to readonly byref");
14445                     if (!tiPtr.IsByRef() || tiPtr.IsReadonlyByRef())
14446                     {
14447                         compUnsafeCastUsed = true;
14448                     }
14449
14450                     typeInfo ptrVal = DereferenceByRef(tiPtr);
14451                     typeInfo argVal = verMakeTypeInfo(resolvedToken.hClass);
14452
14453                     if (!tiCompatibleWith(impStackTop(0).seTypeInfo, NormaliseForStack(argVal), true))
14454                     {
14455                         Verify(false, "type of value incompatible with type operand");
14456                         compUnsafeCastUsed = true;
14457                     }
14458
14459                     if (!tiCompatibleWith(argVal, ptrVal, false))
14460                     {
14461                         Verify(false, "type operand incompatible with type of address");
14462                         compUnsafeCastUsed = true;
14463                     }
14464                 }
14465                 else
14466                 {
14467                     compUnsafeCastUsed = true;
14468                 }
14469
14470                 if (lclTyp == TYP_REF)
14471                 {
14472                     opcode = CEE_STIND_REF;
14473                     goto STIND_POST_VERIFY;
14474                 }
14475
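                // A value class whose layout is a single primitive is stored with a plain indirect
                // store of that primitive type; only genuine struct types fall through to
                // impAssignStructPtr below.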
14476                 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
14477                 if (impIsPrimitive(jitTyp))
14478                 {
14479                     lclTyp = JITtype2varType(jitTyp);
14480                     goto STIND_POST_VERIFY;
14481                 }
14482
14483                 op2 = impPopStack().val; // Value
14484                 op1 = impPopStack().val; // Ptr
14485
14486                 assertImp(varTypeIsStruct(op2));
14487
14488                 op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14489                 goto SPILL_APPEND;
14490             }
14491
14492             case CEE_MKREFANY:
14493
14494                 assert(!compIsForInlining());
14495
14496                 // Being lazy here. Refanys are tricky in terms of gc tracking.
14497                 // Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany.
14498
14499                 JITDUMP("disabling struct promotion because of mkrefany\n");
14500                 fgNoStructPromotion = true;
14501
14502                 oper = GT_MKREFANY;
14503                 assertImp(sz == sizeof(unsigned));
14504
14505                 _impResolveToken(CORINFO_TOKENKIND_Class);
14506
14507                 JITDUMP(" %08X", resolvedToken.token);
14508
14509                 op2 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
14510                 if (op2 == nullptr)
14511                 { // compDonotInline()
14512                     return;
14513                 }
14514
14515                 if (tiVerificationNeeded)
14516                 {
14517                     typeInfo tiPtr   = impStackTop().seTypeInfo;
14518                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14519
14520                     Verify(!verIsByRefLike(tiInstr), "mkrefany of byref-like class");
14521                     Verify(!tiPtr.IsReadonlyByRef(), "readonly byref used with mkrefany");
14522                     Verify(typeInfo::AreEquivalent(tiPtr.DereferenceByRef(), tiInstr), "type mismatch");
14523                 }
14524
14525                 accessAllowedResult =
14526                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14527                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14528
14529                 op1 = impPopStack().val;
14530
14531                 // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec.
14532                 // But JIT32 allowed it, so we continue to allow it.
14533                 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT);
14534
14535                 // MKREFANY returns a struct.  op2 is the class token.
14536                 op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2);
14537
14538                 impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass()));
14539                 break;
14540
14541             case CEE_LDOBJ:
14542             {
14543                 oper = GT_OBJ;
14544                 assertImp(sz == sizeof(unsigned));
14545
14546                 _impResolveToken(CORINFO_TOKENKIND_Class);
14547
14548                 JITDUMP(" %08X", resolvedToken.token);
14549
14550             OBJ:
14551
14552                 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14553
14554                 if (tiVerificationNeeded)
14555                 {
14556                     typeInfo tiPtr = impStackTop().seTypeInfo;
14557
14558                     // Make sure we have a byref
14559                     if (!tiPtr.IsByRef())
14560                     {
14561                         Verify(false, "pointer not byref");
14562                         compUnsafeCastUsed = true;
14563                     }
14564                     typeInfo tiPtrVal = DereferenceByRef(tiPtr);
14565
14566                     if (!tiCompatibleWith(tiPtrVal, tiRetVal, false))
14567                     {
14568                         Verify(false, "type of address incompatible with type operand");
14569                         compUnsafeCastUsed = true;
14570                     }
14571                     tiRetVal.NormaliseForStack();
14572                 }
14573                 else
14574                 {
14575                     compUnsafeCastUsed = true;
14576                 }
14577
14578                 if (eeIsValueClass(resolvedToken.hClass))
14579                 {
14580                     lclTyp = TYP_STRUCT;
14581                 }
14582                 else
14583                 {
14584                     lclTyp = TYP_REF;
14585                     opcode = CEE_LDIND_REF;
14586                     goto LDIND_POST_VERIFY;
14587                 }
14588
14589                 op1 = impPopStack().val;
14590
14591                 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL);
14592
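                // Mirroring the stobj case above: a primitive-sized value class is loaded with a plain
                // GT_IND of that primitive type, while other value classes get a GT_OBJ node that
                // carries the class handle.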
14593                 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
14594                 if (impIsPrimitive(jitTyp))
14595                 {
14596                     op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1);
14597
14598                     // Could point anywhere, for example a boxed class static int
14599                     op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF;
14600                     assertImp(varTypeIsArithmetic(op1->gtType));
14601                 }
14602                 else
14603                 {
14604                     // OBJ returns a struct, and takes an inline argument which is
14605                     // the class token of the loaded obj
14606                     op1 = gtNewObjNode(resolvedToken.hClass, op1);
14607                 }
14608                 op1->gtFlags |= GTF_EXCEPT;
14609
14610                 impPushOnStack(op1, tiRetVal);
14611                 break;
14612             }
14613
14614             case CEE_LDLEN:
14615                 if (tiVerificationNeeded)
14616                 {
14617                     typeInfo tiArray = impStackTop().seTypeInfo;
14618                     Verify(verIsSDArray(tiArray), "bad array");
14619                     tiRetVal = typeInfo(TI_INT);
14620                 }
14621
14622                 op1 = impPopStack().val;
14623                 if (!opts.MinOpts() && !opts.compDbgCode)
14624                 {
14625                     /* Use GT_ARR_LENGTH operator so rng check opts see this */
14626                     GenTreeArrLen* arrLen =
14627                         new (this, GT_ARR_LENGTH) GenTreeArrLen(TYP_INT, op1, offsetof(CORINFO_Array, length));
14628
14629                     /* Mark the block as containing a length expression */
14630
14631                     if (op1->gtOper == GT_LCL_VAR)
14632                     {
14633                         block->bbFlags |= BBF_HAS_IDX_LEN;
14634                     }
14635
14636                     op1 = arrLen;
14637                 }
14638                 else
14639                 {
14640                     /* Create the expression "*(array_addr + ArrLenOffs)" */
14641                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
14642                                         gtNewIconNode(offsetof(CORINFO_Array, length), TYP_I_IMPL));
14643                     op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
14644                     op1->gtFlags |= GTF_IND_ARR_LEN;
14645                 }
14646
14647                 /* An indirection will cause a GPF if the address is null */
14648                 op1->gtFlags |= GTF_EXCEPT;
14649
14650                 /* Push the result back on the stack */
14651                 impPushOnStack(op1, tiRetVal);
14652                 break;
14653
14654             case CEE_BREAK:
14655                 op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID);
14656                 goto SPILL_APPEND;
14657
14658             case CEE_NOP:
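                // In debuggable code we keep an explicit GT_NO_OP so the IL 'nop' has a matching
                // native instruction (preserving breakpoints and the IL-to-native mapping).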
14659                 if (opts.compDbgCode)
14660                 {
14661                     op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
14662                     goto SPILL_APPEND;
14663                 }
14664                 break;
14665
14666             /******************************** NYI *******************************/
14667
14668             case 0xCC:
14669                 OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n");
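                // Fall through and treat the 0xCC byte as an unknown opcode.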
14670
14671             case CEE_ILLEGAL:
14672             case CEE_MACRO_END:
14673
14674             default:
14675                 BADCODE3("unknown opcode", ": %02X", (int)opcode);
14676         }
14677
14678         codeAddr += sz;
14679         prevOpcode = opcode;
14680
14681         prefixFlags = 0;
14682         assert(!insertLdloc || opcode == CEE_DUP);
14683     }
14684
14685     assert(!insertLdloc);
14686
14687     return;
14688 #undef _impResolveToken
14689 }
14690 #ifdef _PREFAST_
14691 #pragma warning(pop)
14692 #endif
14693
14694 // Push a local/argument tree on the operand stack
14695 void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal)
14696 {
14697     tiRetVal.NormaliseForStack();
14698
14699     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr())
14700     {
14701         tiRetVal.SetUninitialisedObjRef();
14702     }
14703
14704     impPushOnStack(op, tiRetVal);
14705 }
14706
14707 // Load a local/argument on the operand stack
14708 // lclNum is an index into lvaTable *NOT* the arg/lcl index in the IL
14709 void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, typeInfo tiRetVal)
14710 {
14711     var_types lclTyp;
14712
14713     if (lvaTable[lclNum].lvNormalizeOnLoad())
14714     {
14715         lclTyp = lvaGetRealType(lclNum);
14716     }
14717     else
14718     {
14719         lclTyp = lvaGetActualType(lclNum);
14720     }
14721
14722     impPushVar(gtNewLclvNode(lclNum, lclTyp, offset), tiRetVal);
14723 }
14724
14725 // Load an argument on the operand stack
14726 // Shared by the various CEE_LDARG opcodes
14727 // ilArgNum is the argument index as specified in IL.
14728 // It will be mapped to the correct lvaTable index
14729 void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset)
14730 {
14731     Verify(ilArgNum < info.compILargsCount, "bad arg num");
14732
14733     if (compIsForInlining())
14734     {
14735         if (ilArgNum >= info.compArgsCount)
14736         {
14737             compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER);
14738             return;
14739         }
14740
14741         impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo),
14742                    impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo);
14743     }
14744     else
14745     {
14746         if (ilArgNum >= info.compArgsCount)
14747         {
14748             BADCODE("Bad IL");
14749         }
14750
14751         unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param
14752
14753         if (lclNum == info.compThisArg)
14754         {
14755             lclNum = lvaArg0Var;
14756         }
14757
14758         impLoadVar(lclNum, offset);
14759     }
14760 }
14761
14762 // Load a local on the operand stack
14763 // Shared by the various CEE_LDLOC opcodes
14764 // ilLclNum is the local index as specified in IL.
14765 // It will be mapped to the correct lvaTable index
14766 void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset)
14767 {
14768     if (tiVerificationNeeded)
14769     {
14770         Verify(ilLclNum < info.compMethodInfo->locals.numArgs, "bad loc num");
14771         Verify(info.compInitMem, "initLocals not set");
14772     }
14773
14774     if (compIsForInlining())
14775     {
14776         if (ilLclNum >= info.compMethodInfo->locals.numArgs)
14777         {
14778             compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER);
14779             return;
14780         }
14781
14782         // Get the local type
14783         var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo;
14784
14785         typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo;
14786
14787         /* Have we allocated a temp for this local? */
14788
14789         unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp"));
14790
14791         // All vars of inlined methods should be !lvNormalizeOnLoad()
14792
14793         assert(!lvaTable[lclNum].lvNormalizeOnLoad());
14794         lclTyp = genActualType(lclTyp);
14795
14796         impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal);
14797     }
14798     else
14799     {
14800         if (ilLclNum >= info.compMethodInfo->locals.numArgs)
14801         {
14802             BADCODE("Bad IL");
14803         }
14804
14805         unsigned lclNum = info.compArgsCount + ilLclNum;
14806
14807         impLoadVar(lclNum, offset);
14808     }
14809 }
14810
14811 #ifdef _TARGET_ARM_
14812 /**************************************************************************************
14813  *
14814  *  When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the
14815  *  dst struct, because struct promotion will turn it into a float/double variable while
14816  *  the rhs will be an int/long variable. We don't generate code for assigning an int into
14817  *  a float, but there is nothing that prevents us from building such a tree. The tree would
14818  *  then look like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int))
14819  *
14820  *  tmpNum - the lcl dst variable num that is a struct.
14821  *  src    - the src tree assigned to the dest that is a struct/int (when varargs call.)
14822  *  hClass - the type handle for the struct variable.
14823  *
14824  *  TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play,
14825  *        however, we could do a codegen of transferring from int to float registers
14826  *        (transfer, not a cast.)
14827  *
14828  */
14829 void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTreePtr src, CORINFO_CLASS_HANDLE hClass)
14830 {
14831     if (src->gtOper == GT_CALL && src->gtCall.IsVarargs() && IsHfa(hClass))
14832     {
14833         int       hfaSlots = GetHfaCount(hClass);
14834         var_types hfaType  = GetHfaType(hClass);
14835
14836         // If the call is varargs, the importer morphs the method's return type to "int" irrespective of its
14837         // original struct/float type, because the varargs ABI returns such values in integer registers.
14838         // We don't want struct promotion to replace an expression like this:
14839         //   lclFld_int = callvar_int() into lclFld_float = callvar_int();
14840         // This means an int is getting assigned to a float without a cast. Prevent the promotion.
14841         if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) ||
14842             (hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES))
14843         {
14844             // Make sure this struct type stays as struct so we can receive the call in a struct.
14845             lvaTable[tmpNum].lvIsMultiRegRet = true;
14846         }
14847     }
14848 }
14849 #endif // _TARGET_ARM_
14850
14851 #if FEATURE_MULTIREG_RET
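// Spill a multi-reg returned value 'op' (of struct type 'hClass') to a freshly grabbed temp and
// return a use of that temp. The temp is marked lvIsMultiRegRet (and the use GTF_DONT_CSE) so that
// its fields are not promoted or CSE'd apart from one another.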
14852 GenTreePtr Compiler::impAssignMultiRegTypeToVar(GenTreePtr op, CORINFO_CLASS_HANDLE hClass)
14853 {
14854     unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return."));
14855     impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_NONE);
14856     GenTreePtr ret = gtNewLclvNode(tmpNum, op->gtType);
14857
14858     // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
14859     ret->gtFlags |= GTF_DONT_CSE;
14860
14861     assert(IsMultiRegReturnedType(hClass));
14862
14863     // Mark the var so that fields are not promoted and stay together.
14864     lvaTable[tmpNum].lvIsMultiRegRet = true;
14865
14866     return ret;
14867 }
14868 #endif // FEATURE_MULTIREG_RET
14869
14870 // Import a return instruction.
14871 // Returns false if inlining was aborted (a fatal inline observation was recorded).
14872 // 'opcode' can be CEE_RET, or a call opcode in the case of a tail.call.
14873 bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE& opcode)
14874 {
14875     if (tiVerificationNeeded)
14876     {
14877         verVerifyThisPtrInitialised();
14878
14879         unsigned expectedStack = 0;
14880         if (info.compRetType != TYP_VOID)
14881         {
14882             typeInfo tiVal = impStackTop().seTypeInfo;
14883             typeInfo tiDeclared =
14884                 verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
14885
14886             Verify(!verIsByRefLike(tiDeclared) || verIsSafeToReturnByRef(tiVal), "byref return");
14887
14888             Verify(tiCompatibleWith(tiVal, tiDeclared.NormaliseForStack(), true), "type mismatch");
14889             expectedStack = 1;
14890         }
14891         Verify(verCurrentState.esStackDepth == expectedStack, "stack non-empty on return");
14892     }
14893
14894     GenTree*             op2       = nullptr;
14895     GenTree*             op1       = nullptr;
14896     CORINFO_CLASS_HANDLE retClsHnd = nullptr;
14897
14898     if (info.compRetType != TYP_VOID)
14899     {
14900         StackEntry se = impPopStack(retClsHnd);
14901         op2           = se.val;
14902
14903         if (!compIsForInlining())
14904         {
14905             impBashVarAddrsToI(op2);
14906             op2 = impImplicitIorI4Cast(op2, info.compRetType);
14907             op2 = impImplicitR4orR8Cast(op2, info.compRetType);
14908             assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) ||
14909                       ((op2->TypeGet() == TYP_I_IMPL) && (info.compRetType == TYP_BYREF)) ||
14910                       ((op2->TypeGet() == TYP_BYREF) && (info.compRetType == TYP_I_IMPL)) ||
14911                       (varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) ||
14912                       (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType)));
14913
14914 #ifdef DEBUG
14915             if (opts.compGcChecks && info.compRetType == TYP_REF)
14916             {
14917                 // DDB 3483  : JIT Stress: early termination of GC ref's life time in exception code path
14918                 // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with
14919                 // one-return BB.
14920
14921                 assert(op2->gtType == TYP_REF);
14922
14923                 // confirm that the argument is a GC pointer (for debugging (GC stress))
14924                 GenTreeArgList* args = gtNewArgList(op2);
14925                 op2                  = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, 0, args);
14926
14927                 if (verbose)
14928                 {
14929                     printf("\ncompGcChecks tree:\n");
14930                     gtDispTree(op2);
14931                 }
14932             }
14933 #endif
14934         }
14935         else
14936         {
14937             // inlinee's stack should be empty now.
14938             assert(verCurrentState.esStackDepth == 0);
14939
14940 #ifdef DEBUG
14941             if (verbose)
14942             {
14943                 printf("\n\n    Inlinee Return expression (before normalization)  =>\n");
14944                 gtDispTree(op2);
14945             }
14946 #endif
14947
14948             // Make sure the type matches the original call.
14949
14950             var_types returnType       = genActualType(op2->gtType);
14951             var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType;
14952             if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT))
14953             {
14954                 originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass);
14955             }
14956
14957             if (returnType != originalCallType)
14958             {
14959                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH);
14960                 return false;
14961             }
14962
14963             // Below, we are going to set impInlineInfo->retExpr to the tree with the return
14964             // expression. At this point, retExpr could already be set if there are multiple
14965             // return blocks (meaning lvaInlineeReturnSpillTemp != BAD_VAR_NUM) and one of
14966             // the other blocks already set it. If there is only a single return block,
14967             // retExpr shouldn't be set. However, this is not true if we reimport a block
14968             // with a return. In that case, retExpr will be set, then the block will be
14969             // reimported, but retExpr won't get cleared as part of setting the block to
14970             // be reimported. The reimported retExpr value should be the same, so even if
14971             // we don't unconditionally overwrite it, it shouldn't matter.
14972             if (info.compRetNativeType != TYP_STRUCT)
14973             {
14974                 // compRetNativeType is not TYP_STRUCT.
14975                 // This implies it could be either a scalar type or SIMD vector type or
14976                 // a struct type that can be normalized to a scalar type.
14977
14978                 if (varTypeIsStruct(info.compRetType))
14979                 {
14980                     noway_assert(info.compRetBuffArg == BAD_VAR_NUM);
14981                     // adjust the type away from struct to integral
14982                     // and no normalizing
14983                     op2 = impFixupStructReturnType(op2, retClsHnd);
14984                 }
14985                 else
14986                 {
14987                     // Do we have to normalize?
14988                     var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType);
14989                     if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) &&
14990                         fgCastNeeded(op2, fncRealRetType))
14991                     {
14992                         // Small-typed return values are normalized by the callee
14993                         op2 = gtNewCastNode(TYP_INT, op2, fncRealRetType);
14994                     }
14995                 }
14996
14997                 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
14998                 {
14999                     assert(info.compRetNativeType != TYP_VOID &&
15000                            (fgMoreThanOneReturnBlock() || impInlineInfo->hasPinnedLocals));
15001
15002                     // This is a bit of a workaround...
15003                     // If we are inlining a call that returns a struct, where the actual "native" return type is
15004                     // not a struct (for example, the struct is composed of exactly one int, and the native
15005                     // return type is thus an int), and the inlinee has multiple return blocks (thus,
15006                     // lvaInlineeReturnSpillTemp is != BAD_VAR_NUM, and is the index of a local var that is set
15007                     // to the *native* return type), and at least one of the return blocks is the result of
15008                     // a call, then we have a problem. The situation is like this (from a failed test case):
15009                     //
15010                     // inliner:
15011                     //      // Note: valuetype plinq_devtests.LazyTests/LIX is a struct with only a single int
15012                     //      call !!0 [mscorlib]System.Threading.LazyInitializer::EnsureInitialized<valuetype
15013                     //      plinq_devtests.LazyTests/LIX>(!!0&, bool&, object&, class [mscorlib]System.Func`1<!!0>)
15014                     //
15015                     // inlinee:
15016                     //      ...
15017                     //      ldobj      !!T                 // this gets bashed to a GT_LCL_FLD, type TYP_INT
15018                     //      ret
15019                     //      ...
15020                     //      call       !!0 System.Threading.LazyInitializer::EnsureInitializedCore<!!0>(!!0&, bool&,
15021                     //      object&, class System.Func`1<!!0>)
15022                     //      ret
15023                     //
15024                     // In the code above, when we call impFixupStructReturnType(), we will change the op2 return type
15025                     // of the inlinee return node, but we don't do that for GT_CALL nodes, which we delay until
15026                     // morphing when we call fgFixupStructReturn(). We do this, apparently, to handle nested
15027                     // inlining properly by leaving the correct type on the GT_CALL node through importing.
15028                     //
15029                     // To fix this, for this case, we temporarily change the GT_CALL node type to the
15030                     // native return type, which is what it will be set to eventually. We generate the
15031                     // assignment to the return temp, using the correct type, and then restore the GT_CALL
15032                     // node type. During morphing, the GT_CALL will get the correct, final, native return type.
15033
15034                     bool restoreType = false;
15035                     if ((op2->OperGet() == GT_CALL) && (info.compRetType == TYP_STRUCT))
15036                     {
15037                         noway_assert(op2->TypeGet() == TYP_STRUCT);
15038                         op2->gtType = info.compRetNativeType;
15039                         restoreType = true;
15040                     }
15041
15042                     impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15043                                      (unsigned)CHECK_SPILL_ALL);
15044
15045                     GenTreePtr tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, op2->TypeGet());
15046
15047                     if (restoreType)
15048                     {
15049                         op2->gtType = TYP_STRUCT; // restore it to what it was
15050                     }
15051
15052                     op2 = tmpOp2;
15053
15054 #ifdef DEBUG
15055                     if (impInlineInfo->retExpr)
15056                     {
15057                         // Some other block(s) have seen the CEE_RET first.
15058                         // Better they spilled to the same temp.
15059                         assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR);
15060                         assert(impInlineInfo->retExpr->gtLclVarCommon.gtLclNum == op2->gtLclVarCommon.gtLclNum);
15061                     }
15062 #endif
15063                 }
15064
15065 #ifdef DEBUG
15066                 if (verbose)
15067                 {
15068                     printf("\n\n    Inlinee Return expression (after normalization) =>\n");
15069                     gtDispTree(op2);
15070                 }
15071 #endif
15072
15073                 // Report the return expression
15074                 impInlineInfo->retExpr = op2;
15075             }
15076             else
15077             {
15078                 // compRetNativeType is TYP_STRUCT.
15079                 // This implies that struct return via RetBuf arg or multi-reg struct return
15080
15081                 GenTreePtr iciCall = impInlineInfo->iciCall;
15082                 assert(iciCall->gtOper == GT_CALL);
15083
15084                 // Assign the inlinee return into a spill temp.
15085                 // spill temp only exists if there are multiple return points
15086                 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15087                 {
15088                     // in this case we have to insert multiple struct copies to the temp
15089                     // and the retexpr is just the temp.
15090                     assert(info.compRetNativeType != TYP_VOID);
15091                     assert(fgMoreThanOneReturnBlock() || impInlineInfo->hasPinnedLocals);
15092
15093                     impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15094                                      (unsigned)CHECK_SPILL_ALL);
15095                 }
15096
15097 #if defined(_TARGET_ARM_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15098 #if defined(_TARGET_ARM_)
15099                 // TODO-ARM64-NYI: HFA
15100                 // TODO-AMD64-Unix and TODO-ARM: once the ARM64 functionality is implemented, the
15101                 // next ifdefs could be refactored into a single method with the ifdef inside.
15102                 if (IsHfa(retClsHnd))
15103                 {
15104 // Same as !IsHfa but just don't bother with impAssignStructPtr.
15105 #else  // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15106                 ReturnTypeDesc retTypeDesc;
15107                 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15108                 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
15109
15110                 if (retRegCount != 0)
15111                 {
15112                     // If single eightbyte, the return type would have been normalized and there won't be a temp var.
15113                     // This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes -
15114                     // max allowed.)
15115                     assert(retRegCount == MAX_RET_REG_COUNT);
15116                     // Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr.
15117                     CLANG_FORMAT_COMMENT_ANCHOR;
15118 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15119
15120                     if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15121                     {
15122                         if (!impInlineInfo->retExpr)
15123                         {
15124 #if defined(_TARGET_ARM_)
15125                             impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType);
15126 #else  // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15127                             // The inlinee compiler has figured out the type of the temp already. Use it here.
15128                             impInlineInfo->retExpr =
15129                                 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
15130 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15131                         }
15132                     }
15133                     else
15134                     {
15135                         impInlineInfo->retExpr = op2;
15136                     }
15137                 }
15138                 else
15139 #elif defined(_TARGET_ARM64_)
15140                 ReturnTypeDesc retTypeDesc;
15141                 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15142                 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
15143
15144                 if (retRegCount != 0)
15145                 {
15146                     assert(!iciCall->AsCall()->HasRetBufArg());
15147                     assert(retRegCount >= 2);
15148                     if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15149                     {
15150                         if (!impInlineInfo->retExpr)
15151                         {
15152                             // The inlinee compiler has figured out the type of the temp already. Use it here.
15153                             impInlineInfo->retExpr =
15154                                 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
15155                         }
15156                     }
15157                     else
15158                     {
15159                         impInlineInfo->retExpr = op2;
15160                     }
15161                 }
15162                 else
15163 #endif // defined(_TARGET_ARM64_)
15164                 {
15165                     assert(iciCall->AsCall()->HasRetBufArg());
15166                     GenTreePtr dest = gtCloneExpr(iciCall->gtCall.gtCallArgs->gtOp.gtOp1);
15167                     // spill temp only exists if there are multiple return points
15168                     if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15169                     {
15170                         // if this is the first return we have seen set the retExpr
15171                         if (!impInlineInfo->retExpr)
15172                         {
15173                             impInlineInfo->retExpr =
15174                                 impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType),
15175                                                    retClsHnd, (unsigned)CHECK_SPILL_ALL);
15176                         }
15177                     }
15178                     else
15179                     {
15180                         impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
15181                     }
15182                 }
15183             }
15184         }
15185     }
15186
15187     if (compIsForInlining())
15188     {
15189         return true;
15190     }
15191
15192     if (info.compRetType == TYP_VOID)
15193     {
15194         // return void
15195         op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
15196     }
15197     else if (info.compRetBuffArg != BAD_VAR_NUM)
15198     {
15199         // Assign value to return buff (first param)
15200         GenTreePtr retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF, impCurStmtOffs);
15201
15202         op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
15203         impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
15204
15205         // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX).
15206         CLANG_FORMAT_COMMENT_ANCHOR;
15207
15208 #if defined(_TARGET_AMD64_)
15209
15210         // The x64 (System V and Win64) calling conventions require the implicit return buffer
15211         // to be returned explicitly (in RAX).
15212         // Change the return type to be BYREF.
15213         op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
15214 #else  // !defined(_TARGET_AMD64_)
15215         // On non-AMD64 targets the profiler hook requires the implicit RetBuf to be returned explicitly.
15216         // In that case the return value of the function is changed to BYREF.
15217         // If the profiler hook is not needed, the return type of the function is TYP_VOID.
15218         if (compIsProfilerHookNeeded())
15219         {
15220             op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
15221         }
15222         else
15223         {
15224             // return void
15225             op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
15226         }
15227 #endif // !defined(_TARGET_AMD64_)
15228     }
15229     else if (varTypeIsStruct(info.compRetType))
15230     {
15231 #if !FEATURE_MULTIREG_RET
15232         // For both ARM architectures the HFA native types are maintained as structs.
15233         // Also on System V AMD64 the multireg structs returns are also left as structs.
15234         noway_assert(info.compRetNativeType != TYP_STRUCT);
15235 #endif
15236         op2 = impFixupStructReturnType(op2, retClsHnd);
15237         // return op2
15238         op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetNativeType), op2);
15239     }
15240     else
15241     {
15242         // return op2
15243         op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2);
15244     }
15245
15246     // We must have imported a tailcall and jumped to RET
15247     if (prefixFlags & PREFIX_TAILCALL)
15248     {
15249 #ifndef _TARGET_AMD64_
15250         // Jit64 compat:
15251         // This cannot be asserted on Amd64 since we permit the following IL pattern:
15252         //      tail.call
15253         //      pop
15254         //      ret
15255         assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode));
15256 #endif
15257
15258         opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES
15259
15260         // impImportCall() would have already appended TYP_VOID calls
15261         if (info.compRetType == TYP_VOID)
15262         {
15263             return true;
15264         }
15265     }
15266
15267     impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
15268 #ifdef DEBUG
15269     // Remember at which BC offset the tree was finished
15270     impNoteLastILoffs();
15271 #endif
15272     return true;
15273 }
15274
15275 /*****************************************************************************
15276  *  Mark the block as unimported.
15277  *  Note that the caller is responsible for calling impImportBlockPending(),
15278  *  with the appropriate stack-state
15279  */
15280
15281 inline void Compiler::impReimportMarkBlock(BasicBlock* block)
15282 {
15283 #ifdef DEBUG
15284     if (verbose && (block->bbFlags & BBF_IMPORTED))
15285     {
15286         printf("\nBB%02u will be reimported\n", block->bbNum);
15287     }
15288 #endif
15289
15290     block->bbFlags &= ~BBF_IMPORTED;
15291 }
15292
15293 /*****************************************************************************
15294  *  Mark the successors of the given block as unimported.
15295  *  Note that the caller is responsible for calling impImportBlockPending()
15296  *  for all the successors, with the appropriate stack-state.
15297  */
15298
15299 void Compiler::impReimportMarkSuccessors(BasicBlock* block)
15300 {
15301     for (unsigned i = 0; i < block->NumSucc(); i++)
15302     {
15303         impReimportMarkBlock(block->GetSucc(i));
15304     }
15305 }
15306
15307 /*****************************************************************************
15308  *
15309  *  Exception filter wrapper that handles only the passed-in verification exception code
15310  *  (SEH_VERIFICATION_EXCEPTION) and lets any other exception continue its search.
15311  */
15312
15313 LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
15314 {
15315     if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION)
15316     {
15317         return EXCEPTION_EXECUTE_HANDLER;
15318     }
15319
15320     return EXCEPTION_CONTINUE_SEARCH;
15321 }
15322
15323 void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
15324 {
15325     assert(block->hasTryIndex());
15326     assert(!compIsForInlining());
15327
15328     unsigned  tryIndex = block->getTryIndex();
15329     EHblkDsc* HBtab    = ehGetDsc(tryIndex);
15330
15331     if (isTryStart)
15332     {
15333         assert(block->bbFlags & BBF_TRY_BEG);
15334
15335         // The Stack must be empty
15336         //
15337         if (block->bbStkDepth != 0)
15338         {
15339             BADCODE("Evaluation stack must be empty on entry into a try block");
15340         }
15341     }
15342
15343     // Save the stack contents, we'll need to restore it later
15344     //
15345     SavedStack blockState;
15346     impSaveStackState(&blockState, false);
15347
15348     while (HBtab != nullptr)
15349     {
15350         if (isTryStart)
15351         {
15352             // Are we verifying that an instance constructor properly initializes its 'this' pointer once?
15353             // We do not allow the 'this' pointer to be uninitialized when entering most kinds of try regions
15354             //
15355             if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
15356             {
15357                 // We trigger an invalid program exception here unless we have a try/fault region.
15358                 //
15359                 if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter())
15360                 {
15361                     BADCODE(
15362                         "The 'this' pointer of an instance constructor is not initialized upon entry to a try region");
15363                 }
15364                 else
15365                 {
15366                     // Allow a try/fault region to proceed.
15367                     assert(HBtab->HasFaultHandler());
15368                 }
15369             }
15370
15371             /* Recursively process the handler block */
15372             BasicBlock* hndBegBB = HBtab->ebdHndBeg;
15373
15374             //  Construct the proper verification stack state
15375             //   either empty or one that contains just
15376             //   the Exception Object that we are dealing with
15377             //
15378             verCurrentState.esStackDepth = 0;
15379
15380             if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp))
15381             {
15382                 CORINFO_CLASS_HANDLE clsHnd;
15383
15384                 if (HBtab->HasFilter())
15385                 {
15386                     clsHnd = impGetObjectClass();
15387                 }
15388                 else
15389                 {
15390                     CORINFO_RESOLVED_TOKEN resolvedToken;
15391
15392                     resolvedToken.tokenContext = impTokenLookupContextHandle;
15393                     resolvedToken.tokenScope   = info.compScopeHnd;
15394                     resolvedToken.token        = HBtab->ebdTyp;
15395                     resolvedToken.tokenType    = CORINFO_TOKENKIND_Class;
15396                     info.compCompHnd->resolveToken(&resolvedToken);
15397
15398                     clsHnd = resolvedToken.hClass;
15399                 }
15400
15401                 // Push the catch arg onto the stack, spill to a temp if necessary
15402                 // Note: can update HBtab->ebdHndBeg!
15403                 hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd);
15404             }
15405
15406             // Queue up the handler for importing
15407             //
15408             impImportBlockPending(hndBegBB);
15409
15410             if (HBtab->HasFilter())
15411             {
15412                 /* @VERIFICATION : Ideally the end of filter state should get
15413                    propagated to the catch handler, this is an incompleteness,
15414                    but is not a security/compliance issue, since the only
15415                    interesting state is the 'thisInit' state.
15416                    */
15417
15418                 verCurrentState.esStackDepth = 0;
15419
15420                 BasicBlock* filterBB = HBtab->ebdFilter;
15421
15422                 // Push the catch arg onto the stack, spill to a temp if necessary
15423                 // Note: can update HBtab->ebdFilter!
15424                 filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass());
15425
15426                 impImportBlockPending(filterBB);
15427             }
15428         }
15429         else if (verTrackObjCtorInitState && HBtab->HasFaultHandler())
15430         {
15431             /* Recursively process the handler block */
15432
15433             verCurrentState.esStackDepth = 0;
15434
15435             // Queue up the fault handler for importing
15436             //
15437             impImportBlockPending(HBtab->ebdHndBeg);
15438         }
15439
15440         // Now process our enclosing try index (if any)
15441         //
15442         tryIndex = HBtab->ebdEnclosingTryIndex;
15443         if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
15444         {
15445             HBtab = nullptr;
15446         }
15447         else
15448         {
15449             HBtab = ehGetDsc(tryIndex);
15450         }
15451     }
15452
15453     // Restore the stack contents
15454     impRestoreStackState(&blockState);
15455 }
15456
15457 //***************************************************************
15458 // Import the instructions for the given basic block.  Perform
15459 // verification, throwing an exception on failure.  Push any successor blocks that are enabled for the first
15460 // time, or whose verification pre-state is changed.
15461
15462 #ifdef _PREFAST_
15463 #pragma warning(push)
15464 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
15465 #endif
15466 void Compiler::impImportBlock(BasicBlock* block)
15467 {
15468     // BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to
15469     // handle them specially. In particular, there is no IL to import for them, but we do need
15470     // to mark them as imported and put their successors on the pending import list.
15471     if (block->bbFlags & BBF_INTERNAL)
15472     {
15473         JITDUMP("Marking BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", block->bbNum);
15474         block->bbFlags |= BBF_IMPORTED;
15475
15476         for (unsigned i = 0; i < block->NumSucc(); i++)
15477         {
15478             impImportBlockPending(block->GetSucc(i));
15479         }
15480
15481         return;
15482     }
15483
15484     bool markImport;
15485
15486     assert(block);
15487
15488     /* Make the block globally available */
15489
15490     compCurBB = block;
15491
15492 #ifdef DEBUG
15493     /* Initialize the debug variables */
15494     impCurOpcName = "unknown";
15495     impCurOpcOffs = block->bbCodeOffs;
15496 #endif
15497
15498     /* Set the current stack state to the merged result */
15499     verResetCurrentState(block, &verCurrentState);
15500
15501     /* Now walk the code and import the IL into GenTrees */
15502
15503     struct FilterVerificationExceptionsParam
15504     {
15505         Compiler*   pThis;
15506         BasicBlock* block;
15507     };
15508     FilterVerificationExceptionsParam param;
15509
15510     param.pThis = this;
15511     param.block = block;
15512
15513     PAL_TRY(FilterVerificationExceptionsParam*, pParam, &param)
15514     {
15515         /* @VERIFICATION : For now, the only state propagation from a try
15516            to its handler is the "thisInit" state (the stack is empty at the start of a try).
15517            In general, for state that we track in verification, we need to
15518            model the possibility that an exception might happen at any IL
15519            instruction, so we really need to merge all states that obtain
15520            between IL instructions in a try block into the start states of
15521            all handlers.
15522
15523            However, we do not allow the 'this' pointer to be uninitialized when
15524            entering most kinds of try regions (only try/fault are allowed to have
15525            an uninitialized this pointer on entry to the try)
15526
15527            Fortunately, the stack is thrown away when an exception
15528            leads to a handler, so we don't have to worry about that.
15529            We DO, however, have to worry about the "thisInit" state.
15530            But only for the try/fault case.
15531
15532            The only allowed transition is from TIS_Uninit to TIS_Init.
15533
15534            So for a try/fault region for the fault handler block
15535            we will merge the start state of the try begin
15536            and the post-state of each block that is part of this try region
15537         */
15538
15539         // merge the start state of the try begin
15540         //
15541         if (pParam->block->bbFlags & BBF_TRY_BEG)
15542         {
15543             pParam->pThis->impVerifyEHBlock(pParam->block, true);
15544         }
15545
15546         pParam->pThis->impImportBlockCode(pParam->block);
15547
15548         // As discussed above:
15549         // merge the post-state of each block that is part of this try region
15550         //
15551         if (pParam->block->hasTryIndex())
15552         {
15553             pParam->pThis->impVerifyEHBlock(pParam->block, false);
15554         }
15555     }
15556     PAL_EXCEPT_FILTER(FilterVerificationExceptions)
15557     {
15558         verHandleVerificationFailure(block DEBUGARG(false));
15559     }
15560     PAL_ENDTRY
15561
15562     if (compDonotInline())
15563     {
15564         return;
15565     }
15566
15567     assert(!compDonotInline());
15568
15569     markImport = false;
15570
15571 SPILLSTACK:
15572
15573     unsigned    baseTmp             = NO_BASE_TMP; // input temps assigned to successor blocks
15574     bool        reimportSpillClique = false;
15575     BasicBlock* tgtBlock            = nullptr;
15576
15577     /* If the stack is non-empty, we might have to spill its contents */
15578
15579     if (verCurrentState.esStackDepth != 0)
15580     {
15581         impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something
15582                                   // on the stack, its lifetime is hard to determine, simply
15583                                   // don't reuse such temps.
15584
15585         GenTreePtr addStmt = nullptr;
15586
15587         /* Do the successors of 'block' have any other predecessors ?
15588            We do not want to do some of the optimizations related to multiRef
15589            if we can reimport blocks */
15590
15591         unsigned multRef = impCanReimport ? unsigned(~0) : 0;
15592
15593         switch (block->bbJumpKind)
15594         {
15595             case BBJ_COND:
15596
15597                 /* Temporarily remove the 'jtrue' from the end of the tree list */
15598
15599                 assert(impTreeLast);
15600                 assert(impTreeLast->gtOper == GT_STMT);
15601                 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_JTRUE);
15602
15603                 addStmt     = impTreeLast;
15604                 impTreeLast = impTreeLast->gtPrev;
15605
15606                 /* Note if the next block has more than one ancestor */
15607
15608                 multRef |= block->bbNext->bbRefs;
15609
15610                 /* Does the next block have temps assigned? */
15611
15612                 baseTmp  = block->bbNext->bbStkTempsIn;
15613                 tgtBlock = block->bbNext;
15614
15615                 if (baseTmp != NO_BASE_TMP)
15616                 {
15617                     break;
15618                 }
15619
15620                 /* Try the target of the jump then */
15621
15622                 multRef |= block->bbJumpDest->bbRefs;
15623                 baseTmp  = block->bbJumpDest->bbStkTempsIn;
15624                 tgtBlock = block->bbJumpDest;
15625                 break;
15626
15627             case BBJ_ALWAYS:
15628                 multRef |= block->bbJumpDest->bbRefs;
15629                 baseTmp  = block->bbJumpDest->bbStkTempsIn;
15630                 tgtBlock = block->bbJumpDest;
15631                 break;
15632
15633             case BBJ_NONE:
15634                 multRef |= block->bbNext->bbRefs;
15635                 baseTmp  = block->bbNext->bbStkTempsIn;
15636                 tgtBlock = block->bbNext;
15637                 break;
15638
15639             case BBJ_SWITCH:
15640
15641                 BasicBlock** jmpTab;
15642                 unsigned     jmpCnt;
15643
15644                 /* Temporarily remove the GT_SWITCH from the end of the tree list */
15645
15646                 assert(impTreeLast);
15647                 assert(impTreeLast->gtOper == GT_STMT);
15648                 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_SWITCH);
15649
15650                 addStmt     = impTreeLast;
15651                 impTreeLast = impTreeLast->gtPrev;
15652
15653                 jmpCnt = block->bbJumpSwt->bbsCount;
15654                 jmpTab = block->bbJumpSwt->bbsDstTab;
15655
15656                 do
15657                 {
15658                     tgtBlock = (*jmpTab);
15659
15660                     multRef |= tgtBlock->bbRefs;
15661
15662                     // Thanks to spill cliques, we should have assigned all or none
15663                     assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn));
15664                     baseTmp = tgtBlock->bbStkTempsIn;
15665                     if (multRef > 1)
15666                     {
15667                         break;
15668                     }
15669                 } while (++jmpTab, --jmpCnt);
15670
15671                 break;
15672
15673             case BBJ_CALLFINALLY:
15674             case BBJ_EHCATCHRET:
15675             case BBJ_RETURN:
15676             case BBJ_EHFINALLYRET:
15677             case BBJ_EHFILTERRET:
15678             case BBJ_THROW:
15679                 NO_WAY("can't have 'unreached' end of BB with non-empty stack");
15680                 break;
15681
15682             default:
15683                 noway_assert(!"Unexpected bbJumpKind");
15684                 break;
15685         }
15686
15687         assert(multRef >= 1);
15688
15689         /* Do we have a base temp number? */
15690
15691         bool newTemps = (baseTmp == NO_BASE_TMP);
15692
15693         if (newTemps)
15694         {
15695             /* Grab enough temps for the whole stack */
15696             baseTmp = impGetSpillTmpBase(block);
15697         }
15698
15699         /* Spill all stack entries into temps */
15700         unsigned level, tempNum;
15701
15702         JITDUMP("\nSpilling stack entries into temps\n");
15703         for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++)
15704         {
15705             GenTreePtr tree = verCurrentState.esStack[level].val;
15706
15707             /* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from
15708                the other. This should merge to a byref in unverifiable code.
15709                However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the
15710                successor would be imported assuming there was a TYP_I_IMPL on
15711                the stack. Thus the value would not get GC-tracked. Hence,
15712                change the temp to TYP_BYREF and reimport the successors.
15713                Note: We should only allow this in unverifiable code.
15714             */
15715             if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL && !verNeedsVerification())
15716             {
15717                 lvaTable[tempNum].lvType = TYP_BYREF;
15718                 impReimportMarkSuccessors(block);
15719                 markImport = true;
15720             }
15721
15722 #ifdef _TARGET_64BIT_
15723             if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT)
15724             {
15725                 if (tiVerificationNeeded && tgtBlock->bbEntryState != nullptr &&
15726                     (tgtBlock->bbFlags & BBF_FAILED_VERIFICATION) == 0)
15727                 {
15728                     // Merge the current state into the entry state of block;
15729                     // the call to verMergeEntryStates must have changed
15730                     // the entry state of the block by merging the int local var
15731                     // and the native-int stack entry.
15732                     bool changed = false;
15733                     if (verMergeEntryStates(tgtBlock, &changed))
15734                     {
15735                         impRetypeEntryStateTemps(tgtBlock);
15736                         impReimportBlockPending(tgtBlock);
15737                         assert(changed);
15738                     }
15739                     else
15740                     {
15741                         tgtBlock->bbFlags |= BBF_FAILED_VERIFICATION;
15742                         break;
15743                     }
15744                 }
15745
15746                 // Some other block in the spill clique set this to "int", but now we have "native int".
15747                 // Change the type and go back to re-import any blocks that used the wrong type.
15748                 lvaTable[tempNum].lvType = TYP_I_IMPL;
15749                 reimportSpillClique      = true;
15750             }
15751             else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL)
15752             {
15753                 // Spill clique has decided this should be "native int", but this block only pushes an "int".
15754                 // Insert a sign-extension to "native int" so we match the clique.
15755                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
15756             }
15757
15758             // Consider the case where one branch left a 'byref' on the stack and the other leaves
15759             // an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same
15760             // size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64
15761             // behavior instead of asserting and then generating bad code (where we save/restore the
15762             // low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been
15763             // imported already, we need to change the type of the local and reimport the spill clique.
15764             // If the 'byref' side has been imported, we insert a cast from int to 'native int' to match
15765             // the 'byref' size.
15766             if (!tiVerificationNeeded)
15767             {
15768                 if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT)
15769                 {
15770                     // Some other block in the spill clique set this to "int", but now we have "byref".
15771                     // Change the type and go back to re-import any blocks that used the wrong type.
15772                     lvaTable[tempNum].lvType = TYP_BYREF;
15773                     reimportSpillClique      = true;
15774                 }
15775                 else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF)
15776                 {
15777                     // Spill clique has decided this should be "byref", but this block only pushes an "int".
15778                     // Insert a sign-extension to "native int" so we match the clique size.
15779                     verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
15780                 }
15781             }
15782 #endif // _TARGET_64BIT_
15783
15784 #if FEATURE_X87_DOUBLES
15785             // X87 stack doesn't differentiate between float/double
15786             // so promoting is no big deal.
15787             // For everybody else, keep it as float until we have a collision and then promote,
15788             // just like x64's TYP_INT<->TYP_I_IMPL.
15789
15790             if (multRef > 1 && tree->gtType == TYP_FLOAT)
15791             {
15792                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
15793             }
15794
15795 #else // !FEATURE_X87_DOUBLES
15796
15797             if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT)
15798             {
15799                 // Some other block in the spill clique set this to "float", but now we have "double".
15800                 // Change the type and go back to re-import any blocks that used the wrong type.
15801                 lvaTable[tempNum].lvType = TYP_DOUBLE;
15802                 reimportSpillClique      = true;
15803             }
15804             else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE)
15805             {
15806                 // Spill clique has decided this should be "double", but this block only pushes a "float".
15807                 // Insert a cast to "double" so we match the clique.
15808                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
15809             }
15810
15811 #endif // FEATURE_X87_DOUBLES
15812
15813             /* If addStmt has a reference to tempNum (can only happen if we
15814                are spilling to the temps already used by a previous block),
15815                we need to spill addStmt */
15816
15817             if (addStmt && !newTemps && gtHasRef(addStmt->gtStmt.gtStmtExpr, tempNum, false))
15818             {
15819                 GenTreePtr addTree = addStmt->gtStmt.gtStmtExpr;
15820
15821                 if (addTree->gtOper == GT_JTRUE)
15822                 {
15823                     GenTreePtr relOp = addTree->gtOp.gtOp1;
15824                     assert(relOp->OperIsCompare());
15825
15826                     var_types type = genActualType(relOp->gtOp.gtOp1->TypeGet());
15827
15828                     if (gtHasRef(relOp->gtOp.gtOp1, tempNum, false))
15829                     {
15830                         unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1"));
15831                         impAssignTempGen(temp, relOp->gtOp.gtOp1, level);
15832                         type              = genActualType(lvaTable[temp].TypeGet());
15833                         relOp->gtOp.gtOp1 = gtNewLclvNode(temp, type);
15834                     }
15835
15836                     if (gtHasRef(relOp->gtOp.gtOp2, tempNum, false))
15837                     {
15838                         unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2"));
15839                         impAssignTempGen(temp, relOp->gtOp.gtOp2, level);
15840                         type              = genActualType(lvaTable[temp].TypeGet());
15841                         relOp->gtOp.gtOp2 = gtNewLclvNode(temp, type);
15842                     }
15843                 }
15844                 else
15845                 {
15846                     assert(addTree->gtOper == GT_SWITCH && genActualType(addTree->gtOp.gtOp1->gtType) == TYP_I_IMPL);
15847
15848                     unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH"));
15849                     impAssignTempGen(temp, addTree->gtOp.gtOp1, level);
15850                     addTree->gtOp.gtOp1 = gtNewLclvNode(temp, TYP_I_IMPL);
15851                 }
15852             }
15853
15854             /* Spill the stack entry, and replace with the temp */
15855
15856             if (!impSpillStackEntry(level, tempNum
15857 #ifdef DEBUG
15858                                     ,
15859                                     true, "Spill Stack Entry"
15860 #endif
15861                                     ))
15862             {
15863                 if (markImport)
15864                 {
15865                     BADCODE("bad stack state");
15866                 }
15867
15868                 // Oops. Something went wrong when spilling. Bad code.
15869                 verHandleVerificationFailure(block DEBUGARG(true));
15870
15871                 goto SPILLSTACK;
15872             }
15873         }
15874
15875         /* Put back the 'jtrue'/'switch' if we removed it earlier */
15876
15877         if (addStmt)
15878         {
15879             impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE);
15880         }
15881     }
15882
15883     // Some of the append/spill logic works on compCurBB
15884
15885     assert(compCurBB == block);
15886
15887     /* Save the tree list in the block */
15888     impEndTreeList(block);
15889
15890     // impEndTreeList sets BBF_IMPORTED on the block
15891     // We do *NOT* want to set it later than this because
15892     // impReimportSpillClique might clear it if this block is both a
15893     // predecessor and successor in the current spill clique
15894     assert(block->bbFlags & BBF_IMPORTED);
15895
15896     // If we had a int/native int, or float/double collision, we need to re-import
15897     if (reimportSpillClique)
15898     {
15899         // This will re-import all the successors of block (as well as each of their predecessors)
15900         impReimportSpillClique(block);
15901
15902         // For blocks that haven't been imported yet, we still need to mark them as pending import.
15903         for (unsigned i = 0; i < block->NumSucc(); i++)
15904         {
15905             BasicBlock* succ = block->GetSucc(i);
15906             if ((succ->bbFlags & BBF_IMPORTED) == 0)
15907             {
15908                 impImportBlockPending(succ);
15909             }
15910         }
15911     }
15912     else // the normal case
15913     {
15914         // otherwise just import the successors of block
15915
15916         /* Does this block jump to any other blocks? */
15917         for (unsigned i = 0; i < block->NumSucc(); i++)
15918         {
15919             impImportBlockPending(block->GetSucc(i));
15920         }
15921     }
15922 }
15923 #ifdef _PREFAST_
15924 #pragma warning(pop)
15925 #endif
15926
15927 /*****************************************************************************/
15928 //
15929 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
15930 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
15931 // impPendingBlockMembers).  Merges the current verification state into the verification state of "block"
15932 // (its "pre-state").
15933
15934 void Compiler::impImportBlockPending(BasicBlock* block)
15935 {
15936 #ifdef DEBUG
15937     if (verbose)
15938     {
15939         printf("\nimpImportBlockPending for BB%02u\n", block->bbNum);
15940     }
15941 #endif
15942
15943     // We will add a block to the pending set if it has not already been imported (or needs to be re-imported),
15944     // or if it has, but merging in a predecessor's post-state changes the block's pre-state.
15945     // (When we're doing verification, we always attempt the merge to detect verification errors.)
15946
15947     // If the block has not been imported, add to pending set.
15948     bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0);
15949
15950     // Initialize bbEntryState just the first time we try to add this block to the pending list
15951     // Just because bbEntryState is NULL doesn't mean the pre-state wasn't previously set.
15952     // We use NULL to indicate the 'common' state to avoid memory allocation
15953     if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) &&
15954         (impGetPendingBlockMember(block) == 0))
15955     {
15956         verInitBBEntryState(block, &verCurrentState);
15957         assert(block->bbStkDepth == 0);
15958         block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth);
15959         assert(addToPending);
15960         assert(impGetPendingBlockMember(block) == 0);
15961     }
15962     else
15963     {
15964         // The stack should have the same height on entry to the block from all its predecessors.
15965         if (block->bbStkDepth != verCurrentState.esStackDepth)
15966         {
15967 #ifdef DEBUG
15968             char buffer[400];
15969             sprintf_s(buffer, sizeof(buffer),
15970                       "Block at offset %4.4x to %4.4x in %s entered with different stack depths.\n"
15971                       "Previous depth was %d, current depth is %d",
15972                       block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth,
15973                       verCurrentState.esStackDepth);
15974             buffer[400 - 1] = 0;
15975             NO_WAY(buffer);
15976 #else
15977             NO_WAY("Block entered with different stack depths");
15978 #endif
15979         }
15980
15981         // Additionally, if we need to verify, merge the verification state.
15982         if (tiVerificationNeeded)
15983         {
15984             // Merge the current state into the entry state of block; if this does not change the entry state
15985             // by merging, do not add the block to the pending-list.
15986             bool changed = false;
15987             if (!verMergeEntryStates(block, &changed))
15988             {
15989                 block->bbFlags |= BBF_FAILED_VERIFICATION;
15990                 addToPending = true; // We will pop it off, and check the flag set above.
15991             }
15992             else if (changed)
15993             {
15994                 addToPending = true;
15995
15996                 JITDUMP("Adding BB%02u to pending set due to new merge result\n", block->bbNum);
15997             }
15998         }
15999
16000         if (!addToPending)
16001         {
16002             return;
16003         }
16004
16005         if (block->bbStkDepth > 0)
16006         {
16007             // We need to fix the types of any spill temps that might have changed:
16008             //   int->native int, float->double, int->byref, etc.
16009             impRetypeEntryStateTemps(block);
16010         }
16011
16012         // OK, we must add to the pending list, if it's not already in it.
16013         if (impGetPendingBlockMember(block) != 0)
16014         {
16015             return;
16016         }
16017     }
16018
16019     // Get an entry to add to the pending list
16020
16021     PendingDsc* dsc;
16022
16023     if (impPendingFree)
16024     {
16025         // We can reuse one of the freed up dscs.
16026         dsc            = impPendingFree;
16027         impPendingFree = dsc->pdNext;
16028     }
16029     else
16030     {
16031         // We have to create a new dsc
16032         dsc = new (this, CMK_Unknown) PendingDsc;
16033     }
16034
16035     dsc->pdBB                 = block;
16036     dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth;
16037     dsc->pdThisPtrInit        = verCurrentState.thisInitialized;
16038
16039     // Save the stack trees for later
16040
16041     if (verCurrentState.esStackDepth)
16042     {
16043         impSaveStackState(&dsc->pdSavedStack, false);
16044     }
16045
16046     // Add the entry to the pending list
16047
16048     dsc->pdNext    = impPendingList;
16049     impPendingList = dsc;
16050     impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16051
16052     // Various assertions require us now to consider the block as not imported (at least for
16053     // the final time...)
16054     block->bbFlags &= ~BBF_IMPORTED;
16055
16056 #ifdef DEBUG
16057     if (verbose && 0)
16058     {
16059         printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16060     }
16061 #endif
16062 }
16063
16064 /*****************************************************************************/
16065 //
16066 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16067 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
16068 // impPendingBlockMembers).  Does *NOT* change the existing "pre-state" of the block.
16069
16070 void Compiler::impReimportBlockPending(BasicBlock* block)
16071 {
16072     JITDUMP("\nimpReimportBlockPending for BB%02u", block->bbNum);
16073
16074     assert(block->bbFlags & BBF_IMPORTED);
16075
16076     // OK, we must add to the pending list, if it's not already in it.
16077     if (impGetPendingBlockMember(block) != 0)
16078     {
16079         return;
16080     }
16081
16082     // Get an entry to add to the pending list
16083
16084     PendingDsc* dsc;
16085
16086     if (impPendingFree)
16087     {
16088         // We can reuse one of the freed up dscs.
16089         dsc            = impPendingFree;
16090         impPendingFree = dsc->pdNext;
16091     }
16092     else
16093     {
16094         // We have to create a new dsc
16095         dsc = new (this, CMK_ImpStack) PendingDsc;
16096     }
16097
16098     dsc->pdBB = block;
16099
16100     if (block->bbEntryState)
16101     {
16102         dsc->pdThisPtrInit        = block->bbEntryState->thisInitialized;
16103         dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth;
16104         dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack;
16105     }
16106     else
16107     {
16108         dsc->pdThisPtrInit        = TIS_Bottom;
16109         dsc->pdSavedStack.ssDepth = 0;
16110         dsc->pdSavedStack.ssTrees = nullptr;
16111     }
16112
16113     // Add the entry to the pending list
16114
16115     dsc->pdNext    = impPendingList;
16116     impPendingList = dsc;
16117     impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16118
16119     // Various assertions require us now to consider the block as not imported (at least for
16120     // the final time...)
16121     block->bbFlags &= ~BBF_IMPORTED;
16122
16123 #ifdef DEBUG
16124     if (verbose && 0)
16125     {
16126         printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16127     }
16128 #endif
16129 }
16130
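// Allocate a BlockListNode for spill-clique walking, reusing a node from impBlockListNodeFreeList
// when one is available instead of always allocating fresh memory.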
16131 void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp)
16132 {
16133     if (comp->impBlockListNodeFreeList == nullptr)
16134     {
16135         return (BlockListNode*)comp->compGetMem(sizeof(BlockListNode), CMK_BasicBlock);
16136     }
16137     else
16138     {
16139         BlockListNode* res             = comp->impBlockListNodeFreeList;
16140         comp->impBlockListNodeFreeList = res->m_next;
16141         return res;
16142     }
16143 }
16144
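// Return a BlockListNode to the free list so a later allocation can reuse it.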
16145 void Compiler::FreeBlockListNode(Compiler::BlockListNode* node)
16146 {
16147     node->m_next             = impBlockListNodeFreeList;
16148     impBlockListNodeFreeList = node;
16149 }
16150
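// Walk the spill clique containing 'block' (treated as a predecessor). The walk alternately adds the
// successors of newly found predecessors and the predecessors of newly found successors until no new
// members are discovered, invoking 'callback' once for each block the first time it is seen in each role.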
16151 void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback)
16152 {
16153     bool toDo = true;
16154
16155     noway_assert(!fgComputePredsDone);
16156     if (!fgCheapPredsValid)
16157     {
16158         fgComputeCheapPreds();
16159     }
16160
16161     BlockListNode* succCliqueToDo = nullptr;
16162     BlockListNode* predCliqueToDo = new (this) BlockListNode(block);
16163     while (toDo)
16164     {
16165         toDo = false;
16166         // Look at the successors of every member of the predecessor to-do list.
16167         while (predCliqueToDo != nullptr)
16168         {
16169             BlockListNode* node = predCliqueToDo;
16170             predCliqueToDo      = node->m_next;
16171             BasicBlock* blk     = node->m_blk;
16172             FreeBlockListNode(node);
16173
16174             for (unsigned succNum = 0; succNum < blk->NumSucc(); succNum++)
16175             {
16176                 BasicBlock* succ = blk->GetSucc(succNum);
16177                 // If it's not already in the clique, add it, and also add it
16178                 // as a member of the successor "toDo" set.
16179                 if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0)
16180                 {
16181                     callback->Visit(SpillCliqueSucc, succ);
16182                     impSpillCliqueSetMember(SpillCliqueSucc, succ, 1);
16183                     succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo);
16184                     toDo           = true;
16185                 }
16186             }
16187         }
16188         // Look at the predecessors of every member of the successor to-do list.
16189         while (succCliqueToDo != nullptr)
16190         {
16191             BlockListNode* node = succCliqueToDo;
16192             succCliqueToDo      = node->m_next;
16193             BasicBlock* blk     = node->m_blk;
16194             FreeBlockListNode(node);
16195
16196             for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next)
16197             {
16198                 BasicBlock* predBlock = pred->block;
16199                 // If it's not already in the clique, add it, and also add it
16200                 // as a member of the predecessor "toDo" set.
16201                 if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0)
16202                 {
16203                     callback->Visit(SpillCliquePred, predBlock);
16204                     impSpillCliqueSetMember(SpillCliquePred, predBlock, 1);
16205                     predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo);
16206                     toDo           = true;
16207                 }
16208             }
16209         }
16210     }
16211
16212     // If this fails, it means we didn't walk the spill clique properly and somehow managed
16213     // to miss walking back to include the predecessor we started from.
16214     // The most likely cause: missing or out-of-date bbPreds.
16215     assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0);
16216 }
16217
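// Record the chosen base temp on each member of the spill clique: in bbStkTempsIn for successors
// (incoming stack) and in bbStkTempsOut for predecessors (outgoing stack).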
16218 void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
16219 {
16220     if (predOrSucc == SpillCliqueSucc)
16221     {
16222         assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor.
16223         blk->bbStkTempsIn = m_baseTmp;
16224     }
16225     else
16226     {
16227         assert(predOrSucc == SpillCliquePred);
16228         assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor.
16229         blk->bbStkTempsOut = m_baseTmp;
16230     }
16231 }
16232
16233 void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
16234 {
16235     // For Preds we could be a little smarter and just find the existing store
16236     // and re-type it/add a cast, but that is complicated and hopefully very rare, so
16237     // just re-import the whole block (just like we do for successors)
16238
16239     if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0))
16240     {
16241         // If we haven't imported this block and we're not going to (because it isn't on
16242         // the pending list) then just ignore it for now.
16243
16244         // This block has either never been imported (EntryState == NULL) or it failed
16245         // verification. Neither state requires us to force it to be imported now.
16246         assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION));
16247         return;
16248     }
16249
16250     // For successors we have a valid verCurrentState, so just mark them for reimport
16251     // the 'normal' way
16252     // Unlike predecessors, we *DO* need to reimport the current block because the
16253     // initial import had the wrong entry state types.
16254     // Similarly, blocks that are currently on the pending list still need to call
16255     // impImportBlockPending to fix up their entry state.
16256     if (predOrSucc == SpillCliqueSucc)
16257     {
16258         m_pComp->impReimportMarkBlock(blk);
16259
16260         // Set the current stack state to that of the blk->bbEntryState
16261         m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState);
16262         assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry());
16263
16264         m_pComp->impImportBlockPending(blk);
16265     }
16266     else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0))
16267     {
16268         // As described above, we are only visiting predecessors so they can
16269         // add the appropriate casts. Since we have already done that for the current
16270         // block, it does not need to be reimported.
16271         // Nor do we need to reimport blocks that are still pending, but not yet
16272         // imported.
16273         //
16274         // For predecessors, we have no state to seed the EntryState, so we just have
16275         // to assume the existing one is correct.
16276         // If the block is also a successor, it will get the EntryState properly
16277         // updated when it is visited as a successor in the above "if" block.
16278         assert(predOrSucc == SpillCliquePred);
16279         m_pComp->impReimportBlockPending(blk);
16280     }
16281 }
16282
16283 // Re-type the incoming lclVar nodes to match the varDsc.
16284 void Compiler::impRetypeEntryStateTemps(BasicBlock* blk)
16285 {
16286     if (blk->bbEntryState != nullptr)
16287     {
16288         EntryState* es = blk->bbEntryState;
16289         for (unsigned level = 0; level < es->esStackDepth; level++)
16290         {
16291             GenTreePtr tree = es->esStack[level].val;
16292             if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD))
16293             {
16294                 unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
16295                 noway_assert(lclNum < lvaCount);
16296                 LclVarDsc* varDsc              = lvaTable + lclNum;
16297                 es->esStack[level].val->gtType = varDsc->TypeGet();
16298             }
16299         }
16300     }
16301 }
16302
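// Return the base temp number used to spill this block's outgoing stack. If the block already has one
// from a prior walk, reuse it; otherwise grab enough temps for the whole stack and propagate the base
// to every member of the spill clique.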
16303 unsigned Compiler::impGetSpillTmpBase(BasicBlock* block)
16304 {
16305     if (block->bbStkTempsOut != NO_BASE_TMP)
16306     {
16307         return block->bbStkTempsOut;
16308     }
16309
16310 #ifdef DEBUG
16311     if (verbose)
16312     {
16313         printf("\n*************** In impGetSpillTmpBase(BB%02u)\n", block->bbNum);
16314     }
16315 #endif // DEBUG
16316
16317     // Otherwise, choose one, and propagate to all members of the spill clique.
16318     // Grab enough temps for the whole stack.
16319     unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries"));
16320     SetSpillTempsBase callback(baseTmp);
16321
16322     // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor
16323     // to one spill clique, and similarly can only be the successor to one spill clique.
16324     impWalkSpillCliqueFromPred(block, &callback);
16325
16326     return baseTmp;
16327 }
16328
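// Mark every member of the spill clique containing 'block' for re-importation; this is used after a
// spill temp's type had to be changed (e.g. int -> native int, or float -> double).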
16329 void Compiler::impReimportSpillClique(BasicBlock* block)
16330 {
16331 #ifdef DEBUG
16332     if (verbose)
16333     {
16334         printf("\n*************** In impReimportSpillClique(BB%02u)\n", block->bbNum);
16335     }
16336 #endif // DEBUG
16337
16338     // If we get here, it is because this block is already part of a spill clique
16339     // and one predecessor had an outgoing live stack slot of type int, and this
16340     // block has an outgoing live stack slot of type native int.
16341     // We need to reset these before traversal because they have already been set
16342     // by the previous walk to determine all the members of the spill clique.
16343     impInlineRoot()->impSpillCliquePredMembers.Reset();
16344     impInlineRoot()->impSpillCliqueSuccMembers.Reset();
16345
16346     ReimportSpillClique callback(this);
16347
16348     impWalkSpillCliqueFromPred(block, &callback);
16349 }
16350
16351 // Set the pre-state of "block" (which should not have a pre-state allocated) to
16352 // a copy of "srcState", cloning tree pointers as required.
16353 void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState)
16354 {
16355     if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom)
16356     {
16357         block->bbEntryState = nullptr;
16358         return;
16359     }
16360
16361     block->bbEntryState = (EntryState*)compGetMemA(sizeof(EntryState));
16362
16363     // block->bbEntryState.esRefcount = 1;
16364
16365     block->bbEntryState->esStackDepth    = srcState->esStackDepth;
16366     block->bbEntryState->thisInitialized = TIS_Bottom;
16367
16368     if (srcState->esStackDepth > 0)
16369     {
16370         block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]);
16371         unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry);
16372
16373         memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize);
16374         for (unsigned level = 0; level < srcState->esStackDepth; level++)
16375         {
16376             GenTreePtr tree                         = srcState->esStack[level].val;
16377             block->bbEntryState->esStack[level].val = gtCloneExpr(tree);
16378         }
16379     }
16380
16381     if (verTrackObjCtorInitState)
16382     {
16383         verSetThisInit(block, srcState->thisInitialized);
16384     }
16385
16386     return;
16387 }
16388
16389 void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis)
16390 {
16391     assert(tis != TIS_Bottom); // Precondition.
16392     if (block->bbEntryState == nullptr)
16393     {
16394         block->bbEntryState = new (this, CMK_Unknown) EntryState();
16395     }
16396
16397     block->bbEntryState->thisInitialized = tis;
16398 }
16399
16400 /*
16401  * Resets the current state to the state at the start of the basic block
16402  */
16403 void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState)
16404 {
16405
16406     if (block->bbEntryState == nullptr)
16407     {
16408         destState->esStackDepth    = 0;
16409         destState->thisInitialized = TIS_Bottom;
16410         return;
16411     }
16412
16413     destState->esStackDepth = block->bbEntryState->esStackDepth;
16414
16415     if (destState->esStackDepth > 0)
16416     {
16417         unsigned stackSize = destState->esStackDepth * sizeof(StackEntry);
16418
16419         memcpy(destState->esStack, block->bbStackOnEntry(), stackSize);
16420     }
16421
16422     destState->thisInitialized = block->bbThisOnEntry();
16423
16424     return;
16425 }
16426
16427 ThisInitState BasicBlock::bbThisOnEntry()
16428 {
16429     return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom;
16430 }
16431
16432 unsigned BasicBlock::bbStackDepthOnEntry()
16433 {
16434     return (bbEntryState ? bbEntryState->esStackDepth : 0);
16435 }
16436
16437 void BasicBlock::bbSetStack(void* stackBuffer)
16438 {
16439     assert(bbEntryState);
16440     assert(stackBuffer);
16441     bbEntryState->esStack = (StackEntry*)stackBuffer;
16442 }
16443
16444 StackEntry* BasicBlock::bbStackOnEntry()
16445 {
16446     assert(bbEntryState);
16447     return bbEntryState->esStack;
16448 }
16449
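// Initialize verCurrentState for the start of the method: reset the stack, decide whether 'this'-pointer
// initialization must be tracked, and record the resulting state as the entry state of the first block.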
16450 void Compiler::verInitCurrentState()
16451 {
16452     verTrackObjCtorInitState        = FALSE;
16453     verCurrentState.thisInitialized = TIS_Bottom;
16454
16455     if (tiVerificationNeeded)
16456     {
16457         // Track this ptr initialization
16458         if (!info.compIsStatic && (info.compFlags & CORINFO_FLG_CONSTRUCTOR) && lvaTable[0].lvVerTypeInfo.IsObjRef())
16459         {
16460             verTrackObjCtorInitState        = TRUE;
16461             verCurrentState.thisInitialized = TIS_Uninit;
16462         }
16463     }
16464
16465     // initialize stack info
16466
16467     verCurrentState.esStackDepth = 0;
16468     assert(verCurrentState.esStack != nullptr);
16469
16470     // copy current state to entry state of first BB
16471     verInitBBEntryState(fgFirstBB, &verCurrentState);
16472 }
16473
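// Return the compiler instance at the root of the inlining tree (the compiler itself when not inlining).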
16474 Compiler* Compiler::impInlineRoot()
16475 {
16476     if (impInlineInfo == nullptr)
16477     {
16478         return this;
16479     }
16480     else
16481     {
16482         return impInlineInfo->InlineRoot;
16483     }
16484 }
16485
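// Get the membership byte for 'blk' in the predecessor or successor spill clique set; both sets are
// kept on the inline root.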
16486 BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk)
16487 {
16488     if (predOrSucc == SpillCliquePred)
16489     {
16490         return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd());
16491     }
16492     else
16493     {
16494         assert(predOrSucc == SpillCliqueSucc);
16495         return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd());
16496     }
16497 }
16498
16499 void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val)
16500 {
16501     if (predOrSucc == SpillCliquePred)
16502     {
16503         impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val);
16504     }
16505     else
16506     {
16507         assert(predOrSucc == SpillCliqueSucc);
16508         impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val);
16509     }
16510 }
16511
16512 /*****************************************************************************
16513  *
16514  *  Convert the instrs ("import") into our internal format (trees). The
16515  *  basic flowgraph has already been constructed and is passed in.
16516  */
16517
16518 void Compiler::impImport(BasicBlock* method)
16519 {
16520 #ifdef DEBUG
16521     if (verbose)
16522     {
16523         printf("*************** In impImport() for %s\n", info.compFullName);
16524     }
16525 #endif
16526
16527     /* Allocate the stack contents */
16528
16529     if (info.compMaxStack <= sizeof(impSmallStack) / sizeof(impSmallStack[0]))
16530     {
16531         /* Use local variable, don't waste time allocating on the heap */
16532
16533         impStkSize              = sizeof(impSmallStack) / sizeof(impSmallStack[0]);
16534         verCurrentState.esStack = impSmallStack;
16535     }
16536     else
16537     {
16538         impStkSize              = info.compMaxStack;
16539         verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
16540     }
16541
16542     // initialize the entry state at start of method
16543     verInitCurrentState();
16544
16545     // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase).
16546     Compiler* inlineRoot = impInlineRoot();
16547     if (this == inlineRoot) // These are only used on the root of the inlining tree.
16548     {
16549         // We have initialized these previously, but to size 0.  Make them larger.
16550         impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2);
16551         impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2);
16552         impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2);
16553     }
16554     inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2);
16555     inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2);
16556     inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2);
16557     impBlockListNodeFreeList = nullptr;
16558
16559 #ifdef DEBUG
16560     impLastILoffsStmt   = nullptr;
16561     impNestedStackSpill = false;
16562 #endif
16563     impBoxTemp = BAD_VAR_NUM;
16564
16565     impPendingList = impPendingFree = nullptr;
16566
16567     /* Add the entry-point to the worker-list */
16568
16569     // Skip leading internal blocks. There can be one as a leading scratch BB, and more
16570     // from EH normalization.
16571     // NOTE: It might be possible to always just put fgFirstBB on the pending list, and let everything else just fall
16572     // out.
16573     for (; method->bbFlags & BBF_INTERNAL; method = method->bbNext)
16574     {
16575         // Treat these as imported.
16576         assert(method->bbJumpKind == BBJ_NONE); // We assume all the leading ones are fallthrough.
16577         JITDUMP("Marking leading BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", method->bbNum);
16578         method->bbFlags |= BBF_IMPORTED;
16579     }
16580
16581     impImportBlockPending(method);
16582
16583     /* Import blocks in the worker-list until there are no more */
16584
16585     while (impPendingList)
16586     {
16587         /* Remove the entry at the front of the list */
16588
16589         PendingDsc* dsc = impPendingList;
16590         impPendingList  = impPendingList->pdNext;
16591         impSetPendingBlockMember(dsc->pdBB, 0);
16592
16593         /* Restore the stack state */
16594
16595         verCurrentState.thisInitialized = dsc->pdThisPtrInit;
16596         verCurrentState.esStackDepth    = dsc->pdSavedStack.ssDepth;
16597         if (verCurrentState.esStackDepth)
16598         {
16599             impRestoreStackState(&dsc->pdSavedStack);
16600         }
16601
16602         /* Add the entry to the free list for reuse */
16603
16604         dsc->pdNext    = impPendingFree;
16605         impPendingFree = dsc;
16606
16607         /* Now import the block */
16608
16609         if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION)
16610         {
16611
16612 #ifdef _TARGET_64BIT_
16613             // On AMD64, during verification we have to match JIT64 behavior since the VM is very tightly
16614             // coupled with the JIT64 IL Verification logic.  Look inside verHandleVerificationFailure
16615             // method for further explanation on why we raise this exception instead of making the jitted
16616             // code throw the verification exception during execution.
16617             if (tiVerificationNeeded && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
16618             {
16619                 BADCODE("Basic block marked as not verifiable");
16620             }
16621             else
16622 #endif // _TARGET_64BIT_
16623             {
16624                 verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true));
16625                 impEndTreeList(dsc->pdBB);
16626             }
16627         }
16628         else
16629         {
16630             impImportBlock(dsc->pdBB);
16631
16632             if (compDonotInline())
16633             {
16634                 return;
16635             }
16636             if (compIsForImportOnly() && !tiVerificationNeeded)
16637             {
16638                 return;
16639             }
16640         }
16641     }
16642
16643 #ifdef DEBUG
16644     if (verbose && info.compXcptnsCount)
16645     {
16646         printf("\nAfter impImport() added block for try,catch,finally");
16647         fgDispBasicBlocks();
16648         printf("\n");
16649     }
16650
16651     // Used in impImportBlockPending() for STRESS_CHK_REIMPORT
16652     for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
16653     {
16654         block->bbFlags &= ~BBF_VISITED;
16655     }
16656 #endif
16657
16658     assert(!compIsForInlining() || !tiVerificationNeeded);
16659 }
16660
16661 // Checks if a typeinfo (usually stored in the type stack) is a struct.
16662 // The invariant here is that if it's not a ref or a method and has a class handle,
16663 // it's a valuetype.
16664 bool Compiler::impIsValueType(typeInfo* pTypeInfo)
16665 {
16666     if (pTypeInfo && pTypeInfo->IsValueClassWithClsHnd())
16667     {
16668         return true;
16669     }
16670     else
16671     {
16672         return false;
16673     }
16674 }
16675
16676 /*****************************************************************************
16677  *  Check to see if the tree is the address of a local or
16678     the address of a field in a local.
16679
16680     *lclVarTreeOut will contain the GT_LCL_VAR tree when it returns TRUE.
16681
16682  */
16683
16684 BOOL Compiler::impIsAddressInLocal(GenTreePtr tree, GenTreePtr* lclVarTreeOut)
16685 {
16686     if (tree->gtOper != GT_ADDR)
16687     {
16688         return FALSE;
16689     }
16690
16691     GenTreePtr op = tree->gtOp.gtOp1;
16692     while (op->gtOper == GT_FIELD)
16693     {
16694         op = op->gtField.gtFldObj;
16695         if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL.
16696         {
16697             op = op->gtOp.gtOp1;
16698         }
16699         else
16700         {
16701             return false;
16702         }
16703     }
16704
16705     if (op->gtOper == GT_LCL_VAR)
16706     {
16707         *lclVarTreeOut = op;
16708         return TRUE;
16709     }
16710     else
16711     {
16712         return FALSE;
16713     }
16714 }
16715
16716 //------------------------------------------------------------------------
16717 // impMakeDiscretionaryInlineObservations: make observations that help
16718 // determine the profitability of a discretionary inline
16719 //
16720 // Arguments:
16721 //    pInlineInfo -- InlineInfo for the inline, or null for the prejit root
16722 //    inlineResult -- InlineResult accumulating information about this inline
16723 //
16724 // Notes:
16725 //    If inlining or prejitting the root, this method also makes
16726 //    various observations about the method that factor into inline
16727 //    decisions. It sets `compNativeSizeEstimate` as a side effect.
16728
16729 void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult)
16730 {
16731     assert((pInlineInfo != nullptr && compIsForInlining()) || // Perform the actual inlining.
16732            (pInlineInfo == nullptr && !compIsForInlining())   // Calculate the static inlining hint for ngen.
16733            );
16734
16735     // If we're really inlining, we should just have one result in play.
16736     assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult));
16737
16738     // If this is a "forceinline" method, the JIT probably shouldn't have gone
16739     // to the trouble of estimating the native code size. Even if it did, it
16740     // shouldn't be relying on the result of this method.
16741     assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE);
16742
16743     // Note if the caller contains NEWOBJ or NEWARR.
16744     Compiler* rootCompiler = impInlineRoot();
16745
16746     if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0)
16747     {
16748         inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY);
16749     }
16750
16751     if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0)
16752     {
16753         inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ);
16754     }
16755
16756     bool calleeIsStatic  = (info.compFlags & CORINFO_FLG_STATIC) != 0;
16757     bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0;
16758
16759     if (isSpecialMethod)
16760     {
16761         if (calleeIsStatic)
16762         {
16763             inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR);
16764         }
16765         else
16766         {
16767             inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR);
16768         }
16769     }
16770     else if (!calleeIsStatic)
16771     {
16772         // Callee is an instance method.
16773         //
16774         // Check if the callee has the same 'this' as the root.
16775         if (pInlineInfo != nullptr)
16776         {
16777             GenTreePtr thisArg = pInlineInfo->iciCall->gtCall.gtCallObjp;
16778             assert(thisArg);
16779             bool isSameThis = impIsThis(thisArg);
16780             inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis);
16781         }
16782     }
16783
16784     // Note if the callee's class is a promotable struct
16785     if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0)
16786     {
16787         lvaStructPromotionInfo structPromotionInfo;
16788         lvaCanPromoteStructType(info.compClassHnd, &structPromotionInfo, false);
16789         if (structPromotionInfo.canPromote)
16790         {
16791             inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE);
16792         }
16793     }
16794
16795 #ifdef FEATURE_SIMD
16796
16797     // Note if this method has SIMD args or a SIMD return value
16798     if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn)
16799     {
16800         inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD);
16801     }
16802
16803 #endif // FEATURE_SIMD
16804
16805     // Roughly classify callsite frequency.
16806     InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED;
16807
16808     // If this is a prejit root, or a maximally hot block...
16809     if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->bbWeight >= BB_MAX_WEIGHT))
16810     {
16811         frequency = InlineCallsiteFrequency::HOT;
16812     }
16813     // No training data.  Look for loop-like things.
16814     // We consider a recursive call loop-like.  Do not give the inlining boost to the method itself.
16815     // However, give it to things nearby.
16816     else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) &&
16817              (pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle))
16818     {
16819         frequency = InlineCallsiteFrequency::LOOP;
16820     }
16821     else if ((pInlineInfo->iciBlock->bbFlags & BBF_PROF_WEIGHT) && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT))
16822     {
16823         frequency = InlineCallsiteFrequency::WARM;
16824     }
16825     // Now modify the multiplier based on where we're called from.
16826     else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR))
16827     {
16828         frequency = InlineCallsiteFrequency::RARE;
16829     }
16830     else
16831     {
16832         frequency = InlineCallsiteFrequency::BORING;
16833     }
16834
16835     // Also capture the block weight of the call site.  In the prejit
16836     // root case, assume there's some hot call site for this method.
16837     unsigned weight = 0;
16838
16839     if (pInlineInfo != nullptr)
16840     {
16841         weight = pInlineInfo->iciBlock->bbWeight;
16842     }
16843     else
16844     {
16845         weight = BB_MAX_WEIGHT;
16846     }
16847
16848     inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency));
16849     inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, static_cast<int>(weight));
16850 }
16851
16852 /*****************************************************************************
16853  This method makes a STATIC inlining decision based on the IL code.
16854  It should not make any inlining decision based on the context.
16855  If forceInline is true, then the inlining decision should not depend on
16856  performance heuristics (code size, etc.).
16857  */
16858
16859 void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
16860                               CORINFO_METHOD_INFO*  methInfo,
16861                               bool                  forceInline,
16862                               InlineResult*         inlineResult)
16863 {
16864     unsigned codeSize = methInfo->ILCodeSize;
16865
16866     // We shouldn't have made up our minds yet...
16867     assert(!inlineResult->IsDecided());
16868
16869     if (methInfo->EHcount)
16870     {
16871         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH);
16872         return;
16873     }
16874
16875     if ((methInfo->ILCode == nullptr) || (codeSize == 0))
16876     {
16877         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY);
16878         return;
16879     }
16880
16881     // For now we don't inline varargs (import code can't handle it)
16882
16883     if (methInfo->args.isVarArg())
16884     {
16885         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
16886         return;
16887     }
16888
16889     // Reject if it has too many locals.
16890     // This is currently an implementation limit due to fixed-size arrays in the
16891     // inline info, rather than a performance heuristic.
16892
16893     inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs);
16894
16895     if (methInfo->locals.numArgs > MAX_INL_LCLS)
16896     {
16897         inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS);
16898         return;
16899     }
16900
16901     // Make sure there aren't too many arguments.
16902     // This is currently an implementation limit due to fixed-size arrays in the
16903     // inline info, rather than a performance heuristic.
16904
16905     inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs);
16906
16907     if (methInfo->args.numArgs > MAX_INL_ARGS)
16908     {
16909         inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS);
16910         return;
16911     }
16912
16913     // Note force inline state
16914
16915     inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline);
16916
16917     // Note IL code size
16918
16919     inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize);
16920
16921     if (inlineResult->IsFailure())
16922     {
16923         return;
16924     }
16925
16926     // Make sure maxstack is not too big
16927
16928     inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack);
16929
16930     if (inlineResult->IsFailure())
16931     {
16932         return;
16933     }
16934 }
16935
16936 /*****************************************************************************
16937  */
16938
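// Check whether the call to 'fncHandle' can be inlined. The checks run under an EE error trap; on
// success an InlineCandidateInfo is allocated and returned through 'ppInlineCandidateInfo', otherwise
// 'inlineResult' records the reason for failure.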
16939 void Compiler::impCheckCanInline(GenTreePtr             call,
16940                                  CORINFO_METHOD_HANDLE  fncHandle,
16941                                  unsigned               methAttr,
16942                                  CORINFO_CONTEXT_HANDLE exactContextHnd,
16943                                  InlineCandidateInfo**  ppInlineCandidateInfo,
16944                                  InlineResult*          inlineResult)
16945 {
16946     // Either EE or JIT might throw exceptions below.
16947     // If that happens, just don't inline the method.
16948
16949     struct Param
16950     {
16951         Compiler*              pThis;
16952         GenTreePtr             call;
16953         CORINFO_METHOD_HANDLE  fncHandle;
16954         unsigned               methAttr;
16955         CORINFO_CONTEXT_HANDLE exactContextHnd;
16956         InlineResult*          result;
16957         InlineCandidateInfo**  ppInlineCandidateInfo;
16958     } param = {nullptr};
16959
16960     param.pThis                 = this;
16961     param.call                  = call;
16962     param.fncHandle             = fncHandle;
16963     param.methAttr              = methAttr;
16964     param.exactContextHnd       = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle);
16965     param.result                = inlineResult;
16966     param.ppInlineCandidateInfo = ppInlineCandidateInfo;
16967
16968     bool success = eeRunWithErrorTrap<Param>(
16969         [](Param* pParam) {
16970             DWORD                  dwRestrictions = 0;
16971             CorInfoInitClassResult initClassResult;
16972
16973 #ifdef DEBUG
16974             const char* methodName;
16975             const char* className;
16976             methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className);
16977
16978             if (JitConfig.JitNoInline())
16979             {
16980                 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE);
16981                 goto _exit;
16982             }
16983 #endif
16984
16985             /* Try to get the code address/size for the method */
16986
16987             CORINFO_METHOD_INFO methInfo;
16988             if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo))
16989             {
16990                 pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO);
16991                 goto _exit;
16992             }
16993
16994             bool forceInline;
16995             forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE);
16996
16997             pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result);
16998
16999             if (pParam->result->IsFailure())
17000             {
17001                 assert(pParam->result->IsNever());
17002                 goto _exit;
17003             }
17004
17005             // Speculatively check if initClass() can be done.
17006             // If it can be done, we will try to inline the method. If inlining
17007             // succeeds, then we will do the non-speculative initClass() and commit it.
17008             // If this speculative call to initClass() fails, there is no point
17009             // trying to inline this method.
17010             initClassResult =
17011                 pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */,
17012                                                            pParam->exactContextHnd /* context */,
17013                                                            TRUE /* speculative */);
17014
17015             if (initClassResult & CORINFO_INITCLASS_DONT_INLINE)
17016             {
17017                 pParam->result->NoteFatal(InlineObservation::CALLSITE_CLASS_INIT_FAILURE_SPEC);
17018                 goto _exit;
17019             }
17020
17021             // Give the EE the final say in whether to inline or not.
17022             // This should be last since, for verifiable code, this can be expensive.
17023
17024             /* VM Inline check also ensures that the method is verifiable if needed */
17025             CorInfoInline vmResult;
17026             vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle,
17027                                                                   &dwRestrictions);
17028
17029             if (vmResult == INLINE_FAIL)
17030             {
17031                 pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE);
17032             }
17033             else if (vmResult == INLINE_NEVER)
17034             {
17035                 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE);
17036             }
17037
17038             if (pParam->result->IsFailure())
17039             {
17040                 // Make sure not to report this one.  It was already reported by the VM.
17041                 pParam->result->SetReported();
17042                 goto _exit;
17043             }
17044
17045             // check for unsupported inlining restrictions
17046             assert((dwRestrictions & ~(INLINE_RESPECT_BOUNDARY | INLINE_NO_CALLEE_LDSTR | INLINE_SAME_THIS)) == 0);
17047
17048             if (dwRestrictions & INLINE_SAME_THIS)
17049             {
17050                 GenTreePtr thisArg = pParam->call->gtCall.gtCallObjp;
17051                 assert(thisArg);
17052
17053                 if (!pParam->pThis->impIsThis(thisArg))
17054                 {
17055                     pParam->result->NoteFatal(InlineObservation::CALLSITE_REQUIRES_SAME_THIS);
17056                     goto _exit;
17057                 }
17058             }
17059
17060             /* Get the method properties */
17061
17062             CORINFO_CLASS_HANDLE clsHandle;
17063             clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle);
17064             unsigned clsAttr;
17065             clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle);
17066
17067             /* Get the return type */
17068
17069             var_types fncRetType;
17070             fncRetType = pParam->call->TypeGet();
17071
17072 #ifdef DEBUG
17073             var_types fncRealRetType;
17074             fncRealRetType = JITtype2varType(methInfo.args.retType);
17075
17076             assert((genActualType(fncRealRetType) == genActualType(fncRetType)) ||
17077                    // <BUGNUM> VSW 288602 </BUGNUM>
17078                    // In the case of IJW, we allow assigning a native pointer to a BYREF.
17079                    (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) ||
17080                    (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT)));
17081 #endif
17082
17083             //
17084             // Allocate an InlineCandidateInfo structure
17085             //
17086             InlineCandidateInfo* pInfo;
17087             pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo;
17088
17089             pInfo->dwRestrictions  = dwRestrictions;
17090             pInfo->methInfo        = methInfo;
17091             pInfo->methAttr        = pParam->methAttr;
17092             pInfo->clsHandle       = clsHandle;
17093             pInfo->clsAttr         = clsAttr;
17094             pInfo->fncRetType      = fncRetType;
17095             pInfo->exactContextHnd = pParam->exactContextHnd;
17096             pInfo->ilCallerHandle  = pParam->pThis->info.compMethodHnd;
17097             pInfo->initClassResult = initClassResult;
17098
17099             *(pParam->ppInlineCandidateInfo) = pInfo;
17100
17101         _exit:;
17102         },
17103         &param);
17104     if (!success)
17105     {
17106         param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR);
17107     }
17108 }
17109
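//------------------------------------------------------------------------
// impInlineRecordArgInfo: record information about one actual argument of an
// inline candidate in pInlineInfo->inlArgInfo[argNum], noting properties
// (constant, local var, byref to struct local, global refs, side effects)
// that later guide how the argument is substituted into the inlined body.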
17110 void Compiler::impInlineRecordArgInfo(InlineInfo*   pInlineInfo,
17111                                       GenTreePtr    curArgVal,
17112                                       unsigned      argNum,
17113                                       InlineResult* inlineResult)
17114 {
17115     InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum];
17116
17117     if (curArgVal->gtOper == GT_MKREFANY)
17118     {
17119         inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY);
17120         return;
17121     }
17122
17123     inlCurArgInfo->argNode = curArgVal;
17124
17125     GenTreePtr lclVarTree;
17126     if (impIsAddressInLocal(curArgVal, &lclVarTree) && varTypeIsStruct(lclVarTree))
17127     {
17128         inlCurArgInfo->argIsByRefToStructLocal = true;
17129 #ifdef FEATURE_SIMD
17130         if (lvaTable[lclVarTree->AsLclVarCommon()->gtLclNum].lvSIMDType)
17131         {
17132             pInlineInfo->hasSIMDTypeArgLocalOrReturn = true;
17133         }
17134 #endif // FEATURE_SIMD
17135     }
17136
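    // Record whether the argument reads or writes global state, or has other
    // side effects; such arguments may need to be evaluated into temps.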
17137     if (curArgVal->gtFlags & GTF_ALL_EFFECT)
17138     {
17139         inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0;
17140         inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0;
17141     }
17142
17143     if (curArgVal->gtOper == GT_LCL_VAR)
17144     {
17145         inlCurArgInfo->argIsLclVar = true;
17146
17147         /* Remember the "original" argument number */
17148         curArgVal->gtLclVar.gtLclILoffs = argNum;
17149     }
17150
17151     if ((curArgVal->OperKind() & GTK_CONST) ||
17152         ((curArgVal->gtOper == GT_ADDR) && (curArgVal->gtOp.gtOp1->gtOper == GT_LCL_VAR)))
17153     {
17154         inlCurArgInfo->argIsInvariant = true;
17155         if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->gtIntCon.gtIconVal == 0))
17156         {
17157             /* Abort, but do not mark as not inlinable */
17158             inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS);
17159             return;
17160         }
17161     }
17162
17163     if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal))
17164     {
17165         inlCurArgInfo->argHasLdargaOp = true;
17166     }
17167
17168 #ifdef DEBUG
17169     if (verbose)
17170     {
17171         if (inlCurArgInfo->argIsThis)
17172         {
17173             printf("thisArg:");
17174         }
17175         else
17176         {
17177             printf("\nArgument #%u:", argNum);
17178         }
17179         if (inlCurArgInfo->argIsLclVar)
17180         {
17181             printf(" is a local var");
17182         }
17183         if (inlCurArgInfo->argIsInvariant)
17184         {
17185             printf(" is a constant");
17186         }
17187         if (inlCurArgInfo->argHasGlobRef)
17188         {
17189             printf(" has global refs");
17190         }
17191         if (inlCurArgInfo->argHasSideEff)
17192         {
17193             printf(" has side effects");
17194         }
17195         if (inlCurArgInfo->argHasLdargaOp)
17196         {
17197             printf(" has ldarga effect");
17198         }
17199         if (inlCurArgInfo->argHasStargOp)
17200         {
17201             printf(" has starg effect");
17202         }
17203         if (inlCurArgInfo->argIsByRefToStructLocal)
17204         {
17205             printf(" is byref to a struct local");
17206         }
17207
17208         printf("\n");
17209         gtDispTree(curArgVal);
17210         printf("\n");
17211     }
17212 #endif
17213 }
17214
17215 /*****************************************************************************
17216  *  Record type and flag info for the inline candidate's arguments and locals.
17217  */
17218
17219 void Compiler::impInlineInitVars(InlineInfo* pInlineInfo)
17220 {
17221     assert(!compIsForInlining());
17222
17223     GenTreePtr           call         = pInlineInfo->iciCall;
17224     CORINFO_METHOD_INFO* methInfo     = &pInlineInfo->inlineCandidateInfo->methInfo;
17225     unsigned             clsAttr      = pInlineInfo->inlineCandidateInfo->clsAttr;
17226     InlArgInfo*          inlArgInfo   = pInlineInfo->inlArgInfo;
17227     InlLclVarInfo*       lclVarInfo   = pInlineInfo->lclVarInfo;
17228     InlineResult*        inlineResult = pInlineInfo->inlineResult;
17229
17230     const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo);
17231
17232     /* Init the argument struct */
17233
17234     memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0]));
17235
17236     /* Get hold of the 'this' pointer and the argument list proper */
17237
17238     GenTreePtr thisArg = call->gtCall.gtCallObjp;
17239     GenTreePtr argList = call->gtCall.gtCallArgs;
17240     unsigned   argCnt  = 0; // Count of the arguments
17241
17242     assert((methInfo->args.hasThis()) == (thisArg != nullptr));
17243
17244     if (thisArg)
17245     {
17246         inlArgInfo[0].argIsThis = true;
17247
17248         impInlineRecordArgInfo(pInlineInfo, thisArg, argCnt, inlineResult);
17249
17250         if (inlineResult->IsFailure())
17251         {
17252             return;
17253         }
17254
17255         /* Increment the argument count */
17256         argCnt++;
17257     }
17258
17259     /* Record some information about each of the arguments */
17260     bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0;
17261
17262 #if USER_ARGS_COME_LAST
17263     unsigned typeCtxtArg = thisArg ? 1 : 0;
17264 #else  // USER_ARGS_COME_LAST
17265     unsigned typeCtxtArg = methInfo->args.totalILArgs();
17266 #endif // USER_ARGS_COME_LAST
17267
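    // Walk the call's argument list.  The return buffer (if any) and the hidden
    // type-context argument at position 'typeCtxtArg' are skipped, since they are
    // not IL-visible arguments of the inlinee.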
17268     for (GenTreePtr argTmp = argList; argTmp; argTmp = argTmp->gtOp.gtOp2)
17269     {
17270         if (argTmp == argList && hasRetBuffArg)
17271         {
17272             continue;
17273         }
17274
17275         // Ignore the type context argument
17276         if (hasTypeCtxtArg && (argCnt == typeCtxtArg))
17277         {
17278             typeCtxtArg = 0xFFFFFFFF;
17279             continue;
17280         }
17281
17282         assert(argTmp->gtOper == GT_LIST);
17283         GenTreePtr argVal = argTmp->gtOp.gtOp1;
17284
17285         impInlineRecordArgInfo(pInlineInfo, argVal, argCnt, inlineResult);
17286
17287         if (inlineResult->IsFailure())
17288         {
17289             return;
17290         }
17291
17292         /* Increment the argument count */
17293         argCnt++;
17294     }
17295
17296     /* Make sure we got the arg number right */
17297     assert(argCnt == methInfo->args.totalILArgs());
17298
17299 #ifdef FEATURE_SIMD
17300     bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn;
17301 #endif // FEATURE_SIMD
17302
17303     /* We have typeless opcodes, get type information from the signature */
17304
17305     if (thisArg)
17306     {
17307         var_types sigType;
17308
17309         if (clsAttr & CORINFO_FLG_VALUECLASS)
17310         {
17311             sigType = TYP_BYREF;
17312         }
17313         else
17314         {
17315             sigType = TYP_REF;
17316         }
17317
17318         lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle);
17319         lclVarInfo[0].lclHasLdlocaOp = false;
17320
17321 #ifdef FEATURE_SIMD
17322         // We always want to check isSIMDClass, since we want to set foundSIMDType (to increase
17323         // the inlining multiplier) for anything in that assembly.
17324         // But we only need to normalize it if it is a TYP_STRUCT
17325         // (which we need to do even if we have already set foundSIMDType).
17326         if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
17327         {
17328             if (sigType == TYP_STRUCT)
17329             {
17330                 sigType = impNormStructType(lclVarInfo[0].lclVerTypeInfo.GetClassHandle());
17331             }
17332             foundSIMDType = true;
17333         }
17334 #endif // FEATURE_SIMD
17335         lclVarInfo[0].lclTypeInfo = sigType;
17336
17337         assert(varTypeIsGC(thisArg->gtType) ||   // "this" is managed
17338                (thisArg->gtType == TYP_I_IMPL && // "this" is unmanaged but the method's class doesn't care
17339                 (clsAttr & CORINFO_FLG_VALUECLASS)));
17340
17341         if (genActualType(thisArg->gtType) != genActualType(sigType))
17342         {
17343             if (sigType == TYP_REF)
17344             {
17345                 /* The argument cannot be bashed into a ref (see bug 750871) */
17346                 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF);
17347                 return;
17348             }
17349
17350             /* This can only happen with byrefs <-> ints/shorts */
17351
17352             assert(genActualType(sigType) == TYP_I_IMPL || sigType == TYP_BYREF);
17353             assert(genActualType(thisArg->gtType) == TYP_I_IMPL || thisArg->gtType == TYP_BYREF);
17354
17355             if (sigType == TYP_BYREF)
17356             {
17357                 lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17358             }
17359             else if (thisArg->gtType == TYP_BYREF)
17360             {
17361                 assert(sigType == TYP_I_IMPL);
17362
17363                 /* If possible change the BYREF to an int */
17364                 if (thisArg->IsVarAddr())
17365                 {
17366                     thisArg->gtType              = TYP_I_IMPL;
17367                     lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17368                 }
17369                 else
17370                 {
17371                     /* Arguments 'int <- byref' cannot be bashed */
17372                     inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
17373                     return;
17374                 }
17375             }
17376         }
17377     }
17378
17379     /* Init the types of the arguments and make sure the types
17380      * from the trees match the types in the signature */
17381
17382     CORINFO_ARG_LIST_HANDLE argLst;
17383     argLst = methInfo->args.args;
17384
17385     unsigned i;
17386     for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst))
17387     {
17388         var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args);
17389
17390         lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst);
17391
17392 #ifdef FEATURE_SIMD
17393         if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
17394         {
17395             // If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've
17396             // found a SIMD type, even if this may not be a type we recognize (the assumption is that
17397             // it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier).
17398             foundSIMDType = true;
17399             if (sigType == TYP_STRUCT)
17400             {
17401                 var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle());
17402                 sigType              = structType;
17403             }
17404         }
17405 #endif // FEATURE_SIMD
17406
17407         lclVarInfo[i].lclTypeInfo    = sigType;
17408         lclVarInfo[i].lclHasLdlocaOp = false;
17409
17410         /* Does the tree type match the signature type? */
17411
17412         GenTreePtr inlArgNode = inlArgInfo[i].argNode;
17413
17414         if (sigType != inlArgNode->gtType)
17415         {
17416             /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints,
17417                but in bad IL cases with caller-callee signature mismatches we can see other types.
17418                Intentionally reject such mismatches (by refusing to inline) so that the jit stays
17419                robust when encountering bad IL. */
17420
17421             bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) ||
17422                                         (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) ||
17423                                         (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType));
17424
17425             if (!isPlausibleTypeMatch)
17426             {
17427                 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE);
17428                 return;
17429             }
17430
17431             /* Is it a narrowing or widening cast?
17432              * Widening casts are ok since the value computed is already
17433              * normalized to an int (on the IL stack) */
17434
17435             if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType))
17436             {
17437                 if (sigType == TYP_BYREF)
17438                 {
17439                     lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17440                 }
17441                 else if (inlArgNode->gtType == TYP_BYREF)
17442                 {
17443                     assert(varTypeIsIntOrI(sigType));
17444
17445                     /* If possible bash the BYREF to an int */
17446                     if (inlArgNode->IsVarAddr())
17447                     {
17448                         inlArgNode->gtType           = TYP_I_IMPL;
17449                         lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17450                     }
17451                     else
17452                     {
17453                         /* Arguments 'int <- byref' cannot be changed */
17454                         inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
17455                         return;
17456                     }
17457                 }
17458                 else if (genTypeSize(sigType) < EA_PTRSIZE)
17459                 {
17460                     /* Narrowing cast */
17461
17462                     if (inlArgNode->gtOper == GT_LCL_VAR &&
17463                         !lvaTable[inlArgNode->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad() &&
17464                         sigType == lvaGetRealType(inlArgNode->gtLclVarCommon.gtLclNum))
17465                     {
17466                         /* We don't need to insert a cast here as the variable
17467                            was assigned a normalized value of the right type */
17468
17469                         continue;
17470                     }
17471
17472                     inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(TYP_INT, inlArgNode, sigType);
17473
17474                     inlArgInfo[i].argIsLclVar = false;
17475
17476                     /* Try to fold the node in case we have constant arguments */
17477
17478                     if (inlArgInfo[i].argIsInvariant)
17479                     {
17480                         inlArgNode            = gtFoldExprConst(inlArgNode);
17481                         inlArgInfo[i].argNode = inlArgNode;
17482                         assert(inlArgNode->OperIsConst());
17483                     }
17484                 }
17485 #ifdef _TARGET_64BIT_
17486                 else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType))
17487                 {
17488                     // This should only happen for int -> native int widening
17489                     inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(genActualType(sigType), inlArgNode, sigType);
17490
17491                     inlArgInfo[i].argIsLclVar = false;
17492
17493                     /* Try to fold the node in case we have constant arguments */
17494
17495                     if (inlArgInfo[i].argIsInvariant)
17496                     {
17497                         inlArgNode            = gtFoldExprConst(inlArgNode);
17498                         inlArgInfo[i].argNode = inlArgNode;
17499                         assert(inlArgNode->OperIsConst());
17500                     }
17501                 }
17502 #endif // _TARGET_64BIT_
17503             }
17504         }
17505     }
17506
17507     /* Init the types of the local variables */
17508
17509     CORINFO_ARG_LIST_HANDLE localsSig;
17510     localsSig = methInfo->locals.args;
17511
17512     for (i = 0; i < methInfo->locals.numArgs; i++)
17513     {
17514         bool      isPinned;
17515         var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned);
17516
17517         lclVarInfo[i + argCnt].lclHasLdlocaOp = false;
17518         lclVarInfo[i + argCnt].lclIsPinned    = isPinned;
17519         lclVarInfo[i + argCnt].lclTypeInfo    = type;
17520
17521         if (isPinned)
17522         {
17523             // Pinned locals may cause inlines to fail.
17524             inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS);
17525             if (inlineResult->IsFailure())
17526             {
17527                 return;
17528             }
17529         }
17530
17531         lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig);
17532
17533         // If this local is a struct type with GC fields, inform the inliner. It may choose to bail
17534         // out on the inline.
17535         if (type == TYP_STRUCT)
17536         {
17537             CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle();
17538             DWORD                typeFlags = info.compCompHnd->getClassAttribs(lclHandle);
17539             if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
17540             {
17541                 inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
17542                 if (inlineResult->IsFailure())
17543                 {
17544                     return;
17545                 }
17546
17547                 // Do further notification in the case where the call site is rare; some policies do
17548                 // not track the relative hotness of call sites for "always" inline cases.
17549                 if (pInlineInfo->iciBlock->isRunRarely())
17550                 {
17551                     inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
17552                     if (inlineResult->IsFailure())
17553                     {
17555                         return;
17556                     }
17557                 }
17558             }
17559         }
17560
17561         localsSig = info.compCompHnd->getArgNext(localsSig);
17562
17563 #ifdef FEATURE_SIMD
17564         if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo)))
17565         {
17566             foundSIMDType = true;
17567             if (featureSIMD && type == TYP_STRUCT)
17568             {
17569                 var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle());
17570                 lclVarInfo[i + argCnt].lclTypeInfo = structType;
17571             }
17572         }
17573 #endif // FEATURE_SIMD
17574     }
17575
17576 #ifdef FEATURE_SIMD
17577     if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDClass(call->AsCall()->gtRetClsHnd))
17578     {
17579         foundSIMDType = true;
17580     }
17581     pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType;
17582 #endif // FEATURE_SIMD
17583 }
17584
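//------------------------------------------------------------------------
// impInlineFetchLocal: map an inlinee local (by IL local number) to a temp in
// the inliner's lvaTable, allocating and typing the temp on first use.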
17585 unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason))
17586 {
17587     assert(compIsForInlining());
17588
17589     unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum];
17590
17591     if (tmpNum == BAD_VAR_NUM)
17592     {
17593         var_types lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
17594
17595         // The lifetime of this local might span multiple BBs.
17596         // So it is a long lifetime local.
17597         impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason));
17598
17599         lvaTable[tmpNum].lvType = lclTyp;
17600         if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclHasLdlocaOp)
17601         {
17602             lvaTable[tmpNum].lvHasLdAddrOp = 1;
17603         }
17604
17605         if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclIsPinned)
17606         {
17607             lvaTable[tmpNum].lvPinned = 1;
17608
17609             if (!impInlineInfo->hasPinnedLocals)
17610             {
17611                 // If the inlinee returns a value, use a spill temp
17612                 // for the return value to ensure that even in case
17613                 // where the return expression refers to one of the
17614                 // pinned locals, we can unpin the local right after
17615                 // the inlined method body.
17616                 if ((info.compRetNativeType != TYP_VOID) && (lvaInlineeReturnSpillTemp == BAD_VAR_NUM))
17617                 {
17618                     lvaInlineeReturnSpillTemp =
17619                         lvaGrabTemp(false DEBUGARG("Inline candidate pinned local return spill temp"));
17620                     lvaTable[lvaInlineeReturnSpillTemp].lvType = info.compRetNativeType;
17621                 }
17622             }
17623
17624             impInlineInfo->hasPinnedLocals = true;
17625         }
17626
17627         if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo.IsStruct())
17628         {
17629             if (varTypeIsStruct(lclTyp))
17630             {
17631                 lvaSetStruct(tmpNum,
17632                              impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo.GetClassHandle(),
17633                              true /* unsafe value cls check */);
17634             }
17635             else
17636             {
17637                 // This is a wrapped primitive.  Make sure the verstate knows that
17638                 lvaTable[tmpNum].lvVerTypeInfo =
17639                     impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo;
17640             }
17641         }
17642     }
17643
17644     return tmpNum;
17645 }
17646
17647 // Returns the GenTree (usually a GT_LCL_VAR) representing the given argument of the inlined method.
17648 // Only use this method for the inlinee's arguments.
17649 // !!! Do not use it for the inlinee's locals. !!!!
17650
17651 GenTreePtr Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo)
17652 {
17653     /* Get the argument type */
17654     var_types lclTyp = lclVarInfo[lclNum].lclTypeInfo;
17655
17656     GenTreePtr op1 = nullptr;
17657
17658     // constant or address of local
17659     if (inlArgInfo[lclNum].argIsInvariant && !inlArgInfo[lclNum].argHasLdargaOp && !inlArgInfo[lclNum].argHasStargOp)
17660     {
17661         /* Clone the constant. Note that we cannot directly use argNode
17662         in the trees even if inlArgInfo[lclNum].argIsUsed==false as this
17663         would introduce aliasing between inlArgInfo[].argNode and
17664         impInlineExpr. Then gtFoldExpr() could change it, causing further
17665         references to the argument working off of the bashed copy. */
17666
17667         op1 = gtCloneExpr(inlArgInfo[lclNum].argNode);
17668         PREFIX_ASSUME(op1 != nullptr);
17669         inlArgInfo[lclNum].argTmpNum = (unsigned)-1; // illegal temp
17670     }
17671     else if (inlArgInfo[lclNum].argIsLclVar && !inlArgInfo[lclNum].argHasLdargaOp && !inlArgInfo[lclNum].argHasStargOp)
17672     {
17673         /* Argument is a local variable (of the caller)
17674          * Can we re-use the passed argument node? */
17675
17676         op1                          = inlArgInfo[lclNum].argNode;
17677         inlArgInfo[lclNum].argTmpNum = op1->gtLclVarCommon.gtLclNum;
17678
17679         if (inlArgInfo[lclNum].argIsUsed)
17680         {
17681             assert(op1->gtOper == GT_LCL_VAR);
17682             assert(lclNum == op1->gtLclVar.gtLclILoffs);
17683
17684             if (!lvaTable[op1->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad())
17685             {
17686                 lclTyp = genActualType(lclTyp);
17687             }
17688
17689             /* Create a new lcl var node - remember the argument lclNum */
17690             op1 = gtNewLclvNode(op1->gtLclVarCommon.gtLclNum, lclTyp, op1->gtLclVar.gtLclILoffs);
17691         }
17692     }
17693     else if (inlArgInfo[lclNum].argIsByRefToStructLocal && !inlArgInfo[lclNum].argHasStargOp)
17694     {
17695         /* Argument is a by-ref address to a struct, a normed struct, or its field.
17696            In these cases, don't spill the byref to a local, simply clone the tree and use it.
17697            This way we will increase the chance for this byref to be optimized away by
17698            a subsequent "dereference" operation.
17699
17700            From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree
17701            (in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal.
17702            For example, if the caller is:
17703                 ldloca.s   V_1  // V_1 is a local struct
17704                 call       void Test.ILPart::RunLdargaOnPointerArg(int32*)
17705            and the callee being inlined has:
17706                 .method public static void  RunLdargaOnPointerArg(int32* ptrToInts) cil managed
17707                     ldarga.s   ptrToInts
17708                     call       void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**)
17709            then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll
17710            soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR.
17711         */
17712         assert(inlArgInfo[lclNum].argNode->TypeGet() == TYP_BYREF ||
17713                inlArgInfo[lclNum].argNode->TypeGet() == TYP_I_IMPL);
17714         op1 = gtCloneExpr(inlArgInfo[lclNum].argNode);
17715     }
17716     else
17717     {
17718         /* Argument is a complex expression - it must be evaluated into a temp */
17719
17720         if (inlArgInfo[lclNum].argHasTmp)
17721         {
17722             assert(inlArgInfo[lclNum].argIsUsed);
17723             assert(inlArgInfo[lclNum].argTmpNum < lvaCount);
17724
17725             /* Create a new lcl var node - remember the argument lclNum */
17726             op1 = gtNewLclvNode(inlArgInfo[lclNum].argTmpNum, genActualType(lclTyp));
17727
17728             /* This is the second or later use of this argument,
17729             so we have to use the temp (instead of the actual arg) */
17730             inlArgInfo[lclNum].argBashTmpNode = nullptr;
17731         }
17732         else
17733         {
17734             /* First time use */
17735             assert(inlArgInfo[lclNum].argIsUsed == false);
17736
17737             /* Reserve a temp for the expression.
17738             * Use a large size node as we may change it later */
17739
17740             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg"));
17741
17742             lvaTable[tmpNum].lvType = lclTyp;
17743             assert(lvaTable[tmpNum].lvAddrExposed == 0);
17744             if (inlArgInfo[lclNum].argHasLdargaOp)
17745             {
17746                 lvaTable[tmpNum].lvHasLdAddrOp = 1;
17747             }
17748
17749             if (lclVarInfo[lclNum].lclVerTypeInfo.IsStruct())
17750             {
17751                 if (varTypeIsStruct(lclTyp))
17752                 {
17753                     lvaSetStruct(tmpNum, impInlineInfo->lclVarInfo[lclNum].lclVerTypeInfo.GetClassHandle(),
17754                                  true /* unsafe value cls check */);
17755                 }
17756                 else
17757                 {
17758                     // This is a wrapped primitive.  Make sure the verstate knows that
17759                     lvaTable[tmpNum].lvVerTypeInfo = impInlineInfo->lclVarInfo[lclNum].lclVerTypeInfo;
17760                 }
17761             }
17762
17763             inlArgInfo[lclNum].argHasTmp = true;
17764             inlArgInfo[lclNum].argTmpNum = tmpNum;
17765
17766             // If we require strict exception order, then arguments must
17767             // be evaluated in sequence before the body of the inlined method.
17768             // So we need to evaluate them to a temp.
17769             // Also, if arguments have global references, we need to
17770             // evaluate them to a temp before the inlined body as the
17771             // inlined body may be modifying the global ref.
17772             // TODO-1stClassStructs: We currently do not reuse an existing lclVar
17773             // if it is a struct, because it requires some additional handling.
17774
17775             if (!varTypeIsStruct(lclTyp) && (!inlArgInfo[lclNum].argHasSideEff) && (!inlArgInfo[lclNum].argHasGlobRef))
17776             {
17777                 /* Get a *LARGE* LCL_VAR node */
17778                 op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp), lclNum);
17779
17780                 /* Record op1 as the very first use of this argument.
17781                 If there are no further uses of the arg, we may be
17782                 able to use the actual arg node instead of the temp.
17783                 If we do see any further uses, we will clear this. */
17784                 inlArgInfo[lclNum].argBashTmpNode = op1;
17785             }
17786             else
17787             {
17788                 /* Get a small LCL_VAR node */
17789                 op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp));
17790                 /* No bashing of this argument */
17791                 inlArgInfo[lclNum].argBashTmpNode = nullptr;
17792             }
17793         }
17794     }
17795
17796     /* Mark the argument as used */
17797
17798     inlArgInfo[lclNum].argIsUsed = true;
17799
17800     return op1;
17801 }
17802
17803 /******************************************************************************
17804  Is this the original "this" argument to the call being inlined?
17805
17806  Note that we do not inline methods with "starg 0", and so we do not need to
17807  worry about it.
17808 */
17809
17810 BOOL Compiler::impInlineIsThis(GenTreePtr tree, InlArgInfo* inlArgInfo)
17811 {
17812     assert(compIsForInlining());
17813     return (tree->gtOper == GT_LCL_VAR && tree->gtLclVarCommon.gtLclNum == inlArgInfo[0].argTmpNum);
17814 }
17815
17816 //-----------------------------------------------------------------------------
17817 // This function checks if a dereference in the inlinee can guarantee that
17818 // the "this" is non-NULL.
17819 // If we haven't hit a branch or a side effect, and we are dereferencing
17820 // from 'this' to access a field or to make a GTF_CALL_NULLCHECK call,
17821 // then we can avoid a separate null pointer check.
17822 //
17823 // "additionalTreesToBeEvaluatedBefore" is the set of pending trees that have
17824 // not yet been added to the statement list, and which have been removed from
17825 // verCurrentState.esStack[].
17826
17827 BOOL Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTreePtr  additionalTreesToBeEvaluatedBefore,
17828                                                                   GenTreePtr  variableBeingDereferenced,
17829                                                                   InlArgInfo* inlArgInfo)
17830 {
17831     assert(compIsForInlining());
17832     assert(opts.OptEnabled(CLFLG_INLINING));
17833
17834     BasicBlock* block = compCurBB;
17835
17836     GenTreePtr stmt;
17837     GenTreePtr expr;
17838
17839     if (block != fgFirstBB)
17840     {
17841         return FALSE;
17842     }
17843
17844     if (!impInlineIsThis(variableBeingDereferenced, inlArgInfo))
17845     {
17846         return FALSE;
17847     }
17848
17849     if (additionalTreesToBeEvaluatedBefore &&
17850         GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTreesToBeEvaluatedBefore->gtFlags))
17851     {
17852         return FALSE;
17853     }
17854
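    // None of the statements already appended for the inlinee, nor any tree still
    // on the evaluation stack, may have globally visible side effects; otherwise
    // the dereference cannot be moved ahead of them.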
17855     for (stmt = impTreeList->gtNext; stmt; stmt = stmt->gtNext)
17856     {
17857         expr = stmt->gtStmt.gtStmtExpr;
17858
17859         if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags))
17860         {
17861             return FALSE;
17862         }
17863     }
17864
17865     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
17866     {
17867         unsigned stackTreeFlags = verCurrentState.esStack[level].val->gtFlags;
17868         if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags))
17869         {
17870             return FALSE;
17871         }
17872     }
17873
17874     return TRUE;
17875 }
17876
17877 /******************************************************************************/
17878 // Check the inlining eligibility of this GT_CALL node.
17879 // Mark GTF_CALL_INLINE_CANDIDATE on the GT_CALL node
17880
17881 // Todo: find a way to record the failure reasons in the IR (or
17882 // otherwise build tree context) so when we do the inlining pass we
17883 // can capture these reasons
17884
17885 void Compiler::impMarkInlineCandidate(GenTreePtr             callNode,
17886                                       CORINFO_CONTEXT_HANDLE exactContextHnd,
17887                                       CORINFO_CALL_INFO*     callInfo)
17888 {
17889     // Let the strategy know there's another call
17890     impInlineRoot()->m_inlineStrategy->NoteCall();
17891
17892     if (!opts.OptEnabled(CLFLG_INLINING))
17893     {
17894         /* XXX Mon 8/18/2008
17895          * This assert is misleading.  The caller does not ensure that we have CLFLG_INLINING set before
17896          * calling impMarkInlineCandidate.  However, if this assert trips it means that we're an inlinee and
17897          * CLFLG_MINOPT is set.  That doesn't make a lot of sense.  If you hit this assert, work back and
17898          * figure out why we did not set MAXOPT for this compile.
17899          */
17900         assert(!compIsForInlining());
17901         return;
17902     }
17903
17904     if (compIsForImportOnly())
17905     {
17906         // Don't bother creating the inline candidate during verification.
17907         // Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification
17908         // that leads to the creation of multiple instances of Compiler.
17909         return;
17910     }
17911
17912     GenTreeCall* call = callNode->AsCall();
17913     InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate");
17914
17915     // Don't inline if not optimizing root method
17916     if (opts.compDbgCode)
17917     {
17918         inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN);
17919         return;
17920     }
17921
17922     // Don't inline if inlining into root method is disabled.
17923     if (InlineStrategy::IsNoInline(info.compCompHnd, info.compMethodHnd))
17924     {
17925         inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE);
17926         return;
17927     }
17928
17929     // Inlining candidate determination needs to honor only the IL tail prefix.
17930     // Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive).
17931     if (call->IsTailPrefixedCall())
17932     {
17933         inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX);
17934         return;
17935     }
17936
17937     // Tail recursion elimination takes precedence over inlining.
17938     // TODO: We may want to do some of the additional checks from fgMorphCall
17939     // here to reduce the chance we don't inline a call that won't be optimized
17940     // as a fast tail call or turned into a loop.
17941     if (gtIsRecursiveCall(call) && call->IsImplicitTailCall())
17942     {
17943         inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL);
17944         return;
17945     }
17946
17947     if ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT)
17948     {
17949         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT);
17950         return;
17951     }
17952
17953     /* Ignore helper calls */
17954
17955     if (call->gtCallType == CT_HELPER)
17956     {
17957         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER);
17958         return;
17959     }
17960
17961     /* Ignore indirect calls */
17962     if (call->gtCallType == CT_INDIRECT)
17963     {
17964         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED);
17965         return;
17966     }
17967
17968     /* I removed the check for BBJ_THROW.  BBJ_THROW is usually marked as rarely run.  This more or less
17969      * restricts the inliner to non-expanding inlines.  I removed the check to allow for non-expanding
17970      * inlining in throw blocks.  I should consider the same thing for catch and filter regions. */
17971
17972     CORINFO_METHOD_HANDLE fncHandle = call->gtCallMethHnd;
17973     unsigned              methAttr;
17974
17975     // Reuse method flags from the original callInfo if possible
17976     if (fncHandle == callInfo->hMethod)
17977     {
17978         methAttr = callInfo->methodFlags;
17979     }
17980     else
17981     {
17982         methAttr = info.compCompHnd->getMethodAttribs(fncHandle);
17983     }
17984
17985 #ifdef DEBUG
17986     if (compStressCompile(STRESS_FORCE_INLINE, 0))
17987     {
17988         methAttr |= CORINFO_FLG_FORCEINLINE;
17989     }
17990 #endif
17991
17992     // Check for COMPlus_AggressiveInlining
17993     if (compDoAggressiveInlining)
17994     {
17995         methAttr |= CORINFO_FLG_FORCEINLINE;
17996     }
17997
17998     if (!(methAttr & CORINFO_FLG_FORCEINLINE))
17999     {
18000         /* Don't bother inlining call sites that are in a catch handler or filter region */
18001         if (bbInCatchHandlerILRange(compCurBB))
18002         {
18003 #ifdef DEBUG
18004             if (verbose)
18005             {
18006                 printf("\nWill not inline blocks that are in the catch handler region\n");
18007             }
18008
18009 #endif
18010
18011             inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH);
18012             return;
18013         }
18014
18015         if (bbInFilterILRange(compCurBB))
18016         {
18017 #ifdef DEBUG
18018             if (verbose)
18019             {
18020                 printf("\nWill not inline blocks that are in the filter region\n");
18021             }
18022 #endif
18023
18024             inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER);
18025             return;
18026         }
18027     }
18028
18029     /* If the caller's stack frame is marked, then we can't do any inlining. Period. */
18030
18031     if (opts.compNeedSecurityCheck)
18032     {
18033         inlineResult.NoteFatal(InlineObservation::CALLER_NEEDS_SECURITY_CHECK);
18034         return;
18035     }
18036
18037     /* Check if we tried to inline this method before */
18038
18039     if (methAttr & CORINFO_FLG_DONT_INLINE)
18040     {
18041         inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE);
18042         return;
18043     }
18044
18045     /* Cannot inline synchronized methods */
18046
18047     if (methAttr & CORINFO_FLG_SYNCH)
18048     {
18049         inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED);
18050         return;
18051     }
18052
18053     /* Do not inline if callee needs security checks (since they would then mark the wrong frame) */
18054
18055     if (methAttr & CORINFO_FLG_SECURITYCHECK)
18056     {
18057         inlineResult.NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
18058         return;
18059     }
18060
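    // All the cheap checks have passed; ask impCheckCanInline (and through it, the
    // VM) whether this call can actually be recorded as an inline candidate.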
18061     InlineCandidateInfo* inlineCandidateInfo = nullptr;
18062     impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult);
18063
18064     if (inlineResult.IsFailure())
18065     {
18066         return;
18067     }
18068
18069     // The old value should be NULL
18070     assert(call->gtInlineCandidateInfo == nullptr);
18071
18072     call->gtInlineCandidateInfo = inlineCandidateInfo;
18073
18074     // Mark the call node as inline candidate.
18075     call->gtFlags |= GTF_CALL_INLINE_CANDIDATE;
18076
18077     // Let the strategy know there's another candidate.
18078     impInlineRoot()->m_inlineStrategy->NoteCandidate();
18079
18080     // Since we're not actually inlining yet, and this call site is
18081     // still just an inline candidate, there's nothing to report.
18082     inlineResult.SetReported();
18083 }
18084
18085 /******************************************************************************/
18086 // Returns true if the given intrinsic will be implemented by target-specific
18087 // instructions
18088
18089 bool Compiler::IsTargetIntrinsic(CorInfoIntrinsics intrinsicId)
18090 {
18091 #if defined(_TARGET_AMD64_) || (defined(_TARGET_X86_) && !defined(LEGACY_BACKEND))
18092     switch (intrinsicId)
18093     {
18094         // Amd64 only has SSE2 instructions to directly compute sqrt/abs.
18095         //
18096         // TODO: Because the x86 backend only targets SSE for floating-point code,
18097         //       it does not treat Sine, Cosine, or Round as intrinsics (JIT32
18098         //       implemented those intrinsics as x87 instructions). If this poses
18099         //       a CQ problem, it may be necessary to change the implementation of
18100         //       the helper calls to decrease call overhead or switch back to the
18101         //       x87 instructions. This is tracked by #7097.
18102         case CORINFO_INTRINSIC_Sqrt:
18103         case CORINFO_INTRINSIC_Abs:
18104             return true;
18105
18106         default:
18107             return false;
18108     }
18109 #elif defined(_TARGET_ARM64_)
18110     switch (intrinsicId)
18111     {
18112         case CORINFO_INTRINSIC_Sqrt:
18113         case CORINFO_INTRINSIC_Abs:
18114         case CORINFO_INTRINSIC_Round:
18115             return true;
18116
18117         default:
18118             return false;
18119     }
18120 #elif defined(_TARGET_ARM_)
18121     switch (intrinsicId)
18122     {
18123         case CORINFO_INTRINSIC_Sqrt:
18124         case CORINFO_INTRINSIC_Abs:
18125         case CORINFO_INTRINSIC_Round:
18126             return true;
18127
18128         default:
18129             return false;
18130     }
18131 #elif defined(_TARGET_X86_)
18132     switch (intrinsicId)
18133     {
18134         case CORINFO_INTRINSIC_Sin:
18135         case CORINFO_INTRINSIC_Cos:
18136         case CORINFO_INTRINSIC_Sqrt:
18137         case CORINFO_INTRINSIC_Abs:
18138         case CORINFO_INTRINSIC_Round:
18139             return true;
18140
18141         default:
18142             return false;
18143     }
18144 #else
18145     // TODO: This portion of logic is not implemented for other architectures.
18146     // The reason for returning true is that, on all other architectures, the only
18147     // intrinsics enabled are target intrinsics.
18148     return true;
18149 #endif //_TARGET_AMD64_
18150 }
18151
18152 /******************************************************************************/
18153 // Returns true if the given intrinsic will be implemented by calling System.Math
18154 // methods.
18155
18156 bool Compiler::IsIntrinsicImplementedByUserCall(CorInfoIntrinsics intrinsicId)
18157 {
18158     // Currently, if a math intrinsic is not implemented by target-specific
18159     // instructions, it will be implemented by a System.Math call. In the
18160     // future, if we turn to implementing some of them with helper calls,
18161     // this predicate needs to be revisited.
18162     return !IsTargetIntrinsic(intrinsicId);
18163 }
18164
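/******************************************************************************/
// Returns true if the given intrinsic is one of the System.Math intrinsics,
// whether it ends up expanded to target instructions or left as a user call.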
18165 bool Compiler::IsMathIntrinsic(CorInfoIntrinsics intrinsicId)
18166 {
18167     switch (intrinsicId)
18168     {
18169         case CORINFO_INTRINSIC_Sin:
18170         case CORINFO_INTRINSIC_Sqrt:
18171         case CORINFO_INTRINSIC_Abs:
18172         case CORINFO_INTRINSIC_Cos:
18173         case CORINFO_INTRINSIC_Round:
18174         case CORINFO_INTRINSIC_Cosh:
18175         case CORINFO_INTRINSIC_Sinh:
18176         case CORINFO_INTRINSIC_Tan:
18177         case CORINFO_INTRINSIC_Tanh:
18178         case CORINFO_INTRINSIC_Asin:
18179         case CORINFO_INTRINSIC_Acos:
18180         case CORINFO_INTRINSIC_Atan:
18181         case CORINFO_INTRINSIC_Atan2:
18182         case CORINFO_INTRINSIC_Log10:
18183         case CORINFO_INTRINSIC_Pow:
18184         case CORINFO_INTRINSIC_Exp:
18185         case CORINFO_INTRINSIC_Ceiling:
18186         case CORINFO_INTRINSIC_Floor:
18187             return true;
18188         default:
18189             return false;
18190     }
18191 }
18192
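// Returns true if the given tree is a GT_INTRINSIC node for one of the math
// intrinsics listed above.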
18193 bool Compiler::IsMathIntrinsic(GenTreePtr tree)
18194 {
18195     return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->gtIntrinsic.gtIntrinsicId);
18196 }
18197 /*****************************************************************************/