CoreRT calli transformation
[platform/upstream/coreclr.git] / src / jit / importer.cpp
1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
4
5 /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
6 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
7 XX                                                                           XX
8 XX                           Importer                                        XX
9 XX                                                                           XX
10 XX   Imports the given method and converts it to semantic trees              XX
11 XX                                                                           XX
12 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
13 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
14 */
15
16 #include "jitpch.h"
17 #ifdef _MSC_VER
18 #pragma hdrstop
19 #endif
20
21 #include "corexcep.h"
22
23 #define Verify(cond, msg)                                                                                              \
24     do                                                                                                                 \
25     {                                                                                                                  \
26         if (!(cond))                                                                                                   \
27         {                                                                                                              \
28             verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
29         }                                                                                                              \
30     } while (0)
31
32 #define VerifyOrReturn(cond, msg)                                                                                      \
33     do                                                                                                                 \
34     {                                                                                                                  \
35         if (!(cond))                                                                                                   \
36         {                                                                                                              \
37             verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
38             return;                                                                                                    \
39         }                                                                                                              \
40     } while (0)
41
42 #define VerifyOrReturnSpeculative(cond, msg, speculative)                                                              \
43     do                                                                                                                 \
44     {                                                                                                                  \
45         if (speculative)                                                                                               \
46         {                                                                                                              \
47             if (!(cond))                                                                                               \
48             {                                                                                                          \
49                 return false;                                                                                          \
50             }                                                                                                          \
51         }                                                                                                              \
52         else                                                                                                           \
53         {                                                                                                              \
54             if (!(cond))                                                                                               \
55             {                                                                                                          \
56                 verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                   \
57                 return false;                                                                                          \
58             }                                                                                                          \
59         }                                                                                                              \
60     } while (0)
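// A minimal illustration of how these macros are used (the Verify call below is
// taken from impResolveToken later in this file): when verification is required,
// a failed condition records the failure (and, for the *OrReturn variants,
// returns) instead of asserting.
//
//     Verify(eeTryResolveToken(pResolvedToken), "Token resolution failed");
//
// VerifyOrReturnSpeculative only raises the verification failure when
// 'speculative' is false; in speculative mode it simply returns false.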
61
62 /*****************************************************************************/
63
64 void Compiler::impInit()
65 {
66
67 #ifdef DEBUG
68     impTreeList        = nullptr;
69     impTreeLast        = nullptr;
70     impInlinedCodeSize = 0;
71 #endif
72 }
73
74 /*****************************************************************************
75  *
76  *  Pushes the given tree on the stack.
77  */
78
79 void Compiler::impPushOnStack(GenTreePtr tree, typeInfo ti)
80 {
81     /* Check for overflow. If inlining, we may be using a bigger stack */
82
83     if ((verCurrentState.esStackDepth >= info.compMaxStack) &&
84         (verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0)))
85     {
86         BADCODE("stack overflow");
87     }
88
89 #ifdef DEBUG
90     // If we are pushing a struct, make certain we know the precise type!
91     if (tree->TypeGet() == TYP_STRUCT)
92     {
93         assert(ti.IsType(TI_STRUCT));
94         CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle();
95         assert(clsHnd != NO_CLASS_HANDLE);
96     }
97
98     if (tiVerificationNeeded && !ti.IsDead())
99     {
100         assert(typeInfo::AreEquivalent(NormaliseForStack(ti), ti)); // types are normalized
101
102         // The ti type is consistent with the tree type.
103         //
104
105         // On 64-bit systems, nodes whose "proper" type is "native int" get labeled TYP_LONG.
106         // In the verification type system, we always transform "native int" to "TI_INT".
107         // Ideally, we would keep track of which nodes labeled "TYP_LONG" are really "native int", but
108         // attempts to do that have proved too difficult.  Instead, we'll assume that in checks like this,
109         // when there's a mismatch, it's because of this reason -- the typeInfo::AreEquivalentModuloNativeInt
110         // method used in the last disjunct allows exactly this mismatch.
111         assert(ti.IsDead() || ti.IsByRef() && (tree->TypeGet() == TYP_I_IMPL || tree->TypeGet() == TYP_BYREF) ||
112                ti.IsUnboxedGenericTypeVar() && tree->TypeGet() == TYP_REF ||
113                ti.IsObjRef() && tree->TypeGet() == TYP_REF || ti.IsMethod() && tree->TypeGet() == TYP_I_IMPL ||
114                ti.IsType(TI_STRUCT) && tree->TypeGet() != TYP_REF ||
115                typeInfo::AreEquivalentModuloNativeInt(NormaliseForStack(ti),
116                                                       NormaliseForStack(typeInfo(tree->TypeGet()))));
117
118         // If it is a struct type, make certain we normalized the primitive types
119         assert(!ti.IsType(TI_STRUCT) ||
120                info.compCompHnd->getTypeForPrimitiveValueClass(ti.GetClassHandle()) == CORINFO_TYPE_UNDEF);
121     }
122
123 #if VERBOSE_VERIFY
124     if (VERBOSE && tiVerificationNeeded)
125     {
126         printf("\n");
127         printf(TI_DUMP_PADDING);
128         printf("About to push to stack: ");
129         ti.Dump();
130     }
131 #endif // VERBOSE_VERIFY
132
133 #endif // DEBUG
134
135     verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti;
136     verCurrentState.esStack[verCurrentState.esStackDepth++].val      = tree;
137
138     if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
139     {
140         compLongUsed = true;
141     }
142     else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
143     {
144         compFloatingPointUsed = true;
145     }
146 }
147
148 /******************************************************************************/
149 // Used in the inliner, where we can assume type-safe code. Please don't use this in the importer!
150 inline void Compiler::impPushOnStackNoType(GenTreePtr tree)
151 {
152     assert(verCurrentState.esStackDepth < impStkSize);
153     INDEBUG(verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = typeInfo());
154     verCurrentState.esStack[verCurrentState.esStackDepth++].val              = tree;
155
156     if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
157     {
158         compLongUsed = true;
159     }
160     else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
161     {
162         compFloatingPointUsed = true;
163     }
164 }
165
166 inline void Compiler::impPushNullObjRefOnStack()
167 {
168     impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL));
169 }
170
171 // This method gets called when we run into unverifiable code
172 // (and we are verifying the method)
173
174 inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file)
175                                                           DEBUGARG(unsigned line))
176 {
177     // Remember that the code is not verifiable
178     // Note that the method may yet pass canSkipMethodVerification(),
179     // and so the presence of unverifiable code may not be an issue.
180     tiIsVerifiableCode = FALSE;
181
182 #ifdef DEBUG
183     const char* tail = strrchr(file, '\\');
184     if (tail)
185     {
186         file = tail + 1;
187     }
188
189     if (JitConfig.JitBreakOnUnsafeCode())
190     {
191         assert(!"Unsafe code detected");
192     }
193 #endif
194
195     JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
196             msg, info.compFullName, impCurOpcName, impCurOpcOffs));
197
198     if (verNeedsVerification() || compIsForImportOnly())
199     {
200         JITLOG((LL_ERROR, "Verification failure:  %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
201                 msg, info.compFullName, impCurOpcName, impCurOpcOffs));
202         verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line));
203     }
204 }
205
206 inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file)
207                                                                     DEBUGARG(unsigned line))
208 {
209     JITLOG((LL_ERROR, "Verification failure:  %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
210             msg, info.compFullName, impCurOpcName, impCurOpcOffs));
211
212 #ifdef DEBUG
213     //    BreakIfDebuggerPresent();
214     if (getBreakOnBadCode())
215     {
216         assert(!"Typechecking error");
217     }
218 #endif
219
220     RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr);
221     UNREACHABLE();
222 }
223
224 // Helper function that tells us whether the IL instruction at the given address
225 // consumes an address that is at the top of the stack. We use it to avoid
226 // marking a local as address-taken (lvAddrTaken) when that is not necessary.
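// Illustration (the struct and field names here are hypothetical): for an IL
// sequence such as
//
//          ldloca.0
//          ldfld   int32 SomeStruct::IntField
//
// the ldfld consumes the address pushed by ldloca.0, so the local does not need
// to be marked as address-taken. Small (sub-int) fields are deliberately
// excluded by the code below.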
227 bool Compiler::impILConsumesAddr(const BYTE* codeAddr, CORINFO_METHOD_HANDLE fncHandle, CORINFO_MODULE_HANDLE scpHandle)
228 {
229     assert(!compIsForInlining());
230
231     OPCODE opcode;
232
233     opcode = (OPCODE)getU1LittleEndian(codeAddr);
234
235     switch (opcode)
236     {
237         // case CEE_LDFLDA: We're taking this one out because, if you have a sequence
238         // like
239         //
240         //          ldloca.0
241         //          ldflda whatever
242         //
243         // on a primitive-like struct, you end up after morphing with the address of a
244         // local that is not marked as address-taken, which is wrong. Also, ldflda is
245         // usually used for structs that contain other structs, which isn't a case we
246         // handle very well right now for other reasons.
247
248         case CEE_LDFLD:
249         {
250             // We won't collapse small fields. This is probably not the right place to have this
251             // check, but we're only using the function for this purpose, and it is easy to factor
252             // out if we need to do so.
253
254             CORINFO_RESOLVED_TOKEN resolvedToken;
255             impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field);
256
257             CORINFO_CLASS_HANDLE clsHnd;
258             var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField, &clsHnd));
259
260             // Preserve 'small' int types
261             if (lclTyp > TYP_INT)
262             {
263                 lclTyp = genActualType(lclTyp);
264             }
265
266             if (varTypeIsSmall(lclTyp))
267             {
268                 return false;
269             }
270
271             return true;
272         }
273         default:
274             break;
275     }
276
277     return false;
278 }
279
280 void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind)
281 {
282     pResolvedToken->tokenContext = impTokenLookupContextHandle;
283     pResolvedToken->tokenScope   = info.compScopeHnd;
284     pResolvedToken->token        = getU4LittleEndian(addr);
285     pResolvedToken->tokenType    = kind;
286
287     if (!tiVerificationNeeded)
288     {
289         info.compCompHnd->resolveToken(pResolvedToken);
290     }
291     else
292     {
293         Verify(eeTryResolveToken(pResolvedToken), "Token resolution failed");
294     }
295 }
296
297 /*****************************************************************************
298  *
299  *  Pop one tree from the stack.
300  */
301
302 StackEntry Compiler::impPopStack()
303 {
304     if (verCurrentState.esStackDepth == 0)
305     {
306         BADCODE("stack underflow");
307     }
308
309 #ifdef DEBUG
310 #if VERBOSE_VERIFY
311     if (VERBOSE && tiVerificationNeeded)
312     {
313         JITDUMP("\n");
314         printf(TI_DUMP_PADDING);
315         printf("About to pop from the stack: ");
316         const typeInfo& ti = verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo;
317         ti.Dump();
318     }
319 #endif // VERBOSE_VERIFY
320 #endif // DEBUG
321
322     return verCurrentState.esStack[--verCurrentState.esStackDepth];
323 }
324
325 StackEntry Compiler::impPopStack(CORINFO_CLASS_HANDLE& structType)
326 {
327     StackEntry ret = impPopStack();
328     structType     = verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo.GetClassHandle();
329     return (ret);
330 }
331
332 GenTreePtr Compiler::impPopStack(typeInfo& ti)
333 {
334     StackEntry ret = impPopStack();
335     ti             = ret.seTypeInfo;
336     return (ret.val);
337 }
338
339 /*****************************************************************************
340  *
341  *  Peek at the n'th (0-based) tree from the top of the stack.
342  */
343
344 StackEntry& Compiler::impStackTop(unsigned n)
345 {
346     if (verCurrentState.esStackDepth <= n)
347     {
348         BADCODE("stack underflow");
349     }
350
351     return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1];
352 }
353 /*****************************************************************************
354  *  Some trees are spilled in a special way. While unspilling them, or making
355  *  a copy, they need to be handled specially. This function enumerates the
356  *  operators that are possible after spilling.
357  */
358
359 #ifdef DEBUG // only used in asserts
360 static bool impValidSpilledStackEntry(GenTreePtr tree)
361 {
362     if (tree->gtOper == GT_LCL_VAR)
363     {
364         return true;
365     }
366
367     if (tree->OperIsConst())
368     {
369         return true;
370     }
371
372     return false;
373 }
374 #endif
375
376 /*****************************************************************************
377  *
378  *  The following logic is used to save/restore stack contents.
379  *  If 'copy' is true, then we make a copy of the trees on the stack. These
380  *  have to all be cloneable/spilled values.
381  */
382
383 void Compiler::impSaveStackState(SavedStack* savePtr, bool copy)
384 {
385     savePtr->ssDepth = verCurrentState.esStackDepth;
386
387     if (verCurrentState.esStackDepth)
388     {
389         savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth];
390         size_t saveSize  = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees);
391
392         if (copy)
393         {
394             StackEntry* table = savePtr->ssTrees;
395
396             /* Make a fresh copy of all the stack entries */
397
398             for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++)
399             {
400                 table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo;
401                 GenTreePtr tree   = verCurrentState.esStack[level].val;
402
403                 assert(impValidSpilledStackEntry(tree));
404
405                 switch (tree->gtOper)
406                 {
407                     case GT_CNS_INT:
408                     case GT_CNS_LNG:
409                     case GT_CNS_DBL:
410                     case GT_CNS_STR:
411                     case GT_LCL_VAR:
412                         table->val = gtCloneExpr(tree);
413                         break;
414
415                     default:
416                         assert(!"Bad oper - Not covered by impValidSpilledStackEntry()");
417                         break;
418                 }
419             }
420         }
421         else
422         {
423             memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize);
424         }
425     }
426 }
427
428 void Compiler::impRestoreStackState(SavedStack* savePtr)
429 {
430     verCurrentState.esStackDepth = savePtr->ssDepth;
431
432     if (verCurrentState.esStackDepth)
433     {
434         memcpy(verCurrentState.esStack, savePtr->ssTrees,
435                verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack));
436     }
437 }
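// A sketch of the intended save/restore pattern (illustrative only):
//
//     SavedStack saved;
//     impSaveStackState(&saved, true);   // 'true': clone each constant/lclVar entry
//     ... build or import trees that push/pop the evaluation stack ...
//     impRestoreStackState(&saved);      // depth and entries are put back
//
// With copy == false only the StackEntry array itself is copied, so the saved
// entries still point at the same trees as the live stack.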
438
439 /*****************************************************************************
440  *
441  *  Get the tree list started for a new basic block.
442  */
443 inline void Compiler::impBeginTreeList()
444 {
445     assert(impTreeList == nullptr && impTreeLast == nullptr);
446
447     impTreeList = impTreeLast = new (this, GT_BEG_STMTS) GenTree(GT_BEG_STMTS, TYP_VOID);
448 }
449
450 /*****************************************************************************
451  *
452  *  Store the given start and end stmt in the given basic block. This is
453  *  mostly called by impEndTreeList(BasicBlock *block). It is called
454  *  directly only for handling CEE_LEAVEs out of finally-protected try regions.
455  */
456
457 inline void Compiler::impEndTreeList(BasicBlock* block, GenTreePtr firstStmt, GenTreePtr lastStmt)
458 {
459     assert(firstStmt->gtOper == GT_STMT);
460     assert(lastStmt->gtOper == GT_STMT);
461
462     /* Make the list circular, so that we can easily walk it backwards */
463
464     firstStmt->gtPrev = lastStmt;
465
466     /* Store the tree list in the basic block */
467
468     block->bbTreeList = firstStmt;
469
470     /* The block should not already be marked as imported */
471     assert((block->bbFlags & BBF_IMPORTED) == 0);
472
473     block->bbFlags |= BBF_IMPORTED;
474 }
475
476 /*****************************************************************************
477  *
478  *  Store the current tree list in the given basic block.
479  */
480
481 inline void Compiler::impEndTreeList(BasicBlock* block)
482 {
483     assert(impTreeList->gtOper == GT_BEG_STMTS);
484
485     GenTreePtr firstTree = impTreeList->gtNext;
486
487     if (!firstTree)
488     {
489         /* The block should not already be marked as imported */
490         assert((block->bbFlags & BBF_IMPORTED) == 0);
491
492         // Empty block. Just mark it as imported
493         block->bbFlags |= BBF_IMPORTED;
494     }
495     else
496     {
497         // Ignore the GT_BEG_STMTS
498         assert(firstTree->gtPrev == impTreeList);
499
500         impEndTreeList(block, firstTree, impTreeLast);
501     }
502
503 #ifdef DEBUG
504     if (impLastILoffsStmt != nullptr)
505     {
506         impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
507         impLastILoffsStmt                          = nullptr;
508     }
509
510     impTreeList = impTreeLast = nullptr;
511 #endif
512 }
513
514 /*****************************************************************************
515  *
516  *  Check that storing the given tree doesn't mess up the semantic order. Note
517  *  that this has only limited value as we can only check [0..chkLevel).
518  */
519
520 inline void Compiler::impAppendStmtCheck(GenTreePtr stmt, unsigned chkLevel)
521 {
522 #ifndef DEBUG
523     return;
524 #else
525     assert(stmt->gtOper == GT_STMT);
526
527     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
528     {
529         chkLevel = verCurrentState.esStackDepth;
530     }
531
532     if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE)
533     {
534         return;
535     }
536
537     GenTreePtr tree = stmt->gtStmt.gtStmtExpr;
538
539     // Calls can only be appended if there are no trees with GTF_GLOB_EFFECT on the stack
540
541     if (tree->gtFlags & GTF_CALL)
542     {
543         for (unsigned level = 0; level < chkLevel; level++)
544         {
545             assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0);
546         }
547     }
548
549     if (tree->gtOper == GT_ASG)
550     {
551         // For an assignment to a local variable, all references of that
552         // variable have to be spilled. If it is aliased, all calls and
553         // indirect accesses have to be spilled
554
555         if (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR)
556         {
557             unsigned lclNum = tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
558             for (unsigned level = 0; level < chkLevel; level++)
559             {
560                 assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum, false));
561                 assert(!lvaTable[lclNum].lvAddrExposed ||
562                        (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0);
563             }
564         }
565
566         // If the access may be to global memory, all side effects have to be spilled.
567
568         else if (tree->gtOp.gtOp1->gtFlags & GTF_GLOB_REF)
569         {
570             for (unsigned level = 0; level < chkLevel; level++)
571             {
572                 assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0);
573             }
574         }
575     }
576 #endif
577 }
578
579 /*****************************************************************************
580  *
581  *  Append the given GT_STMT node to the current block's tree list.
582  *  [0..chkLevel) is the portion of the stack which we will check for
583  *    interference with stmt and spill if needed.
584  */
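// Note on chkLevel (restating the code below): CHECK_SPILL_ALL means "consider
// the whole evaluation stack" and is rewritten to verCurrentState.esStackDepth;
// CHECK_SPILL_NONE (or a chkLevel of 0) skips the spill checks entirely.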
585
586 inline void Compiler::impAppendStmt(GenTreePtr stmt, unsigned chkLevel)
587 {
588     assert(stmt->gtOper == GT_STMT);
589     noway_assert(impTreeLast != nullptr);
590
591     /* If the statement being appended has any side-effects, check the stack
592        to see if anything needs to be spilled to preserve correct ordering. */
593
594     GenTreePtr expr  = stmt->gtStmt.gtStmtExpr;
595     unsigned   flags = expr->gtFlags & GTF_GLOB_EFFECT;
596
597     // Assignments to (unaliased) locals don't count as side-effects, as
598     // we handle them specially using impSpillLclRefs(). Temp locals should
599     // be fine too.
600
601     if ((expr->gtOper == GT_ASG) && (expr->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
602         !(expr->gtOp.gtOp1->gtFlags & GTF_GLOB_REF) && !gtHasLocalsWithAddrOp(expr->gtOp.gtOp2))
603     {
604         unsigned op2Flags = expr->gtOp.gtOp2->gtFlags & GTF_GLOB_EFFECT;
605         assert(flags == (op2Flags | GTF_ASG));
606         flags = op2Flags;
607     }
608
609     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
610     {
611         chkLevel = verCurrentState.esStackDepth;
612     }
613
614     if (chkLevel && chkLevel != (unsigned)CHECK_SPILL_NONE)
615     {
616         assert(chkLevel <= verCurrentState.esStackDepth);
617
618         if (flags)
619         {
620             // If there is a call, we have to spill global refs
621             bool spillGlobEffects = (flags & GTF_CALL) ? true : false;
622
623             if (expr->gtOper == GT_ASG)
624             {
625                 GenTree* lhs = expr->gtGetOp1();
626                 // If we are assigning to a global ref, we have to spill global refs on stack.
627                 // TODO-1stClassStructs: Previously, spillGlobEffects was set to true for
628                 // GT_INITBLK and GT_COPYBLK, but this is overly conservative, and should be
629                 // revisited. (Note that it was NOT set to true for GT_COPYOBJ.)
630                 if (!expr->OperIsBlkOp())
631                 {
632                     // If we are assigning to a global ref, we have to spill global refs on stack
633                     if ((lhs->gtFlags & GTF_GLOB_REF) != 0)
634                     {
635                         spillGlobEffects = true;
636                     }
637                 }
638                 else if ((lhs->OperIsBlk() && !lhs->AsBlk()->HasGCPtr()) ||
639                          ((lhs->OperGet() == GT_LCL_VAR) &&
640                           (lvaTable[lhs->AsLclVarCommon()->gtLclNum].lvStructGcCount == 0)))
641                 {
642                     spillGlobEffects = true;
643                 }
644             }
645
646             impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt"));
647         }
648         else
649         {
650             impSpillSpecialSideEff();
651         }
652     }
653
654     impAppendStmtCheck(stmt, chkLevel);
655
656     /* Point 'prev' at the previous node, so that we can walk backwards */
657
658     stmt->gtPrev = impTreeLast;
659
660     /* Append the expression statement to the list */
661
662     impTreeLast->gtNext = stmt;
663     impTreeLast         = stmt;
664
665 #ifdef FEATURE_SIMD
666     impMarkContiguousSIMDFieldAssignments(stmt);
667 #endif
668
669     /* Once we set impCurStmtOffs in an appended tree, we are ready to
670        report the following offsets. So reset impCurStmtOffs */
671
672     if (impTreeLast->gtStmt.gtStmtILoffsx == impCurStmtOffs)
673     {
674         impCurStmtOffsSet(BAD_IL_OFFSET);
675     }
676
677 #ifdef DEBUG
678     if (impLastILoffsStmt == nullptr)
679     {
680         impLastILoffsStmt = stmt;
681     }
682
683     if (verbose)
684     {
685         printf("\n\n");
686         gtDispTree(stmt);
687     }
688 #endif
689 }
690
691 /*****************************************************************************
692  *
693  *  Insert the given GT_STMT "stmt" before GT_STMT "stmtBefore"
694  */
695
696 inline void Compiler::impInsertStmtBefore(GenTreePtr stmt, GenTreePtr stmtBefore)
697 {
698     assert(stmt->gtOper == GT_STMT);
699     assert(stmtBefore->gtOper == GT_STMT);
700
701     GenTreePtr stmtPrev = stmtBefore->gtPrev;
702     stmt->gtPrev        = stmtPrev;
703     stmt->gtNext        = stmtBefore;
704     stmtPrev->gtNext    = stmt;
705     stmtBefore->gtPrev  = stmt;
706 }
707
708 /*****************************************************************************
709  *
710  *  Append the given expression tree to the current block's tree list.
711  *  Return the newly created statement.
712  */
713
714 GenTreePtr Compiler::impAppendTree(GenTreePtr tree, unsigned chkLevel, IL_OFFSETX offset)
715 {
716     assert(tree);
717
718     /* Allocate an 'expression statement' node */
719
720     GenTreePtr expr = gtNewStmt(tree, offset);
721
722     /* Append the statement to the current block's stmt list */
723
724     impAppendStmt(expr, chkLevel);
725
726     return expr;
727 }
728
729 /*****************************************************************************
730  *
731  *  Insert the given expression tree before GT_STMT "stmtBefore"
732  */
733
734 void Compiler::impInsertTreeBefore(GenTreePtr tree, IL_OFFSETX offset, GenTreePtr stmtBefore)
735 {
736     assert(stmtBefore->gtOper == GT_STMT);
737
738     /* Allocate an 'expression statement' node */
739
740     GenTreePtr expr = gtNewStmt(tree, offset);
741
742     /* Insert the statement before 'stmtBefore' in the current block's stmt list */
743
744     impInsertStmtBefore(expr, stmtBefore);
745 }
746
747 /*****************************************************************************
748  *
749  *  Append an assignment of the given value to a temp to the current tree list.
750  *  curLevel is the stack level for which the spill to the temp is being done.
751  */
752
753 void Compiler::impAssignTempGen(unsigned    tmp,
754                                 GenTreePtr  val,
755                                 unsigned    curLevel,
756                                 GenTreePtr* pAfterStmt, /* = NULL */
757                                 IL_OFFSETX  ilOffset,   /* = BAD_IL_OFFSET */
758                                 BasicBlock* block       /* = NULL */
759                                 )
760 {
761     GenTreePtr asg = gtNewTempAssign(tmp, val);
762
763     if (!asg->IsNothingNode())
764     {
765         if (pAfterStmt)
766         {
767             GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
768             *pAfterStmt        = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
769         }
770         else
771         {
772             impAppendTree(asg, curLevel, impCurStmtOffs);
773         }
774     }
775 }
776
777 /*****************************************************************************
778  * same as above, but handle the valueclass case too
779  */
780
781 void Compiler::impAssignTempGen(unsigned             tmpNum,
782                                 GenTreePtr           val,
783                                 CORINFO_CLASS_HANDLE structType,
784                                 unsigned             curLevel,
785                                 GenTreePtr*          pAfterStmt, /* = NULL */
786                                 IL_OFFSETX           ilOffset,   /* = BAD_IL_OFFSET */
787                                 BasicBlock*          block       /* = NULL */
788                                 )
789 {
790     GenTreePtr asg;
791
792     if (varTypeIsStruct(val))
793     {
794         assert(tmpNum < lvaCount);
795         assert(structType != NO_CLASS_HANDLE);
796
797         // If the method is non-verifiable, the assert may not hold, so we skip it
798         // when verification is turned on, since any block that tries to use the
799         // temp would have failed verification anyway.
800         var_types varType = lvaTable[tmpNum].lvType;
801         assert(tiVerificationNeeded || varType == TYP_UNDEF || varTypeIsStruct(varType));
802         lvaSetStruct(tmpNum, structType, false);
803
804         // Now, set the type of the struct value. Note that lvaSetStruct may modify the type
805         // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType)
806         // that has been passed in for the value being assigned to the temp, in which case we
807         // need to set 'val' to that same type.
808         // Note also that if we always normalized the types of any node that might be a struct
809         // type, this would not be necessary - but that requires additional JIT/EE interface
810         // calls that may not actually be required - e.g. if we only access a field of a struct.
811
812         val->gtType = lvaTable[tmpNum].lvType;
813
814         GenTreePtr dst = gtNewLclvNode(tmpNum, val->gtType);
815         asg            = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, block);
816     }
817     else
818     {
819         asg = gtNewTempAssign(tmpNum, val);
820     }
821
822     if (!asg->IsNothingNode())
823     {
824         if (pAfterStmt)
825         {
826             GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
827             *pAfterStmt        = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
828         }
829         else
830         {
831             impAppendTree(asg, curLevel, impCurStmtOffs);
832         }
833     }
834 }
835
836 /*****************************************************************************
837  *
838  *  Pop the given number of values from the stack and return a list node with
839  *  their values.
840  *  The 'prefixTree' argument may optionally contain an argument
841  *  list that is prepended to the list returned from this function.
842  *
843  *  The notion of prepended is a bit misleading in that the list is backwards
844  *  from the way I would expect: The first element popped is at the end of
845  *  the returned list, and prefixTree is 'before' that, meaning closer to
846  *  the end of the list.  To get to prefixTree, you have to walk to the
847  *  end of the list.
848  *
849  *  For ARG_ORDER_R2L, prefixTree is only used to insert extra arguments; in
850  *  that case we reverse its meaning, so that the returned list has a reversed
851  *  prefixTree at its head.
852  */
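// A concrete (purely illustrative) example: if the IL pushed arguments a, b, c, d
// in that order (d on top of the stack), the loop below builds the list
// a -> b -> c -> d, i.e. the first element popped (d) ends up at the tail.
// For ARG_ORDER_L2R any prefixTree nodes sit after d at the tail; for
// ARG_ORDER_R2L the prefixTree is reversed in place and spliced onto the head.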
853
854 GenTreeArgList* Compiler::impPopList(unsigned          count,
855                                      unsigned*         flagsPtr,
856                                      CORINFO_SIG_INFO* sig,
857                                      GenTreeArgList*   prefixTree)
858 {
859     assert(sig == nullptr || count == sig->numArgs);
860
861     unsigned             flags = 0;
862     CORINFO_CLASS_HANDLE structType;
863     GenTreeArgList*      treeList;
864
865     if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
866     {
867         treeList = nullptr;
868     }
869     else
870     { // ARG_ORDER_L2R
871         treeList = prefixTree;
872     }
873
874     while (count--)
875     {
876         StackEntry se   = impPopStack();
877         typeInfo   ti   = se.seTypeInfo;
878         GenTreePtr temp = se.val;
879
880         if (varTypeIsStruct(temp))
881         {
882             // Morph trees that aren't already OBJs or MKREFANY to be OBJs
883             assert(ti.IsType(TI_STRUCT));
884             structType = ti.GetClassHandleForValueClass();
885             temp       = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL);
886         }
887
888         /* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */
889         flags |= temp->gtFlags;
890         treeList = gtNewListNode(temp, treeList);
891     }
892
893     *flagsPtr = flags;
894
895     if (sig != nullptr)
896     {
897         if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
898             sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR)
899         {
900             // Make sure that all valuetypes (including enums) that we push are loaded.
901             // This is to guarantee that if a GC is triggered from the prestub of this method,
902             // all valuetypes in the method signature are already loaded.
903             // We need to be able to find the size of the valuetypes, but we cannot
904             // do a class-load from within GC.
905             info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass);
906         }
907
908         CORINFO_ARG_LIST_HANDLE argLst = sig->args;
909         CORINFO_CLASS_HANDLE    argClass;
910         CORINFO_CLASS_HANDLE    argRealClass;
911         GenTreeArgList*         args;
912         unsigned                sigSize;
913
914         for (args = treeList, count = sig->numArgs; count > 0; args = args->Rest(), count--)
915         {
916             PREFIX_ASSUME(args != nullptr);
917
918             CorInfoType corType = strip(info.compCompHnd->getArgType(sig, argLst, &argClass));
919
920             // insert implied casts (from float to double or double to float)
921
922             if (corType == CORINFO_TYPE_DOUBLE && args->Current()->TypeGet() == TYP_FLOAT)
923             {
924                 args->Current() = gtNewCastNode(TYP_DOUBLE, args->Current(), TYP_DOUBLE);
925             }
926             else if (corType == CORINFO_TYPE_FLOAT && args->Current()->TypeGet() == TYP_DOUBLE)
927             {
928                 args->Current() = gtNewCastNode(TYP_FLOAT, args->Current(), TYP_FLOAT);
929             }
930
931             // insert any widening or narrowing casts for backwards compatibility
932
933             args->Current() = impImplicitIorI4Cast(args->Current(), JITtype2varType(corType));
934
935             if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR &&
936                 corType != CORINFO_TYPE_VAR && (argRealClass = info.compCompHnd->getArgClass(sig, argLst)) != nullptr)
937             {
938                 // Everett MC++ could generate IL with mismatched valuetypes. It used to work with the Everett JIT,
939                 // but it stopped working in Whidbey when we started passing simple valuetypes as their underlying
940                 // primitive types.
941                 // We try to adjust for this case here to avoid breaking customers' code (see VSW 485789 for
942                 // details).
943                 if (corType == CORINFO_TYPE_VALUECLASS && !varTypeIsStruct(args->Current()))
944                 {
945                     args->Current() = impNormStructVal(args->Current(), argRealClass, (unsigned)CHECK_SPILL_ALL, true);
946                 }
947
948                 // Make sure that all valuetypes (including enums) that we push are loaded.
949                 // This is to guarantee that if a GC is triggered from the prestub of this method,
950                 // all valuetypes in the method signature are already loaded.
951                 // We need to be able to find the size of the valuetypes, but we cannot
952                 // do a class-load from within GC.
953                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass);
954             }
955
956             argLst = info.compCompHnd->getArgNext(argLst);
957         }
958     }
959
960     if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
961     {
962         // Prepend the prefixTree
963
964         // Simple in-place reversal to place treeList
965         // at the end of a reversed prefixTree
966         while (prefixTree != nullptr)
967         {
968             GenTreeArgList* next = prefixTree->Rest();
969             prefixTree->Rest()   = treeList;
970             treeList             = prefixTree;
971             prefixTree           = next;
972         }
973     }
974     return treeList;
975 }
976
977 /*****************************************************************************
978  *
979  *  Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.)
980  *  The first "skipReverseCount" items are not reversed.
981  */
982
983 GenTreeArgList* Compiler::impPopRevList(unsigned          count,
984                                         unsigned*         flagsPtr,
985                                         CORINFO_SIG_INFO* sig,
986                                         unsigned          skipReverseCount)
987
988 {
989     assert(skipReverseCount <= count);
990
991     GenTreeArgList* list = impPopList(count, flagsPtr, sig);
992
993     // reverse the list
994     if (list == nullptr || skipReverseCount == count)
995     {
996         return list;
997     }
998
999     GenTreeArgList* ptr          = nullptr; // Initialized to the first node that needs to be reversed
1000     GenTreeArgList* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed
1001
1002     if (skipReverseCount == 0)
1003     {
1004         ptr = list;
1005     }
1006     else
1007     {
1008         lastSkipNode = list;
1009         // Get to the first node that needs to be reversed
1010         for (unsigned i = 0; i < skipReverseCount - 1; i++)
1011         {
1012             lastSkipNode = lastSkipNode->Rest();
1013         }
1014
1015         PREFIX_ASSUME(lastSkipNode != nullptr);
1016         ptr = lastSkipNode->Rest();
1017     }
1018
1019     GenTreeArgList* reversedList = nullptr;
1020
1021     do
1022     {
1023         GenTreeArgList* tmp = ptr->Rest();
1024         ptr->Rest()         = reversedList;
1025         reversedList        = ptr;
1026         ptr                 = tmp;
1027     } while (ptr != nullptr);
1028
1029     if (skipReverseCount)
1030     {
1031         lastSkipNode->Rest() = reversedList;
1032         return list;
1033     }
1034     else
1035     {
1036         return reversedList;
1037     }
1038 }
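// Worked example (argument names are illustrative): with count == 4,
// skipReverseCount == 2, and IL arguments a, b, c, d pushed in that order,
// impPopList first yields a -> b -> c -> d; the first two nodes are left in
// place and the rest is reversed, so the returned list is a -> b -> d -> c.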
1039
1040 /*****************************************************************************
1041    Assign (copy) the structure from 'src' to 'dest'.  The structure is a value
1042    class of type 'structHnd'.  It returns the tree that should be appended to the
1043    statement list that represents the assignment.
1044    Temp assignments may be appended to impTreeList if spilling is necessary.
1045    curLevel is the stack level for which a spill may be being done.
1046  */
1047
1048 GenTreePtr Compiler::impAssignStruct(GenTreePtr           dest,
1049                                      GenTreePtr           src,
1050                                      CORINFO_CLASS_HANDLE structHnd,
1051                                      unsigned             curLevel,
1052                                      GenTreePtr*          pAfterStmt, /* = NULL */
1053                                      BasicBlock*          block       /* = NULL */
1054                                      )
1055 {
1056     assert(varTypeIsStruct(dest));
1057
1058     while (dest->gtOper == GT_COMMA)
1059     {
1060         assert(varTypeIsStruct(dest->gtOp.gtOp2)); // Second thing is the struct
1061
1062         // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree.
1063         if (pAfterStmt)
1064         {
1065             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(dest->gtOp.gtOp1, impCurStmtOffs));
1066         }
1067         else
1068         {
1069             impAppendTree(dest->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1070         }
1071
1072         // set dest to the second thing
1073         dest = dest->gtOp.gtOp2;
1074     }
1075
1076     assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD ||
1077            dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX);
1078
1079     if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR &&
1080         src->gtLclVarCommon.gtLclNum == dest->gtLclVarCommon.gtLclNum)
1081     {
1082         // Make this a NOP
1083         return gtNewNothingNode();
1084     }
1085
1086     // TODO-1stClassStructs: Avoid creating an address if it is not needed,
1087     // or re-creating a Blk node if it is.
1088     GenTreePtr destAddr;
1089
1090     if (dest->gtOper == GT_IND || dest->OperIsBlk())
1091     {
1092         destAddr = dest->gtOp.gtOp1;
1093     }
1094     else
1095     {
1096         destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
1097     }
1098
1099     return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, block));
1100 }
1101
1102 /*****************************************************************************/
1103
1104 GenTreePtr Compiler::impAssignStructPtr(GenTreePtr           destAddr,
1105                                         GenTreePtr           src,
1106                                         CORINFO_CLASS_HANDLE structHnd,
1107                                         unsigned             curLevel,
1108                                         GenTreePtr*          pAfterStmt, /* = NULL */
1109                                         BasicBlock*          block       /* = NULL */
1110                                         )
1111 {
1112     var_types  destType;
1113     GenTreePtr dest      = nullptr;
1114     unsigned   destFlags = 0;
1115
1116 #if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1117     assert(varTypeIsStruct(src) || (src->gtOper == GT_ADDR && src->TypeGet() == TYP_BYREF));
1118     // TODO-ARM-BUG: Does ARM need this?
1119     // TODO-ARM64-BUG: Does ARM64 need this?
1120     assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1121            src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1122            src->gtOper == GT_COMMA || src->gtOper == GT_ADDR ||
1123            (src->TypeGet() != TYP_STRUCT && (GenTree::OperIsSIMD(src->gtOper) || src->gtOper == GT_LCL_FLD)));
1124 #else  // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1125     assert(varTypeIsStruct(src));
1126
1127     assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1128            src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1129            src->gtOper == GT_COMMA ||
1130            (src->TypeGet() != TYP_STRUCT && (GenTree::OperIsSIMD(src->gtOper) || src->gtOper == GT_LCL_FLD)));
1131 #endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1132     if (destAddr->OperGet() == GT_ADDR)
1133     {
1134         GenTree* destNode = destAddr->gtGetOp1();
1135         // If the actual destination is a local (for non-LEGACY_BACKEND), or already a block node, or is a node that
1136         // will be morphed, don't insert an OBJ(ADDR).
1137         if (destNode->gtOper == GT_INDEX || destNode->OperIsBlk()
1138 #ifndef LEGACY_BACKEND
1139             || ((destNode->OperGet() == GT_LCL_VAR) && (destNode->TypeGet() == src->TypeGet()))
1140 #endif // !LEGACY_BACKEND
1141                 )
1142         {
1143             dest = destNode;
1144         }
1145         destType = destNode->TypeGet();
1146     }
1147     else
1148     {
1149         destType = src->TypeGet();
1150     }
1151
1152     var_types asgType = src->TypeGet();
1153
1154     if (src->gtOper == GT_CALL)
1155     {
1156         if (src->AsCall()->TreatAsHasRetBufArg(this))
1157         {
1158             // Case of call returning a struct via hidden retbuf arg
1159
1160             // insert the return value buffer into the argument list as first byref parameter
1161             src->gtCall.gtCallArgs = gtNewListNode(destAddr, src->gtCall.gtCallArgs);
1162
1163             // now returns void, not a struct
1164             src->gtType = TYP_VOID;
1165
1166             // return the morphed call node
1167             return src;
1168         }
1169         else
1170         {
1171             // Case of call returning a struct in one or more registers.
1172
1173             var_types returnType = (var_types)src->gtCall.gtReturnType;
1174
1175             // We won't use a return buffer, so change the type of src->gtType to 'returnType'
1176             src->gtType = genActualType(returnType);
1177
1178             // First we try to change this to "LclVar/LclFld = call"
1179             //
1180             if ((destAddr->gtOper == GT_ADDR) && (destAddr->gtOp.gtOp1->gtOper == GT_LCL_VAR))
1181             {
1182                 // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD.
1183                 // That is, the IR will be of the form lclVar = call for multi-reg return
1184                 //
1185                 GenTreePtr lcl = destAddr->gtOp.gtOp1;
1186                 if (src->AsCall()->HasMultiRegRetVal())
1187                 {
1188                     // Mark the struct LclVar as used in a MultiReg return context
1189                     //  which currently makes it non promotable.
1190                     // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1191                     // handle multireg returns.
1192                     lcl->gtFlags |= GTF_DONT_CSE;
1193                     lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1194                 }
1195                 else // The call result is not a multireg return
1196                 {
1197                     // We change this to a GT_LCL_FLD (from a GT_ADDR of a GT_LCL_VAR)
1198                     lcl->ChangeOper(GT_LCL_FLD);
1199                     fgLclFldAssign(lcl->gtLclVarCommon.gtLclNum);
1200                 }
1201
1202                 lcl->gtType = src->gtType;
1203                 asgType     = src->gtType;
1204                 dest        = lcl;
1205
1206 #if defined(_TARGET_ARM_)
1207                 // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case,
1208                 // but that method has not been updated to include ARM.
1209                 impMarkLclDstNotPromotable(lcl->gtLclVarCommon.gtLclNum, src, structHnd);
1210                 lcl->gtFlags |= GTF_DONT_CSE;
1211 #elif defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1212                 // Not allowed for FEATURE_CORECLR, which is the only SKU available for System V OSs.
1213                 assert(!src->gtCall.IsVarargs() && "varargs not allowed for System V OSs.");
1214
1215                 // Make the struct non promotable. The eightbytes could contain multiple fields.
1216                 // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1217                 // handle multireg returns.
1218                 // TODO-Cleanup: Why is this needed here? This seems that it will set this even for
1219                 // TODO-Cleanup: Why is this needed here? It seems that this will be set even for
1220                 lcl->gtFlags |= GTF_DONT_CSE;
1221                 lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1222 #endif
1223             }
1224             else // we don't have a GT_ADDR of a GT_LCL_VAR
1225             {
1226                 // !!! The destination could be on stack. !!!
1227                 // This flag will let us choose the correct write barrier.
1228                 asgType   = returnType;
1229                 destFlags = GTF_IND_TGTANYWHERE;
1230             }
1231         }
1232     }
1233     else if (src->gtOper == GT_RET_EXPR)
1234     {
1235         GenTreePtr call = src->gtRetExpr.gtInlineCandidate;
1236         noway_assert(call->gtOper == GT_CALL);
1237
1238         if (call->AsCall()->HasRetBufArg())
1239         {
1240             // insert the return value buffer into the argument list as first byref parameter
1241             call->gtCall.gtCallArgs = gtNewListNode(destAddr, call->gtCall.gtCallArgs);
1242
1243             // now returns void, not a struct
1244             src->gtType  = TYP_VOID;
1245             call->gtType = TYP_VOID;
1246
1247             // We have already appended the write to 'dest' to the GT_CALL's args
1248             // So now we just return an empty node (pruning the GT_RET_EXPR)
1249             return src;
1250         }
1251         else
1252         {
1253             // Case of inline method returning a struct in one or more registers.
1254             //
1255             var_types returnType = (var_types)call->gtCall.gtReturnType;
1256
1257             // We won't need a return buffer
1258             asgType      = returnType;
1259             src->gtType  = genActualType(returnType);
1260             call->gtType = src->gtType;
1261
1262             // If we've changed the type, and it no longer matches a local destination,
1263             // we must use an indirection.
1264             if ((dest != nullptr) && (dest->OperGet() == GT_LCL_VAR) && (dest->TypeGet() != asgType))
1265             {
1266                 dest = nullptr;
1267             }
1268
1269             // !!! The destination could be on stack. !!!
1270             // This flag will let us choose the correct write barrier.
1271             destFlags = GTF_IND_TGTANYWHERE;
1272         }
1273     }
1274     else if (src->OperIsBlk())
1275     {
1276         asgType = impNormStructType(structHnd);
1277         if (src->gtOper == GT_OBJ)
1278         {
1279             assert(src->gtObj.gtClass == structHnd);
1280         }
1281     }
1282     else if (src->gtOper == GT_INDEX)
1283     {
1284         asgType = impNormStructType(structHnd);
1285         assert(src->gtIndex.gtStructElemClass == structHnd);
1286     }
1287     else if (src->gtOper == GT_MKREFANY)
1288     {
1289         // Since we are assigning the result of a GT_MKREFANY,
1290         // "destAddr" must point to a refany.
1291
1292         GenTreePtr destAddrClone;
1293         destAddr =
1294             impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment"));
1295
1296         assert(offsetof(CORINFO_RefAny, dataPtr) == 0);
1297         assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF);
1298         GetZeroOffsetFieldMap()->Set(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));
1299         GenTreePtr     ptrSlot         = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr);
1300         GenTreeIntCon* typeFieldOffset = gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL);
1301         typeFieldOffset->gtFieldSeq    = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField());
1302         GenTreePtr typeSlot =
1303             gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset));
1304
1305         // append the assign of the pointer value
1306         GenTreePtr asg = gtNewAssignNode(ptrSlot, src->gtOp.gtOp1);
1307         if (pAfterStmt)
1308         {
1309             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(asg, impCurStmtOffs));
1310         }
1311         else
1312         {
1313             impAppendTree(asg, curLevel, impCurStmtOffs);
1314         }
1315
1316         // return the assign of the type value, to be appended
1317         return gtNewAssignNode(typeSlot, src->gtOp.gtOp2);
1318     }
1319     else if (src->gtOper == GT_COMMA)
1320     {
1321         // The second thing is the struct or its address.
1322         assert(varTypeIsStruct(src->gtOp.gtOp2) || src->gtOp.gtOp2->gtType == TYP_BYREF);
1323         if (pAfterStmt)
1324         {
1325             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(src->gtOp.gtOp1, impCurStmtOffs));
1326         }
1327         else
1328         {
1329             impAppendTree(src->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1330         }
1331
1332         // Evaluate the second thing using recursion.
1333         return impAssignStructPtr(destAddr, src->gtOp.gtOp2, structHnd, curLevel, pAfterStmt, block);
1334     }
1335     else if (src->IsLocal())
1336     {
1337         asgType = src->TypeGet();
1338     }
1339     else if (asgType == TYP_STRUCT)
1340     {
1341         asgType     = impNormStructType(structHnd);
1342         src->gtType = asgType;
1343 #ifdef LEGACY_BACKEND
1344         if (asgType == TYP_STRUCT)
1345         {
1346             GenTree* srcAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, src);
1347             src              = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
1348         }
1349 #endif
1350     }
1351     if (dest == nullptr)
1352     {
1353         // TODO-1stClassStructs: We shouldn't really need a block node as the destination
1354         // if this is a known struct type.
1355         if (asgType == TYP_STRUCT)
1356         {
1357             dest = gtNewObjNode(structHnd, destAddr);
1358             gtSetObjGcInfo(dest->AsObj());
1359             // Although an obj as a call argument was always assumed to be a globRef
1360             // (which is itself overly conservative), that is not true of the operands
1361             // of a block assignment.
1362             dest->gtFlags &= ~GTF_GLOB_REF;
1363             dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF);
1364         }
1365         else if (varTypeIsStruct(asgType))
1366         {
1367             dest = new (this, GT_BLK) GenTreeBlk(GT_BLK, asgType, destAddr, genTypeSize(asgType));
1368         }
1369         else
1370         {
1371             dest = gtNewOperNode(GT_IND, asgType, destAddr);
1372         }
1373     }
1374     else
1375     {
1376         dest->gtType = asgType;
1377     }
1378
1379     dest->gtFlags |= destFlags;
1380     destFlags = dest->gtFlags;
1381
1382     // return an assignment node, to be appended
1383     GenTree* asgNode = gtNewAssignNode(dest, src);
1384     gtBlockOpInit(asgNode, dest, src, false);
1385
1386     // TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs
1387     // of assignments.
1388     if ((destFlags & GTF_DONT_CSE) == 0)
1389     {
1390         dest->gtFlags &= ~(GTF_DONT_CSE);
1391     }
1392     return asgNode;
1393 }
1394
1395 /*****************************************************************************
1396    Given a struct value, and the class handle for that structure, return
1397    the expression for the address for that structure value.
1398
1399    willDeref - true if the caller guarantees it will dereference the returned pointer.
1400 */
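/* For example (a minimal sketch): a caller that is about to dereference the
   result might write

       GenTreePtr addr = impGetStructAddr(structVal, structHnd, curLevel, true);
       GenTreePtr obj  = gtNewObjNode(structHnd, addr);

   In the GT_OBJ/willDeref case no temp is introduced; GT_CALL, GT_RET_EXPR and
   GT_MKREFANY values are first spilled to a temp and the ADDR of that temp is
   returned. */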
1401
1402 GenTreePtr Compiler::impGetStructAddr(GenTreePtr           structVal,
1403                                       CORINFO_CLASS_HANDLE structHnd,
1404                                       unsigned             curLevel,
1405                                       bool                 willDeref)
1406 {
1407     assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd));
1408
1409     var_types type = structVal->TypeGet();
1410
1411     genTreeOps oper = structVal->gtOper;
1412
1413     if (oper == GT_OBJ && willDeref)
1414     {
1415         assert(structVal->gtObj.gtClass == structHnd);
1416         return (structVal->gtObj.Addr());
1417     }
1418     else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY)
1419     {
1420         unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1421
1422         impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1423
1424         // The 'return value' is now the temp itself
1425
1426         type            = genActualType(lvaTable[tmpNum].TypeGet());
1427         GenTreePtr temp = gtNewLclvNode(tmpNum, type);
1428         temp            = gtNewOperNode(GT_ADDR, TYP_BYREF, temp);
1429         return temp;
1430     }
1431     else if (oper == GT_COMMA)
1432     {
1433         assert(structVal->gtOp.gtOp2->gtType == type); // The second operand is the struct
1434
1435         GenTreePtr oldTreeLast = impTreeLast;
1436         structVal->gtOp.gtOp2  = impGetStructAddr(structVal->gtOp.gtOp2, structHnd, curLevel, willDeref);
1437         structVal->gtType      = TYP_BYREF;
1438
1439         if (oldTreeLast != impTreeLast)
1440         {
1441             // Some temp assignment statement was placed on the statement list
1442             // for Op2, but that would be out of order with op1, so we need to
1443             // spill op1 onto the statement list after whatever was last
1444             // before we recursed on Op2 (i.e. before whatever Op2 appended).
1445             impInsertTreeBefore(structVal->gtOp.gtOp1, impCurStmtOffs, oldTreeLast->gtNext);
1446             structVal->gtOp.gtOp1 = gtNewNothingNode();
1447         }
1448
1449         return (structVal);
1450     }
1451
1452     return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1453 }
1454
1455 //------------------------------------------------------------------------
1456 // impNormStructType: Given a (known to be) struct class handle structHnd, normalize its type,
1457 //                    and optionally determine the GC layout of the struct.
1458 //
1459 // Arguments:
1460 //    structHnd       - The class handle for the struct type of interest.
1461 //    gcLayout        - (optional, default nullptr) - a BYTE pointer, allocated by the caller,
1462 //                      into which the gcLayout will be written.
1463 //    pNumGCVars      - (optional, default nullptr) - if non-null, a pointer to an unsigned,
1464 //                      which will be set to the number of GC fields in the struct.
1465 //    pSimdBaseType   - (optional, default nullptr) - if non-null, and the struct is a SIMD
1466 //                      type, set to the SIMD base type
1467 //
1468 // Return Value:
1469 //    The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*).
1470 //    The gcLayout will be returned using the pointers provided by the caller, if non-null.
1471 //    It may also modify the compFloatingPointUsed flag if the type is a SIMD type.
1472 //
1473 // Assumptions:
1474 //    The caller must set gcLayout to nullptr OR ensure that it is large enough
1475 //    (see ICorStaticInfo::getClassGClayout in corinfo.h).
1476 //
1477 // Notes:
1478 //    Normalizing the type involves examining the struct type to determine if it should
1479 //    be modified to one that is handled specially by the JIT, possibly being a candidate
1480 //    for full enregistration, e.g. TYP_SIMD16.
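//
//    For example (a minimal sketch), a caller that only needs the normalized
//    type can call it with just the handle:
//
//        var_types type = impNormStructType(structHnd);
//        if (varTypeIsSIMD(type))
//        {
//            // the struct was normalized to a SIMD type such as TYP_SIMD16
//        }
//
//    A caller that also wants the GC layout must pass a gcLayout buffer sized
//    per ICorStaticInfo::getClassGClayout (see Assumptions above).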
1481
1482 var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
1483                                       BYTE*                gcLayout,
1484                                       unsigned*            pNumGCVars,
1485                                       var_types*           pSimdBaseType)
1486 {
1487     assert(structHnd != NO_CLASS_HANDLE);
1488
1489     const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd);
1490     var_types   structType  = TYP_STRUCT;
1491
1492     // On CoreCLR the check for GC pointers includes a "may" to account for the special
1493     // ByRef-like span structs; the "CONTAINS_STACK_PTR" flag is the relevant bit.
1494     // When it is set, the struct contains a ByRef that could be either a GC pointer
1495     // or a native pointer.
1496     const bool mayContainGCPtrs =
1497         ((structFlags & CORINFO_FLG_CONTAINS_STACK_PTR) != 0 || ((structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0));
1498
1499 #ifdef FEATURE_SIMD
1500     // Check to see if this is a SIMD type.
1501     if (featureSIMD && !mayContainGCPtrs)
1502     {
1503         unsigned originalSize = info.compCompHnd->getClassSize(structHnd);
1504
1505         if ((originalSize >= minSIMDStructBytes()) && (originalSize <= maxSIMDStructBytes()))
1506         {
1507             unsigned int sizeBytes;
1508             var_types    simdBaseType = getBaseTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
1509             if (simdBaseType != TYP_UNKNOWN)
1510             {
1511                 assert(sizeBytes == originalSize);
1512                 structType = getSIMDTypeForSize(sizeBytes);
1513                 if (pSimdBaseType != nullptr)
1514                 {
1515                     *pSimdBaseType = simdBaseType;
1516                 }
1517 #ifdef _TARGET_AMD64_
1518                 // Amd64: also indicate that we use floating point registers
1519                 compFloatingPointUsed = true;
1520 #endif
1521             }
1522         }
1523     }
1524 #endif // FEATURE_SIMD
1525
1526     // Fetch GC layout info if requested
1527     if (gcLayout != nullptr)
1528     {
1529         unsigned numGCVars = info.compCompHnd->getClassGClayout(structHnd, gcLayout);
1530
1531         // Verify that the quick test up above via the class attributes gave a
1532         // safe view of the type's GCness.
1533         //
1534         // Note there are cases where mayContainGCPtrs is true but getClassGClayout
1535         // does not report any gc fields.
1536
1537         assert(mayContainGCPtrs || (numGCVars == 0));
1538
1539         if (pNumGCVars != nullptr)
1540         {
1541             *pNumGCVars = numGCVars;
1542         }
1543     }
1544     else
1545     {
1546         // Can't safely ask for number of GC pointers without also
1547         // asking for layout.
1548         assert(pNumGCVars == nullptr);
1549     }
1550
1551     return structType;
1552 }
1553
1554 //****************************************************************************
1555 //  Given a TYP_STRUCT value 'structVal', make sure it is 'canonical', that is,
1556 //  it is either an OBJ or a MKREFANY node, or a node (e.g. GT_INDEX) that will be morphed.
1557 //
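//  For example (roughly): a struct-typed GT_LCL_VAR or GT_FIELD is wrapped as
//  OBJ(ADDR(node)), while a GT_CALL or GT_RET_EXPR is first spilled to a temp
//  and then wrapped as OBJ(ADDR(LCL_VAR tmp)).
//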
1558 GenTreePtr Compiler::impNormStructVal(GenTreePtr           structVal,
1559                                       CORINFO_CLASS_HANDLE structHnd,
1560                                       unsigned             curLevel,
1561                                       bool                 forceNormalization /*=false*/)
1562 {
1563     assert(forceNormalization || varTypeIsStruct(structVal));
1564     assert(structHnd != NO_CLASS_HANDLE);
1565     var_types structType = structVal->TypeGet();
1566     bool      makeTemp   = false;
1567     if (structType == TYP_STRUCT)
1568     {
1569         structType = impNormStructType(structHnd);
1570     }
1571     bool                 alreadyNormalized = false;
1572     GenTreeLclVarCommon* structLcl         = nullptr;
1573
1574     genTreeOps oper = structVal->OperGet();
1575     switch (oper)
1576     {
1577         // GT_RETURN and GT_MKREFANY don't capture the handle.
1578         case GT_RETURN:
1579             break;
1580         case GT_MKREFANY:
1581             alreadyNormalized = true;
1582             break;
1583
1584         case GT_CALL:
1585             structVal->gtCall.gtRetClsHnd = structHnd;
1586             makeTemp                      = true;
1587             break;
1588
1589         case GT_RET_EXPR:
1590             structVal->gtRetExpr.gtRetClsHnd = structHnd;
1591             makeTemp                         = true;
1592             break;
1593
1594         case GT_ARGPLACE:
1595             structVal->gtArgPlace.gtArgPlaceClsHnd = structHnd;
1596             break;
1597
1598         case GT_INDEX:
1599             // This will be transformed to an OBJ later.
1600             alreadyNormalized                    = true;
1601             structVal->gtIndex.gtStructElemClass = structHnd;
1602             structVal->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(structHnd);
1603             break;
1604
1605         case GT_FIELD:
1606             // Wrap it in a GT_OBJ.
1607             structVal->gtType = structType;
1608             structVal         = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1609             break;
1610
1611         case GT_LCL_VAR:
1612         case GT_LCL_FLD:
1613             structLcl = structVal->AsLclVarCommon();
1614             // Wrap it in a GT_OBJ.
1615             structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1616             __fallthrough;
1617
1618         case GT_OBJ:
1619         case GT_BLK:
1620         case GT_DYN_BLK:
1621         case GT_ASG:
1622             // These should already have the appropriate type.
1623             assert(structVal->gtType == structType);
1624             alreadyNormalized = true;
1625             break;
1626
1627         case GT_IND:
1628             assert(structVal->gtType == structType);
1629             structVal         = gtNewObjNode(structHnd, structVal->gtGetOp1());
1630             alreadyNormalized = true;
1631             break;
1632
1633 #ifdef FEATURE_SIMD
1634         case GT_SIMD:
1635             assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
1636             break;
1637 #endif // FEATURE_SIMD
1638
1639         case GT_COMMA:
1640         {
1641             // The second operand could be a block node, a GT_SIMD node, or a GT_COMMA node.
1642             GenTree* blockNode = structVal->gtOp.gtOp2;
1643             assert(blockNode->gtType == structType);
1644
1645             // Is this GT_COMMA(op1, GT_COMMA())?
1646             GenTree* parent = structVal;
1647             if (blockNode->OperGet() == GT_COMMA)
1648             {
1649                 // Find the last node in the comma chain.
1650                 do
1651                 {
1652                     assert(blockNode->gtType == structType);
1653                     parent    = blockNode;
1654                     blockNode = blockNode->gtOp.gtOp2;
1655                 } while (blockNode->OperGet() == GT_COMMA);
1656             }
1657
1658 #ifdef FEATURE_SIMD
1659             if (blockNode->OperGet() == GT_SIMD)
1660             {
1661                 parent->gtOp.gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization);
1662                 alreadyNormalized  = true;
1663             }
1664             else
1665 #endif
1666             {
1667                 assert(blockNode->OperIsBlk());
1668
1669                 // Sink the GT_COMMA below the blockNode addr.
1670                 // That is, GT_COMMA(op1, op2=blockNode) is transformed into
1671                 // blockNode(GT_COMMA(TYP_BYREF, op1, op2's op1)).
1672                 //
1673                 // In the case of a chained GT_COMMA, we sink the last
1674                 // GT_COMMA below the blockNode addr.
1675                 GenTree* blockNodeAddr = blockNode->gtOp.gtOp1;
1676                 assert(blockNodeAddr->gtType == TYP_BYREF);
1677                 GenTree* commaNode    = parent;
1678                 commaNode->gtType     = TYP_BYREF;
1679                 commaNode->gtOp.gtOp2 = blockNodeAddr;
1680                 blockNode->gtOp.gtOp1 = commaNode;
1681                 if (parent == structVal)
1682                 {
1683                     structVal = blockNode;
1684                 }
1685                 alreadyNormalized = true;
1686             }
1687         }
1688         break;
1689
1690         default:
1691             assert(!"Unexpected node in impNormStructVal()");
1692             break;
1693     }
1694     structVal->gtType  = structType;
1695     GenTree* structObj = structVal;
1696
1697     if (!alreadyNormalized || forceNormalization)
1698     {
1699         if (makeTemp)
1700         {
1701             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1702
1703             impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1704
1705             // The structVal is now the temp itself
1706
1707             structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon();
1708             // TODO-1stClassStructs: Avoid always wrapping in GT_OBJ.
1709             structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structLcl));
1710         }
1711         else if (varTypeIsStruct(structType) && !structVal->OperIsBlk())
1712         {
1713             // Wrap it in a GT_OBJ
1714             structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1715         }
1716     }
1717
1718     if (structLcl != nullptr)
1719     {
1720         // An OBJ on an ADDR(LCL_VAR) can never raise an exception
1721         // so we don't set GTF_EXCEPT here.
1722         if (!lvaIsImplicitByRefLocal(structLcl->gtLclNum))
1723         {
1724             structObj->gtFlags &= ~GTF_GLOB_REF;
1725         }
1726     }
1727     else
1728     {
1729         // In general an OBJ is an indirection and could raise an exception.
1730         structObj->gtFlags |= GTF_EXCEPT;
1731     }
1732     return (structObj);
1733 }
1734
1735 /******************************************************************************/
1736 // Given a type token, generate code that will evaluate to the correct
1737 // handle representation of that token (type handle, field handle, or method handle)
1738 //
1739 // For most cases, the handle is determined at compile-time, and the code
1740 // generated is simply an embedded handle.
1741 //
1742 // Run-time lookup is required if the enclosing method is shared between instantiations
1743 // and the token refers to formal type parameters whose instantiation is not known
1744 // at compile-time.
1745 //
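// For example (an illustrative sketch), a typical call site looks like
//
//     GenTreePtr typeHndTree = impTokenToHandle(pResolvedToken, nullptr, TRUE);
//
// which yields an embedded handle constant when the token resolves at compile
// time, and a dictionary/helper-based lookup tree when the enclosing method is
// shared and the token mentions a formal type parameter.
//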
1746 GenTreePtr Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1747                                       BOOL*                   pRuntimeLookup /* = NULL */,
1748                                       BOOL                    mustRestoreHandle /* = FALSE */,
1749                                       BOOL                    importParent /* = FALSE */)
1750 {
1751     assert(!fgGlobalMorph);
1752
1753     CORINFO_GENERICHANDLE_RESULT embedInfo;
1754     info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo);
1755
1756     if (pRuntimeLookup)
1757     {
1758         *pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup;
1759     }
1760
1761     if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup)
1762     {
1763         switch (embedInfo.handleType)
1764         {
1765             case CORINFO_HANDLETYPE_CLASS:
1766                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle);
1767                 break;
1768
1769             case CORINFO_HANDLETYPE_METHOD:
1770                 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle);
1771                 break;
1772
1773             case CORINFO_HANDLETYPE_FIELD:
1774                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
1775                     info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle));
1776                 break;
1777
1778             default:
1779                 break;
1780         }
1781     }
1782
1783     return impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
1784                            embedInfo.compileTimeHandle);
1785 }
1786
1787 GenTreePtr Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1788                                      CORINFO_LOOKUP*         pLookup,
1789                                      unsigned                handleFlags,
1790                                      void*                   compileTimeHandle)
1791 {
1792     if (!pLookup->lookupKind.needsRuntimeLookup)
1793     {
1794         // No runtime lookup is required.
1795         // Access is direct or memory-indirect (of a fixed address) reference
1796
1797         CORINFO_GENERIC_HANDLE handle       = nullptr;
1798         void*                  pIndirection = nullptr;
1799         assert(pLookup->constLookup.accessType != IAT_PPVALUE);
1800
1801         if (pLookup->constLookup.accessType == IAT_VALUE)
1802         {
1803             handle = pLookup->constLookup.handle;
1804         }
1805         else if (pLookup->constLookup.accessType == IAT_PVALUE)
1806         {
1807             pIndirection = pLookup->constLookup.addr;
1808         }
1809         return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, 0, nullptr, compileTimeHandle);
1810     }
1811     else if (compIsForInlining())
1812     {
1813         // Don't import runtime lookups when inlining
1814         // Inlining has to be aborted in such a case
1815         compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1816         return nullptr;
1817     }
1818     else
1819     {
1820         // Need to use dictionary-based access which depends on the typeContext
1821         // which is only available at runtime, not at compile-time.
1822
1823         return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle);
1824     }
1825 }
1826
1827 #ifdef FEATURE_READYTORUN_COMPILER
1828 GenTreePtr Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup,
1829                                                unsigned              handleFlags,
1830                                                void*                 compileTimeHandle)
1831 {
1832     CORINFO_GENERIC_HANDLE handle       = nullptr;
1833     void*                  pIndirection = nullptr;
1834     assert(pLookup->accessType != IAT_PPVALUE);
1835
1836     if (pLookup->accessType == IAT_VALUE)
1837     {
1838         handle = pLookup->handle;
1839     }
1840     else if (pLookup->accessType == IAT_PVALUE)
1841     {
1842         pIndirection = pLookup->addr;
1843     }
1844     return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, 0, nullptr, compileTimeHandle);
1845 }
1846
1847 GenTreePtr Compiler::impReadyToRunHelperToTree(
1848     CORINFO_RESOLVED_TOKEN* pResolvedToken,
1849     CorInfoHelpFunc         helper,
1850     var_types               type,
1851     GenTreeArgList*         args /* =NULL*/,
1852     CORINFO_LOOKUP_KIND*    pGenericLookupKind /* =NULL. Only used with generics */)
1853 {
1854     CORINFO_CONST_LOOKUP lookup;
1855 #if COR_JIT_EE_VERSION > 460
1856     if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup))
1857     {
1858         return nullptr;
1859     }
1860 #else
1861     info.compCompHnd->getReadyToRunHelper(pResolvedToken, helper, &lookup);
1862 #endif
1863
1864     GenTreePtr op1 = gtNewHelperCallNode(helper, type, GTF_EXCEPT, args);
1865
1866     op1->gtCall.setEntryPoint(lookup);
1867
1868     return op1;
1869 }
1870 #endif
1871
1872 GenTreePtr Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
1873 {
1874     GenTreePtr op1 = nullptr;
1875
1876     switch (pCallInfo->kind)
1877     {
1878         case CORINFO_CALL:
1879             op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod);
1880
1881 #ifdef FEATURE_READYTORUN_COMPILER
1882             if (opts.IsReadyToRun())
1883             {
1884                 op1->gtFptrVal.gtEntryPoint          = pCallInfo->codePointerLookup.constLookup;
1885                 op1->gtFptrVal.gtLdftnResolvedToken  = new (this, CMK_Unknown) CORINFO_RESOLVED_TOKEN;
1886                 *op1->gtFptrVal.gtLdftnResolvedToken = *pResolvedToken;
1887             }
1888             else
1889             {
1890                 op1->gtFptrVal.gtEntryPoint.addr = nullptr;
1891             }
1892 #endif
1893             break;
1894
1895         case CORINFO_CALL_CODE_POINTER:
1896             if (compIsForInlining())
1897             {
1898                 // Don't import runtime lookups when inlining
1899                 // Inlining has to be aborted in such a case
1900                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1901                 return nullptr;
1902             }
1903
1904             op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod);
1905             break;
1906
1907         default:
1908             noway_assert(!"unknown call kind");
1909             break;
1910     }
1911
1912     return op1;
1913 }
1914
1915 //------------------------------------------------------------------------
1916 // getRuntimeContextTree: find pointer to context for runtime lookup.
1917 //
1918 // Arguments:
1919 //    kind - lookup kind.
1920 //
1921 // Return Value:
1922 //    Return GenTree pointer to generic shared context.
1923 //
1924 // Notes:
1925 //    Records that the generic context is used (sets lvaGenericsContextUsed).
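//
//    For example (roughly): for CORINFO_LOOKUP_THISOBJ the context is the
//    method table of 'this', i.e. IND(LCL_VAR <this>); for
//    CORINFO_LOOKUP_METHODPARAM and CORINFO_LOOKUP_CLASSPARAM it is the hidden
//    instantiation argument, i.e. LCL_VAR(info.compTypeCtxtArg).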
1926
1927 GenTreePtr Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind)
1928 {
1929     GenTreePtr ctxTree = nullptr;
1930
1931     // Collectible types require that, for shared generic code, we report any use of the
1932     // generic context parameter. (This is a conservative approach; in some cases, particularly
1933     // when the context parameter is 'this', we could avoid the eager reporting logic.)
1934     lvaGenericsContextUsed = true;
1935
1936     if (kind == CORINFO_LOOKUP_THISOBJ)
1937     {
1938         // this Object
1939         ctxTree = gtNewLclvNode(info.compThisArg, TYP_REF);
1940
1941         // Vtable pointer of this object
1942         ctxTree = gtNewOperNode(GT_IND, TYP_I_IMPL, ctxTree);
1943         ctxTree->gtFlags |= GTF_EXCEPT; // Null-pointer exception
1944         ctxTree->gtFlags |= GTF_IND_INVARIANT;
1945     }
1946     else
1947     {
1948         assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM);
1949
1950         ctxTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); // Exact method descriptor as passed in as last arg
1951     }
1952     return ctxTree;
1953 }
1954
1955 /*****************************************************************************/
1956 /* Import a dictionary lookup to access a handle in code shared between
1957    generic instantiations.
1958    The lookup depends on the typeContext which is only available at
1959    runtime, and not at compile-time.
1960    pLookup->token1 and pLookup->token2 specify the handle that is needed.
1961    The cases are:
1962
1963    1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the
1964       instantiation-specific handle, and the tokens to look up the handle.
1965    2. pLookup->indirections != CORINFO_USEHELPER :
1966       2a. pLookup->testForNull == false : Dereference the instantiation-specific handle
1967           to get the handle.
1968       2b. pLookup->testForNull == true : Dereference the instantiation-specific handle.
1969           If it is non-NULL, it is the handle required. Else, call a helper
1970           to look up the handle.
1971  */
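
/* For illustration, case 2b with a single indirection produces (roughly):

       handle = IND(ADD(ctx, offsets[0]))
       QMARK(NE(handle, 0), COLON(NOP, helperCall(ctx, signature)))  ->  temp

   i.e. a non-null handle is used directly, otherwise the helper is called;
   the function returns a use of the temp.
 */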
1972
1973 GenTreePtr Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1974                                             CORINFO_LOOKUP*         pLookup,
1975                                             void*                   compileTimeHandle)
1976 {
1977
1978     // This method can only be called from the importer instance of the Compiler.
1979     // In other words, it cannot be called by the instance of the Compiler for the inlinee.
1980     assert(!compIsForInlining());
1981
1982     GenTreePtr ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);
1983
1984 #ifdef FEATURE_READYTORUN_COMPILER
1985     if (opts.IsReadyToRun())
1986     {
1987         return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
1988                                          gtNewArgList(ctxTree), &pLookup->lookupKind);
1989     }
1990 #endif
1991
1992     CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;
1993     // It's available only via the run-time helper function
1994     if (pRuntimeLookup->indirections == CORINFO_USEHELPER)
1995     {
1996         GenTreeArgList* helperArgs =
1997             gtNewArgList(ctxTree, gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, 0,
1998                                                       nullptr, compileTimeHandle));
1999
2000         return gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, GTF_EXCEPT, helperArgs);
2001     }
2002
2003     // Slot pointer
2004     GenTreePtr slotPtrTree = ctxTree;
2005
2006     if (pRuntimeLookup->testForNull)
2007     {
2008         slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2009                                    nullptr DEBUGARG("impRuntimeLookup slot"));
2010     }
2011
2012     // Apply repeated indirections
2013     for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
2014     {
2015         if (i != 0)
2016         {
2017             slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2018             slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2019             slotPtrTree->gtFlags |= GTF_IND_INVARIANT;
2020         }
2021         if (pRuntimeLookup->offsets[i] != 0)
2022         {
2023             slotPtrTree =
2024                 gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
2025         }
2026     }
2027
2028     // No null test required
2029     if (!pRuntimeLookup->testForNull)
2030     {
2031         if (pRuntimeLookup->indirections == 0)
2032         {
2033             return slotPtrTree;
2034         }
2035
2036         slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2037         slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2038
2039         if (!pRuntimeLookup->testForFixup)
2040         {
2041             return slotPtrTree;
2042         }
2043
2044         impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0"));
2045
2046         GenTreePtr op1 = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2047                                       nullptr DEBUGARG("impRuntimeLookup test"));
2048         op1 = impImplicitIorI4Cast(op1, TYP_INT); // downcast the pointer to a TYP_INT on 64-bit targets
2049
2050         // Use a GT_AND to check for the lowest bit and indirect if it is set
2051         GenTreePtr testTree = gtNewOperNode(GT_AND, TYP_INT, op1, gtNewIconNode(1));
2052         GenTreePtr relop    = gtNewOperNode(GT_EQ, TYP_INT, testTree, gtNewIconNode(0));
2053         relop->gtFlags |= GTF_RELOP_QMARK;
2054
2055         op1 = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2056                            nullptr DEBUGARG("impRuntimeLookup indir"));
2057         op1 = gtNewOperNode(GT_ADD, TYP_I_IMPL, op1, gtNewIconNode(-1, TYP_I_IMPL)); // subtract 1 from the pointer
2058         GenTreePtr indirTree = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
2059         GenTreePtr colon     = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL, slotPtrTree, indirTree);
2060
2061         GenTreePtr qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2062
2063         unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark0"));
2064         impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2065         return gtNewLclvNode(tmp, TYP_I_IMPL);
2066     }
2067
2068     assert(pRuntimeLookup->indirections != 0);
2069
2070     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1"));
2071
2072     // Extract the handle
2073     GenTreePtr handle = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2074     handle->gtFlags |= GTF_IND_NONFAULTING;
2075
2076     GenTreePtr handleCopy = impCloneExpr(handle, &handle, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2077                                          nullptr DEBUGARG("impRuntimeLookup typehandle"));
2078
2079     // Call to helper
2080     GenTreeArgList* helperArgs =
2081         gtNewArgList(ctxTree, gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, 0, nullptr,
2082                                                   compileTimeHandle));
2083     GenTreePtr helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, GTF_EXCEPT, helperArgs);
2084
2085     // Check for null and possibly call helper
2086     GenTreePtr relop = gtNewOperNode(GT_NE, TYP_INT, handle, gtNewIconNode(0, TYP_I_IMPL));
2087     relop->gtFlags |= GTF_RELOP_QMARK;
2088
2089     GenTreePtr colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL,
2090                                                          gtNewNothingNode(), // do nothing if nonnull
2091                                                          helperCall);
2092
2093     GenTreePtr qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2094
2095     unsigned tmp;
2096     if (handleCopy->IsLocal())
2097     {
2098         tmp = handleCopy->gtLclVarCommon.gtLclNum;
2099     }
2100     else
2101     {
2102         tmp = lvaGrabTemp(true DEBUGARG("spilling QMark1"));
2103     }
2104
2105     impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2106     return gtNewLclvNode(tmp, TYP_I_IMPL);
2107 }
2108
2109 /******************************************************************************
2110  *  Spills the stack at verCurrentState.esStack[level] and replaces it with a temp.
2111  *  If tnum!=BAD_VAR_NUM, the temp var used to replace the tree is tnum,
2112  *     else, grab a new temp.
2113  *  For structs (which can be pushed on the stack using obj, etc),
2114  *  special handling is needed
2115  */
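
/* For example (a rough sketch): spilling esStack[level] with tnum == BAD_VAR_NUM
   grabs a fresh temp, appends the assignment

       tmp = <tree previously at esStack[level]>

   to the statement list, and replaces the stack entry with LCL_VAR tmp. */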
2116
2117 struct RecursiveGuard
2118 {
2119 public:
2120     RecursiveGuard()
2121     {
2122         m_pAddress = nullptr;
2123     }
2124
2125     ~RecursiveGuard()
2126     {
2127         if (m_pAddress)
2128         {
2129             *m_pAddress = false;
2130         }
2131     }
2132
2133     void Init(bool* pAddress, bool bInitialize)
2134     {
2135         assert(pAddress && *pAddress == false && "Recursive guard violation");
2136         m_pAddress = pAddress;
2137
2138         if (bInitialize)
2139         {
2140             *m_pAddress = true;
2141         }
2142     }
2143
2144 protected:
2145     bool* m_pAddress;
2146 };
2147
2148 bool Compiler::impSpillStackEntry(unsigned level,
2149                                   unsigned tnum
2150 #ifdef DEBUG
2151                                   ,
2152                                   bool        bAssertOnRecursion,
2153                                   const char* reason
2154 #endif
2155                                   )
2156 {
2157
2158 #ifdef DEBUG
2159     RecursiveGuard guard;
2160     guard.Init(&impNestedStackSpill, bAssertOnRecursion);
2161 #endif
2162
2163     GenTreePtr tree = verCurrentState.esStack[level].val;
2164
2165     /* Allocate a temp if we haven't been asked to use a particular one */
2166
2167     if (tiVerificationNeeded)
2168     {
2169         // Ignore bad temp requests (they will happen with bad code and will be
2170         // caught when importing the destination block)
2171         if ((tnum != BAD_VAR_NUM && tnum >= lvaCount) && verNeedsVerification())
2172         {
2173             return false;
2174         }
2175     }
2176     else
2177     {
2178         if (tnum != BAD_VAR_NUM && (tnum >= lvaCount))
2179         {
2180             return false;
2181         }
2182     }
2183
2184     if (tnum == BAD_VAR_NUM)
2185     {
2186         tnum = lvaGrabTemp(true DEBUGARG(reason));
2187     }
2188     else if (tiVerificationNeeded && lvaTable[tnum].TypeGet() != TYP_UNDEF)
2189     {
2190         // if verification is needed and tnum's type is incompatible with
2191         // type on that stack, we grab a new temp. This is safe since
2192         // we will throw a verification exception in the dest block.
2193
2194         var_types valTyp = tree->TypeGet();
2195         var_types dstTyp = lvaTable[tnum].TypeGet();
2196
2197         // if the two types are different, we return. This will only happen with bad code and will
2198         // be caught when importing the destination block. We still allow int/byref and float/double differences.
2199         if ((genActualType(valTyp) != genActualType(dstTyp)) &&
2200             !(
2201 #ifndef _TARGET_64BIT_
2202                 (valTyp == TYP_I_IMPL && dstTyp == TYP_BYREF) || (valTyp == TYP_BYREF && dstTyp == TYP_I_IMPL) ||
2203 #endif // !_TARGET_64BIT_
2204                 (varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp))))
2205         {
2206             if (verNeedsVerification())
2207             {
2208                 return false;
2209             }
2210         }
2211     }
2212
2213     /* Assign the spilled entry to the temp */
2214     impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level);
2215
2216     // The tree type may be modified by impAssignTempGen, so use the type of the lclVar.
2217     var_types  type                    = genActualType(lvaTable[tnum].TypeGet());
2218     GenTreePtr temp                    = gtNewLclvNode(tnum, type);
2219     verCurrentState.esStack[level].val = temp;
2220
2221     return true;
2222 }
2223
2224 /*****************************************************************************
2225  *
2226  *  Ensure that the stack has only spilled values
2227  */
2228
2229 void Compiler::impSpillStackEnsure(bool spillLeaves)
2230 {
2231     assert(!spillLeaves || opts.compDbgCode);
2232
2233     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2234     {
2235         GenTreePtr tree = verCurrentState.esStack[level].val;
2236
2237         if (!spillLeaves && tree->OperIsLeaf())
2238         {
2239             continue;
2240         }
2241
2242         // Temps introduced by the importer itself don't need to be spilled
2243
2244         bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) && (tree->gtLclVarCommon.gtLclNum >= info.compLocalsCount);
2245
2246         if (isTempLcl)
2247         {
2248             continue;
2249         }
2250
2251         impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure"));
2252     }
2253 }
2254
2255 void Compiler::impSpillEvalStack()
2256 {
2257     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2258     {
2259         impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack"));
2260     }
2261 }
2262
2263 /*****************************************************************************
2264  *
2265  *  If the stack contains any trees with side effects in them, assign those
2266  *  trees to temps and append the assignments to the statement list.
2267  *  On return the stack is guaranteed to be empty.
2268  */
2269
2270 inline void Compiler::impEvalSideEffects()
2271 {
2272     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects"));
2273     verCurrentState.esStackDepth = 0;
2274 }
2275
2276 /*****************************************************************************
2277  *
2278  *  If the stack contains any trees with side effects in them, assign those
2279  *  trees to temps and replace them on the stack with refs to their temps.
2280  *  [0..chkLevel) is the portion of the stack which will be checked and spilled.
2281  */
2282
2283 inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason))
2284 {
2285     assert(chkLevel != (unsigned)CHECK_SPILL_NONE);
2286
2287     /* Before we make any appends to the tree list we must spill the
2288      * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */
2289
2290     impSpillSpecialSideEff();
2291
2292     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
2293     {
2294         chkLevel = verCurrentState.esStackDepth;
2295     }
2296
2297     assert(chkLevel <= verCurrentState.esStackDepth);
2298
2299     unsigned spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT;
2300
2301     for (unsigned i = 0; i < chkLevel; i++)
2302     {
2303         GenTreePtr tree = verCurrentState.esStack[i].val;
2304
2305         GenTreePtr lclVarTree;
2306
2307         if ((tree->gtFlags & spillFlags) != 0 ||
2308             (spillGlobEffects &&                        // Only consider the following when spillGlobEffects == TRUE
2309              !impIsAddressInLocal(tree, &lclVarTree) && // No need to spill the GT_ADDR node on a local.
2310              gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or
2311                                            // lvAddrTaken flag.
2312         {
2313             impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason));
2314         }
2315     }
2316 }
2317
2318 /*****************************************************************************
2319  *
2320  *  If the stack contains any trees with special side effects in them, assign
2321  *  those trees to temps and replace them on the stack with refs to their temps.
2322  */
2323
2324 inline void Compiler::impSpillSpecialSideEff()
2325 {
2326     // Only exception objects need to be carefully handled
2327
2328     if (!compCurBB->bbCatchTyp)
2329     {
2330         return;
2331     }
2332
2333     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2334     {
2335         GenTreePtr tree = verCurrentState.esStack[level].val;
2336         // If we have an exception object anywhere in the subtree, make sure we spill this stack entry.
2337         if (gtHasCatchArg(tree))
2338         {
2339             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff"));
2340         }
2341     }
2342 }
2343
2344 /*****************************************************************************
2345  *
2346  *  Spill all stack references to value classes (TYP_STRUCT nodes)
2347  */
2348
2349 void Compiler::impSpillValueClasses()
2350 {
2351     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2352     {
2353         GenTreePtr tree = verCurrentState.esStack[level].val;
2354
2355         if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT)
2356         {
2357             // Tree walk was aborted, which means that we found a
2358             // value class on the stack.  Need to spill that
2359             // stack entry.
2360
2361             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses"));
2362         }
2363     }
2364 }
2365
2366 /*****************************************************************************
2367  *
2368  *  Callback that checks if a tree node is TYP_STRUCT
2369  */
2370
2371 Compiler::fgWalkResult Compiler::impFindValueClasses(GenTreePtr* pTree, fgWalkData* data)
2372 {
2373     fgWalkResult walkResult = WALK_CONTINUE;
2374
2375     if ((*pTree)->gtType == TYP_STRUCT)
2376     {
2377         // Abort the walk and indicate that we found a value class
2378
2379         walkResult = WALK_ABORT;
2380     }
2381
2382     return walkResult;
2383 }
2384
2385 /*****************************************************************************
2386  *
2387  *  If the stack contains any trees with references to local #lclNum, assign
2388  *  those trees to temps and replace their place on the stack with refs to
2389  *  their temps.
2390  */
2391
2392 void Compiler::impSpillLclRefs(ssize_t lclNum)
2393 {
2394     /* Before we make any appends to the tree list we must spill the
2395      * "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */
2396
2397     impSpillSpecialSideEff();
2398
2399     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2400     {
2401         GenTreePtr tree = verCurrentState.esStack[level].val;
2402
2403         /* If the tree may throw an exception, and the block has a handler,
2404            then we need to spill assignments to the local if the local is
2405            live on entry to the handler.
2406            Just spill 'em all without considering the liveness */
2407
2408         bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT));
2409
2410         /* Skip the tree if it doesn't have an affected reference,
2411            unless xcptnCaught */
2412
2413         if (xcptnCaught || gtHasRef(tree, lclNum, false))
2414         {
2415             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs"));
2416         }
2417     }
2418 }
2419
2420 /*****************************************************************************
2421  *
2422  *  Push catch arg onto the stack.
2423  *  If there are jumps to the beginning of the handler, insert a basic block
2424  *  and spill the catch arg to a temp. Update the handler block if necessary.
2425  *
2426  *  Returns the basic block of the actual handler.
2427  */
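
/* For example (roughly): when the handler entry already has other references, a
   new block is inserted before it containing

       tmp = GT_CATCH_ARG        (TYP_REF, marked GTF_ORDER_SIDEEFF)

   and LCL_VAR tmp is pushed on the stack instead of the GT_CATCH_ARG node
   itself. */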
2428
2429 BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd)
2430 {
2431     // Do not inject the basic block twice on reimport. This should be
2432     // hit only under JIT stress. See if the block is the one we injected.
2433     // Note that EH canonicalization can inject internal blocks here. We might
2434     // be able to re-use such a block (but we don't, right now).
2435     if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET)) ==
2436         (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET))
2437     {
2438         GenTreePtr tree = hndBlk->bbTreeList;
2439
2440         if (tree != nullptr && tree->gtOper == GT_STMT)
2441         {
2442             tree = tree->gtStmt.gtStmtExpr;
2443             assert(tree != nullptr);
2444
2445             if ((tree->gtOper == GT_ASG) && (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
2446                 (tree->gtOp.gtOp2->gtOper == GT_CATCH_ARG))
2447             {
2448                 tree = gtNewLclvNode(tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum, TYP_REF);
2449
2450                 impPushOnStack(tree, typeInfo(TI_REF, clsHnd));
2451
2452                 return hndBlk->bbNext;
2453             }
2454         }
2455
2456         // If we get here, it must have been some other kind of internal block. It's possible that
2457         // someone prepended something to our injected block, but that's unlikely.
2458     }
2459
2460     /* Push the exception address value on the stack */
2461     GenTreePtr arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF);
2462
2463     /* Mark the node as having a side-effect - i.e. cannot be
2464      * moved around since it is tied to a fixed location (EAX) */
2465     arg->gtFlags |= GTF_ORDER_SIDEEFF;
2466
2467     /* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */
2468     if (hndBlk->bbRefs > 1 || compStressCompile(STRESS_CATCH_ARG, 5))
2469     {
2470         if (hndBlk->bbRefs == 1)
2471         {
2472             hndBlk->bbRefs++;
2473         }
2474
2475         /* Create extra basic block for the spill */
2476         BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true);
2477         newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET;
2478         newBlk->setBBWeight(hndBlk->bbWeight);
2479         newBlk->bbCodeOffs = hndBlk->bbCodeOffs;
2480
2481         /* Account for the new link we are about to create */
2482         hndBlk->bbRefs++;
2483
2484         /* Spill into a temp */
2485         unsigned tempNum         = lvaGrabTemp(false DEBUGARG("SpillCatchArg"));
2486         lvaTable[tempNum].lvType = TYP_REF;
2487         arg                      = gtNewTempAssign(tempNum, arg);
2488
2489         hndBlk->bbStkTempsIn = tempNum;
2490
2491         /* Report the debug info. impImportBlockCode won't treat
2492          * the actual handler as an exception block and thus won't do it for us. */
2493         if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
2494         {
2495             impCurStmtOffs = newBlk->bbCodeOffs | IL_OFFSETX_STKBIT;
2496             arg            = gtNewStmt(arg, impCurStmtOffs);
2497         }
2498
2499         fgInsertStmtAtEnd(newBlk, arg);
2500
2501         arg = gtNewLclvNode(tempNum, TYP_REF);
2502     }
2503
2504     impPushOnStack(arg, typeInfo(TI_REF, clsHnd));
2505
2506     return hndBlk;
2507 }
2508
2509 /*****************************************************************************
2510  *
2511  *  Given a tree, clone it. *pClone is set to the cloned tree.
2512  *  Returns the original tree if the cloning was easy,
2513  *   else returns the temp to which the tree had to be spilled.
2514  *  If the tree has side-effects, it will be spilled to a temp.
2515  */
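
/* A typical use (a minimal sketch), to obtain two uses of 'addr' without
   duplicating its side effects:

       GenTreePtr addrUse2;
       addr = impCloneExpr(addr, &addrUse2, structHnd, curLevel,
                           pAfterStmt DEBUGARG("example clone"));

   Afterwards 'addr' and 'addrUse2' denote the same value: either the original
   tree and a simple clone of it, or two references to a spill temp. */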
2516
2517 GenTreePtr Compiler::impCloneExpr(GenTreePtr           tree,
2518                                   GenTreePtr*          pClone,
2519                                   CORINFO_CLASS_HANDLE structHnd,
2520                                   unsigned             curLevel,
2521                                   GenTreePtr* pAfterStmt DEBUGARG(const char* reason))
2522 {
2523     if (!(tree->gtFlags & GTF_GLOB_EFFECT))
2524     {
2525         GenTreePtr clone = gtClone(tree, true);
2526
2527         if (clone)
2528         {
2529             *pClone = clone;
2530             return tree;
2531         }
2532     }
2533
2534     /* Store the operand in a temp and return the temp */
2535
2536     unsigned temp = lvaGrabTemp(true DEBUGARG(reason));
2537
2538     // impAssignTempGen() may change tree->gtType to TYP_VOID for calls which
2539     // return a struct type. It also may modify the struct type to a more
2540     // specialized type (e.g. a SIMD type).  So we will get the type from
2541     // the lclVar AFTER calling impAssignTempGen().
2542
2543     impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtOffs);
2544     var_types type = genActualType(lvaTable[temp].TypeGet());
2545
2546     *pClone = gtNewLclvNode(temp, type);
2547     return gtNewLclvNode(temp, type);
2548 }
2549
2550 /*****************************************************************************
2551  * Remember the IL offset (including stack-empty info) for the trees we will
2552  * generate now.
2553  */
2554
2555 inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs)
2556 {
2557     if (compIsForInlining())
2558     {
2559         GenTreePtr callStmt = impInlineInfo->iciStmt;
2560         assert(callStmt->gtOper == GT_STMT);
2561         impCurStmtOffs = callStmt->gtStmt.gtStmtILoffsx;
2562     }
2563     else
2564     {
2565         assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2566         IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2567         impCurStmtOffs    = offs | stkBit;
2568     }
2569 }
2570
2571 /*****************************************************************************
2572  * Returns current IL offset with stack-empty and call-instruction info incorporated
2573  */
2574 inline IL_OFFSETX Compiler::impCurILOffset(IL_OFFSET offs, bool callInstruction)
2575 {
2576     if (compIsForInlining())
2577     {
2578         return BAD_IL_OFFSET;
2579     }
2580     else
2581     {
2582         assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2583         IL_OFFSETX stkBit             = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2584         IL_OFFSETX callInstructionBit = callInstruction ? IL_OFFSETX_CALLINSTRUCTIONBIT : 0;
2585         return offs | stkBit | callInstructionBit;
2586     }
2587 }
2588
2589 /*****************************************************************************
2590  *
2591  *  Remember the instr offset for the statements
2592  *
2593  *  When we do impAppendTree(tree), we can't set tree->gtStmtLastILoffs to
2594  *  impCurOpcOffs, if the append was done because of a partial stack spill,
2595  *  as some of the trees corresponding to code up to impCurOpcOffs might
2596  *  still be sitting on the stack.
2597  *  So we delay marking of gtStmtLastILoffs until impNoteLastILoffs().
2598  *  This should be called when an opcode finally/explicitly causes
2599  *  impAppendTree(tree) to be called (as opposed to being called because of
2600  *  a spill caused by the opcode)
2601  */
2602
2603 #ifdef DEBUG
2604
2605 void Compiler::impNoteLastILoffs()
2606 {
2607     if (impLastILoffsStmt == nullptr)
2608     {
2609         // We should have added a statement for the current basic block
2610         // Is this assert correct ?
2611
2612         assert(impTreeLast);
2613         assert(impTreeLast->gtOper == GT_STMT);
2614
2615         impTreeLast->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2616     }
2617     else
2618     {
2619         impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2620         impLastILoffsStmt                          = nullptr;
2621     }
2622 }
2623
2624 #endif // DEBUG
2625
2626 /*****************************************************************************
2627  * We don't create any GenTree (excluding spills) for a branch.
2628  * For debugging info, we need a placeholder so that we can note
2629  * the IL offset in gtStmt.gtStmtOffs. So append an empty statement.
2630  */
2631
2632 void Compiler::impNoteBranchOffs()
2633 {
2634     if (opts.compDbgCode)
2635     {
2636         impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
2637     }
2638 }
2639
2640 /*****************************************************************************
2641  * Locate the next stmt boundary for which we need to record info.
2642  * We will have to spill the stack at such boundaries if it is not
2643  * already empty.
2644  * Returns the next stmt boundary (after the start of the block)
2645  */
2646
2647 unsigned Compiler::impInitBlockLineInfo()
2648 {
2649     /* Assume the block does not correspond with any IL offset. This prevents
2650        us from reporting extra offsets. Extra mappings can cause confusing
2651        stepping, especially if the extra mapping is a jump-target, and the
2652        debugger does not ignore extra mappings, but instead rewinds to the
2653        nearest known offset */
2654
2655     impCurStmtOffsSet(BAD_IL_OFFSET);
2656
2657     if (compIsForInlining())
2658     {
2659         return ~0;
2660     }
2661
2662     IL_OFFSET blockOffs = compCurBB->bbCodeOffs;
2663
2664     if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES))
2665     {
2666         impCurStmtOffsSet(blockOffs);
2667     }
2668
2669     if (false && (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES))
2670     {
2671         impCurStmtOffsSet(blockOffs);
2672     }
2673
2674     /* Always report IL offset 0 or some tests get confused.
2675        Probably a good idea anyway */
2676
2677     if (blockOffs == 0)
2678     {
2679         impCurStmtOffsSet(blockOffs);
2680     }
2681
2682     if (!info.compStmtOffsetsCount)
2683     {
2684         return ~0;
2685     }
2686
2687     /* Find the lowest explicit stmt boundary within the block */
2688
2689     /* Start looking at an entry that is based on our instr offset */
2690
2691     unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize;
2692
2693     if (index >= info.compStmtOffsetsCount)
2694     {
2695         index = info.compStmtOffsetsCount - 1;
2696     }
2697
2698     /* If we've guessed too far, back up */
2699
2700     while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs)
2701     {
2702         index--;
2703     }
2704
2705     /* If we guessed short, advance ahead */
2706
2707     while (info.compStmtOffsets[index] < blockOffs)
2708     {
2709         index++;
2710
2711         if (index == info.compStmtOffsetsCount)
2712         {
2713             return info.compStmtOffsetsCount;
2714         }
2715     }
2716
2717     assert(index < info.compStmtOffsetsCount);
2718
2719     if (info.compStmtOffsets[index] == blockOffs)
2720     {
2721         /* There is an explicit boundary for the start of this basic block.
2722            So we will start with bbCodeOffs. Else we will wait until we
2723            get to the next explicit boundary */
2724
2725         impCurStmtOffsSet(blockOffs);
2726
2727         index++;
2728     }
2729
2730     return index;
2731 }
2732
2733 /*****************************************************************************/
2734
2735 static inline bool impOpcodeIsCallOpcode(OPCODE opcode)
2736 {
2737     switch (opcode)
2738     {
2739         case CEE_CALL:
2740         case CEE_CALLI:
2741         case CEE_CALLVIRT:
2742             return true;
2743
2744         default:
2745             return false;
2746     }
2747 }
2748
2749 /*****************************************************************************/
2750
2751 static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode)
2752 {
2753     switch (opcode)
2754     {
2755         case CEE_CALL:
2756         case CEE_CALLI:
2757         case CEE_CALLVIRT:
2758         case CEE_JMP:
2759         case CEE_NEWOBJ:
2760         case CEE_NEWARR:
2761             return true;
2762
2763         default:
2764             return false;
2765     }
2766 }
2767
2768 /*****************************************************************************/
2769
2770 // One might think it is worth caching these values, but results indicate
2771 // that it isn't.
2772 // In addition, caching them causes SuperPMI to be unable to completely
2773 // encapsulate an individual method context.
2774 CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass()
2775 {
2776     CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
2777     assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr);
2778     return refAnyClass;
2779 }
2780
2781 CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass()
2782 {
2783     CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE);
2784     assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr);
2785     return typeHandleClass;
2786 }
2787
2788 CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle()
2789 {
2790     CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE);
2791     assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr);
2792     return argIteratorClass;
2793 }
2794
2795 CORINFO_CLASS_HANDLE Compiler::impGetStringClass()
2796 {
2797     CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING);
2798     assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr);
2799     return stringClass;
2800 }
2801
2802 CORINFO_CLASS_HANDLE Compiler::impGetObjectClass()
2803 {
2804     CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT);
2805     assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr);
2806     return objectClass;
2807 }
2808
2809 /*****************************************************************************
2810  *  "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we
2811  *  set its type to TYP_BYREF when we create it. We only know whether it can be
2812  *  changed to TYP_I_IMPL at the point where we use it.
2813  */
2814
2815 /* static */
2816 void Compiler::impBashVarAddrsToI(GenTreePtr tree1, GenTreePtr tree2)
2817 {
2818     if (tree1->IsVarAddr())
2819     {
2820         tree1->gtType = TYP_I_IMPL;
2821     }
2822
2823     if (tree2 && tree2->IsVarAddr())
2824     {
2825         tree2->gtType = TYP_I_IMPL;
2826     }
2827 }
2828
2829 /*****************************************************************************
2830  *  TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want
2831  *  to make that an explicit cast in our trees, so any implicit casts that
2832  *  exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are
2833  *  turned into explicit casts here.
2834  *  We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0)
2835  */
2836
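// For example, IL is allowed to add an int32 offset directly to a native int or byref value;
// on 64-bit targets the helper below makes that widening explicit by wrapping the int32
// operand in a GT_CAST to TYP_I_IMPL rather than leaving the conversion implicit.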
2837 GenTreePtr Compiler::impImplicitIorI4Cast(GenTreePtr tree, var_types dstTyp)
2838 {
2839     var_types currType   = genActualType(tree->gtType);
2840     var_types wantedType = genActualType(dstTyp);
2841
2842     if (wantedType != currType)
2843     {
2844         // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
2845         if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp))
2846         {
2847             if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->gtIntCon.gtIconVal == 0)))
2848             {
2849                 tree->gtType = TYP_I_IMPL;
2850             }
2851         }
2852 #ifdef _TARGET_64BIT_
2853         else if (varTypeIsI(wantedType) && (currType == TYP_INT))
2854         {
2855             // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF
2856             tree = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
2857         }
2858         else if ((wantedType == TYP_INT) && varTypeIsI(currType))
2859         {
2860             // Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT
2861             tree = gtNewCastNode(TYP_INT, tree, TYP_INT);
2862         }
2863 #endif // _TARGET_64BIT_
2864     }
2865
2866     return tree;
2867 }
2868
2869 /*****************************************************************************
2870  *  TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases,
2871  *  but we want to make that an explicit cast in our trees, so any implicit casts
2872  *  that exist in the IL are turned into explicit casts here.
2873  */
2874
2875 GenTreePtr Compiler::impImplicitR4orR8Cast(GenTreePtr tree, var_types dstTyp)
2876 {
2877 #ifndef LEGACY_BACKEND
2878     if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType))
2879     {
2880         tree = gtNewCastNode(dstTyp, tree, dstTyp);
2881     }
2882 #endif // !LEGACY_BACKEND
2883
2884     return tree;
2885 }
2886
2887 //------------------------------------------------------------------------
2888 // impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray
2889 //    with a GT_COPYBLK node.
2890 //
2891 // Arguments:
2892 //    sig - The InitializeArray signature.
2893 //
2894 // Return Value:
2895 //    A pointer to the newly created GT_COPYBLK node if the replacement succeeds or
2896 //    nullptr otherwise.
2897 //
2898 // Notes:
2899 //    The function recognizes the following IL pattern:
2900 //      ldc <length> or a list of ldc <lower bound>/<length>
2901 //      newarr or newobj
2902 //      dup
2903 //      ldtoken <field handle>
2904 //      call InitializeArray
2905 //    The lower bounds need not be constant except when the array rank is 1.
2906 //    The function recognizes all kinds of arrays thus enabling a small runtime
2907 //    such as CoreRT to skip providing an implementation for InitializeArray.
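//
//    For illustration (actual metadata tokens vary), a C# array initializer such as
//    "static readonly int[] Digits = { 1, 2, 3 };" typically compiles to the pattern above:
//      ldc.i4.3
//      newarr    int32
//      dup
//      ldtoken   field '<PrivateImplementationDetails>'...   // blob holding 1, 2, 3
//      call      void System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(...)
//    which this function then rewrites into a block copy from the blob into the array.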
2908
2909 GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
2910 {
2911     assert(sig->numArgs == 2);
2912
2913     GenTreePtr fieldTokenNode = impStackTop(0).val;
2914     GenTreePtr arrayLocalNode = impStackTop(1).val;
2915
2916     //
2917     // Verify that the field token is known and valid.  Note that it's also
2918     // possible for the token to come from reflection, in which case we cannot do
2919     // the optimization and must therefore revert to calling the helper.  You can
2920     // see an example of this in bvt\DynIL\initarray2.exe (in Main).
2921     //
2922
2923     // Check to see if the ldtoken helper call is what we see here.
2924     if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->gtCall.gtCallType != CT_HELPER) ||
2925         (fieldTokenNode->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
2926     {
2927         return nullptr;
2928     }
2929
2930     // Strip helper call away
2931     fieldTokenNode = fieldTokenNode->gtCall.gtCallArgs->Current();
2932
2933     if (fieldTokenNode->gtOper == GT_IND)
2934     {
2935         fieldTokenNode = fieldTokenNode->gtOp.gtOp1;
2936     }
2937
2938     // Check for constant
2939     if (fieldTokenNode->gtOper != GT_CNS_INT)
2940     {
2941         return nullptr;
2942     }
2943
2944     CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->gtIntCon.gtCompileTimeHandle;
2945     if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
2946     {
2947         return nullptr;
2948     }
2949
2950     //
2951     // We need to get the number of elements in the array and the size of each element.
2952     // We verify that the newarr statement is exactly what we expect it to be.
2953     // If it's not then we just return NULL and we don't optimize this call
2954     //
2955
2956     //
2957     // It is possible that we don't have any statements in the block yet
2958     //
2959     if (impTreeLast->gtOper != GT_STMT)
2960     {
2961         assert(impTreeLast->gtOper == GT_BEG_STMTS);
2962         return nullptr;
2963     }
2964
2965     //
2966     // We start by looking at the last statement, making sure it's an assignment, and
2967     // that the target of the assignment is the array passed to InitializeArray.
2968     //
2969     GenTreePtr arrayAssignment = impTreeLast->gtStmt.gtStmtExpr;
2970     if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->gtOp.gtOp1->gtOper != GT_LCL_VAR) ||
2971         (arrayLocalNode->gtOper != GT_LCL_VAR) ||
2972         (arrayAssignment->gtOp.gtOp1->gtLclVarCommon.gtLclNum != arrayLocalNode->gtLclVarCommon.gtLclNum))
2973     {
2974         return nullptr;
2975     }
2976
2977     //
2978     // Make sure that the object being assigned is a helper call.
2979     //
2980
2981     GenTreePtr newArrayCall = arrayAssignment->gtOp.gtOp2;
2982     if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->gtCall.gtCallType != CT_HELPER))
2983     {
2984         return nullptr;
2985     }
2986
2987     //
2988     // Verify that it is one of the new array helpers.
2989     //
2990
2991     bool isMDArray = false;
2992
2993     if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) &&
2994         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) &&
2995         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) &&
2996         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8)
2997 #ifdef FEATURE_READYTORUN_COMPILER
2998         && newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)
2999 #endif
3000             )
3001     {
3002 #if COR_JIT_EE_VERSION > 460
3003         if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR_NONVARARG))
3004         {
3005             return nullptr;
3006         }
3007
3008         isMDArray = true;
3009 #endif
3010     }
3011
3012     CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->gtCall.compileTimeHelperArgumentHandle;
3013
3014     //
3015     // Make sure we found a compile time handle to the array
3016     //
3017
3018     if (!arrayClsHnd)
3019     {
3020         return nullptr;
3021     }
3022
3023     unsigned rank = 0;
3024     S_UINT32 numElements;
3025
3026     if (isMDArray)
3027     {
3028         rank = info.compCompHnd->getArrayRank(arrayClsHnd);
3029
3030         if (rank == 0)
3031         {
3032             return nullptr;
3033         }
3034
3035         GenTreeArgList* tokenArg = newArrayCall->gtCall.gtCallArgs;
3036         assert(tokenArg != nullptr);
3037         GenTreeArgList* numArgsArg = tokenArg->Rest();
3038         assert(numArgsArg != nullptr);
3039         GenTreeArgList* argsArg = numArgsArg->Rest();
3040         assert(argsArg != nullptr);
3041
3042         //
3043         // The number of arguments should be a constant between 1 and 64. The rank can't be 0,
3044         // so at least one length must be present, and the rank can't exceed 32, so there can
3045         // be at most 64 arguments: 32 lengths and 32 lower bounds.
3046         //
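        // For example, a rank-2 array allocated with explicit bounds passes four arguments in
        // the order lb0, len0, lb1, len1 (matching the loop below), while the same array
        // allocated without lower bounds passes just len0, len1.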
3047
3048         if ((!numArgsArg->Current()->IsCnsIntOrI()) || (numArgsArg->Current()->AsIntCon()->IconValue() < 1) ||
3049             (numArgsArg->Current()->AsIntCon()->IconValue() > 64))
3050         {
3051             return nullptr;
3052         }
3053
3054         unsigned numArgs = static_cast<unsigned>(numArgsArg->Current()->AsIntCon()->IconValue());
3055         bool     lowerBoundsSpecified;
3056
3057         if (numArgs == rank * 2)
3058         {
3059             lowerBoundsSpecified = true;
3060         }
3061         else if (numArgs == rank)
3062         {
3063             lowerBoundsSpecified = false;
3064
3065             //
3066             // If the rank is 1 and a lower bound isn't specified then the runtime creates
3067             // a SDArray. Note that even if a lower bound is specified it can be 0 and then
3068             // we get a SDArray as well, see the for loop below.
3069             //
3070
3071             if (rank == 1)
3072             {
3073                 isMDArray = false;
3074             }
3075         }
3076         else
3077         {
3078             return nullptr;
3079         }
3080
3081         //
3082         // The rank is known to be at least 1 so we can start with numElements being 1
3083         // to avoid the need to special case the first dimension.
3084         //
3085
3086         numElements = S_UINT32(1);
3087
3088         struct Match
3089         {
3090             static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3091             {
3092                 return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) &&
3093                        IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3094             }
3095
3096             static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3097             {
3098                 return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) &&
3099                        (tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) &&
3100                        IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3101             }
3102
3103             static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs)
3104             {
3105                 return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) &&
3106                        (tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs);
3107             }
3108
3109             static bool IsComma(GenTree* tree)
3110             {
3111                 return (tree != nullptr) && (tree->OperGet() == GT_COMMA);
3112             }
3113         };
3114
3115         unsigned argIndex = 0;
3116         GenTree* comma;
3117
3118         for (comma = argsArg->Current(); Match::IsComma(comma); comma = comma->gtGetOp2())
3119         {
3120             if (lowerBoundsSpecified)
3121             {
3122                 //
3123                 // In general lower bounds can be ignored because they're not needed to
3124                 // calculate the total number of elements. But for single dimensional arrays
3125                 // we need to know if the lower bound is 0 because in this case the runtime
3126                 // creates a SDArray and this affects the way the array data offset is calculated.
3127                 //
3128
3129                 if (rank == 1)
3130                 {
3131                     GenTree* lowerBoundAssign = comma->gtGetOp1();
3132                     assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs));
3133                     GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2();
3134
3135                     if (lowerBoundNode->IsIntegralConst(0))
3136                     {
3137                         isMDArray = false;
3138                     }
3139                 }
3140
3141                 comma = comma->gtGetOp2();
3142                 argIndex++;
3143             }
3144
3145             GenTree* lengthNodeAssign = comma->gtGetOp1();
3146             assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs));
3147             GenTree* lengthNode = lengthNodeAssign->gtGetOp2();
3148
3149             if (!lengthNode->IsCnsIntOrI())
3150             {
3151                 return nullptr;
3152             }
3153
3154             numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue());
3155             argIndex++;
3156         }
3157
3158         assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs));
3159
3160         if (argIndex != numArgs)
3161         {
3162             return nullptr;
3163         }
3164     }
3165     else
3166     {
3167         //
3168         // Make sure there are exactly two arguments:  the array class and
3169         // the number of elements.
3170         //
3171
3172         GenTreePtr arrayLengthNode;
3173
3174         GenTreeArgList* args = newArrayCall->gtCall.gtCallArgs;
3175 #ifdef FEATURE_READYTORUN_COMPILER
3176         if (newArrayCall->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1))
3177         {
3178             // Array length is 1st argument for readytorun helper
3179             arrayLengthNode = args->Current();
3180         }
3181         else
3182 #endif
3183         {
3184             // Array length is 2nd argument for regular helper
3185             arrayLengthNode = args->Rest()->Current();
3186         }
3187
3188         //
3189         // Make sure that the number of elements looks valid.
3190         //
3191         if (arrayLengthNode->gtOper != GT_CNS_INT)
3192         {
3193             return nullptr;
3194         }
3195
3196         numElements = S_SIZE_T(arrayLengthNode->gtIntCon.gtIconVal);
3197
3198         if (!info.compCompHnd->isSDArray(arrayClsHnd))
3199         {
3200             return nullptr;
3201         }
3202     }
3203
3204     CORINFO_CLASS_HANDLE elemClsHnd;
3205     var_types            elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd));
3206
3207     //
3208     // Note that genTypeSize will return zero for non primitive types, which is exactly
3209     // what we want (size will then be 0, and we will catch this in the conditional below).
3210     // Note that we don't expect this to fail for valid binaries, so we assert in the
3211     // non-verification case (the verification case should not assert but rather correctly
3212     // handle bad binaries).  This assert is not guarding any specific invariant, but rather
3213     // saying that we don't expect this to happen, and if it is hit, we need to investigate
3214     // why.
3215     //
3216
3217     S_UINT32 elemSize(genTypeSize(elementType));
3218     S_UINT32 size = elemSize * S_UINT32(numElements);
3219
3220     if (size.IsOverflow())
3221     {
3222         return nullptr;
3223     }
3224
3225     if ((size.Value() == 0) || (varTypeIsGC(elementType)))
3226     {
3227         assert(verNeedsVerification());
3228         return nullptr;
3229     }
3230
3231     void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value());
3232     if (!initData)
3233     {
3234         return nullptr;
3235     }
3236
3237     //
3238     // At this point we are ready to commit to implementing the InitializeArray
3239     // intrinsic using a struct assignment.  Pop the arguments from the stack and
3240     // return the struct assignment node.
3241     //
3242
3243     impPopStack();
3244     impPopStack();
3245
3246     const unsigned blkSize = size.Value();
3247     GenTreePtr     dst;
3248
3249     if (isMDArray)
3250     {
3251         unsigned dataOffset = eeGetMDArrayDataOffset(elementType, rank);
3252
3253         dst = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
3254     }
3255     else
3256     {
3257         dst = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewIndexRef(elementType, arrayLocalNode, gtNewIconNode(0)));
3258     }
3259     GenTreePtr blk     = gtNewBlockVal(dst, blkSize);
3260     GenTreePtr srcAddr = gtNewIconHandleNode((size_t)initData, GTF_ICON_STATIC_HDL);
3261     GenTreePtr src     = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
3262
3263     return gtNewBlkOpNode(blk,     // dst
3264                           src,     // src
3265                           blkSize, // size
3266                           false,   // volatil
3267                           true);   // copyBlock
3268 }
3269
3270 /*****************************************************************************/
3271 // Returns the GenTree that should be used to do the intrinsic instead of the call.
3272 // Returns NULL if an intrinsic cannot be used
3273
3274 GenTreePtr Compiler::impIntrinsic(GenTreePtr            newobjThis,
3275                                   CORINFO_CLASS_HANDLE  clsHnd,
3276                                   CORINFO_METHOD_HANDLE method,
3277                                   CORINFO_SIG_INFO*     sig,
3278                                   int                   memberRef,
3279                                   bool                  readonlyCall,
3280                                   bool                  tailCall,
3281                                   CorInfoIntrinsics*    pIntrinsicID)
3282 {
3283     bool mustExpand = false;
3284 #if COR_JIT_EE_VERSION > 460
3285     CorInfoIntrinsics intrinsicID = info.compCompHnd->getIntrinsicID(method, &mustExpand);
3286 #else
3287     CorInfoIntrinsics intrinsicID                                      = info.compCompHnd->getIntrinsicID(method);
3288 #endif
3289     *pIntrinsicID = intrinsicID;
3290
3291 #ifndef _TARGET_ARM_
3292     genTreeOps interlockedOperator;
3293 #endif
3294
3295     if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContext)
3296     {
3297         // must be done regardless of DbgCode and MinOpts
3298         return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL);
3299     }
3300 #ifdef _TARGET_64BIT_
3301     if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr)
3302     {
3303         // must be done regardless of DbgCode and MinOpts
3304         return gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL));
3305     }
3306 #else
3307     assert(intrinsicID != CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr);
3308 #endif
3309
3310     GenTreePtr retNode = nullptr;
3311
3312     //
3313     // We disable the inlining of intrinsics for MinOpts.
3314     //
3315     if (!mustExpand && (opts.compDbgCode || opts.MinOpts()))
3316     {
3317         *pIntrinsicID = CORINFO_INTRINSIC_Illegal;
3318         return retNode;
3319     }
3320
3321     // Currently we don't handle CORINFO_INTRINSIC_Exp because it does not
3322     // seem to work properly for Infinity values, and we don't handle
3323     // CORINFO_INTRINSIC_Pow because it needs a helper which we currently don't have.
3324
3325     var_types callType = JITtype2varType(sig->retType);
3326
3327     /* First do the intrinsics which are always smaller than a call */
3328
3329     switch (intrinsicID)
3330     {
3331         GenTreePtr op1, op2;
3332
3333         case CORINFO_INTRINSIC_Sin:
3334         case CORINFO_INTRINSIC_Sqrt:
3335         case CORINFO_INTRINSIC_Abs:
3336         case CORINFO_INTRINSIC_Cos:
3337         case CORINFO_INTRINSIC_Round:
3338         case CORINFO_INTRINSIC_Cosh:
3339         case CORINFO_INTRINSIC_Sinh:
3340         case CORINFO_INTRINSIC_Tan:
3341         case CORINFO_INTRINSIC_Tanh:
3342         case CORINFO_INTRINSIC_Asin:
3343         case CORINFO_INTRINSIC_Acos:
3344         case CORINFO_INTRINSIC_Atan:
3345         case CORINFO_INTRINSIC_Atan2:
3346         case CORINFO_INTRINSIC_Log10:
3347         case CORINFO_INTRINSIC_Pow:
3348         case CORINFO_INTRINSIC_Exp:
3349         case CORINFO_INTRINSIC_Ceiling:
3350         case CORINFO_INTRINSIC_Floor:
3351
3352             // These are math intrinsics
3353
3354             assert(callType != TYP_STRUCT);
3355
3356             op1 = nullptr;
3357
3358 #if defined(LEGACY_BACKEND)
3359             if (IsTargetIntrinsic(intrinsicID))
3360 #elif !defined(_TARGET_X86_)
3361             // Intrinsics that are not implemented directly by target instructions will
3362             // be re-materialized as user calls in the rationalizer. For prefixed tail calls,
3363             // don't do this optimization, because
3364             //  a) we need backward compatibility with desktop .NET 4.6 / 4.6.1, and
3365             //  b) it would be a non-trivial task, or too late, to re-materialize a surviving
3366             //     tail-prefixed GT_INTRINSIC as a tail call in the rationalizer.
3367             if (!IsIntrinsicImplementedByUserCall(intrinsicID) || !tailCall)
3368 #else
3369             // On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation
3370             // of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad
3371             // code generation for certain EH constructs.
3372             if (!IsIntrinsicImplementedByUserCall(intrinsicID))
3373 #endif
3374             {
3375                 switch (sig->numArgs)
3376                 {
3377                     case 1:
3378                         op1 = impPopStack().val;
3379
3380 #if FEATURE_X87_DOUBLES
3381
3382                         // X87 stack doesn't differentiate between float/double
3383                         // so it doesn't need a cast, but everybody else does
3384                         // Just double check it is at least a FP type
3385                         noway_assert(varTypeIsFloating(op1));
3386
3387 #else // FEATURE_X87_DOUBLES
3388
3389                         if (op1->TypeGet() != callType)
3390                         {
3391                             op1 = gtNewCastNode(callType, op1, callType);
3392                         }
3393
3394 #endif // FEATURE_X87_DOUBLES
3395
3396                         op1 = new (this, GT_INTRINSIC)
3397                             GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3398                         break;
3399
3400                     case 2:
3401                         op2 = impPopStack().val;
3402                         op1 = impPopStack().val;
3403
3404 #if FEATURE_X87_DOUBLES
3405
3406                         // X87 stack doesn't differentiate between float/double
3407                         // so it doesn't need a cast, but everybody else does
3408                         // Just double check it is at least a FP type
3409                         noway_assert(varTypeIsFloating(op2));
3410                         noway_assert(varTypeIsFloating(op1));
3411
3412 #else // FEATURE_X87_DOUBLES
3413
3414                         if (op2->TypeGet() != callType)
3415                         {
3416                             op2 = gtNewCastNode(callType, op2, callType);
3417                         }
3418                         if (op1->TypeGet() != callType)
3419                         {
3420                             op1 = gtNewCastNode(callType, op1, callType);
3421                         }
3422
3423 #endif // FEATURE_X87_DOUBLES
3424
3425                         op1 = new (this, GT_INTRINSIC)
3426                             GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicID, method);
3427                         break;
3428
3429                     default:
3430                         NO_WAY("Unsupported number of args for Math Intrinsic");
3431                 }
3432
3433 #ifndef LEGACY_BACKEND
3434                 if (IsIntrinsicImplementedByUserCall(intrinsicID))
3435                 {
3436                     op1->gtFlags |= GTF_CALL;
3437                 }
3438 #endif
3439             }
3440
3441             retNode = op1;
3442             break;
3443
3444 #ifdef _TARGET_XARCH_
3445         // TODO-ARM-CQ: reenable treating Interlocked operation as intrinsic
3446         case CORINFO_INTRINSIC_InterlockedAdd32:
3447             interlockedOperator = GT_LOCKADD;
3448             goto InterlockedBinOpCommon;
3449         case CORINFO_INTRINSIC_InterlockedXAdd32:
3450             interlockedOperator = GT_XADD;
3451             goto InterlockedBinOpCommon;
3452         case CORINFO_INTRINSIC_InterlockedXchg32:
3453             interlockedOperator = GT_XCHG;
3454             goto InterlockedBinOpCommon;
3455
3456 #ifdef _TARGET_AMD64_
3457         case CORINFO_INTRINSIC_InterlockedAdd64:
3458             interlockedOperator = GT_LOCKADD;
3459             goto InterlockedBinOpCommon;
3460         case CORINFO_INTRINSIC_InterlockedXAdd64:
3461             interlockedOperator = GT_XADD;
3462             goto InterlockedBinOpCommon;
3463         case CORINFO_INTRINSIC_InterlockedXchg64:
3464             interlockedOperator = GT_XCHG;
3465             goto InterlockedBinOpCommon;
3466 #endif // _TARGET_AMD64_
3467
3468         InterlockedBinOpCommon:
3469             assert(callType != TYP_STRUCT);
3470             assert(sig->numArgs == 2);
3471
3472             op2 = impPopStack().val;
3473             op1 = impPopStack().val;
3474
3475             // This creates:
3476             //   val
3477             // XAdd
3478             //   addr
3479             //     field (for example)
3480             //
3481             // In the case where the first argument is the address of a local, we might
3482             // want to make this *not* make the var address-taken -- but atomic instructions
3483             // on a local are probably pretty useless anyway, so we probably don't care.
3484
3485             op1 = gtNewOperNode(interlockedOperator, genActualType(callType), op1, op2);
3486             op1->gtFlags |= GTF_GLOB_EFFECT;
3487             retNode = op1;
3488             break;
3489 #endif // _TARGET_XARCH_
3490
3491         case CORINFO_INTRINSIC_MemoryBarrier:
3492
3493             assert(sig->numArgs == 0);
3494
3495             op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
3496             op1->gtFlags |= GTF_GLOB_EFFECT;
3497             retNode = op1;
3498             break;
3499
3500 #ifdef _TARGET_XARCH_
3501         // TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic
3502         case CORINFO_INTRINSIC_InterlockedCmpXchg32:
3503 #ifdef _TARGET_AMD64_
3504         case CORINFO_INTRINSIC_InterlockedCmpXchg64:
3505 #endif
3506         {
3507             assert(callType != TYP_STRUCT);
3508             assert(sig->numArgs == 3);
3509             GenTreePtr op3;
3510
3511             op3 = impPopStack().val; // comparand
3512             op2 = impPopStack().val; // value
3513             op1 = impPopStack().val; // location
3514
3515             GenTreePtr node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3);
3516
3517             node->gtCmpXchg.gtOpLocation->gtFlags |= GTF_DONT_CSE;
3518             retNode = node;
3519             break;
3520         }
3521 #endif
3522
3523         case CORINFO_INTRINSIC_StringLength:
3524             op1 = impPopStack().val;
3525             if (!opts.MinOpts() && !opts.compDbgCode)
3526             {
3527                 GenTreeArrLen* arrLen =
3528                     new (this, GT_ARR_LENGTH) GenTreeArrLen(TYP_INT, op1, offsetof(CORINFO_String, stringLen));
3529                 op1 = arrLen;
3530             }
3531             else
3532             {
3533                 /* Create the expression "*(str_addr + stringLengthOffset)" */
3534                 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
3535                                     gtNewIconNode(offsetof(CORINFO_String, stringLen), TYP_I_IMPL));
3536                 op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
3537             }
3538             retNode = op1;
3539             break;
3540
3541         case CORINFO_INTRINSIC_StringGetChar:
3542             op2 = impPopStack().val;
3543             op1 = impPopStack().val;
3544             op1 = gtNewIndexRef(TYP_CHAR, op1, op2);
3545             op1->gtFlags |= GTF_INX_STRING_LAYOUT;
3546             retNode = op1;
3547             break;
3548
3549         case CORINFO_INTRINSIC_InitializeArray:
3550             retNode = impInitializeArrayIntrinsic(sig);
3551             break;
3552
3553         case CORINFO_INTRINSIC_Array_Address:
3554         case CORINFO_INTRINSIC_Array_Get:
3555         case CORINFO_INTRINSIC_Array_Set:
3556             retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, intrinsicID);
3557             break;
3558
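        // For example, "typeof(T)" compiles to "ldtoken T" followed by a call to
        // Type.GetTypeFromHandle. If the value on top of the stack is already the helper call
        // that converts a type handle to its runtime type, the code below reuses that helper
        // call as the result and the GetTypeFromHandle call is dropped.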
3559         case CORINFO_INTRINSIC_GetTypeFromHandle:
3560             op1 = impStackTop(0).val;
3561             if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3562                 gtIsTypeHandleToRuntimeTypeHelper(op1))
3563             {
3564                 op1 = impPopStack().val;
3565                 // Change call to return RuntimeType directly.
3566                 op1->gtType = TYP_REF;
3567                 retNode     = op1;
3568             }
3569             // Call the regular function.
3570             break;
3571
3572         case CORINFO_INTRINSIC_RTH_GetValueInternal:
3573             op1 = impStackTop(0).val;
3574             if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3575                 gtIsTypeHandleToRuntimeTypeHelper(op1))
3576             {
3577                 // Old tree
3578                 // Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle
3579                 //
3580                 // New tree
3581                 // TreeToGetNativeTypeHandle
3582
3583                 // Remove call to helper and return the native TypeHandle pointer that was the parameter
3584                 // to that helper.
3585
3586                 op1 = impPopStack().val;
3587
3588                 // Get native TypeHandle argument to old helper
3589                 op1 = op1->gtCall.gtCallArgs;
3590                 assert(op1->OperIsList());
3591                 assert(op1->gtOp.gtOp2 == nullptr);
3592                 op1     = op1->gtOp.gtOp1;
3593                 retNode = op1;
3594             }
3595             // Call the regular function.
3596             break;
3597
3598 #ifndef LEGACY_BACKEND
3599         case CORINFO_INTRINSIC_Object_GetType:
3600
3601             op1 = impPopStack().val;
3602             op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3603
3604             // Set the CALL flag to indicate that the operator is implemented by a call.
3605             // Set also the EXCEPTION flag because the native implementation of
3606             // CORINFO_INTRINSIC_Object_GetType intrinsic can throw NullReferenceException.
3607             op1->gtFlags |= (GTF_CALL | GTF_EXCEPT);
3608             retNode = op1;
3609             break;
3610 #endif
3611         // Implement ByReference Ctor.  This wraps the assignment of the ref into a byref-like field
3612         // in a value type.  The canonical example of this is Span<T>. In effect this is just a
3613         // substitution.  The parameter byref will be assigned into the newly allocated object.
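        // Conceptually, "new ByReference<T>(ref value)" loses the constructor call entirely:
        // the incoming byref is stored straight into the struct's single field and the struct
        // value is pushed back onto the stack (see the code below).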
3614         case CORINFO_INTRINSIC_ByReference_Ctor:
3615         {
3616             // Remove call to constructor and directly assign the byref passed
3617             // to the call to the first slot of the ByReference struct.
3618             op1                                    = impPopStack().val;
3619             GenTreePtr           thisptr           = newobjThis;
3620             CORINFO_FIELD_HANDLE fldHnd            = info.compCompHnd->getFieldInClass(clsHnd, 0);
3621             GenTreePtr           field             = gtNewFieldRef(TYP_BYREF, fldHnd, thisptr, 0, false);
3622             GenTreePtr           assign            = gtNewAssignNode(field, op1);
3623             GenTreePtr           byReferenceStruct = gtCloneExpr(thisptr->gtGetOp1());
3624             assert(byReferenceStruct != nullptr);
3625             impPushOnStack(byReferenceStruct, typeInfo(TI_STRUCT, clsHnd));
3626             retNode = assign;
3627             break;
3628         }
3629         // Implement ptr value getter for ByReference struct.
3630         case CORINFO_INTRINSIC_ByReference_Value:
3631         {
3632             op1                         = impPopStack().val;
3633             CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
3634             GenTreePtr           field  = gtNewFieldRef(TYP_BYREF, fldHnd, op1, 0, false);
3635             retNode                     = field;
3636             break;
3637         }
3638         default:
3639             /* Unknown intrinsic */
3640             break;
3641     }
3642
3643     if (mustExpand)
3644     {
3645         if (retNode == nullptr)
3646         {
3647             NO_WAY("JIT must expand the intrinsic!");
3648         }
3649     }
3650
3651     return retNode;
3652 }
3653
3654 /*****************************************************************************/
3655
3656 GenTreePtr Compiler::impArrayAccessIntrinsic(
3657     CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, CorInfoIntrinsics intrinsicID)
3658 {
3659     /* If we are generating SMALL_CODE, we don't want to use intrinsics for
3660        the following, as it generates fatter code.
3661     */
3662
3663     if (compCodeOpt() == SMALL_CODE)
3664     {
3665         return nullptr;
3666     }
3667
3668     /* These intrinsics generate fatter (but faster) code and are only
3669        done if we don't need SMALL_CODE */
3670
3671     unsigned rank = (intrinsicID == CORINFO_INTRINSIC_Array_Set) ? (sig->numArgs - 1) : sig->numArgs;
3672
3673     // The rank 1 case is special because it has to handle two array formats,
3674     // so we simply don't handle that case.
3675     if (rank > GT_ARR_MAX_RANK || rank <= 1)
3676     {
3677         return nullptr;
3678     }
3679
3680     CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr;
3681     var_types            elemType      = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd));
3682
3683     // For the ref case, we will only be able to inline if the types match
3684     // (the verifier checks for this; we don't care about the non-verified case)
3685     // and the type is final (so we don't need to do the cast).
3686     if ((intrinsicID != CORINFO_INTRINSIC_Array_Get) && !readonlyCall && varTypeIsGC(elemType))
3687     {
3688         // Get the call site signature
3689         CORINFO_SIG_INFO LocalSig;
3690         eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig);
3691         assert(LocalSig.hasThis());
3692
3693         CORINFO_CLASS_HANDLE actualElemClsHnd;
3694
3695         if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3696         {
3697             // Fetch the last argument, the one that indicates the type we are setting.
3698             CORINFO_ARG_LIST_HANDLE argType = LocalSig.args;
3699             for (unsigned r = 0; r < rank; r++)
3700             {
3701                 argType = info.compCompHnd->getArgNext(argType);
3702             }
3703
3704             typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType);
3705             actualElemClsHnd = argInfo.GetClassHandle();
3706         }
3707         else
3708         {
3709             assert(intrinsicID == CORINFO_INTRINSIC_Array_Address);
3710
3711             // Fetch the return type
3712             typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass);
3713             assert(retInfo.IsByRef());
3714             actualElemClsHnd = retInfo.GetClassHandle();
3715         }
3716
3717         // if it's not final, we can't do the optimization
3718         if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL))
3719         {
3720             return nullptr;
3721         }
3722     }
3723
3724     unsigned arrayElemSize;
3725     if (elemType == TYP_STRUCT)
3726     {
3727         assert(arrElemClsHnd);
3728
3729         arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd);
3730     }
3731     else
3732     {
3733         arrayElemSize = genTypeSize(elemType);
3734     }
3735
3736     if ((unsigned char)arrayElemSize != arrayElemSize)
3737     {
3738         // arrayElemSize would be truncated as an unsigned char.
3739         // This means the array element is too large. Don't do the optimization.
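        // For example, a 300-byte struct element would be truncated to 300 & 0xFF == 44 when
        // stored in the node's unsigned char element-size field, so we bail instead.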
3740         return nullptr;
3741     }
3742
3743     GenTreePtr val = nullptr;
3744
3745     if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3746     {
3747         // Assignment of a struct is more work, and there are more gets than sets.
3748         if (elemType == TYP_STRUCT)
3749         {
3750             return nullptr;
3751         }
3752
3753         val = impPopStack().val;
3754         assert(genActualType(elemType) == genActualType(val->gtType) ||
3755                (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) ||
3756                (elemType == TYP_INT && val->gtType == TYP_BYREF) ||
3757                (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT));
3758     }
3759
3760     noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK);
3761
3762     GenTreePtr inds[GT_ARR_MAX_RANK];
3763     for (unsigned k = rank; k > 0; k--)
3764     {
3765         inds[k - 1] = impPopStack().val;
3766     }
3767
3768     GenTreePtr arr = impPopStack().val;
3769     assert(arr->gtType == TYP_REF);
3770
3771     GenTreePtr arrElem =
3772         new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank),
3773                                                static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]);
3774
3775     if (intrinsicID != CORINFO_INTRINSIC_Array_Address)
3776     {
3777         arrElem = gtNewOperNode(GT_IND, elemType, arrElem);
3778     }
3779
3780     if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3781     {
3782         assert(val != nullptr);
3783         return gtNewAssignNode(arrElem, val);
3784     }
3785     else
3786     {
3787         return arrElem;
3788     }
3789 }
3790
3791 BOOL Compiler::verMergeEntryStates(BasicBlock* block, bool* changed)
3792 {
3793     unsigned i;
3794
3795     // do some basic checks first
3796     if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth)
3797     {
3798         return FALSE;
3799     }
3800
3801     if (verCurrentState.esStackDepth > 0)
3802     {
3803         // merge stack types
3804         StackEntry* parentStack = block->bbStackOnEntry();
3805         StackEntry* childStack  = verCurrentState.esStack;
3806
3807         for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++)
3808         {
3809             if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == FALSE)
3810             {
3811                 return FALSE;
3812             }
3813         }
3814     }
3815
3816     // merge initialization status of this ptr
3817
3818     if (verTrackObjCtorInitState)
3819     {
3820         // If we're tracking the CtorInitState, then it must not be unknown in the current state.
3821         assert(verCurrentState.thisInitialized != TIS_Bottom);
3822
3823         // If the successor block's thisInit state is unknown, copy it from the current state.
3824         if (block->bbThisOnEntry() == TIS_Bottom)
3825         {
3826             *changed = true;
3827             verSetThisInit(block, verCurrentState.thisInitialized);
3828         }
3829         else if (verCurrentState.thisInitialized != block->bbThisOnEntry())
3830         {
3831             if (block->bbThisOnEntry() != TIS_Top)
3832             {
3833                 *changed = true;
3834                 verSetThisInit(block, TIS_Top);
3835
3836                 if (block->bbFlags & BBF_FAILED_VERIFICATION)
3837                 {
3838                     // The block is bad. Control can flow through the block to any handler that catches the
3839                     // verification exception, but the importer ignores bad blocks and therefore won't model
3840                     // this flow in the normal way. To complete the merge into the bad block, the new state
3841                     // needs to be manually pushed to the handlers that may be reached after the verification
3842                     // exception occurs.
3843                     //
3844                     // Usually, the new state was already propagated to the relevant handlers while processing
3845                     // the predecessors of the bad block. The exception is when the bad block is at the start
3846                     // of a try region, meaning it is protected by additional handlers that do not protect its
3847                     // predecessors.
3848                     //
3849                     if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0))
3850                     {
3851                         // Push TIS_Top to the handlers that protect the bad block. Note that this can cause
3852                         // recursive calls back into this code path (if successors of the current bad block are
3853                         // also bad blocks).
3854                         //
3855                         ThisInitState origTIS           = verCurrentState.thisInitialized;
3856                         verCurrentState.thisInitialized = TIS_Top;
3857                         impVerifyEHBlock(block, true);
3858                         verCurrentState.thisInitialized = origTIS;
3859                     }
3860                 }
3861             }
3862         }
3863     }
3864     else
3865     {
3866         assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom);
3867     }
3868
3869     return TRUE;
3870 }
3871
3872 /*****************************************************************************
3873  * 'logMsg' is true if a log message needs to be logged, false if the caller has
3874  *   already logged it (presumably in a more detailed fashion than done here)
3875  * 'bVerificationException' is true for a verification exception, false for a
3876  *   "call unauthorized by host" exception.
3877  */
3878
3879 void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg))
3880 {
3881     block->bbJumpKind = BBJ_THROW;
3882     block->bbFlags |= BBF_FAILED_VERIFICATION;
3883
3884     impCurStmtOffsSet(block->bbCodeOffs);
3885
3886 #ifdef DEBUG
3887     // we need this since BeginTreeList asserts otherwise
3888     impTreeList = impTreeLast = nullptr;
3889     block->bbFlags &= ~BBF_IMPORTED;
3890
3891     if (logMsg)
3892     {
3893         JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName,
3894                 block->bbCodeOffs, block->bbCodeOffsEnd));
3895         if (verbose)
3896         {
3897             printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs);
3898         }
3899     }
3900
3901     if (JitConfig.DebugBreakOnVerificationFailure())
3902     {
3903         DebugBreak();
3904     }
3905 #endif
3906
3907     impBeginTreeList();
3908
3909     // if the stack is non-empty evaluate all the side-effects
3910     if (verCurrentState.esStackDepth > 0)
3911     {
3912         impEvalSideEffects();
3913     }
3914     assert(verCurrentState.esStackDepth == 0);
3915
3916     GenTreePtr op1 = gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, GTF_EXCEPT,
3917                                          gtNewArgList(gtNewIconNode(block->bbCodeOffs)));
3918     // verCurrentState.esStackDepth = 0;
3919     impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
3920
3921     // The inliner is not able to handle methods that require a throw block, so
3922     // make sure this method never gets inlined.
3923     info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE);
3924 }
3925
3926 /*****************************************************************************
3927  *
3928  */
3929 void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg))
3930
3931 {
3932     // In AMD64, for historical reasons involving design limitations of JIT64, the VM has a
3933     // slightly different mechanism in which it calls the JIT to perform IL verification:
3934     // in the case of transparent methods the VM calls for a predicate IsVerifiable()
3935     // that consists of calling the JIT with the IMPORT_ONLY flag and with the IL verify flag on.
3936     // If the JIT determines the method is not verifiable, it should raise the exception to the VM and let
3937     // it bubble up until reported by the runtime.  Currently in RyuJIT, this method doesn't bubble
3938     // up the exception; instead it embeds a throw inside the offending basic block and lets it
3939     // fail at run time when the jitted method executes.
3940     //
3941     // For AMD64 we don't want this behavior when the JIT has been called only for verification (i.e.
3942     // with the IMPORT_ONLY and IL Verification flag set) because this won't actually generate code,
3943     // just try to find out whether to fail this method before even actually jitting it.  So, in case
3944     // we detect these two conditions, instead of generating a throw statement inside the offending
3945     // basic block, we immediately fail to JIT and notify the VM to make the IsVerifiable() predicate
3946     // to return false and make RyuJIT behave the same way JIT64 does.
3947     //
3948     // The rationale behind this workaround is to avoid modifying the VM and maintain compatibility between JIT64 and
3949     // RyuJIT for the time being until we completely replace JIT64.
3950     // TODO-ARM64-Cleanup:  We probably want to actually modify the VM in the future to avoid the unnecessary two passes.
3951
3952     // In AMD64 we must make sure we're behaving the same way as JIT64, meaning we should only raise the verification
3953     // exception if we are only importing and verifying.  The method verNeedsVerification() can also modify the
3954     // tiVerificationNeeded flag in the case it determines it can 'skip verification' during importation and defer it
3955     // to a runtime check. That's why we must assert one or the other (since the flag tiVerificationNeeded can
3956     // be turned off during importation).
3957     CLANG_FORMAT_COMMENT_ANCHOR;
3958
3959 #ifdef _TARGET_64BIT_
3960
3961 #ifdef DEBUG
3962     bool canSkipVerificationResult =
3963         info.compCompHnd->canSkipMethodVerification(info.compMethodHnd) != CORINFO_VERIFICATION_CANNOT_SKIP;
3964     assert(tiVerificationNeeded || canSkipVerificationResult);
3965 #endif // DEBUG
3966
3967     // Add the non verifiable flag to the compiler
3968     if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
3969     {
3970         tiIsVerifiableCode = FALSE;
3971     }
3972 #endif //_TARGET_64BIT_
3973     verResetCurrentState(block, &verCurrentState);
3974     verConvertBBToThrowVerificationException(block DEBUGARG(logMsg));
3975
3976 #ifdef DEBUG
3977     impNoteLastILoffs(); // Remember at which BC offset the tree was finished
3978 #endif                   // DEBUG
3979 }
3980
3981 /******************************************************************************/
3982 typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd)
3983 {
3984     assert(ciType < CORINFO_TYPE_COUNT);
3985
3986     typeInfo tiResult;
3987     switch (ciType)
3988     {
3989         case CORINFO_TYPE_STRING:
3990         case CORINFO_TYPE_CLASS:
3991             tiResult = verMakeTypeInfo(clsHnd);
3992             if (!tiResult.IsType(TI_REF))
3993             { // type must be consistent with element type
3994                 return typeInfo();
3995             }
3996             break;
3997
3998 #ifdef _TARGET_64BIT_
3999         case CORINFO_TYPE_NATIVEINT:
4000         case CORINFO_TYPE_NATIVEUINT:
4001             if (clsHnd)
4002             {
4003                 // If we have more precise information, use it
4004                 return verMakeTypeInfo(clsHnd);
4005             }
4006             else
4007             {
4008                 return typeInfo::nativeInt();
4009             }
4010             break;
4011 #endif // _TARGET_64BIT_
4012
4013         case CORINFO_TYPE_VALUECLASS:
4014         case CORINFO_TYPE_REFANY:
4015             tiResult = verMakeTypeInfo(clsHnd);
4016             // type must be consistent with element type;
4017             if (!tiResult.IsValueClass())
4018             {
4019                 return typeInfo();
4020             }
4021             break;
4022         case CORINFO_TYPE_VAR:
4023             return verMakeTypeInfo(clsHnd);
4024
4025         case CORINFO_TYPE_PTR: // for now, pointers are treated as an error
4026         case CORINFO_TYPE_VOID:
4027             return typeInfo();
4028             break;
4029
4030         case CORINFO_TYPE_BYREF:
4031         {
4032             CORINFO_CLASS_HANDLE childClassHandle;
4033             CorInfoType          childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle);
4034             return ByRef(verMakeTypeInfo(childType, childClassHandle));
4035         }
4036         break;
4037
4038         default:
4039             if (clsHnd)
4040             { // If we have more precise information, use it
4041                 return typeInfo(TI_STRUCT, clsHnd);
4042             }
4043             else
4044             {
4045                 return typeInfo(JITtype2tiType(ciType));
4046             }
4047     }
4048     return tiResult;
4049 }
4050
4051 /******************************************************************************/
4052
4053 typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */)
4054 {
4055     if (clsHnd == nullptr)
4056     {
4057         return typeInfo();
4058     }
4059
4060     // Byrefs should only occur in method and local signatures, which are accessed
4061     // using ICorClassInfo and ICorClassInfo.getChildType.
4062     // So findClass() and getClassAttribs() should not be called for byrefs
4063
4064     if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF)
4065     {
4066         assert(!"Did findClass() return a Byref?");
4067         return typeInfo();
4068     }
4069
4070     unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd);
4071
4072     if (attribs & CORINFO_FLG_VALUECLASS)
4073     {
4074         CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd);
4075
4076         // Meta-data validation should ensure that CORINFO_TYPE_BYREF does
4077         // not occur here, so we may want to change this to an assert instead.
4078         if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR)
4079         {
4080             return typeInfo();
4081         }
4082
4083 #ifdef _TARGET_64BIT_
4084         if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT)
4085         {
4086             return typeInfo::nativeInt();
4087         }
4088 #endif // _TARGET_64BIT_
4089
4090         if (t != CORINFO_TYPE_UNDEF)
4091         {
4092             return (typeInfo(JITtype2tiType(t)));
4093         }
4094         else if (bashStructToRef)
4095         {
4096             return (typeInfo(TI_REF, clsHnd));
4097         }
4098         else
4099         {
4100             return (typeInfo(TI_STRUCT, clsHnd));
4101         }
4102     }
4103     else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE)
4104     {
4105         // See comment in _typeInfo.h for why we do it this way.
4106         return (typeInfo(TI_REF, clsHnd, true));
4107     }
4108     else
4109     {
4110         return (typeInfo(TI_REF, clsHnd));
4111     }
4112 }
4113
4114 /******************************************************************************/
4115 BOOL Compiler::verIsSDArray(typeInfo ti)
4116 {
4117     if (ti.IsNullObjRef())
4118     { // nulls are SD arrays
4119         return TRUE;
4120     }
4121
4122     if (!ti.IsType(TI_REF))
4123     {
4124         return FALSE;
4125     }
4126
4127     if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef()))
4128     {
4129         return FALSE;
4130     }
4131     return TRUE;
4132 }
4133
4134 /******************************************************************************/
4135 /* Given 'arrayObjectType' which is an array type, fetch the element type. */
4136 /* Returns an error type if anything goes wrong */
4137
4138 typeInfo Compiler::verGetArrayElemType(typeInfo arrayObjectType)
4139 {
4140     assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case
4141
4142     if (!verIsSDArray(arrayObjectType))
4143     {
4144         return typeInfo();
4145     }
4146
4147     CORINFO_CLASS_HANDLE childClassHandle = nullptr;
4148     CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle);
4149
4150     return verMakeTypeInfo(ciType, childClassHandle);
4151 }
4152
4153 /*****************************************************************************
4154  */
4155 typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args)
4156 {
4157     CORINFO_CLASS_HANDLE classHandle;
4158     CorInfoType          ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle));
4159
4160     var_types type = JITtype2varType(ciType);
4161     if (varTypeIsGC(type))
4162     {
4163         // For efficiency, getArgType only returns something in classHandle for
4164         // value types.  For other types that have additional type info, you
4165         // have to call back explicitly
4166         classHandle = info.compCompHnd->getArgClass(sig, args);
4167         if (!classHandle)
4168         {
4169             NO_WAY("Could not figure out Class specified in argument or local signature");
4170         }
4171     }
4172
4173     return verMakeTypeInfo(ciType, classHandle);
4174 }
4175
4176 /*****************************************************************************/
4177
4178 // This does the expensive check to figure out whether the method
4179 // needs to be verified. It is called only when we fail verification,
4180 // just before throwing the verification exception.
4181
4182 BOOL Compiler::verNeedsVerification()
4183 {
4184     // If we have previously determined that verification is NOT needed
4185     // (for example in Compiler::compCompile), that means verification is really not needed.
4186     // Return the same decision we made before.
4187     // (Note: This literally means that tiVerificationNeeded can never go from 0 to 1.)
4188
4189     if (!tiVerificationNeeded)
4190     {
4191         return tiVerificationNeeded;
4192     }
4193
4194     assert(tiVerificationNeeded);
4195
4196     // Ok, we haven't concluded that verification is NOT needed. Consult the EE now to
4197     // obtain the answer.
4198     CorInfoCanSkipVerificationResult canSkipVerificationResult =
4199         info.compCompHnd->canSkipMethodVerification(info.compMethodHnd);
4200
4201     // canSkipMethodVerification will return one of the following values:
4202     //    CORINFO_VERIFICATION_CANNOT_SKIP = 0,       // Cannot skip verification during jit time.
4203     //    CORINFO_VERIFICATION_CAN_SKIP = 1,          // Can skip verification during jit time.
4204     //    CORINFO_VERIFICATION_RUNTIME_CHECK = 2,     // Skip verification during jit time, but insert
4205     //     a callout to the VM to ask at run time whether to skip verification or not.
4206     //    CORINFO_VERIFICATION_DONT_JIT = 3,          // Do not jit this method; handled by badCode() below.
4207
4208     // Set tiRuntimeCalloutNeeded if canSkipVerification() instructs us to insert a callout for runtime check
4209     if (canSkipVerificationResult == CORINFO_VERIFICATION_RUNTIME_CHECK)
4210     {
4211         tiRuntimeCalloutNeeded = true;
4212     }
4213
4214     if (canSkipVerificationResult == CORINFO_VERIFICATION_DONT_JIT)
4215     {
4216         // Dev10 706080 - Testers don't like the assert, so just silence it
4217         // by not using the macros that invoke debugAssert.
4218         badCode();
4219     }
4220
4221     // When tiVerificationNeeded is true, JIT will do the verification during JIT time.
4222     // The following line means we will NOT do jit time verification if canSkipVerification
4223     // returns CORINFO_VERIFICATION_CAN_SKIP or CORINFO_VERIFICATION_RUNTIME_CHECK.
4224     tiVerificationNeeded = (canSkipVerificationResult == CORINFO_VERIFICATION_CANNOT_SKIP);
4225     return tiVerificationNeeded;
4226 }
4227
4228 BOOL Compiler::verIsByRefLike(const typeInfo& ti)
4229 {
4230     if (ti.IsByRef())
4231     {
4232         return TRUE;
4233     }
4234     if (!ti.IsType(TI_STRUCT))
4235     {
4236         return FALSE;
4237     }
4238     return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR;
4239 }
4240
4241 BOOL Compiler::verIsSafeToReturnByRef(const typeInfo& ti)
4242 {
4243     if (ti.IsPermanentHomeByRef())
4244     {
4245         return TRUE;
4246     }
4247     else
4248     {
4249         return FALSE;
4250     }
4251 }
4252
4253 BOOL Compiler::verIsBoxable(const typeInfo& ti)
4254 {
4255     return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables
4256             || ti.IsUnboxedGenericTypeVar() ||
4257             (ti.IsType(TI_STRUCT) &&
4258              // exclude byreflike structs
4259              !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR)));
4260 }
4261
4262 // Is it a boxed value type?
4263 bool Compiler::verIsBoxedValueType(typeInfo ti)
4264 {
4265     if (ti.GetType() == TI_REF)
4266     {
4267         CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef();
4268         return !!eeIsValueClass(clsHnd);
4269     }
4270     else
4271     {
4272         return false;
4273     }
4274 }
4275
4276 /*****************************************************************************
4277  *
4278  *  Check if a TailCall is legal.
4279  */
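//
// A brief sketch of what the checks below enforce (the function body is authoritative):
//   - no argument may be a byref or a byref-like struct (one containing a stack pointer)
//   - no argument may be an unmanaged pointer (CORINFO_TYPE_PTR)
//   - for instance methods, the 'this' argument must not be byref-like
//   - the call must not carry a "constrained." prefix
//   - the callee's return type must be stack-compatible with the caller's return type
//   - the evaluation stack must hold nothing beyond the call's own arguments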
4280
4281 bool Compiler::verCheckTailCallConstraint(
4282     OPCODE                  opcode,
4283     CORINFO_RESOLVED_TOKEN* pResolvedToken,
4284     CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter?
4285     bool                    speculative                // If true, won't throw if verification fails. Instead it will
4286                                                        // return false to the caller.
4287                                                        // If false, it will throw.
4288     )
4289 {
4290     DWORD            mflags;
4291     CORINFO_SIG_INFO sig;
4292     unsigned int     popCount = 0; // we can't pop the stack since impImportCall needs it, so
4293                                    // this counter is used to keep track of how many items have been
4294                                    // virtually popped
4295
4296     CORINFO_METHOD_HANDLE methodHnd       = nullptr;
4297     CORINFO_CLASS_HANDLE  methodClassHnd  = nullptr;
4298     unsigned              methodClassFlgs = 0;
4299
4300     assert(impOpcodeIsCallOpcode(opcode));
4301
4302     if (compIsForInlining())
4303     {
4304         return false;
4305     }
4306
4307     // for calli, VerifyOrReturn that this is not a virtual method
4308     if (opcode == CEE_CALLI)
4309     {
4310         /* Get the call sig */
4311         eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4312
4313         // We don't know the target method, so we have to infer the flags, or
4314         // assume the worst-case.
4315         mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
4316     }
4317     else
4318     {
4319         methodHnd = pResolvedToken->hMethod;
4320
4321         mflags = info.compCompHnd->getMethodAttribs(methodHnd);
4322
4323         // When verifying generic code we pair the method handle with its
4324         // owning class to get the exact method signature.
4325         methodClassHnd = pResolvedToken->hClass;
4326         assert(methodClassHnd);
4327
4328         eeGetMethodSig(methodHnd, &sig, methodClassHnd);
4329
4330         // opcode specific check
4331         methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd);
4332     }
4333
4334     // We must have obtained the methodClassHnd if opcode is not CEE_CALLI
4335     assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI);
4336
4337     if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4338     {
4339         eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4340     }
4341
4342     // check compatibility of the arguments
4343     unsigned int argCount;
4344     argCount = sig.numArgs;
4345     CORINFO_ARG_LIST_HANDLE args;
4346     args = sig.args;
4347     while (argCount--)
4348     {
4349         typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack();
4350
4351         // check that the argument is not a byref for tailcalls
4352         VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative);
4353
4354         // For unsafe code, we might have parameters containing pointer to the stack location.
4355         // Disallow the tailcall for this kind.
4356         CORINFO_CLASS_HANDLE classHandle;
4357         CorInfoType          ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle));
4358         VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative);
4359
4360         args = info.compCompHnd->getArgNext(args);
4361     }
4362
4363     // update popCount
4364     popCount += sig.numArgs;
4365
4366     // check for 'this', which is present for non-static methods not called via NEWOBJ
4367     if (!(mflags & CORINFO_FLG_STATIC))
4368     {
4369         // Always update the popCount.
4370         // This is crucial for the stack calculation to be correct.
4371         typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4372         popCount++;
4373
4374         if (opcode == CEE_CALLI)
4375         {
4376             // For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object
4377             // on the stack.
4378             if (tiThis.IsValueClass())
4379             {
4380                 tiThis.MakeByRef();
4381             }
4382             VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative);
4383         }
4384         else
4385         {
4386             // Check type compatibility of the this argument
4387             typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd);
4388             if (tiDeclaredThis.IsValueClass())
4389             {
4390                 tiDeclaredThis.MakeByRef();
4391             }
4392
4393             VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative);
4394         }
4395     }
4396
4397     // Tail calls on constrained calls should be illegal too:
4398     // when instantiated at a value type, a constrained call may pass the address of a stack allocated value
4399     VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative);
4400
4401     // Get the exact view of the signature for an array method
4402     if (sig.retType != CORINFO_TYPE_VOID)
4403     {
4404         if (methodClassFlgs & CORINFO_FLG_ARRAY)
4405         {
4406             assert(opcode != CEE_CALLI);
4407             eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4408         }
4409     }
4410
4411     typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass);
4412     typeInfo tiCallerRetType =
4413         verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
4414
4415     // a void return type gets morphed into the error type, so we have to treat it specially here
4416     if (sig.retType == CORINFO_TYPE_VOID)
4417     {
4418         VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch",
4419                                   speculative);
4420     }
4421     else
4422     {
4423         VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType),
4424                                                    NormaliseForStack(tiCallerRetType), true),
4425                                   "tailcall return mismatch", speculative);
4426     }
4427
4428     // for tailcall, stack must be empty
4429     VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative);
4430
4431     return true; // Yes, tailcall is legal
4432 }
4433
4434 /*****************************************************************************
4435  *
4436  *  Checks the IL verification rules for the call
4437  */
4438
4439 void Compiler::verVerifyCall(OPCODE                  opcode,
4440                              CORINFO_RESOLVED_TOKEN* pResolvedToken,
4441                              CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
4442                              bool                    tailCall,
4443                              bool                    readonlyCall,
4444                              const BYTE*             delegateCreateStart,
4445                              const BYTE*             codeAddr,
4446                              CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName))
4447 {
4448     DWORD             mflags;
4449     CORINFO_SIG_INFO* sig      = nullptr;
4450     unsigned int      popCount = 0; // we can't pop the stack since impImportCall needs it, so
4451                                     // this counter is used to keep track of how many items have been
4452                                     // virtually popped
4453
4454     // calli is never verifiable; reject it outright
4455     if (opcode == CEE_CALLI)
4456     {
4457         Verify(false, "Calli not verifiable");
4458         return;
4459     }
4460
4461     //<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item.
4462     mflags = callInfo->verMethodFlags;
4463
4464     sig = &callInfo->verSig;
4465
4466     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4467     {
4468         eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
4469     }
4470
4471     // opcode specific check
4472     unsigned methodClassFlgs = callInfo->classFlags;
4473     switch (opcode)
4474     {
4475         case CEE_CALLVIRT:
4476             // cannot do callvirt on valuetypes
4477             VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class");
4478             VerifyOrReturn(sig->hasThis(), "CallVirt on static method");
4479             break;
4480
4481         case CEE_NEWOBJ:
4482         {
4483             assert(!tailCall); // Importer should not allow this
4484             VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC),
4485                            "newobj must be on instance");
4486
4487             if (methodClassFlgs & CORINFO_FLG_DELEGATE)
4488             {
4489                 VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor");
4490                 typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
4491                 typeInfo tiDeclaredFtn =
4492                     verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack();
4493                 VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type");
4494
4495                 assert(popCount == 0);
4496                 typeInfo tiActualObj = impStackTop(1).seTypeInfo;
4497                 typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
4498
4499                 VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg");
4500                 VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch");
4501                 VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF),
4502                                "delegate object type mismatch");
4503
4504                 CORINFO_CLASS_HANDLE objTypeHandle =
4505                     tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef();
4506
4507                 // the method signature must be compatible with the delegate's invoke method
4508
4509                 // check that for virtual functions, the type of the object used to get the
4510                 // ftn ptr is the same as the type of the object passed to the delegate ctor.
4511                 // since this is a bit of work to determine in general, we pattern match stylized
4512                 // code sequences
4513
4514                 // the delegate creation code check, which used to be done later, is now done here
4515                 // so we can read delegateMethodRef directly from the
4516                 // preceding LDFTN or CEE_LDVIRTFN instruction sequence;
4517                 // we then use it in our call to isCompatibleDelegate().
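                // As an illustrative sketch only (type and method names here are placeholders),
                // the stylized sequences accepted are of the form:
                //
                //     ldftn      instance void SomeClass::SomeMethod()
                //     newobj     instance void SomeDelegate::.ctor(object, native int)
                //
                // or, for the virtual-dispatch form:
                //
                //     dup
                //     ldvirtftn  instance void SomeClass::SomeVirtualMethod()
                //     newobj     instance void SomeDelegate::.ctor(object, native int)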
4518
4519                 mdMemberRef delegateMethodRef = mdMemberRefNil;
4520                 VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef),
4521                                "must create delegates with certain IL");
4522
4523                 CORINFO_RESOLVED_TOKEN delegateResolvedToken;
4524                 delegateResolvedToken.tokenContext = impTokenLookupContextHandle;
4525                 delegateResolvedToken.tokenScope   = info.compScopeHnd;
4526                 delegateResolvedToken.token        = delegateMethodRef;
4527                 delegateResolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
4528                 info.compCompHnd->resolveToken(&delegateResolvedToken);
4529
4530                 CORINFO_CALL_INFO delegateCallInfo;
4531                 eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */,
4532                               addVerifyFlag(CORINFO_CALLINFO_SECURITYCHECKS), &delegateCallInfo);
4533
4534                 BOOL isOpenDelegate = FALSE;
4535                 VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass,
4536                                                                       tiActualFtn.GetMethod(), pResolvedToken->hClass,
4537                                                                       &isOpenDelegate),
4538                                "function incompatible with delegate");
4539
4540                 // check the constraints on the target method
4541                 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass),
4542                                "delegate target has unsatisfied class constraints");
4543                 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass,
4544                                                                             tiActualFtn.GetMethod()),
4545                                "delegate target has unsatisfied method constraints");
4546
4547                 // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch)
4548                 // for additional verification rules for delegates
4549                 CORINFO_METHOD_HANDLE actualMethodHandle  = tiActualFtn.GetMethod();
4550                 DWORD                 actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle);
4551                 if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
4552                 {
4553
4554                     if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0)
4555 #ifdef DEBUG
4556                         && StrictCheckForNonVirtualCallToVirtualMethod()
4557 #endif
4558                             )
4559                     {
4560                         if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
4561                         {
4562                             VerifyOrReturn(tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly() ||
4563                                                verIsBoxedValueType(tiActualObj),
4564                                            "The 'this' parameter to the call must be either the calling method's "
4565                                            "'this' parameter or "
4566                                            "a boxed value type.");
4567                         }
4568                     }
4569                 }
4570
4571                 if (actualMethodAttribs & CORINFO_FLG_PROTECTED)
4572                 {
4573                     BOOL targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC;
4574
4575                     Verify(targetIsStatic || !isOpenDelegate,
4576                            "Unverifiable creation of an open instance delegate for a protected member.");
4577
4578                     CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic)
4579                                                                 ? info.compClassHnd
4580                                                                 : tiActualObj.GetClassHandleForObjRef();
4581
4582                     // In the case of protected methods, it is a requirement that the 'this'
4583                     // pointer be a subclass of the current context.  Perform this check.
4584                     Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
4585                            "Accessing protected method through wrong type.");
4586                 }
4587                 goto DONE_ARGS;
4588             }
4589         }
4590         // fall thru to default checks
4591         default:
4592             VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract");
4593     }
4594     VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)),
4595                    "can only newobj a delegate constructor");
4596
4597     // check compatibility of the arguments
4598     unsigned int argCount;
4599     argCount = sig->numArgs;
4600     CORINFO_ARG_LIST_HANDLE args;
4601     args = sig->args;
4602     while (argCount--)
4603     {
4604         typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo;
4605
4606         typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack();
4607         VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch");
4608
4609         args = info.compCompHnd->getArgNext(args);
4610     }
4611
4612 DONE_ARGS:
4613
4614     // update popCount
4615     popCount += sig->numArgs;
4616
4617     // check for 'this', which is present for non-static methods not called via NEWOBJ
4618     CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
4619     if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ))
4620     {
4621         typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4622         popCount++;
4623
4624         // If it is null, we assume we can access it (since it will AV shortly)
4625         // If it is anything but a reference class, there is no hierarchy, so
4626         // again, we don't need the precise instance class to compute 'protected' access
4627         if (tiThis.IsType(TI_REF))
4628         {
4629             instanceClassHnd = tiThis.GetClassHandleForObjRef();
4630         }
4631
4632         // Check type compatibility of the this argument
4633         typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass);
4634         if (tiDeclaredThis.IsValueClass())
4635         {
4636             tiDeclaredThis.MakeByRef();
4637         }
4638
4639         // If this is a call to the base class .ctor, set thisPtr Init for
4640         // this block.
4641         if (mflags & CORINFO_FLG_CONSTRUCTOR)
4642         {
4643             if (verTrackObjCtorInitState && tiThis.IsThisPtr() &&
4644                 verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass))
4645             {
4646                 assert(verCurrentState.thisInitialized !=
4647                        TIS_Bottom); // This should never be the case just from the logic of the verifier.
4648                 VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit,
4649                                "Call to base class constructor when 'this' is possibly initialized");
4650                 // Otherwise, 'this' is now initialized.
4651                 verCurrentState.thisInitialized = TIS_Init;
4652                 tiThis.SetInitialisedObjRef();
4653             }
4654             else
4655             {
4656                 // We allow direct calls to value type constructors
4657                 // NB: we have to check that the contents of tiThis is a value type, otherwise we could use a
4658                 // constrained callvirt to illegally re-enter a .ctor on a value of reference type.
4659                 VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(),
4660                                "Bad call to a constructor");
4661             }
4662         }
4663
4664         if (pConstrainedResolvedToken != nullptr)
4665         {
4666             VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call");
4667
4668             typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass);
4669
4670             // We just dereference this and test for equality
4671             tiThis.DereferenceByRef();
4672             VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint),
4673                            "this type mismatch with constrained type operand");
4674
4675             // Now pretend the this type is the boxed constrained type, for the sake of subsequent checks
4676             tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass);
4677         }
4678
4679         // To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too
4680         if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef())
4681         {
4682             tiDeclaredThis.SetIsReadonlyByRef();
4683         }
4684
4685         VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch");
4686
4687         if (tiThis.IsByRef())
4688         {
4689             // Find the actual type where the method exists (as opposed to what is declared
4690             // in the metadata). This is to prevent passing a byref as the "this" argument
4691             // while calling methods like System.ValueType.GetHashCode() which expect boxed objects.
4692
4693             CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod);
4694             VerifyOrReturn(eeIsValueClass(actualClassHnd),
4695                            "Call to base type of valuetype (which is never a valuetype)");
4696         }
4697
4698         // Rules for non-virtual call to a non-final virtual method:
4699
4700         // Define:
4701         // The "this" pointer is considered to be "possibly written" if
4702         //   1. Its address has been taken (LDARGA 0) anywhere in the method.
4703         //   (or)
4704         //   2. It has been stored to (STARG.0) anywhere in the method.
4705
4706         // A non-virtual call to a non-final virtual method is only allowed if
4707         //   1. The this pointer passed to the callee is an instance of a boxed value type.
4708         //   (or)
4709         //   2. The this pointer passed to the callee is the current method's this pointer.
4710         //      (and) The current method's this pointer is not "possibly written".
4711
4712         // Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to
4713         // virtual methods.  (Luckily this does not affect .ctors, since they are not virtual).
4714         // This is stronger than is strictly needed, but implementing a laxer rule is significantly
4715         // harder and more error prone.
4716
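        // As a sketch (hypothetical IL, for illustration only): a sequence such as
        //
        //     ldarg.0
        //     call   instance void BaseClass::SomeVirtualMethod()
        //
        // is accepted when arg 0 is the caller's unmodified 'this' pointer, but the same
        // call is rejected if the method also contains 'ldarga.s 0' or 'starg.s 0'
        // anywhere, since 'this' is then considered "possibly written".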
4717         if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0)
4718 #ifdef DEBUG
4719             && StrictCheckForNonVirtualCallToVirtualMethod()
4720 #endif
4721                 )
4722         {
4723             if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
4724             {
4725                 VerifyOrReturn(
4726                     tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly() || verIsBoxedValueType(tiThis),
4727                     "The 'this' parameter to the call must be either the calling method's 'this' parameter or "
4728                     "a boxed value type.");
4729             }
4730         }
4731     }
4732
4733     // check any constraints on the callee's class and type parameters
4734     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass),
4735                    "method has unsatisfied class constraints");
4736     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod),
4737                    "method has unsatisfied method constraints");
4738
4739     if (mflags & CORINFO_FLG_PROTECTED)
4740     {
4741         VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
4742                        "Can't access protected method");
4743     }
4744
4745     // Get the exact view of the signature for an array method
4746     if (sig->retType != CORINFO_TYPE_VOID)
4747     {
4748         eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass);
4749     }
4750
4751     // "readonly." prefixed calls only allowed for the Address operation on arrays.
4752     // The methods supported by array types are under the control of the EE
4753     // so we can trust that only the Address operation returns a byref.
4754     if (readonlyCall)
4755     {
4756         typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass);
4757         VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(),
4758                        "unexpected use of readonly prefix");
4759     }
4760
4761     // Verify the tailcall
4762     if (tailCall)
4763     {
4764         verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false);
4765     }
4766 }
4767
4768 /*****************************************************************************
4769  *  Checks that a delegate creation is done using the following pattern:
4770  *     dup
4771  *     ldvirtftn targetMemberRef
4772  *  OR
4773  *     ldftn targetMemberRef
4774  *
4775  * 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if
4776  *  not in this basic block)
4777  *
4778  *  targetMemberRef is read from the code sequence.
4779  *  targetMemberRef is validated iff verificationNeeded.
4780  */
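//
// A note on the offsets used below, offered as a reader aid (per the ECMA-335 opcode
// encodings rather than anything computed in this file): ldftn and ldvirtftn are
// two-byte opcodes (FE 06 and FE 07) followed by a 4-byte token, and dup is the
// one-byte opcode 25. Hence the token begins at delegateCreateStart[2] for the ldftn
// form and at delegateCreateStart[3] for the dup + ldvirtftn form.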
4781
4782 BOOL Compiler::verCheckDelegateCreation(const BYTE*  delegateCreateStart,
4783                                         const BYTE*  codeAddr,
4784                                         mdMemberRef& targetMemberRef)
4785 {
4786     if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
4787     {
4788         targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]);
4789         return TRUE;
4790     }
4791     else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr))
4792     {
4793         targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]);
4794         return TRUE;
4795     }
4796
4797     return FALSE;
4798 }
4799
4800 typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType)
4801 {
4802     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
4803     typeInfo ptrVal     = verVerifyLDIND(tiTo, instrType);
4804     typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack();
4805     if (!tiCompatibleWith(value, normPtrVal, true))
4806     {
4807         Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch");
4808         compUnsafeCastUsed = true;
4809     }
4810     return ptrVal;
4811 }
4812
4813 typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType)
4814 {
4815     assert(!instrType.IsStruct());
4816
4817     typeInfo ptrVal;
4818     if (ptr.IsByRef())
4819     {
4820         ptrVal = DereferenceByRef(ptr);
4821         if (instrType.IsObjRef() && !ptrVal.IsObjRef())
4822         {
4823             Verify(false, "bad pointer");
4824             compUnsafeCastUsed = true;
4825         }
4826         else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal))
4827         {
4828             Verify(false, "pointer not consistent with instr");
4829             compUnsafeCastUsed = true;
4830         }
4831     }
4832     else
4833     {
4834         Verify(false, "pointer not byref");
4835         compUnsafeCastUsed = true;
4836     }
4837
4838     return ptrVal;
4839 }
4840
4841 // Verify that the field is used properly.  'tiThis' is NULL for statics,
4842 // 'fieldFlags' is the field's attributes, and mutator is TRUE if it is a
4843 // ld*flda or a st*fld.
4844 // 'enclosingClass' is given if we are accessing a field in some specific type.
4845
4846 void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN*   pResolvedToken,
4847                               const CORINFO_FIELD_INFO& fieldInfo,
4848                               const typeInfo*           tiThis,
4849                               BOOL                      mutator,
4850                               BOOL                      allowPlainStructAsThis)
4851 {
4852     CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass;
4853     unsigned             fieldFlags     = fieldInfo.fieldFlags;
4854     CORINFO_CLASS_HANDLE instanceClass =
4855         info.compClassHnd; // for statics, we imagine the instance is the current class.
4856
4857     bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0);
4858     if (mutator)
4859     {
4860         Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA based static");
4861         if ((fieldFlags & CORINFO_FLG_FIELD_FINAL))
4862         {
4863             Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd &&
4864                        info.compIsStatic == isStaticField,
4865                    "bad use of initonly field (set or address taken)");
4866         }
4867     }
4868
4869     if (tiThis == nullptr)
4870     {
4871         Verify(isStaticField, "used static opcode with non-static field");
4872     }
4873     else
4874     {
4875         typeInfo tThis = *tiThis;
4876
4877         if (allowPlainStructAsThis && tThis.IsValueClass())
4878         {
4879             tThis.MakeByRef();
4880         }
4881
4882         // If it is null, we assume we can access it (since it will AV shortly)
4883         // If it is anything but a reference class, there is no hierarchy, so
4884         // again, we don't need the precise instance class to compute 'protected' access
4885         if (tiThis->IsType(TI_REF))
4886         {
4887             instanceClass = tiThis->GetClassHandleForObjRef();
4888         }
4889
4890         // Note that even if the field is static, we require that the this pointer
4891         // satisfy the same constraints as a non-static field.  This happens to
4892         // be simpler and seems reasonable.
4893         typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass);
4894         if (tiDeclaredThis.IsValueClass())
4895         {
4896             tiDeclaredThis.MakeByRef();
4897
4898             // we allow read-only tThis, on any field access (even stores!), because if the
4899             // class implementor wants to prohibit stores he should make the field private.
4900             // we do this by setting the read-only bit on the type we compare tThis to.
4901             tiDeclaredThis.SetIsReadonlyByRef();
4902         }
4903         else if (verTrackObjCtorInitState && tThis.IsThisPtr())
4904         {
4905             // Any field access is legal on "uninitialized" this pointers.
4906             // The easiest way to implement this is to simply set the
4907             // initialized bit for the duration of the type check on the
4908             // field access only.  It does not change the state of the "this"
4909             // for the function as a whole. Note that the "tThis" is a copy
4910             // of the original "this" type (*tiThis) passed in.
4911             tThis.SetInitialisedObjRef();
4912         }
4913
4914         Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch");
4915     }
4916
4917     // Presently the JIT does not check that we don't store or take the address of init-only fields
4918     // since we cannot guarantee their immutability and it is not a security issue.
4919
4920     // check any constraints on the field's class --- accessing the field might cause a class constructor to run.
4921     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass),
4922                    "field has unsatisfied class constraints");
4923     if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED)
4924     {
4925         Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass),
4926                "Accessing protected method through wrong type.");
4927     }
4928 }
4929
4930 void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode)
4931 {
4932     if (tiOp1.IsNumberType())
4933     {
4934 #ifdef _TARGET_64BIT_
4935         Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch");
4936 #else  // !_TARGET_64BIT_
4937         // [10/17/2013] Consider changing this: to put on my verification lawyer hat,
4938         // this is non-conforming to the ECMA Spec: types don't have to be equivalent,
4939         // but compatible, since we can coalesce native int with int32 (see section III.1.5).
4940         Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch");
4941 #endif // !_TARGET_64BIT_
4942     }
4943     else if (tiOp1.IsObjRef())
4944     {
4945         switch (opcode)
4946         {
4947             case CEE_BEQ_S:
4948             case CEE_BEQ:
4949             case CEE_BNE_UN_S:
4950             case CEE_BNE_UN:
4951             case CEE_CEQ:
4952             case CEE_CGT_UN:
4953                 break;
4954             default:
4955                 Verify(FALSE, "Cond not allowed on object types");
4956         }
4957         Verify(tiOp2.IsObjRef(), "Cond type mismatch");
4958     }
4959     else if (tiOp1.IsByRef())
4960     {
4961         Verify(tiOp2.IsByRef(), "Cond type mismatch");
4962     }
4963     else
4964     {
4965         Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch");
4966     }
4967 }
4968
4969 void Compiler::verVerifyThisPtrInitialised()
4970 {
4971     if (verTrackObjCtorInitState)
4972     {
4973         Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized");
4974     }
4975 }
4976
4977 BOOL Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target)
4978 {
4979     // Either target == context, in this case calling an alternate .ctor
4980     // Or target is the immediate parent of context
4981
4982     return ((target == context) || (target == info.compCompHnd->getParentType(context)));
4983 }
4984
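/*****************************************************************************
 *
 *  impImportLdvirtftn: build the tree that computes the 'ldvirtftn' target
 *  address for 'thisPtr'. Roughly (see the body for details): ReadyToRun code
 *  without a runtime lookup uses CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR,
 *  CoreRT uses a ReadyToRun generic-handle lookup when a runtime lookup is
 *  needed, and otherwise CORINFO_HELP_VIRTUAL_FUNC_PTR is called with the
 *  'this' pointer plus the exact type and method descriptors.
 */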
4985 GenTreePtr Compiler::impImportLdvirtftn(GenTreePtr              thisPtr,
4986                                         CORINFO_RESOLVED_TOKEN* pResolvedToken,
4987                                         CORINFO_CALL_INFO*      pCallInfo)
4988 {
4989     if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE))
4990     {
4991         NO_WAY("Virtual call to a function added via EnC is not supported");
4992     }
4993
4994 #ifdef FEATURE_READYTORUN_COMPILER
4995     if (opts.IsReadyToRun())
4996     {
4997         if (!pCallInfo->exactContextNeedsRuntimeLookup)
4998         {
4999             GenTreeCall* call = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, GTF_EXCEPT,
5000                                                     gtNewArgList(thisPtr));
5001
5002             call->setEntryPoint(pCallInfo->codePointerLookup.constLookup);
5003
5004             return call;
5005         }
5006
5007         // We need a runtime lookup. CoreRT has a ReadyToRun helper for that too.
5008         if (IsTargetAbi(CORINFO_CORERT_ABI))
5009         {
5010             GenTreePtr ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind);
5011
5012             return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
5013                                              gtNewArgList(ctxTree), &pCallInfo->codePointerLookup.lookupKind);
5014         }
5015     }
5016 #endif
5017
5018     // Get the exact descriptor for the static callsite
5019     GenTreePtr exactTypeDesc = impParentClassTokenToHandle(pResolvedToken);
5020     if (exactTypeDesc == nullptr)
5021     { // compDonotInline()
5022         return nullptr;
5023     }
5024
5025     GenTreePtr exactMethodDesc = impTokenToHandle(pResolvedToken);
5026     if (exactMethodDesc == nullptr)
5027     { // compDonotInline()
5028         return nullptr;
5029     }
5030
5031     GenTreeArgList* helpArgs = gtNewArgList(exactMethodDesc);
5032
5033     helpArgs = gtNewListNode(exactTypeDesc, helpArgs);
5034
5035     helpArgs = gtNewListNode(thisPtr, helpArgs);
5036
5037     // Call helper function.  This gets the target address of the final destination callsite.
5038
5039     return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, GTF_EXCEPT, helpArgs);
5040 }
5041
5042 /*****************************************************************************
5043  *
5044  *  Build and import a box node
5045  */
5046
5047 void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
5048 {
5049     // Get the tree for the type handle for the boxed object.  In the case
5050     // of shared generic code or ngen'd code this might be an embedded
5051     // computation.
5052     // Note we can only box it if the class constructor has been called;
5053     // we can always do it on primitive types.
5054
5055     GenTreePtr op1 = nullptr;
5056     GenTreePtr op2 = nullptr;
5057     var_types  lclTyp;
5058
5059     impSpillSpecialSideEff();
5060
5061     // Now get the expression to box from the stack.
5062     CORINFO_CLASS_HANDLE operCls;
5063     GenTreePtr           exprToBox = impPopStack(operCls).val;
5064
5065     CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
5066     if (boxHelper == CORINFO_HELP_BOX)
5067     {
5068         // we are doing 'normal' boxing.  This means that we can inline the box operation
5069         // Box(expr) gets morphed into
5070         // temp = new(clsHnd)
5071         // cpobj(temp + sizeof(void*), expr, clsHnd)
5072         // push temp
5073         // The code paths differ slightly below for structs and primitives because
5074         // "cpobj" differs in these cases.  In one case you get
5075         //    impAssignStructPtr(temp + sizeof(void*), expr, clsHnd)
5076         // and the other you get
5077         //    *(temp + sizeof(void*)) = expr
5078
5079         if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM)
5080         {
5081             impBoxTemp = lvaGrabTemp(true DEBUGARG("Box Helper"));
5082         }
5083
5084         // needs to stay in use until this box expression is appended to
5085         // some other node.  We approximate this by keeping it alive until
5086         // the opcode stack becomes empty
5087         impBoxTempInUse = true;
5088
5089 #ifdef FEATURE_READYTORUN_COMPILER
5090         bool usingReadyToRunHelper = false;
5091
5092         if (opts.IsReadyToRun())
5093         {
5094             op1                   = impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
5095             usingReadyToRunHelper = (op1 != nullptr);
5096         }
5097
5098         if (!usingReadyToRunHelper)
5099 #endif
5100         {
5101             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
5102             // and the newfast call with a single call to a dynamic R2R cell that will:
5103             //      1) Load the context
5104             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
5105             //      3) Allocate and return the new object for boxing
5106             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
5107
5108             // Ensure that the value class is restored
5109             op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5110             if (op2 == nullptr)
5111             { // compDonotInline()
5112                 return;
5113             }
5114
5115             op1 = gtNewHelperCallNode(info.compCompHnd->getNewHelper(pResolvedToken, info.compMethodHnd), TYP_REF, 0,
5116                                       gtNewArgList(op2));
5117         }
5118
5119         /* Remember that this basic block contains a 'new' of an object (the box temp) */
5120         compCurBB->bbFlags |= BBF_HAS_NEWOBJ;
5121
5122         GenTreePtr asg = gtNewTempAssign(impBoxTemp, op1);
5123
5124         GenTreePtr asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
5125
5126         op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
5127         op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
5128         op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2);
5129
5130         if (varTypeIsStruct(exprToBox))
5131         {
5132             assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls));
5133             op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL);
5134         }
5135         else
5136         {
5137             lclTyp = exprToBox->TypeGet();
5138             if (lclTyp == TYP_BYREF)
5139             {
5140                 lclTyp = TYP_I_IMPL;
5141             }
5142             CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass);
5143             if (impIsPrimitive(jitType))
5144             {
5145                 lclTyp = JITtype2varType(jitType);
5146             }
5147             assert(genActualType(exprToBox->TypeGet()) == genActualType(lclTyp) ||
5148                    varTypeIsFloating(lclTyp) == varTypeIsFloating(exprToBox->TypeGet()));
5149             var_types srcTyp = exprToBox->TypeGet();
5150             var_types dstTyp = lclTyp;
5151
5152             if (srcTyp != dstTyp)
5153             {
5154                 assert((varTypeIsFloating(srcTyp) && varTypeIsFloating(dstTyp)) ||
5155                        (varTypeIsIntegral(srcTyp) && varTypeIsIntegral(dstTyp)));
5156                 exprToBox = gtNewCastNode(dstTyp, exprToBox, dstTyp);
5157             }
5158             op1 = gtNewAssignNode(gtNewOperNode(GT_IND, lclTyp, op1), exprToBox);
5159         }
5160
5161         op2 = gtNewLclvNode(impBoxTemp, TYP_REF);
5162         op1 = gtNewOperNode(GT_COMMA, TYP_REF, op1, op2);
5163
5164         // Record that this is a "box" node.
5165         op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt);
5166
5167         // If it is a value class, mark the "box" node.  We can use this information
5168         // to optimise several cases:
5169         //    "box(x) == null" --> false
5170         //    "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod"
5171         //    "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod"
5172
5173         op1->gtFlags |= GTF_BOX_VALUE;
5174         assert(op1->IsBoxedValue());
5175         assert(asg->gtOper == GT_ASG);
5176     }
5177     else
5178     {
5179         // Don't optimize, just call the helper and be done with it
5180
5181         // Ensure that the value class is restored
5182         op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5183         if (op2 == nullptr)
5184         { // compDonotInline()
5185             return;
5186         }
5187
5188         GenTreeArgList* args = gtNewArgList(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true));
5189         op1                  = gtNewHelperCallNode(boxHelper, TYP_REF, GTF_EXCEPT, args);
5190     }
5191
5192     /* Push the result back on the stack, */
5193     /* even if clsHnd is a value class we want the TI_REF */
5194     typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass));
5195     impPushOnStack(op1, tiRetVal);
5196 }
5197
5198 //------------------------------------------------------------------------
5199 // impImportNewObjArray: Build and import `new` of multi-dimensional array
5200 //
5201 // Arguments:
5202 //    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5203 //                     by a call to CEEInfo::resolveToken().
5204 //    pCallInfo - The CORINFO_CALL_INFO that has been initialized
5205 //                by a call to CEEInfo::getCallInfo().
5206 //
5207 // Assumptions:
5208 //    The multi-dimensional array constructor arguments (array dimensions) are
5209 //    pushed on the IL stack on entry to this method.
5210 //
5211 // Notes:
5212 //    Multi-dimensional array constructors are imported as calls to a JIT
5213 //    helper, not as regular calls.
5214
5215 void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
5216 {
5217     GenTreePtr classHandle = impParentClassTokenToHandle(pResolvedToken);
5218     if (classHandle == nullptr)
5219     { // compDonotInline()
5220         return;
5221     }
5222
5223     assert(pCallInfo->sig.numArgs);
5224
5225     GenTreePtr      node;
5226     GenTreeArgList* args;
5227
5228     //
5229     // There are two different JIT helpers that can be used to allocate
5230     // multi-dimensional arrays:
5231     //
5232     // - CORINFO_HELP_NEW_MDARR - takes the array dimensions as varargs.
5233     //      This variant is deprecated. It should be eventually removed.
5234     //
5235     // - CORINFO_HELP_NEW_MDARR_NONVARARG - takes the array dimensions as
5236     //      pointer to block of int32s. This variant is more portable.
5237     //
5238     // The non-varargs helper is enabled for CoreRT only for now. Enabling this
5239     // unconditionally would require ReadyToRun version bump.
5240     //
5241     CLANG_FORMAT_COMMENT_ANCHOR;
5242
5243 #if COR_JIT_EE_VERSION > 460
5244     if (!opts.IsReadyToRun() || IsTargetAbi(CORINFO_CORERT_ABI))
5245     {
5246         LclVarDsc* newObjArrayArgsVar;
5247
5248         // Reuse the temp used to pass the array dimensions to avoid bloating
5249         // the stack frame in case there are multiple calls to multi-dim array
5250         // constructors within a single method.
5251         if (lvaNewObjArrayArgs == BAD_VAR_NUM)
5252         {
5253             lvaNewObjArrayArgs                       = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs"));
5254             lvaTable[lvaNewObjArrayArgs].lvType      = TYP_BLK;
5255             lvaTable[lvaNewObjArrayArgs].lvExactSize = 0;
5256         }
5257
5258         // Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers
5259         // for our call to CORINFO_HELP_NEW_MDARR_NONVARARG.
5260         lvaTable[lvaNewObjArrayArgs].lvExactSize =
5261             max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32));
5262
5263         // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects
5264         // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments
5265         // to one allocation at a time.
5266         impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray"));
5267
5268         //
5269         // The arguments of the CORINFO_HELP_NEW_MDARR_NONVARARG helper are:
5270         //  - Array class handle
5271         //  - Number of dimension arguments
5272         //  - Pointer to block of int32 dimensions - address  of lvaNewObjArrayArgs temp.
5273         //
5274
5275         node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5276         node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node);
5277
5278         // Pop dimension arguments from the stack one at a time and store it
5279         // into lvaNewObjArrayArgs temp.
5280         for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--)
5281         {
5282             GenTreePtr arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT);
5283
5284             GenTreePtr dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5285             dest            = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest);
5286             dest            = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest,
5287                                  new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i));
5288             dest = gtNewOperNode(GT_IND, TYP_INT, dest);
5289
5290             node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node);
5291         }
5292
5293         args = gtNewArgList(node);
5294
5295         // pass number of arguments to the helper
5296         args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5297
5298         args = gtNewListNode(classHandle, args);
5299
5300         node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR_NONVARARG, TYP_REF, 0, args);
5301     }
5302     else
5303 #endif
5304     {
5305         //
5306         // The varargs helper needs the type and method handles as last
5307         // and  last-1 param (this is a cdecl call, so args will be
5308         // pushed in reverse order on the CPU stack)
5309         //
5310
5311         args = gtNewArgList(classHandle);
5312
5313         // pass number of arguments to the helper
5314         args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5315
5316         unsigned argFlags = 0;
5317         args              = impPopList(pCallInfo->sig.numArgs, &argFlags, &pCallInfo->sig, args);
5318
5319         node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, 0, args);
5320
5321         // varargs, so we pop the arguments
5322         node->gtFlags |= GTF_CALL_POP_ARGS;
5323
5324 #ifdef DEBUG
5325         // At the present time we don't track Caller pop arguments
5326         // that have GC references in them
5327         for (GenTreeArgList* temp = args; temp; temp = temp->Rest())
5328         {
5329             assert(temp->Current()->gtType != TYP_REF);
5330         }
5331 #endif
5332     }
5333
5334     node->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5335     node->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass;
5336
5337     // Remember that this basic block contains 'new' of a md array
5338     compCurBB->bbFlags |= BBF_HAS_NEWARRAY;
5339
5340     impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass));
5341 }
5342
5343 GenTreePtr Compiler::impTransformThis(GenTreePtr              thisPtr,
5344                                       CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
5345                                       CORINFO_THIS_TRANSFORM  transform)
5346 {
5347     switch (transform)
5348     {
5349         case CORINFO_DEREF_THIS:
5350         {
5351             GenTreePtr obj = thisPtr;
5352
5353             // This does a LDIND on the obj, which should be a byref pointing to a ref
5354             impBashVarAddrsToI(obj);
5355             assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF);
5356             CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5357
5358             obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj);
5359             // ldind could point anywhere, for example a boxed class static int
5360             obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
5361
5362             return obj;
5363         }
5364
5365         case CORINFO_BOX_THIS:
5366         {
5367             // Constraint calls where there might be no
5368             // unboxed entry point require us to implement the call via helper.
5369             // These only occur when a possible target of the call
5370             // may have inherited an implementation of an interface
5371             // method from System.Object or System.ValueType.  The EE does not provide us with
5372             // "unboxed" versions of these methods.
5373
5374             GenTreePtr obj = thisPtr;
5375
5376             assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL);
5377             obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj);
5378             obj->gtFlags |= GTF_EXCEPT;
5379
5380             CorInfoType jitTyp  = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5381             var_types   objType = JITtype2varType(jitTyp);
5382             if (impIsPrimitive(jitTyp))
5383             {
5384                 if (obj->OperIsBlk())
5385                 {
5386                     obj->ChangeOperUnchecked(GT_IND);
5387
5388                     // Obj could point anywhere, for example a boxed class static int
5389                     obj->gtFlags |= GTF_IND_TGTANYWHERE;
5390                     obj->gtOp.gtOp2 = nullptr; // must be zero for tree walkers
5391                 }
5392
5393                 obj->gtType = JITtype2varType(jitTyp);
5394                 assert(varTypeIsArithmetic(obj->gtType));
5395             }
5396
5397             // This pushes on the dereferenced byref
5398             // This is then used immediately to box.
5399             impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack());
5400
5401             // This pops off the byref-to-a-value-type remaining on the stack and
5402             // replaces it with a boxed object.
5403             // This is then used as the object to the virtual call immediately below.
5404             impImportAndPushBox(pConstrainedResolvedToken);
5405             if (compDonotInline())
5406             {
5407                 return nullptr;
5408             }
5409
5410             obj = impPopStack().val;
5411             return obj;
5412         }
5413         case CORINFO_NO_THIS_TRANSFORM:
5414         default:
5415             return thisPtr;
5416     }
5417 }
5418
5419 //------------------------------------------------------------------------
5420 // impCanPInvokeInline: check whether PInvoke inlining should be enabled in the current method.
5421 //
5422 // Return Value:
5423 //    true if PInvoke inlining should be enabled in the current method, false otherwise
5424 //
5425 // Notes:
5426 //    Checks a number of ambient conditions where we could pinvoke but choose not to
5427
5428 bool Compiler::impCanPInvokeInline()
5429 {
5430     return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (compCodeOpt() != SMALL_CODE) &&
5431            (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
5432         ;
5433 }
5434
5435 //------------------------------------------------------------------------
5436 // impCanPInvokeInlineCallSite: basic legality checks using information
5437 // from a call to see if the call qualifies as an inline pinvoke.
5438 //
5439 // Arguments:
5440 //    block      - block containing the call, or for inlinees, block
5441 //                 containing the call being inlined
5442 //
5443 // Return Value:
5444 //    true if this call can legally qualify as an inline pinvoke, false otherwise
5445 //
5446 // Notes:
5447 //    For runtimes that support exception handling interop there are
5448 //    restrictions on using inline pinvoke in handler regions.
5449 //
5450 //    * We have to disable pinvoke inlining inside of filters because
5451 //    in case the main execution (i.e. in the try block) is inside
5452 //    unmanaged code, we cannot reuse the inlined stub (we still need
5453 //    the original state until we are in the catch handler)
5454 //
5455 //    * We disable pinvoke inlining inside handlers since the GSCookie
5456 //    is in the inlined Frame (see
5457 //    CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but
5458 //    this would not protect framelets/return-address of handlers.
5459 //
5460 //    These restrictions are currently also in place for CoreCLR but
5461 //    can be relaxed when coreclr/#8459 is addressed.
5462
5463 bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block)
5464 {
5465     if (block->hasHndIndex())
5466     {
5467         return false;
5468     }
5469
5470     // The remaining limitations do not apply to CoreRT
5471     if (IsTargetAbi(CORINFO_CORERT_ABI))
5472     {
5473         return true;
5474     }
5475
5476 #ifdef _TARGET_AMD64_
5477     // On x64, we disable pinvoke inlining inside of try regions.
5478     // Here is the comment from JIT64 explaining why:
5479     //
5480     //   [VSWhidbey: 611015] - because the jitted code links in the
5481     //   Frame (instead of the stub) we rely on the Frame not being
5482     //   'active' until inside the stub.  This normally happens by the
5483     //   stub setting the return address pointer in the Frame object
5484     //   inside the stub.  On a normal return, the return address
5485     //   pointer is zeroed out so the Frame can be safely re-used, but
5486     //   if an exception occurs, nobody zeros out the return address
5487     //   pointer.  Thus if we re-used the Frame object, it would go
5488     //   'active' as soon as we link it into the Frame chain.
5489     //
5490     //   Technically we only need to disable PInvoke inlining if we're
5491     //   in a handler or if we're in a try body with a catch or
5492     //   filter/except where other non-handler code in this method
5493     //   might run and try to re-use the dirty Frame object.
5494     //
5495     //   A desktop test case where this seems to matter is
5496     //   jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe
5497     if (block->hasTryIndex())
5498     {
5499         return false;
5500     }
5501 #endif // _TARGET_AMD64_
5502
5503     return true;
5504 }
5505
5506 //------------------------------------------------------------------------
5507 // impCheckForPInvokeCall: examine a call to see if it is a pinvoke and, if so,
5508 // whether it can be expressed as an inline pinvoke.
5509 //
5510 // Arguments:
5511 //    call       - tree for the call
5512 //    methHnd    - handle for the method being called (may be null)
5513 //    sig        - signature of the method being called
5514 //    mflags     - method flags for the method being called
5515 //    block      - block containing the call, or for inlinees, block
5516 //                 containing the call being inlined
5517 //
5518 // Notes:
5519 //   Sets GTF_CALL_M_PINVOKE on the call for pinvokes.
5520 //
5521 //   Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the
5522 //   call passes a combination of legality and profitability checks.
5523 //
5524 //   If GTF_CALL_UNMANAGED is set, increments info.compCallUnmanaged
5525
5526 void Compiler::impCheckForPInvokeCall(
5527     GenTreePtr call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block)
5528 {
5529     CorInfoUnmanagedCallConv unmanagedCallConv;
5530
5531     // If VM flagged it as Pinvoke, flag the call node accordingly
5532     if ((mflags & CORINFO_FLG_PINVOKE) != 0)
5533     {
5534         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_PINVOKE;
5535     }
5536
5537     if (methHnd)
5538     {
5539         if ((mflags & CORINFO_FLG_PINVOKE) == 0 || (mflags & CORINFO_FLG_NOSECURITYWRAP) == 0)
5540         {
5541             return;
5542         }
5543
5544         unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd);
5545     }
5546     else
5547     {
5548         CorInfoCallConv callConv = CorInfoCallConv(sig->callConv & CORINFO_CALLCONV_MASK);
5549         if (callConv == CORINFO_CALLCONV_NATIVEVARARG)
5550         {
5551             // Used by the IL Stubs.
5552             callConv = CORINFO_CALLCONV_C;
5553         }
5554         static_assert_no_msg((unsigned)CORINFO_CALLCONV_C == (unsigned)CORINFO_UNMANAGED_CALLCONV_C);
5555         static_assert_no_msg((unsigned)CORINFO_CALLCONV_STDCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_STDCALL);
5556         static_assert_no_msg((unsigned)CORINFO_CALLCONV_THISCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_THISCALL);
5557         unmanagedCallConv = CorInfoUnmanagedCallConv(callConv);
5558
5559         assert(!call->gtCall.gtCallCookie);
5560     }
5561
5562     if (unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_C && unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_STDCALL &&
5563         unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_THISCALL)
5564     {
5565         return;
5566     }
5567     optNativeCallCount++;
5568
5569     if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && methHnd == nullptr)
5570     {
5571         // PInvoke CALLI in IL stubs must be inlined
5572     }
5573     else
5574     {
5575         // Check legality
5576         if (!impCanPInvokeInlineCallSite(block))
5577         {
5578             return;
5579         }
5580
5581         // PInvoke CALL in IL stubs must be inlined on CoreRT. Skip the ambient conditions checks and
5582         // profitability checks
5583         if (!(opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && IsTargetAbi(CORINFO_CORERT_ABI)))
5584         {
5585             if (!impCanPInvokeInline())
5586             {
5587                 return;
5588             }
5589
5590             // Size-speed tradeoff: don't use inline pinvoke at rarely
5591             // executed call sites.  The non-inline version is more
5592             // compact.
5593             if (block->isRunRarely())
5594             {
5595                 return;
5596             }
5597         }
5598
5599         // The expensive check should be last
5600         if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig))
5601         {
5602             return;
5603         }
5604     }
5605
5606     JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s", info.compFullName));
5607
5608     call->gtFlags |= GTF_CALL_UNMANAGED;
5609     info.compCallUnmanaged++;
5610
5611     // AMD64 convention is the same for native and managed
5612     if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_C)
5613     {
5614         call->gtFlags |= GTF_CALL_POP_ARGS;
5615     }
5616
5617     if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_THISCALL)
5618     {
5619         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL;
5620     }
5621 }
5622
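//------------------------------------------------------------------------
// impImportIndirectCall: create the call node for a CEE_CALLI call site.
//
// Arguments:
//    sig      - call site signature
//    ilOffset - IL offset of the calli
//
// Return Value: a GT_CALL node that calls through the function pointer popped
//    from the evaluation stack; the call's arguments are popped separately by
//    the caller (impImportCall).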
5623 GenTreePtr Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, IL_OFFSETX ilOffset)
5624 {
5625     var_types callRetTyp = JITtype2varType(sig->retType);
5626
5627     /* The function pointer is on top of the stack - It may be a
5628      * complex expression. As it is evaluated after the args,
5629      * it may cause registered args to be spilled. Simply spill it.
5630      */
5631
5632     // Ignore this trivial case.
5633     if (impStackTop().val->gtOper != GT_LCL_VAR)
5634     {
5635         impSpillStackEntry(verCurrentState.esStackDepth - 1,
5636                            BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall"));
5637     }
5638
5639     /* Get the function pointer */
5640
5641     GenTreePtr fptr = impPopStack().val;
5642     assert(genActualType(fptr->gtType) == TYP_I_IMPL);
5643
5644 #ifdef DEBUG
5645     // This temporary must never be converted to a double in stress mode,
5646     // because that can introduce a call to the cast helper after the
5647     // arguments have already been evaluated.
5648
5649     if (fptr->OperGet() == GT_LCL_VAR)
5650     {
5651         lvaTable[fptr->gtLclVarCommon.gtLclNum].lvKeepType = 1;
5652     }
5653 #endif
5654
5655     /* Create the call node */
5656
5657     GenTreePtr call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
5658
5659     call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
5660
5661     return call;
5662 }
5663
5664 /*****************************************************************************/
5665
5666 void Compiler::impPopArgsForUnmanagedCall(GenTreePtr call, CORINFO_SIG_INFO* sig)
5667 {
5668     assert(call->gtFlags & GTF_CALL_UNMANAGED);
5669
5670     /* Since we push the arguments in reverse order (i.e. right -> left)
5671      * spill any side effects from the stack
5672      *
5673      * OBS: If there is only one side effect we do not need to spill it;
5674      *      thus we have to spill all side effects except the last one
5675      */
5676
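    // Illustrative sketch (example not present in the original comment): for foo(f(), g()) where
    // both f() and g() have side effects, popping the args in reverse order would evaluate g()
    // before f().  Spilling f() to a temp first preserves the left-to-right side-effect ordering;
    // the last side effect can stay in place because every earlier one has already been forced
    // into a temp.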
5677     unsigned lastLevelWithSideEffects = UINT_MAX;
5678
5679     unsigned argsToReverse = sig->numArgs;
5680
5681     // For "thiscall", the first argument goes in a register. Since its
5682     // order does not need to be changed, we do not need to spill it
5683
5684     if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
5685     {
5686         assert(argsToReverse);
5687         argsToReverse--;
5688     }
5689
5690 #ifndef _TARGET_X86_
5691     // Don't reverse args on ARM or x64 - first four args always placed in regs in order
5692     argsToReverse = 0;
5693 #endif
5694
5695     for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++)
5696     {
5697         if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF)
5698         {
5699             assert(lastLevelWithSideEffects == UINT_MAX);
5700
5701             impSpillStackEntry(level,
5702                                BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect"));
5703         }
5704         else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT)
5705         {
5706             if (lastLevelWithSideEffects != UINT_MAX)
5707             {
5708                 /* We had a previous side effect - must spill it */
5709                 impSpillStackEntry(lastLevelWithSideEffects,
5710                                    BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect"));
5711
5712                 /* Record the level for the current side effect in case we will spill it */
5713                 lastLevelWithSideEffects = level;
5714             }
5715             else
5716             {
5717                 /* This is the first side effect encountered - record its level */
5718
5719                 lastLevelWithSideEffects = level;
5720             }
5721         }
5722     }
5723
5724     /* The argument list is now "clean" - no out-of-order side effects
5725      * Pop the argument list in reverse order */
5726
5727     unsigned   argFlags = 0;
5728     GenTreePtr args     = call->gtCall.gtCallArgs =
5729         impPopRevList(sig->numArgs, &argFlags, sig, sig->numArgs - argsToReverse);
5730
5731     if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
5732     {
5733         GenTreePtr thisPtr = args->Current();
5734         impBashVarAddrsToI(thisPtr);
5735         assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF);
5736     }
5737
5738     if (args)
5739     {
5740         call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5741     }
5742 }
5743
5744 //------------------------------------------------------------------------
5745 // impInitClass: Build a node to initialize the class before accessing the
5746 //               field if necessary
5747 //
5748 // Arguments:
5749 //    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5750 //                     by a call to CEEInfo::resolveToken().
5751 //
5752 // Return Value: If needed, a pointer to the node that will perform the class
5753 //               initialization.  Otherwise, nullptr.
5754 //
5755
5756 GenTreePtr Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken)
5757 {
5758     CorInfoInitClassResult initClassResult =
5759         info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle);
5760
5761     if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0)
5762     {
5763         return nullptr;
5764     }
5765     BOOL runtimeLookup;
5766
5767     GenTreePtr node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup);
5768
5769     if (node == nullptr)
5770     {
5771         assert(compDonotInline());
5772         return nullptr;
5773     }
5774
5775     if (runtimeLookup)
5776     {
5777         node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, 0, gtNewArgList(node));
5778     }
5779     else
5780     {
5781         // Call the shared non-GC static helper, as it's the fastest
5782         node = fgGetSharedCCtor(pResolvedToken->hClass);
5783     }
5784
5785     return node;
5786 }
5787
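//------------------------------------------------------------------------
// impImportStaticReadOnlyField: read the current value of a static read-only
//    field whose address is known at jit time and wrap it in a constant node.
//
// Arguments:
//    fldAddr - address of the field's data
//    lclTyp  - jit type of the field
//
// Return Value: a GT_CNS_INT, GT_CNS_LNG or GT_CNS_DBL node holding the value;
//    small integer types are widened to int.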
5788 GenTreePtr Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp)
5789 {
5790     GenTreePtr op1 = nullptr;
5791
5792     switch (lclTyp)
5793     {
5794         int     ival;
5795         __int64 lval;
5796         double  dval;
5797
5798         case TYP_BOOL:
5799             ival = *((bool*)fldAddr);
5800             goto IVAL_COMMON;
5801
5802         case TYP_BYTE:
5803             ival = *((signed char*)fldAddr);
5804             goto IVAL_COMMON;
5805
5806         case TYP_UBYTE:
5807             ival = *((unsigned char*)fldAddr);
5808             goto IVAL_COMMON;
5809
5810         case TYP_SHORT:
5811             ival = *((short*)fldAddr);
5812             goto IVAL_COMMON;
5813
5814         case TYP_CHAR:
5815         case TYP_USHORT:
5816             ival = *((unsigned short*)fldAddr);
5817             goto IVAL_COMMON;
5818
5819         case TYP_UINT:
5820         case TYP_INT:
5821             ival = *((int*)fldAddr);
5822         IVAL_COMMON:
5823             op1 = gtNewIconNode(ival);
5824             break;
5825
5826         case TYP_LONG:
5827         case TYP_ULONG:
5828             lval = *((__int64*)fldAddr);
5829             op1  = gtNewLconNode(lval);
5830             break;
5831
5832         case TYP_FLOAT:
5833             dval = *((float*)fldAddr);
5834             op1  = gtNewDconNode(dval);
5835 #if !FEATURE_X87_DOUBLES
5836             // X87 stack doesn't differentiate between float/double
5837             // so R4 is treated as R8, but everybody else does
5838             op1->gtType = TYP_FLOAT;
5839 #endif // FEATURE_X87_DOUBLES
5840             break;
5841
5842         case TYP_DOUBLE:
5843             dval = *((double*)fldAddr);
5844             op1  = gtNewDconNode(dval);
5845             break;
5846
5847         default:
5848             assert(!"Unexpected lclTyp");
5849             break;
5850     }
5851
5852     return op1;
5853 }
5854
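//------------------------------------------------------------------------
// impImportStaticFieldAccess: build the tree used to access a static field.
//
// Arguments:
//    pResolvedToken - resolved token for the field
//    access         - access flags; if CORINFO_ACCESS_ADDRESS is set the field's
//                     address is produced, otherwise its value is loaded
//    pFieldInfo     - EE-supplied field info, which selects the access strategy
//                     (generics statics helper, shared statics helper,
//                     ReadyToRun helper, or direct address)
//    lclTyp         - jit type of the field
//
// Return Value: tree yielding the field's value or address.  Statics boxed in
//    the GC heap (CORINFO_FLG_FIELD_STATIC_IN_HEAP) get an extra indirection
//    through the boxed object.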
5855 GenTreePtr Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
5856                                                 CORINFO_ACCESS_FLAGS    access,
5857                                                 CORINFO_FIELD_INFO*     pFieldInfo,
5858                                                 var_types               lclTyp)
5859 {
5860     GenTreePtr op1;
5861
5862     switch (pFieldInfo->fieldAccessor)
5863     {
5864         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
5865         {
5866             assert(!compIsForInlining());
5867
5868             // We first call a special helper to get the statics base pointer
5869             op1 = impParentClassTokenToHandle(pResolvedToken);
5870
5871             // compIsForInlining() is false so we should never get NULL here
5872             assert(op1 != nullptr);
5873
5874             var_types type = TYP_BYREF;
5875
5876             switch (pFieldInfo->helper)
5877             {
5878                 case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
5879                     type = TYP_I_IMPL;
5880                     break;
5881                 case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
5882                 case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
5883                 case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
5884                     break;
5885                 default:
5886                     assert(!"unknown generic statics helper");
5887                     break;
5888             }
5889
5890             op1 = gtNewHelperCallNode(pFieldInfo->helper, type, 0, gtNewArgList(op1));
5891
5892             FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5893             op1              = gtNewOperNode(GT_ADD, type, op1,
5894                                 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
5895         }
5896         break;
5897
5898         case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
5899         {
5900 #ifdef FEATURE_READYTORUN_COMPILER
5901             if (opts.IsReadyToRun())
5902             {
5903                 unsigned callFlags = 0;
5904
5905                 if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
5906                 {
5907                     callFlags |= GTF_CALL_HOISTABLE;
5908                 }
5909
5910                 op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF, callFlags);
5911
5912                 op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
5913             }
5914             else
5915 #endif
5916             {
5917                 op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper);
5918             }
5919
5920             {
5921                 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5922                 op1              = gtNewOperNode(GT_ADD, op1->TypeGet(), op1,
5923                                     new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, pFieldInfo->offset, fs));
5924             }
5925             break;
5926         }
5927 #if COR_JIT_EE_VERSION > 460
5928         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
5929         {
5930 #ifdef FEATURE_READYTORUN_COMPILER
5931             noway_assert(opts.IsReadyToRun());
5932             CORINFO_LOOKUP_KIND kind = info.compCompHnd->getLocationOfThisType(info.compMethodHnd);
5933             assert(kind.needsRuntimeLookup);
5934
5935             GenTreePtr      ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);
5936             GenTreeArgList* args    = gtNewArgList(ctxTree);
5937
5938             unsigned callFlags = 0;
5939
5940             if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
5941             {
5942                 callFlags |= GTF_CALL_HOISTABLE;
5943             }
5944             var_types type = TYP_BYREF;
5945             op1            = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, callFlags, args);
5946
5947             op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
5948             FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5949             op1              = gtNewOperNode(GT_ADD, type, op1,
5950                                 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
5951 #else
5952             unreached();
5953 #endif // FEATURE_READYTORUN_COMPILER
5954         }
5955         break;
5956 #endif // COR_JIT_EE_VERSION > 460
5957         default:
5958         {
5959             if (!(access & CORINFO_ACCESS_ADDRESS))
5960             {
5961                 // In future, it may be better to just create the right tree here instead of folding it later.
5962                 op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField);
5963
5964                 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
5965                 {
5966                     op1->gtType = TYP_REF; // points at boxed object
5967                     FieldSeqNode* firstElemFldSeq =
5968                         GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
5969                     op1 =
5970                         gtNewOperNode(GT_ADD, TYP_BYREF, op1,
5971                                       new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), firstElemFldSeq));
5972
5973                     if (varTypeIsStruct(lclTyp))
5974                     {
5975                         // Constructor adds GTF_GLOB_REF.  Note that this is *not* GTF_EXCEPT.
5976                         op1 = gtNewObjNode(pFieldInfo->structType, op1);
5977                     }
5978                     else
5979                     {
5980                         op1 = gtNewOperNode(GT_IND, lclTyp, op1);
5981                         op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING;
5982                     }
5983                 }
5984
5985                 return op1;
5986             }
5987             else
5988             {
5989                 void** pFldAddr = nullptr;
5990                 void*  fldAddr  = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr);
5991
5992                 FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5993
5994                 /* Create the data member node */
5995                 if (pFldAddr == nullptr)
5996                 {
5997                     op1 = gtNewIconHandleNode((size_t)fldAddr, GTF_ICON_STATIC_HDL, fldSeq);
5998                 }
5999                 else
6000                 {
6001                     op1 = gtNewIconHandleNode((size_t)pFldAddr, GTF_ICON_STATIC_HDL, fldSeq);
6002
6003                     // There are two cases here, either the static is RVA based,
6004                     // in which case the type of the FIELD node is not a GC type
6005                     // and the handle to the RVA is a TYP_I_IMPL.  Or the FIELD node is
6006                     // a GC type and the handle to it is a TYP_BYREF in the GC heap
6007                     // because handles to statics now go into the large object heap
6008
6009                     var_types handleTyp = (var_types)(varTypeIsGC(lclTyp) ? TYP_BYREF : TYP_I_IMPL);
6010                     op1                 = gtNewOperNode(GT_IND, handleTyp, op1);
6011                     op1->gtFlags |= GTF_IND_INVARIANT | GTF_IND_NONFAULTING;
6012                 }
6013             }
6014             break;
6015         }
6016     }
6017
6018     if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
6019     {
6020         op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
6021
6022         FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
6023
6024         op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
6025                             new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), fldSeq));
6026     }
6027
6028     if (!(access & CORINFO_ACCESS_ADDRESS))
6029     {
6030         op1 = gtNewOperNode(GT_IND, lclTyp, op1);
6031         op1->gtFlags |= GTF_GLOB_REF;
6032     }
6033
6034     return op1;
6035 }
6036
6037 // In general, try to call this before most of the verification work.  Most people expect the access
6038 // exceptions before the verification exceptions.  If you do this afterwards, that usually doesn't happen.  It
6039 // turns out that if you can't access something, we also consider you unverifiable for other reasons.
6040 void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6041 {
6042     if (result != CORINFO_ACCESS_ALLOWED)
6043     {
6044         impHandleAccessAllowedInternal(result, helperCall);
6045     }
6046 }
6047
6048 void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6049 {
6050     switch (result)
6051     {
6052         case CORINFO_ACCESS_ALLOWED:
6053             break;
6054         case CORINFO_ACCESS_ILLEGAL:
6055             // if we're verifying, then we need to reject the illegal access to ensure that we don't think the
6056             // method is verifiable.  Otherwise, delay the exception to runtime.
6057             if (compIsForImportOnly())
6058             {
6059                 info.compCompHnd->ThrowExceptionForHelper(helperCall);
6060             }
6061             else
6062             {
6063                 impInsertHelperCall(helperCall);
6064             }
6065             break;
6066         case CORINFO_ACCESS_RUNTIME_CHECK:
6067             impInsertHelperCall(helperCall);
6068             break;
6069     }
6070 }
6071
6072 void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo)
6073 {
6074     // Construct the argument list
6075     GenTreeArgList* args = nullptr;
6076     assert(helperInfo->helperNum != CORINFO_HELP_UNDEF);
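    // Walk the helper args from last to first; each one is prepended to 'args', so the
    // resulting list ends up in the original first-to-last order.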
6077     for (unsigned i = helperInfo->numArgs; i > 0; --i)
6078     {
6079         const CORINFO_HELPER_ARG& helperArg  = helperInfo->args[i - 1];
6080         GenTreePtr                currentArg = nullptr;
6081         switch (helperArg.argType)
6082         {
6083             case CORINFO_HELPER_ARG_TYPE_Field:
6084                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
6085                     info.compCompHnd->getFieldClass(helperArg.fieldHandle));
6086                 currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle);
6087                 break;
6088             case CORINFO_HELPER_ARG_TYPE_Method:
6089                 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle);
6090                 currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle);
6091                 break;
6092             case CORINFO_HELPER_ARG_TYPE_Class:
6093                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle);
6094                 currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle);
6095                 break;
6096             case CORINFO_HELPER_ARG_TYPE_Module:
6097                 currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle);
6098                 break;
6099             case CORINFO_HELPER_ARG_TYPE_Const:
6100                 currentArg = gtNewIconNode(helperArg.constant);
6101                 break;
6102             default:
6103                 NO_WAY("Illegal helper arg type");
6104         }
6105         args = (currentArg == nullptr) ? gtNewArgList(currentArg) : gtNewListNode(currentArg, args);
6106     }
6107
6108     /* TODO-Review:
6109      * Mark as CSE'able, and hoistable.  Consider marking hoistable unless you're in the inlinee.
6110      * Also, consider sticking this in the first basic block.
6111      */
6112     GenTreePtr callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, GTF_EXCEPT, args);
6113     impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6114 }
6115
6116 void Compiler::impInsertCalloutForDelegate(CORINFO_METHOD_HANDLE callerMethodHnd,
6117                                            CORINFO_METHOD_HANDLE calleeMethodHnd,
6118                                            CORINFO_CLASS_HANDLE  delegateTypeHnd)
6119 {
6120 #ifdef FEATURE_CORECLR
6121     if (!info.compCompHnd->isDelegateCreationAllowed(delegateTypeHnd, calleeMethodHnd))
6122     {
6123         // Call the JIT_DelegateSecurityCheck helper before calling the actual function.
6124         // This helper throws an exception if the CLR host disallows the call.
6125
6126         GenTreePtr helper = gtNewHelperCallNode(CORINFO_HELP_DELEGATE_SECURITY_CHECK, TYP_VOID, GTF_EXCEPT,
6127                                                 gtNewArgList(gtNewIconEmbClsHndNode(delegateTypeHnd),
6128                                                              gtNewIconEmbMethHndNode(calleeMethodHnd)));
6129         // Append the callout statement
6130         impAppendTree(helper, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6131     }
6132 #endif // FEATURE_CORECLR
6133 }
6134
6135 // Checks whether the return types of caller and callee are compatible
6136 // so that callee can be tail called. Note that here we don't check
6137 // compatibility in IL Verifier sense, but on the lines of return type
6138 // sizes are equal and get returned in the same return register.
6139 bool Compiler::impTailCallRetTypeCompatible(var_types            callerRetType,
6140                                             CORINFO_CLASS_HANDLE callerRetTypeClass,
6141                                             var_types            calleeRetType,
6142                                             CORINFO_CLASS_HANDLE calleeRetTypeClass)
6143 {
6144     // Note that we can not relax this condition with genActualType() as the
6145     // calling convention dictates that the caller of a function with a small
6146     // typed return value is responsible for normalizing the return val.
6147     if (callerRetType == calleeRetType)
6148     {
6149         return true;
6150     }
6151
6152 #if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
6153     // Jit64 compat:
6154     if (callerRetType == TYP_VOID)
6155     {
6156         // This needs to be allowed to support the following IL pattern that Jit64 allows:
6157         //     tail.call
6158         //     pop
6159         //     ret
6160         //
6161         // Note that the above IL pattern is not valid as per IL verification rules.
6162         // Therefore, only full trust code can take advantage of this pattern.
6163         return true;
6164     }
6165
6166     // These checks return true if the return value type sizes are the same and
6167     // get returned in the same return register i.e. caller doesn't need to normalize
6168     // return value. Some of the tail calls permitted by below checks would have
6169     // been rejected by IL Verifier before we reached here.  Therefore, only full
6170     // trust code can make those tail calls.
6171     unsigned callerRetTypeSize = 0;
6172     unsigned calleeRetTypeSize = 0;
6173     bool     isCallerRetTypMBEnreg =
6174         VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize, true);
6175     bool isCalleeRetTypMBEnreg =
6176         VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize, true);
6177
6178     if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg)
6179     {
6180         return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
6181     }
6182 #endif // _TARGET_AMD64_ || _TARGET_ARM64_
6183
6184     return false;
6185 }
6186
6187 // For prefixFlags
6188 enum
6189 {
6190     PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix
6191     PREFIX_TAILCALL_IMPLICIT =
6192         0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix
6193     PREFIX_TAILCALL    = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT),
6194     PREFIX_VOLATILE    = 0x00000100,
6195     PREFIX_UNALIGNED   = 0x00001000,
6196     PREFIX_CONSTRAINED = 0x00010000,
6197     PREFIX_READONLY    = 0x00100000
6198 };
6199
6200 /********************************************************************************
6201  *
6202  * Returns true if the current opcode and the opcodes following it correspond
6203  * to a supported tail call IL pattern.
6204  *
6205  */
6206 bool Compiler::impIsTailCallILPattern(bool        tailPrefixed,
6207                                       OPCODE      curOpcode,
6208                                       const BYTE* codeAddrOfNextOpcode,
6209                                       const BYTE* codeEnd,
6210                                       bool        isRecursive,
6211                                       bool*       isCallPopAndRet /* = nullptr */)
6212 {
6213     // Bail out if the current opcode is not a call.
6214     if (!impOpcodeIsCallOpcode(curOpcode))
6215     {
6216         return false;
6217     }
6218
6219 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6220     // If shared ret tail opt is not enabled, we will enable
6221     // it for recursive methods.
6222     if (isRecursive)
6223 #endif
6224     {
6225         // We can actually handle the case where the ret is in a fallthrough block, as long as that is the only
6226         // part of the sequence. Make sure we don't go past the end of the IL, however.
6227         codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize);
6228     }
6229
6230     // Bail out if there is no next opcode after call
6231     if (codeAddrOfNextOpcode >= codeEnd)
6232     {
6233         return false;
6234     }
6235
6236     // Scan the opcodes to look for the following IL patterns if either
6237     //   i) the call is not tail prefixed (i.e. implicit tail call) or
6238     //  ii) if tail prefixed, IL verification is not needed for the method.
6239     //
6240     // Only in the above two cases we can allow the below tail call patterns
6241     // violating ECMA spec.
6242     //
6243     // Pattern1:
6244     //       call
6245     //       nop*
6246     //       ret
6247     //
6248     // Pattern2:
6249     //       call
6250     //       nop*
6251     //       pop
6252     //       nop*
6253     //       ret
6254     int    cntPop = 0;
6255     OPCODE nextOpcode;
6256
6257 #ifdef _TARGET_AMD64_
6258     do
6259     {
6260         nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6261         codeAddrOfNextOpcode += sizeof(__int8);
6262     } while ((codeAddrOfNextOpcode < codeEnd) &&         // Haven't reached end of method
6263              (!tailPrefixed || !tiVerificationNeeded) && // Not ".tail" prefixed or method requires no IL verification
6264              ((nextOpcode == CEE_NOP) || ((nextOpcode == CEE_POP) && (++cntPop == 1)))); // Next opcode = nop or exactly
6265                                                                                          // one pop seen so far.
6266 #else
6267     nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6268 #endif
6269
6270     if (isCallPopAndRet)
6271     {
6272         // Allow call+pop+ret to be tail call optimized if caller ret type is void
6273         *isCallPopAndRet = (nextOpcode == CEE_RET) && (cntPop == 1);
6274     }
6275
6276 #ifdef _TARGET_AMD64_
6277     // Jit64 Compat:
6278     // Tail call IL pattern could be either of the following
6279     // 1) call/callvirt/calli + ret
6280     // 2) call/callvirt/calli + pop + ret in a method returning void.
6281     return (nextOpcode == CEE_RET) && ((cntPop == 0) || ((cntPop == 1) && (info.compRetType == TYP_VOID)));
6282 #else //!_TARGET_AMD64_
6283     return (nextOpcode == CEE_RET) && (cntPop == 0);
6284 #endif
6285 }
6286
6287 /*****************************************************************************
6288  *
6289  * Determine whether the call could be converted to an implicit tail call
6290  *
6291  */
6292 bool Compiler::impIsImplicitTailCallCandidate(
6293     OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive)
6294 {
6295
6296 #if FEATURE_TAILCALL_OPT
6297     if (!opts.compTailCallOpt)
6298     {
6299         return false;
6300     }
6301
6302     if (opts.compDbgCode || opts.MinOpts())
6303     {
6304         return false;
6305     }
6306
6307     // must not be tail prefixed
6308     if (prefixFlags & PREFIX_TAILCALL_EXPLICIT)
6309     {
6310         return false;
6311     }
6312
6313 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6314     // the block containing call is marked as BBJ_RETURN
6315     // We allow shared ret tail call optimization on recursive calls even under
6316     // !FEATURE_TAILCALL_OPT_SHARED_RETURN.
6317     if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN))
6318         return false;
6319 #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN
6320
6321     // must be call+ret or call+pop+ret
6322     if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive))
6323     {
6324         return false;
6325     }
6326
6327     return true;
6328 #else
6329     return false;
6330 #endif // FEATURE_TAILCALL_OPT
6331 }
6332
6333 //------------------------------------------------------------------------
6334 // impImportCall: import a call-inspiring opcode
6335 //
6336 // Arguments:
6337 //    opcode                    - opcode that inspires the call
6338 //    pResolvedToken            - resolved token for the call target
6339 //    pConstrainedResolvedToken - resolved constraint token (or nullptr)
6340 //    newobjThis                - tree for this pointer or uninitialized newobj temp (or nullptr)
6341 //    prefixFlags               - IL prefix flags for the call
6342 //    callInfo                  - EE supplied info for the call
6343 //    rawILOffset               - IL offset of the opcode
6344 //
6345 // Returns:
6346 //    Type of the call's return value.
6347 //
6348 // Notes:
6349 //    opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ.
6350 //
6351 //    For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated
6352 //    uninitialized object.
6353
6354 #ifdef _PREFAST_
6355 #pragma warning(push)
6356 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
6357 #endif
6358
6359 var_types Compiler::impImportCall(OPCODE                  opcode,
6360                                   CORINFO_RESOLVED_TOKEN* pResolvedToken,
6361                                   CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
6362                                   GenTreePtr              newobjThis,
6363                                   int                     prefixFlags,
6364                                   CORINFO_CALL_INFO*      callInfo,
6365                                   IL_OFFSET               rawILOffset)
6366 {
6367     assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI);
6368
6369     IL_OFFSETX             ilOffset                       = impCurILOffset(rawILOffset, true);
6370     var_types              callRetTyp                     = TYP_COUNT;
6371     CORINFO_SIG_INFO*      sig                            = nullptr;
6372     CORINFO_METHOD_HANDLE  methHnd                        = nullptr;
6373     CORINFO_CLASS_HANDLE   clsHnd                         = nullptr;
6374     unsigned               clsFlags                       = 0;
6375     unsigned               mflags                         = 0;
6376     unsigned               argFlags                       = 0;
6377     GenTreePtr             call                           = nullptr;
6378     GenTreeArgList*        args                           = nullptr;
6379     CORINFO_THIS_TRANSFORM constraintCallThisTransform    = CORINFO_NO_THIS_TRANSFORM;
6380     CORINFO_CONTEXT_HANDLE exactContextHnd                = nullptr;
6381     BOOL                   exactContextNeedsRuntimeLookup = FALSE;
6382     bool                   canTailCall                    = true;
6383     const char*            szCanTailCallFailReason        = nullptr;
6384     int                    tailCall                       = prefixFlags & PREFIX_TAILCALL;
6385     bool                   readonlyCall                   = (prefixFlags & PREFIX_READONLY) != 0;
6386
6387     // Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could
6388     // do that before tailcalls, but that is probably not the intended
6389     // semantic. So just disallow tailcalls from synchronized methods.
6390     // Also, popping arguments in a varargs function is more work and NYI
6391     // If we have a security object, we have to keep our frame around for callers
6392     // to see any imperative security.
6393     if (info.compFlags & CORINFO_FLG_SYNCH)
6394     {
6395         canTailCall             = false;
6396         szCanTailCallFailReason = "Caller is synchronized";
6397     }
6398 #if !FEATURE_FIXED_OUT_ARGS
6399     else if (info.compIsVarArgs)
6400     {
6401         canTailCall             = false;
6402         szCanTailCallFailReason = "Caller is varargs";
6403     }
6404 #endif // FEATURE_FIXED_OUT_ARGS
6405     else if (opts.compNeedSecurityCheck)
6406     {
6407         canTailCall             = false;
6408         szCanTailCallFailReason = "Caller requires a security check.";
6409     }
6410
6411     // We only need to cast the return value of pinvoke inlined calls that return small types
6412
6413     // TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop
6414     // widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there.
6415     // The existing x64 JIT doesn't bother widening all types to int, so we have to assume for
6416     // the time being that the callee might be compiled by the other JIT and thus the return
6417     // value will need to be widened by us (or not widened at all...)
6418
6419     // ReadyToRun code sticks with the default calling convention, which does not widen small return types.
6420
6421     bool checkForSmallType  = opts.IsJit64Compat() || opts.IsReadyToRun();
6422     bool bIntrinsicImported = false;
6423
6424     CORINFO_SIG_INFO calliSig;
6425     GenTreeArgList*  extraArg = nullptr;
6426
6427     /*-------------------------------------------------------------------------
6428      * First create the call node
6429      */
6430
6431     if (opcode == CEE_CALLI)
6432     {
6433         /* Get the call site sig */
6434         eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &calliSig);
6435
6436         callRetTyp = JITtype2varType(calliSig.retType);
6437         clsHnd     = calliSig.retTypeClass;
6438
6439         call = impImportIndirectCall(&calliSig, ilOffset);
6440
6441         // We don't know the target method, so we have to infer the flags, or
6442         // assume the worst-case.
6443         mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
6444
6445 #ifdef DEBUG
6446         if (verbose)
6447         {
6448             unsigned structSize =
6449                 (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0;
6450             printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
6451                    opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
6452         }
6453 #endif
6454         // This should be checked in impImportBlockCode.
6455         assert(!compIsForInlining() || !(impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY));
6456
6457         sig = &calliSig;
6458
6459 #ifdef DEBUG
6460         // We cannot lazily obtain the signature of a CALLI call because it has no method
6461         // handle that we can use, so we need to save its full call signature here.
6462         assert(call->gtCall.callSig == nullptr);
6463         call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
6464         *call->gtCall.callSig = calliSig;
6465 #endif // DEBUG
6466
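        // CoreRT calli transformation: a managed calli target may turn out to be a "fat" function
        // pointer that needs extra expansion at the call site, so mark such calls as fat-pointer
        // candidates for a later transformation phase.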
6467         if (IsTargetAbi(CORINFO_CORERT_ABI))
6468         {
6469             bool managedCall = (calliSig.callConv & GTF_CALL_UNMANAGED) == 0;
6470             if (managedCall)
6471             {
6472                 call->AsCall()->SetFatPointerCandidate();
6473                 setMethodHasFatPointer();
6474             }
6475         }
6476     }
6477     else // (opcode != CEE_CALLI)
6478     {
6479         CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Count;
6480
6481         // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to
6482         // supply the instantiation parameters necessary to make direct calls to underlying
6483         // shared generic code, rather than calling through instantiating stubs.  If the
6484         // returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT
6485         // must indeed pass an instantiation parameter.
6486
6487         methHnd = callInfo->hMethod;
6488
6489         sig        = &(callInfo->sig);
6490         callRetTyp = JITtype2varType(sig->retType);
6491
6492         mflags = callInfo->methodFlags;
6493
6494 #ifdef DEBUG
6495         if (verbose)
6496         {
6497             unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0;
6498             printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
6499                    opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
6500         }
6501 #endif
6502         if (compIsForInlining())
6503         {
6504             /* Does this call site have security boundary restrictions? */
6505
6506             if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
6507             {
6508                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
6509                 return callRetTyp;
6510             }
6511
6512             /* Does the inlinee need a security check token on the frame */
6513
6514             if (mflags & CORINFO_FLG_SECURITYCHECK)
6515             {
6516                 compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
6517                 return callRetTyp;
6518             }
6519
6520             /* Does the inlinee use StackCrawlMark */
6521
6522             if (mflags & CORINFO_FLG_DONT_INLINE_CALLER)
6523             {
6524                 compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK);
6525                 return callRetTyp;
6526             }
6527
6528             /* For now ignore delegate invoke */
6529
6530             if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
6531             {
6532                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_DELEGATE_INVOKE);
6533                 return callRetTyp;
6534             }
6535
6536             /* For now ignore varargs */
6537             if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6538             {
6539                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS);
6540                 return callRetTyp;
6541             }
6542
6543             if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
6544             {
6545                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
6546                 return callRetTyp;
6547             }
6548
6549             if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT))
6550             {
6551                 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL);
6552                 return callRetTyp;
6553             }
6554         }
6555
6556         clsHnd = pResolvedToken->hClass;
6557
6558         clsFlags = callInfo->classFlags;
6559
6560 #ifdef DEBUG
6561         // If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute.
6562
6563         // This recognition should really be done by knowing the methHnd of the relevant Mark method(s).
6564         // These should be in mscorlib.h, and available through a JIT/EE interface call.
6565         const char* modName;
6566         const char* className;
6567         const char* methodName;
6568         if ((className = eeGetClassName(clsHnd)) != nullptr &&
6569             strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 &&
6570             (methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0)
6571         {
6572             return impImportJitTestLabelMark(sig->numArgs);
6573         }
6574 #endif // DEBUG
6575
6576         // <NICE> Factor this into getCallInfo </NICE>
6577         if ((mflags & CORINFO_FLG_INTRINSIC) && !pConstrainedResolvedToken)
6578         {
6579             call = impIntrinsic(newobjThis, clsHnd, methHnd, sig, pResolvedToken->token, readonlyCall,
6580                                 (canTailCall && (tailCall != 0)), &intrinsicID);
6581
6582             if (call != nullptr)
6583             {
6584                 assert(!(mflags & CORINFO_FLG_VIRTUAL) || (mflags & CORINFO_FLG_FINAL) ||
6585                        (clsFlags & CORINFO_FLG_FINAL));
6586
6587 #ifdef FEATURE_READYTORUN_COMPILER
6588                 if (call->OperGet() == GT_INTRINSIC)
6589                 {
6590                     if (opts.IsReadyToRun())
6591                     {
6592                         noway_assert(callInfo->kind == CORINFO_CALL);
6593                         call->gtIntrinsic.gtEntryPoint = callInfo->codePointerLookup.constLookup;
6594                     }
6595                     else
6596                     {
6597                         call->gtIntrinsic.gtEntryPoint.addr = nullptr;
6598                     }
6599                 }
6600 #endif
6601
6602                 bIntrinsicImported = true;
6603                 goto DONE_CALL;
6604             }
6605         }
6606
6607 #ifdef FEATURE_SIMD
6608         if (featureSIMD)
6609         {
6610             call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, pResolvedToken->token);
6611             if (call != nullptr)
6612             {
6613                 bIntrinsicImported = true;
6614                 goto DONE_CALL;
6615             }
6616         }
6617 #endif // FEATURE_SIMD
6618
6619         if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT))
6620         {
6621             NO_WAY("Virtual call to a function added via EnC is not supported");
6622         }
6623
6624         if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT &&
6625             (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6626             (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG)
6627         {
6628             BADCODE("Bad calling convention");
6629         }
6630
6631         //-------------------------------------------------------------------------
6632         //  Construct the call node
6633         //
6634         // Work out what sort of call we're making.
6635         // Dispense with virtual calls implemented via LDVIRTFTN immediately.
6636
6637         constraintCallThisTransform = callInfo->thisTransform;
6638
6639         exactContextHnd                = callInfo->contextHandle;
6640         exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup;
6641
6642         // A recursive call is treated as a loop back to the beginning of the method.
6643         if (methHnd == info.compMethodHnd)
6644         {
6645 #ifdef DEBUG
6646             if (verbose)
6647             {
6648                 JITDUMP("\nFound recursive call in the method. Mark BB%02u to BB%02u as having a backward branch.\n",
6649                         fgFirstBB->bbNum, compCurBB->bbNum);
6650             }
6651 #endif
6652             fgMarkBackwardJump(fgFirstBB, compCurBB);
6653         }
6654
6655         switch (callInfo->kind)
6656         {
6657
6658             case CORINFO_VIRTUALCALL_STUB:
6659             {
6660                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6661                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6662                 if (callInfo->stubLookup.lookupKind.needsRuntimeLookup)
6663                 {
6664
6665                     if (compIsForInlining())
6666                     {
6667                         // Don't import runtime lookups when inlining
6668                         // Inlining has to be aborted in such a case
6669                         /* XXX Fri 3/20/2009
6670                          * By the way, this would never succeed.  If the handle lookup is into the generic
6671                          * dictionary for a candidate, you'll generate different dictionary offsets and the
6672                          * inlined code will crash.
6673                          *
6674                          * To anyone code reviewing this, when could this ever succeed in the future?  It'll
6675                          * always have a handle lookup.  These lookups are safe intra-module, but we're just
6676                          * failing here.
6677                          */
6678                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE);
6679                         return callRetTyp;
6680                     }
6681
6682                     GenTreePtr stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd);
6683                     assert(!compDonotInline());
6684
6685                     // This is the rough code to set up an indirect stub call
6686                     assert(stubAddr != nullptr);
6687
6688                     // The stubAddr may be a
6689                     // complex expression. As it is evaluated after the args,
6690                     // it may cause registered args to be spilled. Simply spill it.
6691
6692                     unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup"));
6693                     impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_ALL);
6694                     stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6695
6696                     // Create the actual call node
6697
6698                     assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6699                            (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6700
6701                     call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr);
6702
6703                     call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT);
6704                     call->gtFlags |= GTF_CALL_VIRT_STUB;
6705
6706 #ifdef _TARGET_X86_
6707                     // No tailcalls allowed for these yet...
6708                     canTailCall             = false;
6709                     szCanTailCallFailReason = "VirtualCall with runtime lookup";
6710 #endif
6711                 }
6712                 else
6713                 {
6714                     // OK, the stub is available at compile time.
6715
6716                     call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6717                     call->gtCall.gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr;
6718                     call->gtFlags |= GTF_CALL_VIRT_STUB;
6719                     assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE);
6720                     if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE)
6721                     {
6722                         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
6723                     }
6724                 }
6725
6726 #ifdef FEATURE_READYTORUN_COMPILER
6727                 if (opts.IsReadyToRun())
6728                 {
6729                     // Null check is sometimes needed for ready to run to handle
6730                     // non-virtual <-> virtual changes between versions
6731                     if (callInfo->nullInstanceCheck)
6732                     {
6733                         call->gtFlags |= GTF_CALL_NULLCHECK;
6734                     }
6735                 }
6736 #endif
6737
6738                 break;
6739             }
6740
6741             case CORINFO_VIRTUALCALL_VTABLE:
6742             {
6743                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6744                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6745                 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6746                 call->gtFlags |= GTF_CALL_VIRT_VTABLE;
6747                 break;
6748             }
6749
6750             case CORINFO_VIRTUALCALL_LDVIRTFTN:
6751             {
6752                 if (compIsForInlining())
6753                 {
6754                     compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN);
6755                     return callRetTyp;
6756                 }
6757
6758                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6759                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6760                 // OK, We've been told to call via LDVIRTFTN, so just
6761                 // take the call now....
6762
6763                 args = impPopList(sig->numArgs, &argFlags, sig);
6764
6765                 GenTreePtr thisPtr = impPopStack().val;
6766                 thisPtr            = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
6767                 if (compDonotInline())
6768                 {
6769                     return callRetTyp;
6770                 }
6771
6772                 // Clone the (possibly transformed) "this" pointer
6773                 GenTreePtr thisPtrCopy;
6774                 thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
6775                                        nullptr DEBUGARG("LDVIRTFTN this pointer"));
6776
6777                 GenTreePtr fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
6778                 if (compDonotInline())
6779                 {
6780                     return callRetTyp;
6781                 }
6782
6783                 thisPtr = nullptr; // can't reuse it
6784
6785                 // Now make an indirect call through the function pointer
6786
6787                 unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer"));
6788                 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
6789                 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6790
6791                 // Create the actual call node
6792
6793                 call                    = gtNewIndCallNode(fptr, callRetTyp, args, ilOffset);
6794                 call->gtCall.gtCallObjp = thisPtrCopy;
6795                 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6796
6797 #ifdef FEATURE_READYTORUN_COMPILER
6798                 if (opts.IsReadyToRun())
6799                 {
6800                     // Null check is needed for ready to run to handle
6801                     // non-virtual <-> virtual changes between versions
6802                     call->gtFlags |= GTF_CALL_NULLCHECK;
6803                 }
6804 #endif
6805
6806                 // Since we are jumping over some code, check that it's OK to skip that code
6807                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6808                        (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6809                 goto DONE;
6810             }
6811
6812             case CORINFO_CALL:
6813             {
6814                 // This is for a non-virtual, non-interface etc. call
6815                 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6816
6817                 // We remove the nullcheck for the GetType call intrinsic.
6818                 // TODO-CQ: JIT64 does not introduce the null check for many more helper calls
6819                 // and intrinsics.
6820                 if (callInfo->nullInstanceCheck &&
6821                     !((mflags & CORINFO_FLG_INTRINSIC) != 0 && (intrinsicID == CORINFO_INTRINSIC_Object_GetType)))
6822                 {
6823                     call->gtFlags |= GTF_CALL_NULLCHECK;
6824                 }
6825
6826 #ifdef FEATURE_READYTORUN_COMPILER
6827                 if (opts.IsReadyToRun())
6828                 {
6829                     call->gtCall.setEntryPoint(callInfo->codePointerLookup.constLookup);
6830                 }
6831 #endif
6832                 break;
6833             }
6834
6835             case CORINFO_CALL_CODE_POINTER:
6836             {
6837                 // The EE has asked us to call by computing a code pointer and then doing an
6838                 // indirect call.  This is because a runtime lookup is required to get the code entry point.
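                // A sketch of the shape built below (illustrative, not literal IR):
                //     fptrTmp = <lookup tree producing the code entry point>
                //     CALL_IND [fptrTmp](args...)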
6839
6840                 // These calls always follow a uniform calling convention, i.e. no extra hidden params
6841                 assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0);
6842
6843                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG);
6844                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6845
6846                 GenTreePtr fptr =
6847                     impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod);
6848
6849                 if (compDonotInline())
6850                 {
6851                     return callRetTyp;
6852                 }
6853
6854                 // Now make an indirect call through the function pointer
6855
6856                 unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer"));
6857                 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
6858                 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6859
6860                 call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
6861                 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6862                 if (callInfo->nullInstanceCheck)
6863                 {
6864                     call->gtFlags |= GTF_CALL_NULLCHECK;
6865                 }
6866
6867                 break;
6868             }
6869
6870             default:
6871                 assert(!"unknown call kind");
6872                 break;
6873         }
6874
6875         //-------------------------------------------------------------------------
6876         // Set more flags
6877
6878         PREFIX_ASSUME(call != nullptr);
6879
6880         if (mflags & CORINFO_FLG_NOGCCHECK)
6881         {
6882             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK;
6883         }
6884
6885         // Mark the call if it's one of the ones we may later treat as an intrinsic
6886         if (intrinsicID == CORINFO_INTRINSIC_Object_GetType || intrinsicID == CORINFO_INTRINSIC_TypeEQ ||
6887             intrinsicID == CORINFO_INTRINSIC_TypeNEQ || intrinsicID == CORINFO_INTRINSIC_GetCurrentManagedThread ||
6888             intrinsicID == CORINFO_INTRINSIC_GetManagedThreadId)
6889         {
6890             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC;
6891         }
6892     }
6893     assert(sig);
6894     assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set.
6895
6896     /* Some sanity checks */
6897
6898     // CALL_VIRT and NEWOBJ must have a THIS pointer
6899     assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS));
6900     // static bit and hasThis are negations of one another
6901     assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0));
6902     assert(call != nullptr);
6903
6904     /*-------------------------------------------------------------------------
6905      * Check special-cases etc
6906      */
6907
6908     /* Special case - Check if it is a call to Delegate.Invoke(). */
6909
6910     if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
6911     {
6912         assert(!compIsForInlining());
6913         assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6914         assert(mflags & CORINFO_FLG_FINAL);
6915
6916         /* Set the delegate flag */
6917         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV;
6918
6919         if (callInfo->secureDelegateInvoke)
6920         {
6921             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SECURE_DELEGATE_INV;
6922         }
6923
6924         if (opcode == CEE_CALLVIRT)
6925         {
6926             assert(mflags & CORINFO_FLG_FINAL);
6927
6928             /* It should have the GTF_CALL_NULLCHECK flag set. Reset it */
6929             assert(call->gtFlags & GTF_CALL_NULLCHECK);
6930             call->gtFlags &= ~GTF_CALL_NULLCHECK;
6931         }
6932     }
6933
6934     CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass;
6935     actualMethodRetTypeSigClass = sig->retTypeSigClass;
6936     if (varTypeIsStruct(callRetTyp))
6937     {
6938         callRetTyp   = impNormStructType(actualMethodRetTypeSigClass);
6939         call->gtType = callRetTyp;
6940     }
6941
6942 #if !FEATURE_VARARG
6943     /* Check for varargs */
6944     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
6945         (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6946     {
6947         BADCODE("Varargs not supported.");
6948     }
6949 #endif // !FEATURE_VARARG
6950
6951     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
6952         (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6953     {
6954         assert(!compIsForInlining());
6955
6956         /* Set the right flags */
6957
6958         call->gtFlags |= GTF_CALL_POP_ARGS;
6959         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VARARGS;
6960
6961         /* Can't allow tailcall for varargs as it is caller-pop. The caller
6962            will be expecting to pop a certain number of arguments, but if we
6963            tailcall to a function with a different number of arguments, we
6964            are hosed. There are ways around this (caller remembers esp value,
6965            varargs is not caller-pop, etc), but not worth it. */
6966         CLANG_FORMAT_COMMENT_ANCHOR;
6967
6968 #ifdef _TARGET_X86_
6969         if (canTailCall)
6970         {
6971             canTailCall             = false;
6972             szCanTailCallFailReason = "Callee is varargs";
6973         }
6974 #endif
6975
6976         /* Get the total number of arguments - this is already correct
6977          * for CALLI - for methods we have to get it from the call site */
6978
6979         if (opcode != CEE_CALLI)
6980         {
6981 #ifdef DEBUG
6982             unsigned numArgsDef = sig->numArgs;
6983 #endif
6984             eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, sig);
6985
6986 #ifdef DEBUG
6987             // We cannot lazily obtain the signature of a vararg call because using its method
6988             // handle will give us only the declared argument list, not the full argument list.
6989             assert(call->gtCall.callSig == nullptr);
6990             call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
6991             *call->gtCall.callSig = *sig;
6992 #endif
6993
6994             // For vararg calls we must be sure to load the return type of the
6995             // method actually being called, as well as the return type
6996             // specified in the vararg signature. With type equivalency, these types
6997             // may not be the same.
6998             if (sig->retTypeSigClass != actualMethodRetTypeSigClass)
6999             {
7000                 if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
7001                     sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR &&
7002                     sig->retType != CORINFO_TYPE_VAR)
7003                 {
7004                     // Make sure that all valuetypes (including enums) that we push are loaded.
7005                     // This is to guarantee that if a GC is triggered from the prestub of this method,
7006                     // all valuetypes in the method signature are already loaded.
7007                     // We need to be able to find the size of the valuetypes, but we cannot
7008                     // do a class-load from within GC.
7009                     info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass);
7010                 }
7011             }
7012
7013             assert(numArgsDef <= sig->numArgs);
7014         }
7015
7016         /* We will have "cookie" as the last argument but we cannot push
7017          * it on the operand stack because we may overflow, so we append it
7018          * to the arg list next after we pop them */
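        // (Illustrative, not from this codebase: at the C# level a vararg call such as
        //  M(__arglist(1, 2.0)) gets a GTF_ICON_VARG_HDL cookie, describing the argument
        //  types actually passed, appended as that extra trailing argument.)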
7019     }
7020
7021     if (mflags & CORINFO_FLG_SECURITYCHECK)
7022     {
7023         assert(!compIsForInlining());
7024
7025         // Need security prolog/epilog callouts when there is
7026         // imperative security in the method. This is to give security a
7027         // chance to do any setup in the prolog and cleanup in the epilog if needed.
7028
7029         if (compIsForInlining())
7030         {
7031             // Cannot handle this if the method being imported is itself an inlinee,
7032             // because an inlinee method does not have its own frame.
7033
7034             compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
7035             return callRetTyp;
7036         }
7037         else
7038         {
7039             tiSecurityCalloutNeeded = true;
7040
7041             // If the current method calls a method which needs a security check,
7042             // (i.e. the method being compiled has imperative security)
7043             // we need to reserve a slot for the security object in
7044             // the current method's stack frame
7045             opts.compNeedSecurityCheck = true;
7046         }
7047     }
7048
7049     //--------------------------- Inline NDirect ------------------------------
7050
7051     // For inline cases we technically should look at both the current
7052     // block and the call site block (or just the latter if we've
7053     // fused the EH trees). However the block-related checks pertain to
7054     // EH and we currently won't inline a method with EH. So for
7055     // inlinees, just checking the call site block is sufficient.
7056     {
7057         // New lexical block here to avoid compilation errors because of GOTOs.
7058         BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
7059         impCheckForPInvokeCall(call, methHnd, sig, mflags, block);
7060     }
7061
7062     if (call->gtFlags & GTF_CALL_UNMANAGED)
7063     {
7064         // We set up the unmanaged call by linking the frame, disabling GC, etc
7065         // This needs to be cleaned up on return
7066         if (canTailCall)
7067         {
7068             canTailCall             = false;
7069             szCanTailCallFailReason = "Callee is native";
7070         }
7071
7072         checkForSmallType = true;
7073
7074         impPopArgsForUnmanagedCall(call, sig);
7075
7076         goto DONE;
7077     }
7078     else if ((opcode == CEE_CALLI) && (((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_STDCALL) ||
7079                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_C) ||
7080                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_THISCALL) ||
7081                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_FASTCALL)))
7082     {
7083         if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig))
7084         {
7085             // Normally this only happens with inlining.
7086             // However, a generic method (or type) being NGENd into another module
7087             // can run into this issue as well.  There's no easy fall-back for NGEN,
7088             // so instead we fall back to JIT.
7089             if (compIsForInlining())
7090             {
7091                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE);
7092             }
7093             else
7094             {
7095                 IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)");
7096             }
7097
7098             return callRetTyp;
7099         }
7100
7101         GenTreePtr cookie = eeGetPInvokeCookie(sig);
7102
7103         // This cookie is required to be either a simple GT_CNS_INT or
7104         // an indirection of a GT_CNS_INT
7105         //
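        // Illustrative shapes (sketch only):
        //     GT_CNS_INT(cookie)              -- cookie value known up front
        //     GT_IND(GT_CNS_INT(pCookie))     -- cookie loaded through a known address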
7106         GenTreePtr cookieConst = cookie;
7107         if (cookie->gtOper == GT_IND)
7108         {
7109             cookieConst = cookie->gtOp.gtOp1;
7110         }
7111         assert(cookieConst->gtOper == GT_CNS_INT);
7112
7113         // Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that
7114         // we won't allow this tree to participate in any CSE logic
7115         //
7116         cookie->gtFlags |= GTF_DONT_CSE;
7117         cookieConst->gtFlags |= GTF_DONT_CSE;
7118
7119         call->gtCall.gtCallCookie = cookie;
7120
7121         if (canTailCall)
7122         {
7123             canTailCall             = false;
7124             szCanTailCallFailReason = "PInvoke calli";
7125         }
7126     }
7127
7128     /*-------------------------------------------------------------------------
7129      * Create the argument list
7130      */
7131
7132     //-------------------------------------------------------------------------
7133     // Special case - for varargs we have an implicit last argument
7134
7135     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
7136     {
7137         assert(!compIsForInlining());
7138
7139         void *varCookie, *pVarCookie;
7140         if (!info.compCompHnd->canGetVarArgsHandle(sig))
7141         {
7142             compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE);
7143             return callRetTyp;
7144         }
7145
7146         varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie);
7147         assert((!varCookie) != (!pVarCookie));
7148         GenTreePtr cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL);
7149
7150         assert(extraArg == nullptr);
7151         extraArg = gtNewArgList(cookie);
7152     }
7153
7154     //-------------------------------------------------------------------------
7155     // Extra arg for shared generic code and array methods
7156     //
7157     // Extra argument containing instantiation information is passed in the
7158     // following circumstances:
7159     // (a) To the "Address" method on array classes; the extra parameter is
7160     //     the array's type handle (a TypeDesc)
7161     // (b) To shared-code instance methods in generic structs; the extra parameter
7162     //     is the struct's type handle (a vtable ptr)
7163     // (c) To shared-code per-instantiation non-generic static methods in generic
7164     //     classes and structs; the extra parameter is the type handle
7165     // (d) To shared-code generic methods; the extra parameter is an
7166     //     exact-instantiation MethodDesc
7167     //
7168     // We also set the exact type context associated with the call so we can
7169     // inline the call correctly later on.
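    // Illustrative C# (not from this codebase), for case (d): a shared generic method
    //     static void Dump<T>(T x) { Console.WriteLine(typeof(T)); }
    // is compiled once for all reference-type instantiations, so the caller passes the
    // exact MethodDesc (e.g. for Dump<string>) as the hidden instantiation argument.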
7170
7171     if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE)
7172     {
7173         assert(call->gtCall.gtCallType == CT_USER_FUNC);
7174         if (clsHnd == nullptr)
7175         {
7176             NO_WAY("CALLI on parameterized type");
7177         }
7178
7179         assert(opcode != CEE_CALLI);
7180
7181         GenTreePtr instParam;
7182         BOOL       runtimeLookup;
7183
7184         // Instantiated generic method
7185         if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD)
7186         {
7187             CORINFO_METHOD_HANDLE exactMethodHandle =
7188                 (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7189
7190             if (!exactContextNeedsRuntimeLookup)
7191             {
7192 #ifdef FEATURE_READYTORUN_COMPILER
7193                 if (opts.IsReadyToRun())
7194                 {
7195                     instParam =
7196                         impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle);
7197                     if (instParam == nullptr)
7198                     {
7199                         return callRetTyp;
7200                     }
7201                 }
7202                 else
7203 #endif
7204                 {
7205                     instParam = gtNewIconEmbMethHndNode(exactMethodHandle);
7206                     info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle);
7207                 }
7208             }
7209             else
7210             {
7211                 instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7212                 if (instParam == nullptr)
7213                 {
7214                     return callRetTyp;
7215                 }
7216             }
7217         }
7218
7219         // otherwise must be an instance method in a generic struct,
7220         // a static method in a generic type, or a runtime-generated array method
7221         else
7222         {
7223             assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS);
7224             CORINFO_CLASS_HANDLE exactClassHandle =
7225                 (CORINFO_CLASS_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7226
7227             if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0)
7228             {
7229                 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD);
7230                 return callRetTyp;
7231             }
7232
7233             if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall)
7234             {
7235                 // We indicate "readonly" to the Address operation by using a null
7236                 // instParam.
7237                 instParam = gtNewIconNode(0, TYP_REF);
7238             }
7239
7240             if (!exactContextNeedsRuntimeLookup)
7241             {
7242 #ifdef FEATURE_READYTORUN_COMPILER
7243                 if (opts.IsReadyToRun())
7244                 {
7245                     instParam =
7246                         impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle);
7247                     if (instParam == nullptr)
7248                     {
7249                         return callRetTyp;
7250                     }
7251                 }
7252                 else
7253 #endif
7254                 {
7255                     instParam = gtNewIconEmbClsHndNode(exactClassHandle);
7256                     info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle);
7257                 }
7258             }
7259             else
7260             {
7261                 instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7262                 if (instParam == nullptr)
7263                 {
7264                     return callRetTyp;
7265                 }
7266             }
7267         }
7268
7269         assert(extraArg == nullptr);
7270         extraArg = gtNewArgList(instParam);
7271     }
7272
7273     // Inlining may need the exact type context (exactContextHnd) if we're inlining shared generic code, in particular
7274     // to inline 'polytypic' operations such as static field accesses, type tests and method calls which
7275     // rely on the exact context. The exactContextHnd is passed back to the JitInterface at appropriate points.
7276     // exactContextHnd is not currently required when inlining shared generic code into shared
7277     // generic code, since the inliner aborts whenever shared code polytypic operations are encountered
7278     // (e.g. anything marked needsRuntimeLookup)
7279     if (exactContextNeedsRuntimeLookup)
7280     {
7281         exactContextHnd = nullptr;
7282     }
7283
7284     //-------------------------------------------------------------------------
7285     // The main group of arguments
7286
7287     args = call->gtCall.gtCallArgs = impPopList(sig->numArgs, &argFlags, sig, extraArg);
7288
7289     if (args)
7290     {
7291         call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
7292     }
7293
7294     //-------------------------------------------------------------------------
7295     // The "this" pointer
7296
7297     if (!(mflags & CORINFO_FLG_STATIC) && !((opcode == CEE_NEWOBJ) && (newobjThis == nullptr)))
7298     {
7299         GenTreePtr obj;
7300
7301         if (opcode == CEE_NEWOBJ)
7302         {
7303             obj = newobjThis;
7304         }
7305         else
7306         {
7307             obj = impPopStack().val;
7308             obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform);
7309             if (compDonotInline())
7310             {
7311                 return callRetTyp;
7312             }
7313         }
7314
7315         /* Is this a virtual or interface call? */
7316
7317         if ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT)
7318         {
7319             /* only true object pointers can be virtual */
7320
7321             assert(obj->gtType == TYP_REF);
7322         }
7323         else
7324         {
7325             if (impIsThis(obj))
7326             {
7327                 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS;
7328             }
7329         }
7330
7331         /* Store the "this" value in the call */
7332
7333         call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT;
7334         call->gtCall.gtCallObjp = obj;
7335     }
7336
7337     //-------------------------------------------------------------------------
7338     // The "this" pointer for "newobj"
7339
7340     if (opcode == CEE_NEWOBJ)
7341     {
7342         if (clsFlags & CORINFO_FLG_VAROBJSIZE)
7343         {
7344             assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately
7345             // This is a 'new' of a variable sized object, where
7346             // the constructor is to return the object.  In this case
7347             // the constructor claims to return VOID but we know it
7348             // actually returns the new object
7349             assert(callRetTyp == TYP_VOID);
7350             callRetTyp   = TYP_REF;
7351             call->gtType = TYP_REF;
7352             impSpillSpecialSideEff();
7353
7354             impPushOnStack(call, typeInfo(TI_REF, clsHnd));
7355         }
7356         else
7357         {
7358             if (clsFlags & CORINFO_FLG_DELEGATE)
7359             {
7360                 // The new inliner morphs it in impImportCall.
7361                 // This will allow us to inline the call to the delegate constructor.
7362                 call = fgOptimizeDelegateConstructor(call, &exactContextHnd);
7363             }
7364
7365             if (!bIntrinsicImported)
7366             {
7367
7368 #if defined(DEBUG) || defined(INLINE_DATA)
7369
7370                 // Keep track of the raw IL offset of the call
7371                 call->gtCall.gtRawILOffset = rawILOffset;
7372
7373 #endif // defined(DEBUG) || defined(INLINE_DATA)
7374
7375                 // Is it an inline candidate?
7376                 impMarkInlineCandidate(call, exactContextHnd, callInfo);
7377             }
7378
7379             // append the call node.
7380             impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7381
7382             // Now push the value of the 'new' onto the stack
7383
7384             // This is a 'new' of a non-variable sized object.
7385             // Append the new node (op1) to the statement list,
7386             // and then push the local holding the value of this
7387             // new instruction on the stack.
7388
7389             if (clsFlags & CORINFO_FLG_VALUECLASS)
7390             {
7391                 assert(newobjThis->gtOper == GT_ADDR && newobjThis->gtOp.gtOp1->gtOper == GT_LCL_VAR);
7392
7393                 unsigned tmp = newobjThis->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
7394                 impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack());
7395             }
7396             else
7397             {
7398                 if (newobjThis->gtOper == GT_COMMA)
7399                 {
7400                     // In coreclr the callout can be inserted even if verification is disabled
7401                     // so we cannot rely on tiVerificationNeeded alone
7402
7403                     // We must have inserted the callout. Get the real newobj.
7404                     newobjThis = newobjThis->gtOp.gtOp2;
7405                 }
7406
7407                 assert(newobjThis->gtOper == GT_LCL_VAR);
7408                 impPushOnStack(gtNewLclvNode(newobjThis->gtLclVarCommon.gtLclNum, TYP_REF), typeInfo(TI_REF, clsHnd));
7409             }
7410         }
7411         return callRetTyp;
7412     }
7413
7414 DONE:
7415
7416     if (tailCall)
7417     {
7418         // This check cannot be performed for implicit tail calls for the reason
7419         // that impIsImplicitTailCallCandidate() is not checking whether return
7420         // types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT.
7421         // As a result it is possible that in the following case, we find that
7422         // the type stack is non-empty if Callee() is considered for implicit
7423         // tail calling.
7424         //      int Caller(..) { .... void Callee(); ret val; ... }
7425         //
7426         // Note that we cannot check return type compatibility before impImportCall()
7427         // as we don't have the required info, or we would need to duplicate some of the logic of
7428         // impImportCall().
7429         //
7430         // For implicit tail calls, we perform this check after return types are
7431         // known to be compatible.
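        // (Reading of the example above: 'val' is already on the evaluation stack when
        //  Callee() is invoked, yet the call is immediately followed by the return, so it
        //  can be marked as an implicit tail call candidate despite the non-empty stack.)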
7432         if ((tailCall & PREFIX_TAILCALL_EXPLICIT) && (verCurrentState.esStackDepth != 0))
7433         {
7434             BADCODE("Stack should be empty after tailcall");
7435         }
7436
7437         // Note that we cannot relax this condition with genActualType() as
7438         // the calling convention dictates that the caller of a function with
7439         // a small-typed return value is responsible for normalizing the return value
7440
7441         if (canTailCall &&
7442             !impTailCallRetTypeCompatible(info.compRetType, info.compMethodInfo->args.retTypeClass, callRetTyp,
7443                                           callInfo->sig.retTypeClass))
7444         {
7445             canTailCall             = false;
7446             szCanTailCallFailReason = "Return types are not tail call compatible";
7447         }
7448
7449         // Stack empty check for implicit tail calls.
7450         if (canTailCall && (tailCall & PREFIX_TAILCALL_IMPLICIT) && (verCurrentState.esStackDepth != 0))
7451         {
7452 #ifdef _TARGET_AMD64_
7453             // JIT64 Compatibility:  Opportunistic tail call stack mismatch throws a VerificationException
7454             // in JIT64, not an InvalidProgramException.
7455             Verify(false, "Stack should be empty after tailcall");
7456 #else  // _TARGET_64BIT_
7457             BADCODE("Stack should be empty after tailcall");
7458 #endif //!_TARGET_64BIT_
7459         }
7460
7461         // assert(compCurBB is not a catch, finally or filter block);
7462         // assert(compCurBB is not a try block protected by a finally block);
7463
7464         // Check for permission to tailcall
7465         bool explicitTailCall = (tailCall & PREFIX_TAILCALL_EXPLICIT) != 0;
7466
7467         assert(!explicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN);
7468
7469         if (canTailCall)
7470         {
7471             // True virtual or indirect calls shouldn't pass in a callee handle.
7472             CORINFO_METHOD_HANDLE exactCalleeHnd = ((call->gtCall.gtCallType != CT_USER_FUNC) ||
7473                                                     ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT))
7474                                                        ? nullptr
7475                                                        : methHnd;
7476             GenTreePtr thisArg = call->gtCall.gtCallObjp;
7477
7478             if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, explicitTailCall))
7479             {
7480                 canTailCall = true;
7481                 if (explicitTailCall)
7482                 {
7483                     // In case of explicit tail calls, mark it so that it is not considered
7484                     // for in-lining.
7485                     call->gtCall.gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL;
7486 #ifdef DEBUG
7487                     if (verbose)
7488                     {
7489                         printf("\nGTF_CALL_M_EXPLICIT_TAILCALL bit set for call ");
7490                         printTreeID(call);
7491                         printf("\n");
7492                     }
7493 #endif
7494                 }
7495                 else
7496                 {
7497 #if FEATURE_TAILCALL_OPT
7498                     // Must be an implicit tail call.
7499                     assert((tailCall & PREFIX_TAILCALL_IMPLICIT) != 0);
7500
7501                     // It is possible that a call node is both an inline candidate and marked
7502                     // for opportunistic tail calling.  In-lining happens before morphing of
7503                     // trees.  If in-lining of an in-line candidate gets aborted for whatever
7504                     // reason, it will survive to the morphing stage at which point it will be
7505                     // transformed into a tail call after performing additional checks.
7506
7507                     call->gtCall.gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL;
7508 #ifdef DEBUG
7509                     if (verbose)
7510                     {
7511                         printf("\nGTF_CALL_M_IMPLICIT_TAILCALL bit set for call ");
7512                         printTreeID(call);
7513                         printf("\n");
7514                     }
7515 #endif
7516
7517 #else //! FEATURE_TAILCALL_OPT
7518                     NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls");
7519
7520 #endif // FEATURE_TAILCALL_OPT
7521                 }
7522
7523                 // we can't report success just yet...
7524             }
7525             else
7526             {
7527                 canTailCall = false;
7528 // canTailCall reported its reasons already
7529 #ifdef DEBUG
7530                 if (verbose)
7531                 {
7532                     printf("\ninfo.compCompHnd->canTailCall returned false for call ");
7533                     printTreeID(call);
7534                     printf("\n");
7535                 }
7536 #endif
7537             }
7538         }
7539         else
7540         {
7541             // If this assert fires it means that canTailCall was set to false without setting a reason!
7542             assert(szCanTailCallFailReason != nullptr);
7543
7544 #ifdef DEBUG
7545             if (verbose)
7546             {
7547                 printf("\nRejecting %splicit tail call for call ", explicitTailCall ? "ex" : "im");
7548                 printTreeID(call);
7549                 printf(": %s\n", szCanTailCallFailReason);
7550             }
7551 #endif
7552             info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, explicitTailCall, TAILCALL_FAIL,
7553                                                      szCanTailCallFailReason);
7554         }
7555     }
7556
7557     // Note: we assume that small return types are already normalized by the managed callee
7558     // or by the pinvoke stub for calls to unmanaged code.
7559
7560     if (!bIntrinsicImported)
7561     {
7562         //
7563         // Things needed to be checked when bIntrinsicImported is false.
7564         //
7565
7566         assert(call->gtOper == GT_CALL);
7567         assert(sig != nullptr);
7568
7569         // Tail calls require us to save the call site's sig info so we can obtain an argument
7570         // copying thunk from the EE later on.
7571         if (call->gtCall.callSig == nullptr)
7572         {
7573             call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7574             *call->gtCall.callSig = *sig;
7575         }
7576
7577         if (compIsForInlining() && opcode == CEE_CALLVIRT)
7578         {
7579             GenTreePtr callObj = call->gtCall.gtCallObjp;
7580             assert(callObj != nullptr);
7581
7582             unsigned callKind = call->gtFlags & GTF_CALL_VIRT_KIND_MASK;
7583
7584             if (((callKind != GTF_CALL_NONVIRT) || (call->gtFlags & GTF_CALL_NULLCHECK)) &&
7585                 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(call->gtCall.gtCallArgs, callObj,
7586                                                                    impInlineInfo->inlArgInfo))
7587             {
7588                 impInlineInfo->thisDereferencedFirst = true;
7589             }
7590         }
7591
7592 #if defined(DEBUG) || defined(INLINE_DATA)
7593
7594         // Keep track of the raw IL offset of the call
7595         call->gtCall.gtRawILOffset = rawILOffset;
7596
7597 #endif // defined(DEBUG) || defined(INLINE_DATA)
7598
7599         // Is it an inline candidate?
7600         impMarkInlineCandidate(call, exactContextHnd, callInfo);
7601     }
7602
7603 DONE_CALL:
7604     // Push or append the result of the call
7605     if (callRetTyp == TYP_VOID)
7606     {
7607         if (opcode == CEE_NEWOBJ)
7608         {
7609             // we actually did push something, so don't spill the thing we just pushed.
7610             assert(verCurrentState.esStackDepth > 0);
7611             impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtOffs);
7612         }
7613         else
7614         {
7615             impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7616         }
7617     }
7618     else
7619     {
7620         impSpillSpecialSideEff();
7621
7622         if (clsFlags & CORINFO_FLG_ARRAY)
7623         {
7624             eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
7625         }
7626
7627         // Find the return type used for verification by interpreting the method signature.
7628         // NB: we are clobbering the already established sig.
7629         if (tiVerificationNeeded)
7630         {
7631             // Actually, we never get the sig for the original method.
7632             sig = &(callInfo->verSig);
7633         }
7634
7635         typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass);
7636         tiRetVal.NormaliseForStack();
7637
7638         // The CEE_READONLY prefix modifies the verification semantics of an Address
7639         // operation on an array type.
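        // E.g. (illustrative IL, not from this file): a "readonly." prefix on the call to
        // the array's Address(...) method yields a controlled-mutability byref, which is
        // what SetIsReadonlyByRef() records below.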
7640         if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall && tiRetVal.IsByRef())
7641         {
7642             tiRetVal.SetIsReadonlyByRef();
7643         }
7644
7645         if (tiVerificationNeeded)
7646         {
7647             // We assume all calls return permanent home byrefs. If they
7648             // didn't, they wouldn't be verifiable. This also covers
7649             // the Address() helper for multidimensional arrays.
7650             if (tiRetVal.IsByRef())
7651             {
7652                 tiRetVal.SetIsPermanentHomeByRef();
7653             }
7654         }
7655
7656         if (call->IsCall())
7657         {
7658             // Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call)
7659
7660             bool fatPointerCandidate = call->AsCall()->IsFatPointerCandidate();
7661             if (varTypeIsStruct(callRetTyp))
7662             {
7663                 call = impFixupCallStructReturn(call, sig->retTypeClass);
7664             }
7665
7666             if ((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0)
7667             {
7668                 assert(opts.OptEnabled(CLFLG_INLINING));
7669                 assert(!fatPointerCandidate); // We should not try to inline calli.
7670
7671                 // Make the call its own tree (spill the stack if needed).
7672                 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7673
7674                 // TODO: Still using the widened type.
7675                 call = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp));
7676             }
7677             else
7678             {
7679                 if (fatPointerCandidate)
7680                 {
7681                     // fatPointer candidates should be in statements of the form call() or var = call().
7682                     // Such a form allows us to find statements with fat calls without walking whole trees
7683                     // and avoids problems with cutting trees.
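                    // Desired statement shapes (sketch):
                    //     CALL(...)              -- a top-level call statement, or
                    //     lclTmp = CALL(...)     -- the call assigned to a local,
                    // which is what the temp assignment below establishes when needed.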
7684                     assert(!bIntrinsicImported);
7685                     assert(IsTargetAbi(CORINFO_CORERT_ABI));
7686                     if (call->OperGet() != GT_LCL_VAR) // may already have been converted by impFixupCallStructReturn.
7687                     {
7688                         unsigned   calliSlot  = lvaGrabTemp(true DEBUGARG("calli"));
7689                         LclVarDsc* varDsc     = &lvaTable[calliSlot];
7690                         varDsc->lvVerTypeInfo = tiRetVal;
7691                         impAssignTempGen(calliSlot, call, clsHnd, (unsigned)CHECK_SPILL_NONE);
7692                         // impAssignTempGen can change src arg list and return type for call that returns struct.
7693                         var_types type = genActualType(lvaTable[calliSlot].TypeGet());
7694                         call           = gtNewLclvNode(calliSlot, type);
7695                     }
7696                 }
7697                 // For non-candidates we must also spill, since we
7698                 // might have locals live on the eval stack that this
7699                 // call can modify.
7700                 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call"));
7701             }
7702         }
7703
7704         if (!bIntrinsicImported)
7705         {
7706             //-------------------------------------------------------------------------
7707             //
7708             /* If the call is of a small type and the callee is managed, the callee will normalize the result
7709                 before returning.
7710                 However, we need to normalize small type values returned by unmanaged
7711                 functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here
7712                 if we use the shorter inlined pinvoke stub. */
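            // E.g. (illustrative): for an inlined pinvoke returning 'short', the cast
            // built below is roughly (int)(short)call, re-normalizing the upper bits.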
7713
7714             if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT))
7715             {
7716                 call = gtNewCastNode(genActualType(callRetTyp), call, callRetTyp);
7717             }
7718         }
7719
7720         impPushOnStack(call, tiRetVal);
7721     }
7722
7723     // VSD functions get a new call target each time we call getCallInfo, so clear the cache.
7724     // Also, the call info cache for CALLI instructions is largely incomplete, so clear it out.
7725     // if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB))
7726     //  callInfoCache.uncacheCallInfo();
7727
7728     return callRetTyp;
7729 }
7730 #ifdef _PREFAST_
7731 #pragma warning(pop)
7732 #endif
7733
7734 bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo)
7735 {
7736     CorInfoType corType = methInfo->args.retType;
7737
7738     if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY))
7739     {
7740         // We have some kind of STRUCT being returned
7741
7742         structPassingKind howToReturnStruct = SPK_Unknown;
7743
7744         var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, &howToReturnStruct);
7745
7746         if (howToReturnStruct == SPK_ByReference)
7747         {
7748             return true;
7749         }
7750     }
7751
7752     return false;
7753 }
7754
7755 #ifdef DEBUG
7756 //
7757 var_types Compiler::impImportJitTestLabelMark(int numArgs)
7758 {
7759     TestLabelAndNum tlAndN;
7760     if (numArgs == 2)
7761     {
7762         tlAndN.m_num  = 0;
7763         StackEntry se = impPopStack();
7764         assert(se.seTypeInfo.GetType() == TI_INT);
7765         GenTreePtr val = se.val;
7766         assert(val->IsCnsIntOrI());
7767         tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
7768     }
7769     else if (numArgs == 3)
7770     {
7771         StackEntry se = impPopStack();
7772         assert(se.seTypeInfo.GetType() == TI_INT);
7773         GenTreePtr val = se.val;
7774         assert(val->IsCnsIntOrI());
7775         tlAndN.m_num = val->AsIntConCommon()->IconValue();
7776         se           = impPopStack();
7777         assert(se.seTypeInfo.GetType() == TI_INT);
7778         val = se.val;
7779         assert(val->IsCnsIntOrI());
7780         tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
7781     }
7782     else
7783     {
7784         assert(false);
7785     }
7786
7787     StackEntry expSe = impPopStack();
7788     GenTreePtr node  = expSe.val;
7789
7790     // There are a small number of special cases, where we actually put the annotation on a subnode.
7791     if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100)
7792     {
7793         // A loop hoist annotation with value >= 100 means that the expression should be a static field access,
7794         // a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some
7795         // offset within the static field block whose address is returned by the helper call.
7796         // The annotation is saying that this address calculation, but not the entire access, should be hoisted.
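        // E.g. (illustrative): an annotation value of 103 becomes 3 and is moved from the
        // GT_IND node to its address operand, marking only the address computation.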
7797         GenTreePtr helperCall = nullptr;
7798         assert(node->OperGet() == GT_IND);
7799         tlAndN.m_num -= 100;
7800         GetNodeTestData()->Set(node->gtOp.gtOp1, tlAndN);
7801         GetNodeTestData()->Remove(node);
7802     }
7803     else
7804     {
7805         GetNodeTestData()->Set(node, tlAndN);
7806     }
7807
7808     impPushOnStack(node, expSe.seTypeInfo);
7809     return node->TypeGet();
7810 }
7811 #endif // DEBUG
7812
7813 //-----------------------------------------------------------------------------------
7814 //  impFixupCallStructReturn: For a call node that returns a struct type either
7815 //  adjust the return type to an enregisterable type, or set the flag to indicate
7816 //  struct return via retbuf arg.
7817 //
7818 //  Arguments:
7819 //    call       -  GT_CALL GenTree node
7820 //    retClsHnd  -  Class handle of return type of the call
7821 //
7822 //  Return Value:
7823 //    Returns new GenTree node after fixing struct return of call node
7824 //
7825 GenTreePtr Compiler::impFixupCallStructReturn(GenTreePtr call, CORINFO_CLASS_HANDLE retClsHnd)
7826 {
7827     assert(call->gtOper == GT_CALL);
7828
7829     if (!varTypeIsStruct(call))
7830     {
7831         return call;
7832     }
7833
7834     call->gtCall.gtRetClsHnd = retClsHnd;
7835
7836     GenTreeCall* callNode = call->AsCall();
7837
7838 #if FEATURE_MULTIREG_RET
7839     // Initialize Return type descriptor of call node
7840     ReturnTypeDesc* retTypeDesc = callNode->GetReturnTypeDesc();
7841     retTypeDesc->InitializeStructReturnType(this, retClsHnd);
7842 #endif // FEATURE_MULTIREG_RET
7843
7844 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
7845
7846     // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
7847     assert(!callNode->IsVarargs() && "varargs not allowed for System V OSs.");
7848
7849     // The return type will remain as the incoming struct type unless normalized to a
7850     // single eightbyte return type below.
7851     callNode->gtReturnType = call->gtType;
7852
7853     unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7854     if (retRegCount != 0)
7855     {
7856         if (retRegCount == 1)
7857         {
7858             // struct returned in a single register
7859             callNode->gtReturnType = retTypeDesc->GetReturnRegType(0);
7860         }
7861         else
7862         {
7863             // must be a struct returned in two registers
7864             assert(retRegCount == 2);
7865
7866             if ((!callNode->CanTailCall()) && (!callNode->IsInlineCandidate()))
7867             {
7868                 // Force a call returning multi-reg struct to be always of the IR form
7869                 //   tmp = call
7870                 //
7871                 // No need to assign a multi-reg struct to a local var if:
7872                 //  - It is a tail call or
7873                 //  - The call is marked for in-lining later
7874                 return impAssignMultiRegTypeToVar(call, retClsHnd);
7875             }
7876         }
7877     }
7878     else
7879     {
7880         // struct not returned in registers, i.e. returned via hidden retbuf arg.
7881         callNode->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
7882     }
7883
7884 #else // not FEATURE_UNIX_AMD64_STRUCT_PASSING
7885
7886 #if FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
7887     // There is no fixup necessary if the return type is an HFA struct.
7888     // HFA structs are returned in registers for ARM32 and ARM64
7889     //
7890     if (!call->gtCall.IsVarargs() && IsHfa(retClsHnd))
7891     {
7892         if (call->gtCall.CanTailCall())
7893         {
7894             if (info.compIsVarArgs)
7895             {
7896                 // We cannot tail call because control needs to return to fixup the calling
7897                 // convention for result return.
7898                 call->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
7899             }
7900             else
7901             {
7902                 // If we can tail call returning HFA, then don't assign it to
7903                 // a variable back and forth.
7904                 return call;
7905             }
7906         }
7907
7908         if (call->gtFlags & GTF_CALL_INLINE_CANDIDATE)
7909         {
7910             return call;
7911         }
7912
7913         unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7914         if (retRegCount >= 2)
7915         {
7916             return impAssignMultiRegTypeToVar(call, retClsHnd);
7917         }
7918     }
7919 #endif // _TARGET_ARM_
7920
7921     // Check for TYP_STRUCT type that wraps a primitive type
7922     // Such structs are returned using a single register
7923     // and we change the return type on those calls here.
7924     //
7925     structPassingKind howToReturnStruct;
7926     var_types         returnType = getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
7927
7928     if (howToReturnStruct == SPK_ByReference)
7929     {
7930         assert(returnType == TYP_UNKNOWN);
7931         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
7932     }
7933     else
7934     {
7935         assert(returnType != TYP_UNKNOWN);
7936         call->gtCall.gtReturnType = returnType;
7937
7938         // ToDo: Refactor this common code sequence into its own method as it is used 4+ times
7939         if ((returnType == TYP_LONG) && (compLongUsed == false))
7940         {
7941             compLongUsed = true;
7942         }
7943         else if (((returnType == TYP_FLOAT) || (returnType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
7944         {
7945             compFloatingPointUsed = true;
7946         }
7947
7948 #if FEATURE_MULTIREG_RET
7949         unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7950         assert(retRegCount != 0);
7951
7952         if (retRegCount >= 2)
7953         {
7954             if ((!callNode->CanTailCall()) && (!callNode->IsInlineCandidate()))
7955             {
7956                 // Force a call returning multi-reg struct to be always of the IR form
7957                 //   tmp = call
7958                 //
7959                 // No need to assign a multi-reg struct to a local var if:
7960                 //  - It is a tail call or
7961                 //  - The call is marked for in-lining later
7962                 return impAssignMultiRegTypeToVar(call, retClsHnd);
7963             }
7964         }
7965 #endif // FEATURE_MULTIREG_RET
7966     }
7967
7968 #endif // not FEATURE_UNIX_AMD64_STRUCT_PASSING
7969
7970     return call;
7971 }
7972
7973 /*****************************************************************************
7974    For struct return values, re-type the operand in the case where the ABI
7975    does not use a struct return buffer
7976    Note that this method is only called for !_TARGET_X86_
7977  */
7978
7979 GenTreePtr Compiler::impFixupStructReturnType(GenTreePtr op, CORINFO_CLASS_HANDLE retClsHnd)
7980 {
7981     assert(varTypeIsStruct(info.compRetType));
7982     assert(info.compRetBuffArg == BAD_VAR_NUM);
7983
7984 #if defined(_TARGET_XARCH_)
7985
7986 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
7987     // No VarArgs for CoreCLR on x64 Unix
7988     assert(!info.compIsVarArgs);
7989
7990     // Is method returning a multi-reg struct?
7991     if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd))
7992     {
7993         // In case of multi-reg struct return, we force IR to be one of the following:
7994         // GT_RETURN(lclvar) or GT_RETURN(call).  If op is anything other than a
7995         // lclvar or call, it is assigned to a temp to create: temp = op and GT_RETURN(tmp).
7996
7997         if (op->gtOper == GT_LCL_VAR)
7998         {
7999             // Make sure that this struct stays in memory and doesn't get promoted.
8000             unsigned lclNum                  = op->gtLclVarCommon.gtLclNum;
8001             lvaTable[lclNum].lvIsMultiRegRet = true;
8002
8003             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8004             op->gtFlags |= GTF_DONT_CSE;
8005
8006             return op;
8007         }
8008
8009         if (op->gtOper == GT_CALL)
8010         {
8011             return op;
8012         }
8013
8014         return impAssignMultiRegTypeToVar(op, retClsHnd);
8015     }
8016 #else  // !FEATURE_UNIX_AMD64_STRUCT_PASSING
8017     assert(info.compRetNativeType != TYP_STRUCT);
8018 #endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
8019
8020 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
8021
8022     if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd))
8023     {
8024         if (op->gtOper == GT_LCL_VAR)
8025         {
8026             // This LCL_VAR is an HFA return value; it stays as a TYP_STRUCT
8027             unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8028             // Make sure this struct type stays as struct so that we can return it as an HFA
8029             lvaTable[lclNum].lvIsMultiRegRet = true;
8030
8031             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8032             op->gtFlags |= GTF_DONT_CSE;
8033
8034             return op;
8035         }
8036
8037         if (op->gtOper == GT_CALL)
8038         {
8039             if (op->gtCall.IsVarargs())
8040             {
8041                 // We cannot tail call because control needs to return to fixup the calling
8042                 // convention for result return.
8043                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
8044                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8045             }
8046             else
8047             {
8048                 return op;
8049             }
8050         }
8051         return impAssignMultiRegTypeToVar(op, retClsHnd);
8052     }
8053
8054 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM64_)
8055
8056     // Is method returning a multi-reg struct?
8057     if (IsMultiRegReturnedType(retClsHnd))
8058     {
8059         if (op->gtOper == GT_LCL_VAR)
8060         {
8061             // This LCL_VAR stays as a TYP_STRUCT
8062             unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8063
8064             // Make sure this struct type is not struct promoted
8065             lvaTable[lclNum].lvIsMultiRegRet = true;
8066
8067             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8068             op->gtFlags |= GTF_DONT_CSE;
8069
8070             return op;
8071         }
8072
8073         if (op->gtOper == GT_CALL)
8074         {
8075             if (op->gtCall.IsVarargs())
8076             {
8077                 // We cannot tail call because control needs to return to fixup the calling
8078                 // convention for result return.
8079                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
8080                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8081             }
8082             else
8083             {
8084                 return op;
8085             }
8086         }
8087         return impAssignMultiRegTypeToVar(op, retClsHnd);
8088     }
8089
8090 #endif //  FEATURE_MULTIREG_RET && FEATURE_HFA
8091
8092 REDO_RETURN_NODE:
8093     // Adjust the type away from struct to integral,
8094     // with no normalization.
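    // For example (an illustrative sketch): a struct wrapping a single int that is
    // returned in a register arrives here as GT_LCL_VAR<struct>; below it becomes a
    // GT_LCL_FLD retyped to info.compRetNativeType (e.g. TYP_INT).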
8095     if (op->gtOper == GT_LCL_VAR)
8096     {
8097         op->ChangeOper(GT_LCL_FLD);
8098     }
8099     else if (op->gtOper == GT_OBJ)
8100     {
8101         GenTreePtr op1 = op->AsObj()->Addr();
8102
8103         // We will fold away OBJ/ADDR,
8104         // except for OBJ/ADDR/INDEX,
8105         //     as the array type influences the array element's offset.
8106         //     Later in this method we change op->gtType to info.compRetNativeType.
8107         //     This is not correct when op is a GT_INDEX, as the starting offset
8108         //     for the array elements 'elemOffs' is different for an array of
8109         //     TYP_REF than for an array of TYP_STRUCT (which simply wraps a TYP_REF).
8110         //     Also refer to the GTF_INX_REFARR_LAYOUT flag.
8111         //
8112         if ((op1->gtOper == GT_ADDR) && (op1->gtOp.gtOp1->gtOper != GT_INDEX))
8113         {
8114             // Change '*(&X)' to 'X' and see if we can do better
8115             op = op1->gtOp.gtOp1;
8116             goto REDO_RETURN_NODE;
8117         }
8118         op->gtObj.gtClass = NO_CLASS_HANDLE;
8119         op->ChangeOperUnchecked(GT_IND);
8120         op->gtFlags |= GTF_IND_TGTANYWHERE;
8121     }
8122     else if (op->gtOper == GT_CALL)
8123     {
8124         if (op->AsCall()->TreatAsHasRetBufArg(this))
8125         {
8126             // This must be one of those 'special' helpers that don't
8127             // really have a return buffer, but instead use it as a way
8128             // to keep the trees cleaner with fewer address-taken temps.
8129             //
8130             // Well now we have to materialize the return buffer as
8131             // an address-taken temp. Then we can return the temp.
8132             //
8133             // NOTE: this code assumes that since the call directly
8134             // feeds the return, then the call must be returning the
8135             // same structure/class/type.
8136             //
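            // A sketch of the transformation performed here (illustrative only):
            //
            //     GT_RETURN( CALL helper(...) )
            //  becomes
            //     tmp = CALL helper(...);          // 'tmp' acts as the pseudo return buffer
            //     GT_RETURN( GT_LCL_FLD(tmp) )     // retyped to the native return type below
            //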
8137             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer"));
8138
8139             // No need to spill anything as we're about to return.
8140             impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE);
8141
8142             // Don't create both a GT_ADDR & GT_OBJ just to undo all of that; instead,
8143             // jump directly to a GT_LCL_FLD.
8144             op = gtNewLclvNode(tmpNum, info.compRetNativeType);
8145             op->ChangeOper(GT_LCL_FLD);
8146         }
8147         else
8148         {
8149             assert(info.compRetNativeType == op->gtCall.gtReturnType);
8150
8151             // Don't change the gtType of the node just yet, it will get changed later.
8152             return op;
8153         }
8154     }
8155     else if (op->gtOper == GT_COMMA)
8156     {
8157         op->gtOp.gtOp2 = impFixupStructReturnType(op->gtOp.gtOp2, retClsHnd);
8158     }
8159
8160     op->gtType = info.compRetNativeType;
8161
8162     return op;
8163 }
8164
8165 /*****************************************************************************
8166    CEE_LEAVE may be jumping out of a protected block, viz, a catch or a
8167    finally-protected try. We find the finally blocks protecting the current
8168    offset (in order) by walking over the complete exception table and
8169    finding enclosing clauses. This assumes that the table is sorted.
8170    This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS.
8171
8172    If we are leaving a catch handler, we need to attach the
8173    CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks.
8174
8175    After this function, the BBJ_LEAVE block has been converted to a different type.
8176  */
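
/* For example (an illustrative sketch):

       try {
           try {
               ...
               leave OUT;        // leaves two finally-protected trys
           } finally { F1 }
       } finally { F2 }
       OUT:

   the CEE_LEAVE block is converted into the chain
   BBJ_CALLFINALLY (calls F1) -> BBJ_CALLFINALLY (calls F2) -> BBJ_ALWAYS (jumps to OUT).
 */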
8177
8178 #if !FEATURE_EH_FUNCLETS
8179
8180 void Compiler::impImportLeave(BasicBlock* block)
8181 {
8182 #ifdef DEBUG
8183     if (verbose)
8184     {
8185         printf("\nBefore import CEE_LEAVE:\n");
8186         fgDispBasicBlocks();
8187         fgDispHandlerTab();
8188     }
8189 #endif // DEBUG
8190
8191     bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8192     unsigned    blkAddr         = block->bbCodeOffs;
8193     BasicBlock* leaveTarget     = block->bbJumpDest;
8194     unsigned    jmpAddr         = leaveTarget->bbCodeOffs;
8195
8196     // LEAVE clears the stack: spill side effects and set the stack depth to 0
8197
8198     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8199     verCurrentState.esStackDepth = 0;
8200
8201     assert(block->bbJumpKind == BBJ_LEAVE);
8202     assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary
8203
8204     BasicBlock* step         = DUMMY_INIT(NULL);
8205     unsigned    encFinallies = 0; // Number of enclosing finallies.
8206     GenTreePtr  endCatches   = NULL;
8207     GenTreePtr  endLFin      = NULL; // The statement tree to indicate the end of locally-invoked finally.
8208
8209     unsigned  XTnum;
8210     EHblkDsc* HBtab;
8211
8212     for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8213     {
8214         // Grab the handler offsets
8215
8216         IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8217         IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8218         IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8219         IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8220
8221         /* Is this a catch-handler we are CEE_LEAVEing out of?
8222          * If so, we need to call CORINFO_HELP_ENDCATCH.
8223          */
8224
8225         if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8226         {
8227             // Can't CEE_LEAVE out of a finally/fault handler
8228             if (HBtab->HasFinallyOrFaultHandler())
8229                 BADCODE("leave out of fault/finally block");
8230
8231             // Create the call to CORINFO_HELP_ENDCATCH
8232             GenTreePtr endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID);
8233
8234             // Make a list of all the currently pending endCatches
8235             if (endCatches)
8236                 endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch);
8237             else
8238                 endCatches = endCatch;
8239
8240 #ifdef DEBUG
8241             if (verbose)
8242             {
8243                 printf("impImportLeave - BB%02u jumping out of catch handler EH#%u, adding call to "
8244                        "CORINFO_HELP_ENDCATCH\n",
8245                        block->bbNum, XTnum);
8246             }
8247 #endif
8248         }
8249         else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8250                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8251         {
8252             /* This is a finally-protected try we are jumping out of */
8253
8254             /* If there are any pending endCatches, and we have already
8255                jumped out of a finally-protected try, then the endCatches
8256                have to be put in a block in an outer try for async
8257                exceptions to work correctly.
8258                Else, just append to the original block */
8259
8260             BasicBlock* callBlock;
8261
8262             assert(!encFinallies == !endLFin); // if we have finallies, we better have an endLFin tree, and vice-versa
8263
8264             if (encFinallies == 0)
8265             {
8266                 assert(step == DUMMY_INIT(NULL));
8267                 callBlock             = block;
8268                 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8269
8270                 if (endCatches)
8271                     impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8272
8273 #ifdef DEBUG
8274                 if (verbose)
8275                 {
8276                     printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY "
8277                            "block BB%02u [%08p]\n",
8278                            callBlock->bbNum, dspPtr(callBlock));
8279                 }
8280 #endif
8281             }
8282             else
8283             {
8284                 assert(step != DUMMY_INIT(NULL));
8285
8286                 /* Calling the finally block */
8287                 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step);
8288                 assert(step->bbJumpKind == BBJ_ALWAYS);
8289                 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8290                                               // finally in the chain)
8291                 step->bbJumpDest->bbRefs++;
8292
8293                 /* The new block will inherit this block's weight */
8294                 callBlock->setBBWeight(block->bbWeight);
8295                 callBlock->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8296
8297 #ifdef DEBUG
8298                 if (verbose)
8299                 {
8300                     printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block BB%02u "
8301                            "[%08p]\n",
8302                            callBlock->bbNum, dspPtr(callBlock));
8303                 }
8304 #endif
8305
8306                 GenTreePtr lastStmt;
8307
8308                 if (endCatches)
8309                 {
8310                     lastStmt         = gtNewStmt(endCatches);
8311                     endLFin->gtNext  = lastStmt;
8312                     lastStmt->gtPrev = endLFin;
8313                 }
8314                 else
8315                 {
8316                     lastStmt = endLFin;
8317                 }
8318
8319                 // note that this sets BBF_IMPORTED on the block
8320                 impEndTreeList(callBlock, endLFin, lastStmt);
8321             }
8322
8323             step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8324             /* The new block will inherit this block's weight */
8325             step->setBBWeight(block->bbWeight);
8326             step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8327
8328 #ifdef DEBUG
8329             if (verbose)
8330             {
8331                 printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block "
8332                        "BB%02u [%08p]\n",
8333                        step->bbNum, dspPtr(step));
8334             }
8335 #endif
8336
8337             unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel;
8338             assert(finallyNesting <= compHndBBtabCount);
8339
8340             callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8341             endLFin               = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting);
8342             endLFin               = gtNewStmt(endLFin);
8343             endCatches            = NULL;
8344
8345             encFinallies++;
8346
8347             invalidatePreds = true;
8348         }
8349     }
8350
8351     /* Append any remaining endCatches, if any */
8352
8353     assert(!encFinallies == !endLFin);
8354
8355     if (encFinallies == 0)
8356     {
8357         assert(step == DUMMY_INIT(NULL));
8358         block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8359
8360         if (endCatches)
8361             impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8362
8363 #ifdef DEBUG
8364         if (verbose)
8365         {
8366             printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS "
8367                    "block BB%02u [%08p]\n",
8368                    block->bbNum, dspPtr(block));
8369         }
8370 #endif
8371     }
8372     else
8373     {
8374         // If leaveTarget is the start of another try block, we want to make sure that
8375         // we do not insert finalStep into that try block. Hence, we find the enclosing
8376         // try block.
8377         unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget);
8378
8379         // Insert a new BB either in the try region indicated by tryIndex or
8380         // the handler region indicated by leaveTarget->bbHndIndex,
8381         // depending on which is the inner region.
8382         BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step);
8383         finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS;
8384         step->bbJumpDest = finalStep;
8385
8386         /* The new block will inherit this block's weight */
8387         finalStep->setBBWeight(block->bbWeight);
8388         finalStep->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8389
8390 #ifdef DEBUG
8391         if (verbose)
8392         {
8393             printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block BB%02u [%08p]\n",
8394                    encFinallies, finalStep->bbNum, dspPtr(finalStep));
8395         }
8396 #endif
8397
8398         GenTreePtr lastStmt;
8399
8400         if (endCatches)
8401         {
8402             lastStmt         = gtNewStmt(endCatches);
8403             endLFin->gtNext  = lastStmt;
8404             lastStmt->gtPrev = endLFin;
8405         }
8406         else
8407         {
8408             lastStmt = endLFin;
8409         }
8410
8411         impEndTreeList(finalStep, endLFin, lastStmt);
8412
8413         finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
8414
8415         // Queue up the jump target for importing
8416
8417         impImportBlockPending(leaveTarget);
8418
8419         invalidatePreds = true;
8420     }
8421
8422     if (invalidatePreds && fgComputePredsDone)
8423     {
8424         JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
8425         fgRemovePreds();
8426     }
8427
8428 #ifdef DEBUG
8429     fgVerifyHandlerTab();
8430
8431     if (verbose)
8432     {
8433         printf("\nAfter import CEE_LEAVE:\n");
8434         fgDispBasicBlocks();
8435         fgDispHandlerTab();
8436     }
8437 #endif // DEBUG
8438 }
8439
8440 #else // FEATURE_EH_FUNCLETS
8441
8442 void Compiler::impImportLeave(BasicBlock* block)
8443 {
8444 #ifdef DEBUG
8445     if (verbose)
8446     {
8447         printf("\nBefore import CEE_LEAVE in BB%02u (targeting BB%02u):\n", block->bbNum, block->bbJumpDest->bbNum);
8448         fgDispBasicBlocks();
8449         fgDispHandlerTab();
8450     }
8451 #endif // DEBUG
8452
8453     bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8454     unsigned    blkAddr         = block->bbCodeOffs;
8455     BasicBlock* leaveTarget     = block->bbJumpDest;
8456     unsigned    jmpAddr         = leaveTarget->bbCodeOffs;
8457
8458     // LEAVE clears the stack: spill side effects and set the stack depth to 0
8459
8460     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8461     verCurrentState.esStackDepth = 0;
8462
8463     assert(block->bbJumpKind == BBJ_LEAVE);
8464     assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary
8465
8466     BasicBlock* step = nullptr;
8467
8468     enum StepType
8469     {
8470         // No step type; step == NULL.
8471         ST_None,
8472
8473         // Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair?
8474         // That is, is step->bbJumpDest where a finally will return to?
8475         ST_FinallyReturn,
8476
8477         // The step block is a catch return.
8478         ST_Catch,
8479
8480         // The step block is in a "try", created as the target for a finally return or the target for a catch return.
8481         ST_Try
8482     };
8483     StepType stepType = ST_None;
8484
8485     unsigned  XTnum;
8486     EHblkDsc* HBtab;
8487
8488     for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8489     {
8490         // Grab the handler offsets
8491
8492         IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8493         IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8494         IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8495         IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8496
8497         /* Is this a catch-handler we are CEE_LEAVEing out of?
8498          */
8499
8500         if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8501         {
8502             // Can't CEE_LEAVE out of a finally/fault handler
8503             if (HBtab->HasFinallyOrFaultHandler())
8504             {
8505                 BADCODE("leave out of fault/finally block");
8506             }
8507
8508             /* We are jumping out of a catch */
8509
8510             if (step == nullptr)
8511             {
8512                 step             = block;
8513                 step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET
8514                 stepType         = ST_Catch;
8515
8516 #ifdef DEBUG
8517                 if (verbose)
8518                 {
8519                     printf("impImportLeave - jumping out of a catch (EH#%u), convert block BB%02u to BBJ_EHCATCHRET "
8520                            "block\n",
8521                            XTnum, step->bbNum);
8522                 }
8523 #endif
8524             }
8525             else
8526             {
8527                 BasicBlock* exitBlock;
8528
8529                 /* Create a new catch exit block in the catch region for the existing step block to jump to in this
8530                  * scope */
8531                 exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step);
8532
8533                 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
8534                 step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch
8535                                               // exit) returns to this block
8536                 step->bbJumpDest->bbRefs++;
8537
8538 #if defined(_TARGET_ARM_)
8539                 if (stepType == ST_FinallyReturn)
8540                 {
8541                     assert(step->bbJumpKind == BBJ_ALWAYS);
8542                     // Mark the target of a finally return
8543                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8544                 }
8545 #endif // defined(_TARGET_ARM_)
8546
8547                 /* The new block will inherit this block's weight */
8548                 exitBlock->setBBWeight(block->bbWeight);
8549                 exitBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8550
8551                 /* This exit block is the new step */
8552                 step     = exitBlock;
8553                 stepType = ST_Catch;
8554
8555                 invalidatePreds = true;
8556
8557 #ifdef DEBUG
8558                 if (verbose)
8559                 {
8560                     printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block BB%02u\n", XTnum,
8561                            exitBlock->bbNum);
8562                 }
8563 #endif
8564             }
8565         }
8566         else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8567                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8568         {
8569             /* We are jumping out of a finally-protected try */
8570
8571             BasicBlock* callBlock;
8572
8573             if (step == nullptr)
8574             {
8575 #if FEATURE_EH_CALLFINALLY_THUNKS
8576
8577                 // Put the call to the finally in the enclosing region.
8578                 unsigned callFinallyTryIndex =
8579                     (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
8580                 unsigned callFinallyHndIndex =
8581                     (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
8582                 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block);
8583
8584                 // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because
8585                 // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE,
8586                 // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the
8587                 // next block, and flow optimizations will remove it.
8588                 block->bbJumpKind = BBJ_ALWAYS;
8589                 block->bbJumpDest = callBlock;
8590                 block->bbJumpDest->bbRefs++;
8591
8592                 /* The new block will inherit this block's weight */
8593                 callBlock->setBBWeight(block->bbWeight);
8594                 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8595
8596 #ifdef DEBUG
8597                 if (verbose)
8598                 {
8599                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
8600                            "BBJ_ALWAYS, add BBJ_CALLFINALLY block BB%02u\n",
8601                            XTnum, block->bbNum, callBlock->bbNum);
8602                 }
8603 #endif
8604
8605 #else // !FEATURE_EH_CALLFINALLY_THUNKS
8606
8607                 callBlock             = block;
8608                 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8609
8610 #ifdef DEBUG
8611                 if (verbose)
8612                 {
8613                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
8614                            "BBJ_CALLFINALLY block\n",
8615                            XTnum, callBlock->bbNum);
8616                 }
8617 #endif
8618
8619 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
8620             }
8621             else
8622             {
8623                 // Calling the finally block. We already have a step block that is either the call-to-finally from a
8624                 // more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by
8625                 // a 'finally'), or the step block is the return from a catch.
8626                 //
8627                 // Due to ThreadAbortException, we can't have the catch return target the call-to-finally block
8628                 // directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will
8629                 // automatically re-raise the exception, using the return address of the catch (that is, the target
8630                 // block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will
8631                 // refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64,
8632                 // we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a
8633                 // finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a
8634                 // BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly
8635                 // within the 'try' region protected by the finally, since we generate code in such a way that execution
8636                 // never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on
8637                 // stack walks.)
8638
8639                 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
8640
8641 #if FEATURE_EH_CALLFINALLY_THUNKS
8642                 if (step->bbJumpKind == BBJ_EHCATCHRET)
8643                 {
8644                     // Need to create another step block in the 'try' region that will actually branch to the
8645                     // call-to-finally thunk.
8646                     BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
8647                     step->bbJumpDest  = step2;
8648                     step->bbJumpDest->bbRefs++;
8649                     step2->setBBWeight(block->bbWeight);
8650                     step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8651
8652 #ifdef DEBUG
8653                     if (verbose)
8654                     {
8655                         printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is "
8656                                "BBJ_EHCATCHRET (BB%02u), new BBJ_ALWAYS step-step block BB%02u\n",
8657                                XTnum, step->bbNum, step2->bbNum);
8658                     }
8659 #endif
8660
8661                     step = step2;
8662                     assert(stepType == ST_Catch); // Leave it as catch type for now.
8663                 }
8664 #endif // FEATURE_EH_CALLFINALLY_THUNKS
8665
8666 #if FEATURE_EH_CALLFINALLY_THUNKS
8667                 unsigned callFinallyTryIndex =
8668                     (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
8669                 unsigned callFinallyHndIndex =
8670                     (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
8671 #else  // !FEATURE_EH_CALLFINALLY_THUNKS
8672                 unsigned callFinallyTryIndex = XTnum + 1;
8673                 unsigned callFinallyHndIndex = 0; // don't care
8674 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
8675
8676                 callBlock        = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step);
8677                 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8678                                               // finally in the chain)
8679                 step->bbJumpDest->bbRefs++;
8680
8681 #if defined(_TARGET_ARM_)
8682                 if (stepType == ST_FinallyReturn)
8683                 {
8684                     assert(step->bbJumpKind == BBJ_ALWAYS);
8685                     // Mark the target of a finally return
8686                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8687                 }
8688 #endif // defined(_TARGET_ARM_)
8689
8690                 /* The new block will inherit this block's weight */
8691                 callBlock->setBBWeight(block->bbWeight);
8692                 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8693
8694 #ifdef DEBUG
8695                 if (verbose)
8696                 {
8697                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY block "
8698                            "BB%02u\n",
8699                            XTnum, callBlock->bbNum);
8700                 }
8701 #endif
8702             }
8703
8704             step     = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8705             stepType = ST_FinallyReturn;
8706
8707             /* The new block will inherit this block's weight */
8708             step->setBBWeight(block->bbWeight);
8709             step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8710
8711 #ifdef DEBUG
8712             if (verbose)
8713             {
8714                 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) "
8715                        "block BB%02u\n",
8716                        XTnum, step->bbNum);
8717             }
8718 #endif
8719
8720             callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8721
8722             invalidatePreds = true;
8723         }
8724         else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8725                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8726         {
8727             // We are jumping out of a catch-protected try.
8728             //
8729             // If we are returning from a call to a finally, then we must have a step block within a try
8730             // that is protected by a catch. This is so when unwinding from that finally (e.g., if code within the
8731             // finally raises an exception), the VM will find this step block, notice that it is in a protected region,
8732             // and invoke the appropriate catch.
8733             //
8734             // We also need to handle a special case with the handling of ThreadAbortException. If a try/catch
8735             // catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception),
8736             // and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM,
8737             // the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target
8738             // address of the catch return as the new exception address. That is, the re-raised exception appears to
8739             // occur at the catch return address. If this exception return address skips an enclosing try/catch that
8740             // catches ThreadAbortException, then the enclosing try/catch will not catch the exception, as it should.
8741             // For example:
8742             //
8743             // try {
8744             //    try {
8745             //       // something here raises ThreadAbortException
8746             //       LEAVE LABEL_1; // no need to stop at LABEL_2
8747             //    } catch (Exception) {
8748             //       // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so
8749             //       // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode.
8750             //       // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised
8751             //       // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only
8752             //       // need to do this transformation if the current EH block is a try/catch that catches
8753             //       // ThreadAbortException (or one of its parents), however we might not be able to find that
8754             //       // information, so currently we do it for all catch types.
8755             //       LEAVE LABEL_1; // Convert this to LEAVE LABEL_2;
8756             //    }
8757             //    LABEL_2: LEAVE LABEL_1; // inserted by this step creation code
8758             // } catch (ThreadAbortException) {
8759             // }
8760             // LABEL_1:
8761             //
8762             // Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C#
8763             // compiler.
8764
8765             if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch))
8766             {
8767                 BasicBlock* catchStep;
8768
8769                 assert(step);
8770
8771                 if (stepType == ST_FinallyReturn)
8772                 {
8773                     assert(step->bbJumpKind == BBJ_ALWAYS);
8774                 }
8775                 else
8776                 {
8777                     assert(stepType == ST_Catch);
8778                     assert(step->bbJumpKind == BBJ_EHCATCHRET);
8779                 }
8780
8781                 /* Create a new exit block in the try region for the existing step block to jump to in this scope */
8782                 catchStep        = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
8783                 step->bbJumpDest = catchStep;
8784                 step->bbJumpDest->bbRefs++;
8785
8786 #if defined(_TARGET_ARM_)
8787                 if (stepType == ST_FinallyReturn)
8788                 {
8789                     // Mark the target of a finally return
8790                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8791                 }
8792 #endif // defined(_TARGET_ARM_)
8793
8794                 /* The new block will inherit this block's weight */
8795                 catchStep->setBBWeight(block->bbWeight);
8796                 catchStep->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8797
8798 #ifdef DEBUG
8799                 if (verbose)
8800                 {
8801                     if (stepType == ST_FinallyReturn)
8802                     {
8803                         printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new "
8804                                "BBJ_ALWAYS block BB%02u\n",
8805                                XTnum, catchStep->bbNum);
8806                     }
8807                     else
8808                     {
8809                         assert(stepType == ST_Catch);
8810                         printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new "
8811                                "BBJ_ALWAYS block BB%02u\n",
8812                                XTnum, catchStep->bbNum);
8813                     }
8814                 }
8815 #endif // DEBUG
8816
8817                 /* This block is the new step */
8818                 step     = catchStep;
8819                 stepType = ST_Try;
8820
8821                 invalidatePreds = true;
8822             }
8823         }
8824     }
8825
8826     if (step == nullptr)
8827     {
8828         block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8829
8830 #ifdef DEBUG
8831         if (verbose)
8832         {
8833             printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE "
8834                    "block BB%02u to BBJ_ALWAYS\n",
8835                    block->bbNum);
8836         }
8837 #endif
8838     }
8839     else
8840     {
8841         step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
8842
8843 #if defined(_TARGET_ARM_)
8844         if (stepType == ST_FinallyReturn)
8845         {
8846             assert(step->bbJumpKind == BBJ_ALWAYS);
8847             // Mark the target of a finally return
8848             step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8849         }
8850 #endif // defined(_TARGET_ARM_)
8851
8852 #ifdef DEBUG
8853         if (verbose)
8854         {
8855             printf("impImportLeave - final destination of step blocks set to BB%02u\n", leaveTarget->bbNum);
8856         }
8857 #endif
8858
8859         // Queue up the jump target for importing
8860
8861         impImportBlockPending(leaveTarget);
8862     }
8863
8864     if (invalidatePreds && fgComputePredsDone)
8865     {
8866         JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
8867         fgRemovePreds();
8868     }
8869
8870 #ifdef DEBUG
8871     fgVerifyHandlerTab();
8872
8873     if (verbose)
8874     {
8875         printf("\nAfter import CEE_LEAVE:\n");
8876         fgDispBasicBlocks();
8877         fgDispHandlerTab();
8878     }
8879 #endif // DEBUG
8880 }
8881
8882 #endif // FEATURE_EH_FUNCLETS
8883
8884 /*****************************************************************************/
8885 // This is called when reimporting a leave block. It resets the JumpKind,
8886 // JumpDest, and bbNext to the original values
8887
8888 void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr)
8889 {
8890 #if FEATURE_EH_FUNCLETS
8891     // With EH Funclets, while importing the leave opcode we create another block ending with BBJ_ALWAYS (call it B1),
8892     // and the block containing the leave (say B0) is marked as BBJ_CALLFINALLY. Say for some reason we reimport B0:
8893     // it is reset (in this routine) by marking it as ending with BBJ_LEAVE, and further down, when B0 is reimported, we
8894     // create another BBJ_ALWAYS (call it B2). In this process B1 gets orphaned, and any blocks for which B1 is the
8895     // only predecessor are also considered orphans, and the JIT attempts to delete them.
8896     //
8897     //  try  {
8898     //     ....
8899     //     try
8900     //     {
8901     //         ....
8902     //         leave OUTSIDE;  // B0 is the block containing this leave, following this would be B1
8903     //     } finally { }
8904     //  } finally { }
8905     //  OUTSIDE:
8906     //
8907     // In the above nested try-finally example, we create a step block (call it Bstep) which branches to the block
8908     // that a finally would branch to (such a block is marked as a finally target).  Block B1 branches to the step block.
8909     // Because of the re-import of B0, Bstep is also orphaned. Since Bstep is a finally target it cannot be removed.  To
8910     // work around this we will duplicate B0 (call it B0Dup) before resetting. B0Dup is marked as BBJ_CALLFINALLY and
8911     // only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block deletion B0Dup and B1
8912     // will be treated as a pair and handled correctly.
8913     if (block->bbJumpKind == BBJ_CALLFINALLY)
8914     {
8915         BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind);
8916         dupBlock->bbFlags    = block->bbFlags;
8917         dupBlock->bbJumpDest = block->bbJumpDest;
8918         dupBlock->copyEHRegion(block);
8919         dupBlock->bbCatchTyp = block->bbCatchTyp;
8920
8921         // Mark this block as
8922         //  a) not referenced by any other block to make sure that it gets deleted
8923         //  b) weight zero
8924         //  c) prevent from being imported
8925         //  d) as internal
8926         //  e) as rarely run
8927         dupBlock->bbRefs   = 0;
8928         dupBlock->bbWeight = 0;
8929         dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY;
8930
8931         // Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS
8932         // will be next to each other.
8933         fgInsertBBafter(block, dupBlock);
8934
8935 #ifdef DEBUG
8936         if (verbose)
8937         {
8938             printf("New Basic Block BB%02u duplicate of BB%02u created.\n", dupBlock->bbNum, block->bbNum);
8939         }
8940 #endif
8941     }
8942 #endif // FEATURE_EH_FUNCLETS
8943
8944     block->bbJumpKind = BBJ_LEAVE;
8945     fgInitBBLookup();
8946     block->bbJumpDest = fgLookupBB(jmpAddr);
8947
8948     // We will leave the BBJ_ALWAYS block we introduced. When it's reimported
8949     // the BBJ_ALWAYS block will be unreachable, and will be removed afterwards. The
8950     // reason we don't want to remove the block at this point is that if we call
8951     // fgInitBBLookup() again we will do it wrong, as the BBJ_ALWAYS block won't be
8952     // added and the linked list length will be different from fgBBcount.
8953 }
8954
8955 /*****************************************************************************/
8956 // Get the first non-prefix opcode. Used for verification of valid combinations
8957 // of prefixes and actual opcodes.
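//
// For example, given the IL sequence "volatile. ldind.i4" this returns CEE_LDIND_I4
// (illustrative; the prefix opcodes listed in the switch below are skipped).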
8958
8959 static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp)
8960 {
8961     while (codeAddr < codeEndp)
8962     {
8963         OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
8964         codeAddr += sizeof(__int8);
8965
8966         if (opcode == CEE_PREFIX1)
8967         {
8968             if (codeAddr >= codeEndp)
8969             {
8970                 break;
8971             }
8972             opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
8973             codeAddr += sizeof(__int8);
8974         }
8975
8976         switch (opcode)
8977         {
8978             case CEE_UNALIGNED:
8979             case CEE_VOLATILE:
8980             case CEE_TAILCALL:
8981             case CEE_CONSTRAINED:
8982             case CEE_READONLY:
8983                 break;
8984             default:
8985                 return opcode;
8986         }
8987
8988         codeAddr += opcodeSizes[opcode];
8989     }
8990
8991     return CEE_ILLEGAL;
8992 }
8993
8994 /*****************************************************************************/
8995 // Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes
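//
// For example (illustrative), "volatile. ldsfld" is accepted, while "volatile."
// followed by an opcode such as "add" is rejected with BADCODE.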
8996
8997 static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix)
8998 {
8999     OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
9000
9001     if (!(
9002             // The opcodes of all the ldind and stind instructions happen to be contiguous, except stind.i.
9003             ((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) ||
9004             (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) ||
9005             (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) ||
9006             // The volatile. prefix is allowed with ldsfld and stsfld
9007             (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD)))))
9008     {
9009         BADCODE("Invalid opcode for unaligned. or volatile. prefix");
9010     }
9011 }
9012
9013 /*****************************************************************************/
9014
9015 #ifdef DEBUG
9016
9017 #undef RETURN // undef contracts RETURN macro
9018
9019 enum controlFlow_t
9020 {
9021     NEXT,
9022     CALL,
9023     RETURN,
9024     THROW,
9025     BRANCH,
9026     COND_BRANCH,
9027     BREAK,
9028     PHI,
9029     META,
9030 };
9031
9032 const static controlFlow_t controlFlow[] = {
9033 #define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow,
9034 #include "opcode.def"
9035 #undef OPDEF
9036 };
9037
9038 #endif // DEBUG
9039
9040 /*****************************************************************************
9041  *  Determine the result type of an arithmetic operation
9042  *  On 64-bit targets, inserts upcasts when native int is mixed with int32
9043  */
9044 var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTreePtr* pOp1, GenTreePtr* pOp2)
9045 {
9046     var_types  type = TYP_UNDEF;
9047     GenTreePtr op1 = *pOp1, op2 = *pOp2;
9048
9049     // Arithmetic operations are generally only allowed with
9050     // primitive types, but certain operations are allowed
9051     // with byrefs
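    //
    // In summary, the rules implemented below are (a sketch of the cases that follow):
    //   byref - byref         => native int
    //   [native] int - byref  => native int
    //   byref - [native] int  => byref
    //   byref + [native] int  => byref   (in either operand order)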
9052
9053     if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9054     {
9055         if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9056         {
9057             // byref1-byref2 => gives a native int
9058             type = TYP_I_IMPL;
9059         }
9060         else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9061         {
9062             // [native] int - byref => gives a native int
9063
9064             //
9065             // The reason is that it is possible, in managed C++,
9066             // to have a tree like this:
9067             //
9068             //              -
9069             //             / \
9070             //            /   \
9071             //           /     \
9072             //          /       \
9073             // const(h) int     addr byref
9074             //
9075             // <BUGNUM> VSW 318822 </BUGNUM>
9076             //
9077             // So here we decide to make the resulting type to be a native int.
9078             CLANG_FORMAT_COMMENT_ANCHOR;
9079
9080 #ifdef _TARGET_64BIT_
9081             if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9082             {
9083                 // insert an explicit upcast
9084                 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9085             }
9086 #endif // _TARGET_64BIT_
9087
9088             type = TYP_I_IMPL;
9089         }
9090         else
9091         {
9092             // byref - [native] int => gives a byref
9093             assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet()));
9094
9095 #ifdef _TARGET_64BIT_
9096             if ((genActualType(op2->TypeGet()) != TYP_I_IMPL))
9097             {
9098                 // insert an explicit upcast
9099                 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9100             }
9101 #endif // _TARGET_64BIT_
9102
9103             type = TYP_BYREF;
9104         }
9105     }
9106     else if ((oper == GT_ADD) &&
9107              (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9108     {
9109         // byref + [native] int => gives a byref
9110         // (or)
9111         // [native] int + byref => gives a byref
9112
9113         // only one can be a byref : byref op byref not allowed
9114         assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF);
9115         assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet()));
9116
9117 #ifdef _TARGET_64BIT_
9118         if (genActualType(op2->TypeGet()) == TYP_BYREF)
9119         {
9120             if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9121             {
9122                 // insert an explicit upcast
9123                 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9124             }
9125         }
9126         else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9127         {
9128             // insert an explicit upcast
9129             op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9130         }
9131 #endif // _TARGET_64BIT_
9132
9133         type = TYP_BYREF;
9134     }
9135 #ifdef _TARGET_64BIT_
9136     else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL)
9137     {
9138         assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9139
9140         // int + long => gives long
9141         // long + int => gives long
9142         // we get this because in the IL the long isn't Int64, it's just IntPtr
9143
9144         if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9145         {
9146             // insert an explicit upcast
9147             op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9148         }
9149         else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9150         {
9151             // insert an explicit upcast
9152             op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9153         }
9154
9155         type = TYP_I_IMPL;
9156     }
9157 #else  // 32-bit TARGET
9158     else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG)
9159     {
9160         assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9161
9162         // int + long => gives long
9163         // long + int => gives long
9164
9165         type = TYP_LONG;
9166     }
9167 #endif // _TARGET_64BIT_
9168     else
9169     {
9170         // int + int => gives an int
9171         assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF);
9172
9173         assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
9174                varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
9175
9176         type = genActualType(op1->gtType);
9177
9178 #if FEATURE_X87_DOUBLES
9179
9180         // For x87, since we only have 1 size of registers, prefer double
9181         // For everybody else, be more precise
9182         if (type == TYP_FLOAT)
9183             type = TYP_DOUBLE;
9184
9185 #else // !FEATURE_X87_DOUBLES
9186
9187         // If both operands are TYP_FLOAT, then leave it as TYP_FLOAT.
9188         // Otherwise, turn floats into doubles
9189         if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT))
9190         {
9191             assert(genActualType(op2->gtType) == TYP_DOUBLE);
9192             type = TYP_DOUBLE;
9193         }
9194
9195 #endif // FEATURE_X87_DOUBLES
9196     }
9197
9198 #if FEATURE_X87_DOUBLES
9199     assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_LONG || type == TYP_INT);
9200 #else  // FEATURE_X87_DOUBLES
9201     assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT);
9202 #endif // FEATURE_X87_DOUBLES
9203
9204     return type;
9205 }
9206
9207 /*****************************************************************************
9208  * Casting Helper Function to service both CEE_CASTCLASS and CEE_ISINST
9209  *
9210  * typeRef contains the token, op1 contains the value being cast,
9211  * and op2 contains code that creates the type handle corresponding to typeRef
9212  * isCastClass = true means CEE_CASTCLASS, false means CEE_ISINST
9213  */
9214 GenTreePtr Compiler::impCastClassOrIsInstToTree(GenTreePtr              op1,
9215                                                 GenTreePtr              op2,
9216                                                 CORINFO_RESOLVED_TOKEN* pResolvedToken,
9217                                                 bool                    isCastClass)
9218 {
9219     bool expandInline;
9220
9221     assert(op1->TypeGet() == TYP_REF);
9222
9223     CorInfoHelpFunc helper = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass);
9224
9225     if (isCastClass)
9226     {
9227         // We only want to expand inline the normal CHKCASTCLASS helper;
9228         expandInline = (helper == CORINFO_HELP_CHKCASTCLASS);
9229     }
9230     else
9231     {
9232         if (helper == CORINFO_HELP_ISINSTANCEOFCLASS)
9233         {
9234             // Get the Class Handle and class attributes for the type we are casting to
9235             //
9236             DWORD flags = info.compCompHnd->getClassAttribs(pResolvedToken->hClass);
9237
9238             //
9239             // If the class handle is marked as final we can also expand the IsInst check inline
9240             //
9241             expandInline = ((flags & CORINFO_FLG_FINAL) != 0);
9242
9243             //
9244             // But don't expand inline these two cases
9245             //
9246             if (flags & CORINFO_FLG_MARSHAL_BYREF)
9247             {
9248                 expandInline = false;
9249             }
9250             else if (flags & CORINFO_FLG_CONTEXTFUL)
9251             {
9252                 expandInline = false;
9253             }
9254         }
9255         else
9256         {
9257             //
9258             // We can't expand inline any other helpers
9259             //
9260             expandInline = false;
9261         }
9262     }
9263
9264     if (expandInline)
9265     {
9266         if (compCurBB->isRunRarely())
9267         {
9268             expandInline = false; // not worth the code expansion in a rarely run block
9269         }
9270
9271         if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals())
9272         {
9273             expandInline = false; // not worth creating an untracked local variable
9274         }
9275     }
9276
9277     if (!expandInline)
9278     {
9279         // If we CSE this class handle we prevent assertionProp from making SubType assertions
9280         // so instead we force the CSE logic to not consider CSE-ing this class handle.
9281         //
9282         op2->gtFlags |= GTF_DONT_CSE;
9283
9284         return gtNewHelperCallNode(helper, TYP_REF, 0, gtNewArgList(op2, op1));
9285     }
9286
9287     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2"));
9288
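    // Overall shape of the inline expansion built below (a sketch; the exact tree
    // forms are shown in the QMARK/COLON diagrams that follow):
    //
    //   tmp = (op1 == null) ? op1
    //                       : (op1->methodTable != op2) ? (isCastClass ? CHKCASTCLASS_SPECIAL(op2, op1)
    //                                                                  : null)
    //                                                   : op1;
    //   return LCL_VAR(tmp)
    //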
9289     GenTreePtr temp;
9290     GenTreePtr condMT;
9291     //
9292     // expand the methodtable match:
9293     //
9294     //  condMT ==>   GT_NE
9295     //               /    \
9296     //           GT_IND   op2 (typically CNS_INT)
9297     //              |
9298     //           op1Copy
9299     //
9300
9301     // This can replace op1 with a GT_COMMA that evaluates op1 into a local
9302     //
9303     op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1"));
9304     //
9305     // op1 is now known to be a non-complex tree
9306     // thus we can use gtClone(op1) from now on
9307     //
9308
9309     GenTreePtr op2Var = op2;
9310     if (isCastClass)
9311     {
9312         op2Var                                                  = fgInsertCommaFormTemp(&op2);
9313         lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true;
9314     }
9315     temp = gtNewOperNode(GT_IND, TYP_I_IMPL, temp);
9316     temp->gtFlags |= GTF_EXCEPT;
9317     condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2);
9318
9319     GenTreePtr condNull;
9320     //
9321     // expand the null check:
9322     //
9323     //  condNull ==>   GT_EQ
9324     //                 /    \
9325     //             op1Copy CNS_INT
9326     //                      null
9327     //
9328     condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF));
9329
9330     //
9331     // expand the true and false trees for the condMT
9332     //
9333     GenTreePtr condFalse = gtClone(op1);
9334     GenTreePtr condTrue;
9335     if (isCastClass)
9336     {
9337         //
9338         // use the special helper that skips the cases checked by our inlined cast
9339         //
9340         helper = CORINFO_HELP_CHKCASTCLASS_SPECIAL;
9341
9342         condTrue = gtNewHelperCallNode(helper, TYP_REF, 0, gtNewArgList(op2Var, gtClone(op1)));
9343     }
9344     else
9345     {
9346         condTrue = gtNewIconNode(0, TYP_REF);
9347     }
9348
9349 #define USE_QMARK_TREES
9350
9351 #ifdef USE_QMARK_TREES
9352     GenTreePtr qmarkMT;
9353     //
9354     // Generate first QMARK - COLON tree
9355     //
9356     //  qmarkMT ==>   GT_QMARK
9357     //                 /     \
9358     //            condMT   GT_COLON
9359     //                      /     \
9360     //                condFalse  condTrue
9361     //
9362     temp    = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse);
9363     qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp);
9364     condMT->gtFlags |= GTF_RELOP_QMARK;
9365
9366     GenTreePtr qmarkNull;
9367     //
9368     // Generate second QMARK - COLON tree
9369     //
9370     //  qmarkNull ==>  GT_QMARK
9371     //                 /     \
9372     //           condNull  GT_COLON
9373     //                      /     \
9374     //                qmarkMT   op1Copy
9375     //
9376     temp      = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT);
9377     qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp);
9378     qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF;
9379     condNull->gtFlags |= GTF_RELOP_QMARK;
9380
9381     // Make QMark node a top level node by spilling it.
9382     unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2"));
9383     impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE);
9384     return gtNewLclvNode(tmp, TYP_REF);
9385 #endif
9386 }
9387
9388 #ifndef DEBUG
9389 #define assertImp(cond) ((void)0)
9390 #else
9391 #define assertImp(cond)                                                                                                \
9392     do                                                                                                                 \
9393     {                                                                                                                  \
9394         if (!(cond))                                                                                                   \
9395         {                                                                                                              \
9396             const int cchAssertImpBuf = 600;                                                                           \
9397             char*     assertImpBuf    = (char*)alloca(cchAssertImpBuf);                                                \
9398             _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1,                                            \
9399                         "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond,         \
9400                         impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL",                      \
9401                         op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth);                     \
9402             assertAbort(assertImpBuf, __FILE__, __LINE__);                                                             \
9403         }                                                                                                              \
9404     } while (0)
9405 #endif // DEBUG
9406
9407 #ifdef _PREFAST_
9408 #pragma warning(push)
9409 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
9410 #endif
9411 /*****************************************************************************
9412  *  Import the instr for the given basic block
9413  */
9414 void Compiler::impImportBlockCode(BasicBlock* block)
9415 {
9416 #define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind)
9417
9418 #ifdef DEBUG
9419
9420     if (verbose)
9421     {
9422         printf("\nImporting BB%02u (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName);
9423     }
9424 #endif
9425
9426     unsigned  nxtStmtIndex = impInitBlockLineInfo();
9427     IL_OFFSET nxtStmtOffs;
9428
9429     GenTreePtr                   arrayNodeFrom, arrayNodeTo, arrayNodeToIndex;
9430     bool                         expandInline;
9431     CorInfoHelpFunc              helper;
9432     CorInfoIsAccessAllowedResult accessAllowedResult;
9433     CORINFO_HELPER_DESC          calloutHelper;
9434     const BYTE*                  lastLoadToken = nullptr;
9435
9436     // reject cyclic constraints
9437     if (tiVerificationNeeded)
9438     {
9439         Verify(!info.hasCircularClassConstraints, "Method parent has circular class type parameter constraints.");
9440         Verify(!info.hasCircularMethodConstraints, "Method has circular method type parameter constraints.");
9441     }
9442
9443     /* Get the tree list started */
9444
9445     impBeginTreeList();
9446
9447     /* Walk the opcodes that comprise the basic block */
9448
9449     const BYTE* codeAddr = info.compCode + block->bbCodeOffs;
9450     const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd;
9451
9452     IL_OFFSET opcodeOffs    = block->bbCodeOffs;
9453     IL_OFFSET lastSpillOffs = opcodeOffs;
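     // lastSpillOffs tracks the last IL offset at which the evaluation stack was empty or was spilled; it is used below to bound the tree depth.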
9454
9455     signed jmpDist;
9456
9457     /* remember the start of the delegate creation sequence (used for verification) */
9458     const BYTE* delegateCreateStart = nullptr;
9459
9460     int  prefixFlags = 0;
9461     bool explicitTailCall, constraintCall, readonlyCall;
9462
9463     bool     insertLdloc = false; // set by CEE_DUP and cleared by following store
9464     typeInfo tiRetVal;
9465
9466     unsigned numArgs = info.compArgsCount;
9467
9468     /* Now process all the opcodes in the block */
9469
9470     var_types callTyp    = TYP_COUNT;
9471     OPCODE    prevOpcode = CEE_ILLEGAL;
9472
9473     if (block->bbCatchTyp)
9474     {
9475         if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
9476         {
9477             impCurStmtOffsSet(block->bbCodeOffs);
9478         }
9479
9480         // We will spill the GT_CATCH_ARG and the input of the BB_QMARK block
9481         // to a temp. This is a trade-off for code simplicity
9482         impSpillSpecialSideEff();
9483     }
9484
9485     while (codeAddr < codeEndp)
9486     {
9487         bool                   usingReadyToRunHelper = false;
9488         CORINFO_RESOLVED_TOKEN resolvedToken;
9489         CORINFO_RESOLVED_TOKEN constrainedResolvedToken;
9490         CORINFO_CALL_INFO      callInfo;
9491         CORINFO_FIELD_INFO     fieldInfo;
9492
9493         tiRetVal = typeInfo(); // Default type info
9494
9495         //---------------------------------------------------------------------
9496
9497         /* We need to restrict the max tree depth as many of the Compiler
9498            functions are recursive. We do this by spilling the stack */
9499
9500         if (verCurrentState.esStackDepth)
9501         {
9502             /* Has it been a while since the stack was last empty or spilled?
9503                (An empty stack guarantees that the tree depth isn't accumulating.) */
9504
9505             if ((opcodeOffs - lastSpillOffs) > 200)
9506             {
9507                 impSpillStackEnsure();
9508                 lastSpillOffs = opcodeOffs;
9509             }
9510         }
9511         else
9512         {
9513             lastSpillOffs   = opcodeOffs;
9514             impBoxTempInUse = false; // nothing on the stack, box temp OK to use again
9515         }
9516
9517         /* Compute the current instr offset */
9518
9519         opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
9520
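             // In debug builds the statement-boundary bookkeeping below always runs; in release builds it runs only when debug info is requested (opts.compDbgInfo).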
9521 #ifndef DEBUG
9522         if (opts.compDbgInfo)
9523 #endif
9524         {
9525             if (!compIsForInlining())
9526             {
9527                 nxtStmtOffs =
9528                     (nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET;
9529
9530                 /* Have we reached the next stmt boundary ? */
9531
9532                 if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs)
9533                 {
9534                     assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]);
9535
9536                     if (verCurrentState.esStackDepth != 0 && opts.compDbgCode)
9537                     {
9538                         /* We need to provide accurate IP-mapping at this point.
9539                            So spill anything on the stack so that it will form
9540                            gtStmts with the correct stmt offset noted */
9541
9542                         impSpillStackEnsure(true);
9543                     }
9544
9545                     // Has impCurStmtOffs been reported in any tree?
9546
9547                     if (impCurStmtOffs != BAD_IL_OFFSET && opts.compDbgCode)
9548                     {
9549                         GenTreePtr placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
9550                         impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9551
9552                         assert(impCurStmtOffs == BAD_IL_OFFSET);
9553                     }
9554
9555                     if (impCurStmtOffs == BAD_IL_OFFSET)
9556                     {
9557                         /* Make sure that nxtStmtIndex is in sync with opcodeOffs.
9558                            If opcodeOffs has gone past nxtStmtIndex, catch up */
9559
9560                         while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount &&
9561                                info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs)
9562                         {
9563                             nxtStmtIndex++;
9564                         }
9565
9566                         /* Go to the new stmt */
9567
9568                         impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]);
9569
9570                         /* Update the stmt boundary index */
9571
9572                         nxtStmtIndex++;
9573                         assert(nxtStmtIndex <= info.compStmtOffsetsCount);
9574
9575                         /* Are there any more line# entries after this one? */
9576
9577                         if (nxtStmtIndex < info.compStmtOffsetsCount)
9578                         {
9579                             /* Remember where the next line# starts */
9580
9581                             nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex];
9582                         }
9583                         else
9584                         {
9585                             /* No more line# entries */
9586
9587                             nxtStmtOffs = BAD_IL_OFFSET;
9588                         }
9589                     }
9590                 }
9591                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) &&
9592                          (verCurrentState.esStackDepth == 0))
9593                 {
9594                     /* At stack-empty locations, we have already added the tree to
9595                        the stmt list with the last offset. We just need to update
9596                        impCurStmtOffs
9597                      */
9598
9599                     impCurStmtOffsSet(opcodeOffs);
9600                 }
9601                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) &&
9602                          impOpcodeIsCallSiteBoundary(prevOpcode))
9603                 {
9604                     /* Make sure we have a type cached */
9605                     assert(callTyp != TYP_COUNT);
9606
9607                     if (callTyp == TYP_VOID)
9608                     {
9609                         impCurStmtOffsSet(opcodeOffs);
9610                     }
9611                     else if (opts.compDbgCode)
9612                     {
9613                         impSpillStackEnsure(true);
9614                         impCurStmtOffsSet(opcodeOffs);
9615                     }
9616                 }
9617                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP))
9618                 {
9619                     if (opts.compDbgCode)
9620                     {
9621                         impSpillStackEnsure(true);
9622                     }
9623
9624                     impCurStmtOffsSet(opcodeOffs);
9625                 }
9626
9627                 assert(impCurStmtOffs == BAD_IL_OFFSET || nxtStmtOffs == BAD_IL_OFFSET ||
9628                        jitGetILoffs(impCurStmtOffs) <= nxtStmtOffs);
9629             }
9630         }
9631
9632         CORINFO_CLASS_HANDLE clsHnd       = DUMMY_INIT(NULL);
9633         CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL);
9634         CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL);
9635
9636         var_types       lclTyp, ovflType = TYP_UNKNOWN;
9637         GenTreePtr      op1           = DUMMY_INIT(NULL);
9638         GenTreePtr      op2           = DUMMY_INIT(NULL);
9639         GenTreeArgList* args          = nullptr; // What good do these "DUMMY_INIT"s do?
9640         GenTreePtr      newObjThisPtr = DUMMY_INIT(NULL);
9641         bool            uns           = DUMMY_INIT(false);
9642
9643         /* Get the next opcode and the size of its parameters */
9644
9645         OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
9646         codeAddr += sizeof(__int8);
9647
9648 #ifdef DEBUG
9649         impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
9650         JITDUMP("\n    [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs);
9651 #endif
9652
9653     DECODE_OPCODE:
9654
9655         // Return if any previous code has caused inline to fail.
9656         if (compDonotInline())
9657         {
9658             return;
9659         }
9660
9661         /* Get the size of additional parameters */
9662
9663         signed int sz = opcodeSizes[opcode];
9664
9665 #ifdef DEBUG
9666         clsHnd  = NO_CLASS_HANDLE;
9667         lclTyp  = TYP_COUNT;
9668         callTyp = TYP_COUNT;
9669
9670         impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
9671         impCurOpcName = opcodeNames[opcode];
9672
9673         if (verbose && (opcode != CEE_PREFIX1))
9674         {
9675             printf("%s", impCurOpcName);
9676         }
9677
9678         /* Use assertImp() to display the opcode */
9679
9680         op1 = op2 = nullptr;
9681 #endif
9682
9683         /* See what kind of an opcode we have, then */
9684
9685         unsigned mflags   = 0;
9686         unsigned clsFlags = 0;
9687
9688         switch (opcode)
9689         {
9690             unsigned  lclNum;
9691             var_types type;
9692
9693             GenTreePtr op3;
9694             genTreeOps oper;
9695             unsigned   size;
9696
9697             int val;
9698
9699             CORINFO_SIG_INFO     sig;
9700             unsigned             flags;
9701             IL_OFFSET            jmpAddr;
9702             bool                 ovfl, unordered, callNode;
9703             bool                 ldstruct;
9704             CORINFO_CLASS_HANDLE tokenType;
9705
9706             union {
9707                 int     intVal;
9708                 float   fltVal;
9709                 __int64 lngVal;
9710                 double  dblVal;
9711             } cval;
9712
9713             case CEE_PREFIX1:
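                 // Two-byte opcodes are encoded as the CEE_PREFIX1 byte followed by a second byte; adding 256 maps the second byte into the extended range of the OPCODE enum.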
9714                 opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
9715                 codeAddr += sizeof(__int8);
9716                 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
9717                 goto DECODE_OPCODE;
9718
9719             SPILL_APPEND:
9720
9721                 // We need to call impSpillLclRefs() for a struct type lclVar.
9722                 // This is done for non-block assignments in the handling of stloc.
9723                 if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtOp.gtOp1) &&
9724                     (op1->gtOp.gtOp1->gtOper == GT_LCL_VAR))
9725                 {
9726                     impSpillLclRefs(op1->gtOp.gtOp1->AsLclVarCommon()->gtLclNum);
9727                 }
9728
9729                 /* Append 'op1' to the list of statements */
9730                 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
9731                 goto DONE_APPEND;
9732
9733             APPEND:
9734
9735                 /* Append 'op1' to the list of statements */
9736
9737                 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9738                 goto DONE_APPEND;
9739
9740             DONE_APPEND:
9741
9742 #ifdef DEBUG
9743                 // Remember at which BC offset the tree was finished
9744                 impNoteLastILoffs();
9745 #endif
9746                 break;
9747
9748             case CEE_LDNULL:
9749                 impPushNullObjRefOnStack();
9750                 break;
9751
9752             case CEE_LDC_I4_M1:
9753             case CEE_LDC_I4_0:
9754             case CEE_LDC_I4_1:
9755             case CEE_LDC_I4_2:
9756             case CEE_LDC_I4_3:
9757             case CEE_LDC_I4_4:
9758             case CEE_LDC_I4_5:
9759             case CEE_LDC_I4_6:
9760             case CEE_LDC_I4_7:
9761             case CEE_LDC_I4_8:
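                 // CEE_LDC_I4_M1 through CEE_LDC_I4_8 are consecutive opcodes, so subtracting CEE_LDC_I4_0 yields the constant value (-1 through 8) directly.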
9762                 cval.intVal = (opcode - CEE_LDC_I4_0);
9763                 assert(-1 <= cval.intVal && cval.intVal <= 8);
9764                 goto PUSH_I4CON;
9765
9766             case CEE_LDC_I4_S:
9767                 cval.intVal = getI1LittleEndian(codeAddr);
9768                 goto PUSH_I4CON;
9769             case CEE_LDC_I4:
9770                 cval.intVal = getI4LittleEndian(codeAddr);
9771                 goto PUSH_I4CON;
9772             PUSH_I4CON:
9773                 JITDUMP(" %d", cval.intVal);
9774                 impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT));
9775                 break;
9776
9777             case CEE_LDC_I8:
9778                 cval.lngVal = getI8LittleEndian(codeAddr);
9779                 JITDUMP(" 0x%016llx", cval.lngVal);
9780                 impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG));
9781                 break;
9782
9783             case CEE_LDC_R8:
9784                 cval.dblVal = getR8LittleEndian(codeAddr);
9785                 JITDUMP(" %#.17g", cval.dblVal);
9786                 impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE));
9787                 break;
9788
9789             case CEE_LDC_R4:
9790                 cval.dblVal = getR4LittleEndian(codeAddr);
9791                 JITDUMP(" %#.17g", cval.dblVal);
9792                 {
9793                     GenTreePtr cnsOp = gtNewDconNode(cval.dblVal);
9794 #if !FEATURE_X87_DOUBLES
9795                     // The x87 FP stack doesn't differentiate between float and double,
9796                     // so on x87 an R4 constant is treated as R8; every other target keeps it as TYP_FLOAT
9797                     cnsOp->gtType = TYP_FLOAT;
9798 #endif // FEATURE_X87_DOUBLES
9799                     impPushOnStack(cnsOp, typeInfo(TI_DOUBLE));
9800                 }
9801                 break;
9802
9803             case CEE_LDSTR:
9804
9805                 if (compIsForInlining())
9806                 {
9807                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_NO_CALLEE_LDSTR)
9808                     {
9809                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_LDSTR_RESTRICTION);
9810                         return;
9811                     }
9812                 }
9813
9814                 val = getU4LittleEndian(codeAddr);
9815                 JITDUMP(" %08X", val);
9816                 if (tiVerificationNeeded)
9817                 {
9818                     Verify(info.compCompHnd->isValidStringRef(info.compScopeHnd, val), "bad string");
9819                     tiRetVal = typeInfo(TI_REF, impGetStringClass());
9820                 }
9821                 impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal);
9822
9823                 break;
9824
9825             case CEE_LDARG:
9826                 lclNum = getU2LittleEndian(codeAddr);
9827                 JITDUMP(" %u", lclNum);
9828                 impLoadArg(lclNum, opcodeOffs + sz + 1);
9829                 break;
9830
9831             case CEE_LDARG_S:
9832                 lclNum = getU1LittleEndian(codeAddr);
9833                 JITDUMP(" %u", lclNum);
9834                 impLoadArg(lclNum, opcodeOffs + sz + 1);
9835                 break;
9836
9837             case CEE_LDARG_0:
9838             case CEE_LDARG_1:
9839             case CEE_LDARG_2:
9840             case CEE_LDARG_3:
9841                 lclNum = (opcode - CEE_LDARG_0);
9842                 assert(lclNum >= 0 && lclNum < 4);
9843                 impLoadArg(lclNum, opcodeOffs + sz + 1);
9844                 break;
9845
9846             case CEE_LDLOC:
9847                 lclNum = getU2LittleEndian(codeAddr);
9848                 JITDUMP(" %u", lclNum);
9849                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9850                 break;
9851
9852             case CEE_LDLOC_S:
9853                 lclNum = getU1LittleEndian(codeAddr);
9854                 JITDUMP(" %u", lclNum);
9855                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9856                 break;
9857
9858             case CEE_LDLOC_0:
9859             case CEE_LDLOC_1:
9860             case CEE_LDLOC_2:
9861             case CEE_LDLOC_3:
9862                 lclNum = (opcode - CEE_LDLOC_0);
9863                 assert(lclNum >= 0 && lclNum < 4);
9864                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9865                 break;
9866
9867             case CEE_STARG:
9868                 lclNum = getU2LittleEndian(codeAddr);
9869                 goto STARG;
9870
9871             case CEE_STARG_S:
9872                 lclNum = getU1LittleEndian(codeAddr);
9873             STARG:
9874                 JITDUMP(" %u", lclNum);
9875
9876                 if (tiVerificationNeeded)
9877                 {
9878                     Verify(lclNum < info.compILargsCount, "bad arg num");
9879                 }
9880
9881                 if (compIsForInlining())
9882                 {
9883                     op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
9884                     noway_assert(op1->gtOper == GT_LCL_VAR);
9885                     lclNum = op1->AsLclVar()->gtLclNum;
9886
9887                     goto VAR_ST_VALID;
9888                 }
9889
9890                 lclNum = compMapILargNum(lclNum); // account for possible hidden param
9891                 assertImp(lclNum < numArgs);
9892
9893                 if (lclNum == info.compThisArg)
9894                 {
9895                     lclNum = lvaArg0Var;
9896                 }
9897                 lvaTable[lclNum].lvArgWrite = 1;
9898
9899                 if (tiVerificationNeeded)
9900                 {
9901                     typeInfo& tiLclVar = lvaTable[lclNum].lvVerTypeInfo;
9902                     Verify(tiCompatibleWith(impStackTop().seTypeInfo, NormaliseForStack(tiLclVar), true),
9903                            "type mismatch");
9904
9905                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
9906                     {
9907                         Verify(!tiLclVar.IsThisPtr(), "storing to uninit this ptr");
9908                     }
9909                 }
9910
9911                 goto VAR_ST;
9912
9913             case CEE_STLOC:
9914                 lclNum = getU2LittleEndian(codeAddr);
9915                 JITDUMP(" %u", lclNum);
9916                 goto LOC_ST;
9917
9918             case CEE_STLOC_S:
9919                 lclNum = getU1LittleEndian(codeAddr);
9920                 JITDUMP(" %u", lclNum);
9921                 goto LOC_ST;
9922
9923             case CEE_STLOC_0:
9924             case CEE_STLOC_1:
9925             case CEE_STLOC_2:
9926             case CEE_STLOC_3:
9927                 lclNum = (opcode - CEE_STLOC_0);
9928                 assert(lclNum >= 0 && lclNum < 4);
9929
9930             LOC_ST:
9931                 if (tiVerificationNeeded)
9932                 {
9933                     Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
9934                     Verify(tiCompatibleWith(impStackTop().seTypeInfo,
9935                                             NormaliseForStack(lvaTable[lclNum + numArgs].lvVerTypeInfo), true),
9936                            "type mismatch");
9937                 }
9938
9939                 if (compIsForInlining())
9940                 {
9941                     lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
9942
9943                     /* Have we allocated a temp for this local? */
9944
9945                     lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp"));
9946
9947                     goto _PopValue;
9948                 }
9949
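                     // In the non-inlining case, IL locals follow the method's arguments in the lclVar table, so bias the IL local number by the argument count.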
9950                 lclNum += numArgs;
9951
9952             VAR_ST:
9953
9954                 if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var)
9955                 {
9956                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
9957                     BADCODE("Bad IL");
9958                 }
9959
9960             VAR_ST_VALID:
9961
9962                 /* if it is a struct assignment, make certain we don't overflow the buffer */
9963                 assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd));
9964
9965                 if (lvaTable[lclNum].lvNormalizeOnLoad())
9966                 {
9967                     lclTyp = lvaGetRealType(lclNum);
9968                 }
9969                 else
9970                 {
9971                     lclTyp = lvaGetActualType(lclNum);
9972                 }
9973
9974             _PopValue:
9975                 /* Pop the value being assigned */
9976
9977                 {
9978                     StackEntry se = impPopStack(clsHnd);
9979                     op1           = se.val;
9980                     tiRetVal      = se.seTypeInfo;
9981                 }
9982
9983 #ifdef FEATURE_SIMD
9984                 if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet()))
9985                 {
9986                     assert(op1->TypeGet() == TYP_STRUCT);
9987                     op1->gtType = lclTyp;
9988                 }
9989 #endif // FEATURE_SIMD
9990
9991                 op1 = impImplicitIorI4Cast(op1, lclTyp);
9992
9993 #ifdef _TARGET_64BIT_
9994                 // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
9995                 if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT))
9996                 {
9997                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
9998                     op1 = gtNewCastNode(TYP_INT, op1, TYP_INT);
9999                 }
10000 #endif // _TARGET_64BIT_
10001
10002                 // We had better assign it a value of the correct type
10003                 assertImp(
10004                     genActualType(lclTyp) == genActualType(op1->gtType) ||
10005                     genActualType(lclTyp) == TYP_I_IMPL && op1->IsVarAddr() ||
10006                     (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) ||
10007                     (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) ||
10008                     (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) ||
10009                     ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF));
10010
10011                 /* If op1 is "&var" then its type is the transient "*" and it can
10012                    be used either as TYP_BYREF or TYP_I_IMPL */
10013
10014                 if (op1->IsVarAddr())
10015                 {
10016                     assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF);
10017
10018                     /* When "&var" is created, we assume it is a byref. If it is
10019                        being assigned to a TYP_I_IMPL var, change the type to
10020                        prevent unnecessary GC info */
10021
10022                     if (genActualType(lclTyp) == TYP_I_IMPL)
10023                     {
10024                         op1->gtType = TYP_I_IMPL;
10025                     }
10026                 }
10027
10028                 /* Filter out simple assignments to itself */
10029
10030                 if (op1->gtOper == GT_LCL_VAR && lclNum == op1->gtLclVarCommon.gtLclNum)
10031                 {
10032                     if (insertLdloc)
10033                     {
10034                         // This is a sequence of (ldloc, dup, stloc).  Can simplify
10035                         // to (ldloc, stloc).  Call impLoadVar below to reconstruct the ldloc node.
10036                         CLANG_FORMAT_COMMENT_ANCHOR;
10037
10038 #ifdef DEBUG
10039                         if (tiVerificationNeeded)
10040                         {
10041                             assert(
10042                                 typeInfo::AreEquivalent(tiRetVal, NormaliseForStack(lvaTable[lclNum].lvVerTypeInfo)));
10043                         }
10044 #endif
10045
10046                         op1         = nullptr;
10047                         insertLdloc = false;
10048
10049                         impLoadVar(lclNum, opcodeOffs + sz + 1);
10050                         break;
10051                     }
10052                     else if (opts.compDbgCode)
10053                     {
10054                         op1 = gtNewNothingNode();
10055                         goto SPILL_APPEND;
10056                     }
10057                     else
10058                     {
10059                         break;
10060                     }
10061                 }
10062
10063                 /* Create the assignment node */
10064
10065                 op2 = gtNewLclvNode(lclNum, lclTyp, opcodeOffs + sz + 1);
10066
10067                 /* If the local is aliased, we need to spill calls and
10068                    indirections from the stack. */
10069
10070                 if ((lvaTable[lclNum].lvAddrExposed || lvaTable[lclNum].lvHasLdAddrOp) &&
10071                     verCurrentState.esStackDepth > 0)
10072                 {
10073                     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased"));
10074                 }
10075
10076                 /* Spill any refs to the local from the stack */
10077
10078                 impSpillLclRefs(lclNum);
10079
10080 #if !FEATURE_X87_DOUBLES
10081                 // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
10082                 // We insert a cast to the dest 'op2' type
10083                 //
10084                 if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
10085                     varTypeIsFloating(op2->gtType))
10086                 {
10087                     op1 = gtNewCastNode(op2->TypeGet(), op1, op2->TypeGet());
10088                 }
10089 #endif // !FEATURE_X87_DOUBLES
10090
10091                 if (varTypeIsStruct(lclTyp))
10092                 {
10093                     op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL);
10094                 }
10095                 else
10096                 {
10097                     // The code generator generates GC tracking information
10098                     // based on the RHS of the assignment.  Later the LHS (which
10099                     // is a BYREF) gets used and the emitter checks that that variable
10100                     // is being tracked.  It is not (since the RHS was an int and did
10101                     // not need tracking).  To keep this assert happy, we change the RHS
10102                     if (lclTyp == TYP_BYREF && !varTypeIsGC(op1->gtType))
10103                     {
10104                         op1->gtType = TYP_BYREF;
10105                     }
10106                     op1 = gtNewAssignNode(op2, op1);
10107                 }
10108
10109                 /* If insertLdloc is true, then we need to insert a ldloc following the
10110                    stloc.  This is done when converting a (dup, stloc) sequence into
10111                    a (stloc, ldloc) sequence. */
10112
10113                 if (insertLdloc)
10114                 {
10115                     // From SPILL_APPEND
10116                     impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
10117
10118 #ifdef DEBUG
10119                     // From DONE_APPEND
10120                     impNoteLastILoffs();
10121 #endif
10122                     op1         = nullptr;
10123                     insertLdloc = false;
10124
10125                     impLoadVar(lclNum, opcodeOffs + sz + 1, tiRetVal);
10126                     break;
10127                 }
10128
10129                 goto SPILL_APPEND;
10130
10131             case CEE_LDLOCA:
10132                 lclNum = getU2LittleEndian(codeAddr);
10133                 goto LDLOCA;
10134
10135             case CEE_LDLOCA_S:
10136                 lclNum = getU1LittleEndian(codeAddr);
10137             LDLOCA:
10138                 JITDUMP(" %u", lclNum);
10139                 if (tiVerificationNeeded)
10140                 {
10141                     Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
10142                     Verify(info.compInitMem, "initLocals not set");
10143                 }
10144
10145                 if (compIsForInlining())
10146                 {
10147                     // Get the local type
10148                     lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
10149
10150                     /* Have we allocated a temp for this local? */
10151
10152                     lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp"));
10153
10154                     op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum));
10155
10156                     goto _PUSH_ADRVAR;
10157                 }
10158
10159                 lclNum += numArgs;
10160                 assertImp(lclNum < info.compLocalsCount);
10161                 goto ADRVAR;
10162
10163             case CEE_LDARGA:
10164                 lclNum = getU2LittleEndian(codeAddr);
10165                 goto LDARGA;
10166
10167             case CEE_LDARGA_S:
10168                 lclNum = getU1LittleEndian(codeAddr);
10169             LDARGA:
10170                 JITDUMP(" %u", lclNum);
10171                 Verify(lclNum < info.compILargsCount, "bad arg num");
10172
10173                 if (compIsForInlining())
10174                 {
10175                     // In IL, LDARGA(_S) is used to load the byref managed pointer to a struct argument,
10176                     // followed by a ldfld to load the field.
10177
10178                     op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
10179                     if (op1->gtOper != GT_LCL_VAR)
10180                     {
10181                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR);
10182                         return;
10183                     }
10184
10185                     assert(op1->gtOper == GT_LCL_VAR);
10186
10187                     goto _PUSH_ADRVAR;
10188                 }
10189
10190                 lclNum = compMapILargNum(lclNum); // account for possible hidden param
10191                 assertImp(lclNum < numArgs);
10192
10193                 if (lclNum == info.compThisArg)
10194                 {
10195                     lclNum = lvaArg0Var;
10196                 }
10197
10198                 goto ADRVAR;
10199
10200             ADRVAR:
10201
10202                 op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum), opcodeOffs + sz + 1);
10203
10204             _PUSH_ADRVAR:
10205                 assert(op1->gtOper == GT_LCL_VAR);
10206
10207                 /* Note that this is supposed to create the transient type "*"
10208                    which may be used as a TYP_I_IMPL. However we catch places
10209                    where it is used as a TYP_I_IMPL and change the node if needed.
10210                    Thus we are pessimistic and may report byrefs in the GC info
10211                    where it was not absolutely needed, but it is safer this way.
10212                  */
10213                 op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10214
10215                 // &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar itself does
10216                 assert((op1->gtFlags & GTF_GLOB_REF) == 0);
10217
10218                 tiRetVal = lvaTable[lclNum].lvVerTypeInfo;
10219                 if (tiVerificationNeeded)
10220                 {
10221                     // Don't allow taking address of uninit this ptr.
10222                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
10223                     {
10224                         Verify(!tiRetVal.IsThisPtr(), "address of uninit this ptr");
10225                     }
10226
10227                     if (!tiRetVal.IsByRef())
10228                     {
10229                         tiRetVal.MakeByRef();
10230                     }
10231                     else
10232                     {
10233                         Verify(false, "byref to byref");
10234                     }
10235                 }
10236
10237                 impPushOnStack(op1, tiRetVal);
10238                 break;
10239
10240             case CEE_ARGLIST:
10241
10242                 if (!info.compIsVarArgs)
10243                 {
10244                     BADCODE("arglist in non-vararg method");
10245                 }
10246
10247                 if (tiVerificationNeeded)
10248                 {
10249                     tiRetVal = typeInfo(TI_STRUCT, impGetRuntimeArgumentHandle());
10250                 }
10251                 assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG);
10252
10253                 /* The ARGLIST cookie is a hidden 'last' parameter; we have already
10254                    adjusted the arg count because this is like fetching the last param */
10255                 assertImp(0 < numArgs);
10256                 assert(lvaTable[lvaVarargsHandleArg].lvAddrExposed);
10257                 lclNum = lvaVarargsHandleArg;
10258                 op1    = gtNewLclvNode(lclNum, TYP_I_IMPL, opcodeOffs + sz + 1);
10259                 op1    = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10260                 impPushOnStack(op1, tiRetVal);
10261                 break;
10262
10263             case CEE_ENDFINALLY:
10264
10265                 if (compIsForInlining())
10266                 {
10267                     assert(!"Shouldn't have exception handlers in the inliner!");
10268                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY);
10269                     return;
10270                 }
10271
10272                 if (verCurrentState.esStackDepth > 0)
10273                 {
10274                     impEvalSideEffects();
10275                 }
10276
10277                 if (info.compXcptnsCount == 0)
10278                 {
10279                     BADCODE("endfinally outside finally");
10280                 }
10281
10282                 assert(verCurrentState.esStackDepth == 0);
10283
10284                 op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr);
10285                 goto APPEND;
10286
10287             case CEE_ENDFILTER:
10288
10289                 if (compIsForInlining())
10290                 {
10291                     assert(!"Shouldn't have exception handlers in the inliner!");
10292                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER);
10293                     return;
10294                 }
10295
10296                 block->bbSetRunRarely(); // filters are rare
10297
10298                 if (info.compXcptnsCount == 0)
10299                 {
10300                     BADCODE("endfilter outside filter");
10301                 }
10302
10303                 if (tiVerificationNeeded)
10304                 {
10305                     Verify(impStackTop().seTypeInfo.IsType(TI_INT), "bad endfilt arg");
10306                 }
10307
10308                 op1 = impPopStack().val;
10309                 assertImp(op1->gtType == TYP_INT);
10310                 if (!bbInFilterILRange(block))
10311                 {
10312                     BADCODE("EndFilter outside a filter handler");
10313                 }
10314
10315                 /* Mark current bb as end of filter */
10316
10317                 assert(compCurBB->bbFlags & BBF_DONT_REMOVE);
10318                 assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET);
10319
10320                 /* Mark catch handler as successor */
10321
10322                 op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1);
10323                 if (verCurrentState.esStackDepth != 0)
10324                 {
10325                     verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__)
10326                                                 DEBUGARG(__LINE__));
10327                 }
10328                 goto APPEND;
10329
10330             case CEE_RET:
10331                 prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it
10332             RET:
10333                 if (!impReturnInstruction(block, prefixFlags, opcode))
10334                 {
10335                     return; // abort
10336                 }
10337                 else
10338                 {
10339                     break;
10340                 }
10341
10342             case CEE_JMP:
10343
10344                 assert(!compIsForInlining());
10345
10346                 if (tiVerificationNeeded)
10347                 {
10348                     Verify(false, "Invalid opcode: CEE_JMP");
10349                 }
10350
10351                 if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex())
10352                 {
10353                     /* CEE_JMP does not make sense in some "protected" regions. */
10354
10355                     BADCODE("Jmp not allowed in protected region");
10356                 }
10357
10358                 if (verCurrentState.esStackDepth != 0)
10359                 {
10360                     BADCODE("Stack must be empty after CEE_JMPs");
10361                 }
10362
10363                 _impResolveToken(CORINFO_TOKENKIND_Method);
10364
10365                 JITDUMP(" %08X", resolvedToken.token);
10366
10367                 /* The signature of the target has to be identical to ours.
10368                    At least check that argCnt and returnType match */
10369
10370                 eeGetMethodSig(resolvedToken.hMethod, &sig);
10371                 if (sig.numArgs != info.compMethodInfo->args.numArgs ||
10372                     sig.retType != info.compMethodInfo->args.retType ||
10373                     sig.callConv != info.compMethodInfo->args.callConv)
10374                 {
10375                     BADCODE("Incompatible target for CEE_JMPs");
10376                 }
10377
10378 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARMARCH_)
10379
10380                 op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod);
10381
10382                 /* Mark the basic block as being a JUMP instead of RETURN */
10383
10384                 block->bbFlags |= BBF_HAS_JMP;
10385
10386                 /* Set this flag to make sure register arguments have a location assigned
10387                  * even if we don't use them inside the method */
10388
10389                 compJmpOpUsed = true;
10390
10391                 fgNoStructPromotion = true;
10392
10393                 goto APPEND;
10394
10395 #else // !_TARGET_XARCH_ && !_TARGET_ARMARCH_
10396
10397                 // Import this just like a series of LDARGs + tail. + call + ret
10398
10399                 if (info.compIsVarArgs)
10400                 {
10401                     // For now we don't implement true tail calls, so this breaks varargs.
10402                     // So warn the user instead of generating bad code.
10403                     // This is a semi-temporary workaround for DevDiv 173860, until we can properly
10404                     // implement true tail calls.
10405                     IMPL_LIMITATION("varags + CEE_JMP doesn't work yet");
10406                 }
10407
10408                 // First load up the arguments (0 - N)
10409                 for (unsigned argNum = 0; argNum < info.compILargsCount; argNum++)
10410                 {
10411                     impLoadArg(argNum, opcodeOffs + sz + 1);
10412                 }
10413
10414                 // Now generate the tail call
10415                 noway_assert(prefixFlags == 0);
10416                 prefixFlags = PREFIX_TAILCALL_EXPLICIT;
10417                 opcode      = CEE_CALL;
10418
10419                 eeGetCallInfo(&resolvedToken, NULL,
10420                               combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS), &callInfo);
10421
10422                 // All calls and delegates need a security callout.
10423                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
10424
10425                 callTyp = impImportCall(CEE_CALL, &resolvedToken, NULL, NULL, PREFIX_TAILCALL_EXPLICIT, &callInfo,
10426                                         opcodeOffs);
10427
10428                 // And finish with the ret
10429                 goto RET;
10430
10431 #endif // _TARGET_XARCH_ || _TARGET_ARMARCH_
10432
10433             case CEE_LDELEMA:
10434                 assertImp(sz == sizeof(unsigned));
10435
10436                 _impResolveToken(CORINFO_TOKENKIND_Class);
10437
10438                 JITDUMP(" %08X", resolvedToken.token);
10439
10440                 ldelemClsHnd = resolvedToken.hClass;
10441
10442                 if (tiVerificationNeeded)
10443                 {
10444                     typeInfo tiArray = impStackTop(1).seTypeInfo;
10445                     typeInfo tiIndex = impStackTop().seTypeInfo;
10446
9447                     // As per ECMA, the 'index' specified can be either int32 or native int.
10448                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10449
10450                     typeInfo arrayElemType = verMakeTypeInfo(ldelemClsHnd);
10451                     Verify(tiArray.IsNullObjRef() ||
10452                                typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElemType),
10453                            "bad array");
10454
10455                     tiRetVal = arrayElemType;
10456                     tiRetVal.MakeByRef();
10457                     if (prefixFlags & PREFIX_READONLY)
10458                     {
10459                         tiRetVal.SetIsReadonlyByRef();
10460                     }
10461
10462                     // an array interior pointer is always in the heap
10463                     tiRetVal.SetIsPermanentHomeByRef();
10464                 }
10465
10466                 // If it's a value class array we just do a simple address-of
10467                 if (eeIsValueClass(ldelemClsHnd))
10468                 {
10469                     CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd);
10470                     if (cit == CORINFO_TYPE_UNDEF)
10471                     {
10472                         lclTyp = TYP_STRUCT;
10473                     }
10474                     else
10475                     {
10476                         lclTyp = JITtype2varType(cit);
10477                     }
10478                     goto ARR_LD_POST_VERIFY;
10479                 }
10480
10481                 // Similarly, if it's a readonly access, we can do a simple address-of
10482                 // without doing a runtime type-check
10483                 if (prefixFlags & PREFIX_READONLY)
10484                 {
10485                     lclTyp = TYP_REF;
10486                     goto ARR_LD_POST_VERIFY;
10487                 }
10488
10489                 // Otherwise we need the full helper function with run-time type check
10490                 op1 = impTokenToHandle(&resolvedToken);
10491                 if (op1 == nullptr)
10492                 { // compDonotInline()
10493                     return;
10494                 }
10495
10496                 args = gtNewArgList(op1);                      // Type
10497                 args = gtNewListNode(impPopStack().val, args); // index
10498                 args = gtNewListNode(impPopStack().val, args); // array
10499                 op1  = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, GTF_EXCEPT, args);
10500
10501                 impPushOnStack(op1, tiRetVal);
10502                 break;
10503
10504             // ldelem for reference and value types
10505             case CEE_LDELEM:
10506                 assertImp(sz == sizeof(unsigned));
10507
10508                 _impResolveToken(CORINFO_TOKENKIND_Class);
10509
10510                 JITDUMP(" %08X", resolvedToken.token);
10511
10512                 ldelemClsHnd = resolvedToken.hClass;
10513
10514                 if (tiVerificationNeeded)
10515                 {
10516                     typeInfo tiArray = impStackTop(1).seTypeInfo;
10517                     typeInfo tiIndex = impStackTop().seTypeInfo;
10518
9519                     // As per ECMA, the 'index' specified can be either int32 or native int.
10520                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10521                     tiRetVal = verMakeTypeInfo(ldelemClsHnd);
10522
10523                     Verify(tiArray.IsNullObjRef() || tiCompatibleWith(verGetArrayElemType(tiArray), tiRetVal, false),
10524                            "type of array incompatible with type operand");
10525                     tiRetVal.NormaliseForStack();
10526                 }
10527
10528                 // If it's a reference type or generic variable type
10529                 // then just generate code as though it's a ldelem.ref instruction
10530                 if (!eeIsValueClass(ldelemClsHnd))
10531                 {
10532                     lclTyp = TYP_REF;
10533                     opcode = CEE_LDELEM_REF;
10534                 }
10535                 else
10536                 {
10537                     CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd);
10538                     lclTyp             = JITtype2varType(jitTyp);
10539                     tiRetVal           = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct
10540                     tiRetVal.NormaliseForStack();
10541                 }
10542                 goto ARR_LD_POST_VERIFY;
10543
10544             case CEE_LDELEM_I1:
10545                 lclTyp = TYP_BYTE;
10546                 goto ARR_LD;
10547             case CEE_LDELEM_I2:
10548                 lclTyp = TYP_SHORT;
10549                 goto ARR_LD;
10550             case CEE_LDELEM_I:
10551                 lclTyp = TYP_I_IMPL;
10552                 goto ARR_LD;
10553
10554             // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter
10555             // and treating it as TYP_INT avoids other asserts.
10556             case CEE_LDELEM_U4:
10557                 lclTyp = TYP_INT;
10558                 goto ARR_LD;
10559
10560             case CEE_LDELEM_I4:
10561                 lclTyp = TYP_INT;
10562                 goto ARR_LD;
10563             case CEE_LDELEM_I8:
10564                 lclTyp = TYP_LONG;
10565                 goto ARR_LD;
10566             case CEE_LDELEM_REF:
10567                 lclTyp = TYP_REF;
10568                 goto ARR_LD;
10569             case CEE_LDELEM_R4:
10570                 lclTyp = TYP_FLOAT;
10571                 goto ARR_LD;
10572             case CEE_LDELEM_R8:
10573                 lclTyp = TYP_DOUBLE;
10574                 goto ARR_LD;
10575             case CEE_LDELEM_U1:
10576                 lclTyp = TYP_UBYTE;
10577                 goto ARR_LD;
10578             case CEE_LDELEM_U2:
10579                 lclTyp = TYP_CHAR;
10580                 goto ARR_LD;
10581
10582             ARR_LD:
10583
10584                 if (tiVerificationNeeded)
10585                 {
10586                     typeInfo tiArray = impStackTop(1).seTypeInfo;
10587                     typeInfo tiIndex = impStackTop().seTypeInfo;
10588
9589                     // As per ECMA, the 'index' specified can be either int32 or native int.
10590                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10591                     if (tiArray.IsNullObjRef())
10592                     {
10593                         if (lclTyp == TYP_REF)
10594                         { // we will say a deref of a null array yields a null ref
10595                             tiRetVal = typeInfo(TI_NULL);
10596                         }
10597                         else
10598                         {
10599                             tiRetVal = typeInfo(lclTyp);
10600                         }
10601                     }
10602                     else
10603                     {
10604                         tiRetVal             = verGetArrayElemType(tiArray);
10605                         typeInfo arrayElemTi = typeInfo(lclTyp);
10606 #ifdef _TARGET_64BIT_
10607                         if (opcode == CEE_LDELEM_I)
10608                         {
10609                             arrayElemTi = typeInfo::nativeInt();
10610                         }
10611
10612                         if (lclTyp != TYP_REF && lclTyp != TYP_STRUCT)
10613                         {
10614                             Verify(typeInfo::AreEquivalent(tiRetVal, arrayElemTi), "bad array");
10615                         }
10616                         else
10617 #endif // _TARGET_64BIT_
10618                         {
10619                             Verify(tiRetVal.IsType(arrayElemTi.GetType()), "bad array");
10620                         }
10621                     }
10622                     tiRetVal.NormaliseForStack();
10623                 }
10624             ARR_LD_POST_VERIFY:
10625
10626                 /* Pull the index value and array address */
10627                 op2 = impPopStack().val;
10628                 op1 = impPopStack().val;
10629                 assertImp(op1->gtType == TYP_REF);
10630
10631                 /* Check for null pointer - in the inliner case we simply abort */
10632
10633                 if (compIsForInlining())
10634                 {
10635                     if (op1->gtOper == GT_CNS_INT)
10636                     {
10637                         compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM);
10638                         return;
10639                     }
10640                 }
10641
10642                 op1 = impCheckForNullPointer(op1);
10643
10644                 /* Mark the block as containing an index expression */
10645
10646                 if (op1->gtOper == GT_LCL_VAR)
10647                 {
10648                     if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD)
10649                     {
10650                         block->bbFlags |= BBF_HAS_IDX_LEN;
10651                         optMethodFlags |= OMF_HAS_ARRAYREF;
10652                     }
10653                 }
10654
10655                 /* Create the index node and push it on the stack */
10656
10657                 op1 = gtNewIndexRef(lclTyp, op1, op2);
10658
10659                 ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT);
10660
10661                 if ((opcode == CEE_LDELEMA) || ldstruct ||
10662                     (ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd)))
10663                 {
10664                     assert(ldelemClsHnd != DUMMY_INIT(NULL));
10665
10666                     // remember the element size
10667                     if (lclTyp == TYP_REF)
10668                     {
10669                         op1->gtIndex.gtIndElemSize = sizeof(void*);
10670                     }
10671                     else
10672                     {
10673                         // If ldelemClsHnd is precisely a primitive type, use that; otherwise, preserve the struct type.
10674                         if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF)
10675                         {
10676                             op1->gtIndex.gtStructElemClass = ldelemClsHnd;
10677                         }
10678                         assert(lclTyp != TYP_STRUCT || op1->gtIndex.gtStructElemClass != nullptr);
10679                         if (lclTyp == TYP_STRUCT)
10680                         {
10681                             size                       = info.compCompHnd->getClassSize(ldelemClsHnd);
10682                             op1->gtIndex.gtIndElemSize = size;
10683                             op1->gtType                = lclTyp;
10684                         }
10685                     }
10686
10687                     if ((opcode == CEE_LDELEMA) || ldstruct)
10688                     {
10689                         // wrap it in a &
10690                         lclTyp = TYP_BYREF;
10691
10692                         op1 = gtNewOperNode(GT_ADDR, lclTyp, op1);
10693                     }
10694                     else
10695                     {
10696                         assert(lclTyp != TYP_STRUCT);
10697                     }
10698                 }
10699
10700                 if (ldstruct)
10701                 {
10702                     // Create an OBJ for the result
10703                     op1 = gtNewObjNode(ldelemClsHnd, op1);
10704                     op1->gtFlags |= GTF_EXCEPT;
10705                 }
10706                 impPushOnStack(op1, tiRetVal);
10707                 break;
10708
10709             // stelem for reference and value types
10710             case CEE_STELEM:
10711
10712                 assertImp(sz == sizeof(unsigned));
10713
10714                 _impResolveToken(CORINFO_TOKENKIND_Class);
10715
10716                 JITDUMP(" %08X", resolvedToken.token);
10717
10718                 stelemClsHnd = resolvedToken.hClass;
10719
10720                 if (tiVerificationNeeded)
10721                 {
10722                     typeInfo tiArray = impStackTop(2).seTypeInfo;
10723                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
10724                     typeInfo tiValue = impStackTop().seTypeInfo;
10725
9726                     // As per ECMA, the 'index' specified can be either int32 or native int.
10727                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10728                     typeInfo arrayElem = verMakeTypeInfo(stelemClsHnd);
10729
10730                     Verify(tiArray.IsNullObjRef() || tiCompatibleWith(arrayElem, verGetArrayElemType(tiArray), false),
10731                            "type operand incompatible with array element type");
10732                     arrayElem.NormaliseForStack();
10733                     Verify(tiCompatibleWith(tiValue, arrayElem, true), "value incompatible with type operand");
10734                 }
10735
10736                 // If it's a reference type just behave as though it's a stelem.ref instruction
10737                 if (!eeIsValueClass(stelemClsHnd))
10738                 {
10739                     goto STELEM_REF_POST_VERIFY;
10740                 }
10741
10742                 // Otherwise extract the type
10743                 {
10744                     CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd);
10745                     lclTyp             = JITtype2varType(jitTyp);
10746                     goto ARR_ST_POST_VERIFY;
10747                 }
10748
10749             case CEE_STELEM_REF:
10750
10751                 if (tiVerificationNeeded)
10752                 {
10753                     typeInfo tiArray = impStackTop(2).seTypeInfo;
10754                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
10755                     typeInfo tiValue = impStackTop().seTypeInfo;
10756
10757                     // As per ECMA, the 'index' specified can be either an int32 or a native int.
10758                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10759                     Verify(tiValue.IsObjRef(), "bad value");
10760
10761                     // We only check that it is an object reference; the helper does additional checks.
10762                     Verify(tiArray.IsNullObjRef() || verGetArrayElemType(tiArray).IsType(TI_REF), "bad array");
10763                 }
10764
10765                 arrayNodeTo      = impStackTop(2).val;
10766                 arrayNodeToIndex = impStackTop(1).val;
10767                 arrayNodeFrom    = impStackTop().val;
10768
10769                 //
10770                 // Note that it is not legal to optimize away CORINFO_HELP_ARRADDR_ST in a
10771                 // lot of cases because of covariance, i.e. foo[] can be cast to object[].
10772                 //
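                // For illustration (a C#-level sketch of why the helper check is needed):
                //     string[] s = new string[1];
                //     object[] o = s;            // legal covariant array conversion
                //     o[0] = new object();       // must throw ArrayTypeMismatchException at run time
                // so a stelem into an object[] cannot, in general, skip the element-type check that
                // CORINFO_HELP_ARRADDR_ST performs.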
10773
10774                 // Check for assignment to the same array, i.e. arrLcl[i] = arrLcl[j].
10775                 // This does not need CORINFO_HELP_ARRADDR_ST
10776
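                // For illustration: in "arrLcl[i] = arrLcl[j]" the value being stored was just loaded
                // from the very same (non-address-exposed) array, so it necessarily has a compatible
                // element type and the covariance check in the helper would be redundant.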
10777                 if (arrayNodeFrom->OperGet() == GT_INDEX && arrayNodeFrom->gtOp.gtOp1->gtOper == GT_LCL_VAR &&
10778                     arrayNodeTo->gtOper == GT_LCL_VAR &&
10779                     arrayNodeTo->gtLclVarCommon.gtLclNum == arrayNodeFrom->gtOp.gtOp1->gtLclVarCommon.gtLclNum &&
10780                     !lvaTable[arrayNodeTo->gtLclVarCommon.gtLclNum].lvAddrExposed)
10781                 {
10782                     lclTyp = TYP_REF;
10783                     goto ARR_ST_POST_VERIFY;
10784                 }
10785
10786                 // Check for assignment of NULL. This does not need CORINFO_HELP_ARRADDR_ST
10787
10788                 if (arrayNodeFrom->OperGet() == GT_CNS_INT)
10789                 {
10790                     assert(arrayNodeFrom->gtType == TYP_REF && arrayNodeFrom->gtIntCon.gtIconVal == 0);
10791
10792                     lclTyp = TYP_REF;
10793                     goto ARR_ST_POST_VERIFY;
10794                 }
10795
10796             STELEM_REF_POST_VERIFY:
10797
10798                 /* Call a helper function to do the assignment */
10799                 op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, 0, impPopList(3, &flags, nullptr));
10800
10801                 goto SPILL_APPEND;
10802
10803             case CEE_STELEM_I1:
10804                 lclTyp = TYP_BYTE;
10805                 goto ARR_ST;
10806             case CEE_STELEM_I2:
10807                 lclTyp = TYP_SHORT;
10808                 goto ARR_ST;
10809             case CEE_STELEM_I:
10810                 lclTyp = TYP_I_IMPL;
10811                 goto ARR_ST;
10812             case CEE_STELEM_I4:
10813                 lclTyp = TYP_INT;
10814                 goto ARR_ST;
10815             case CEE_STELEM_I8:
10816                 lclTyp = TYP_LONG;
10817                 goto ARR_ST;
10818             case CEE_STELEM_R4:
10819                 lclTyp = TYP_FLOAT;
10820                 goto ARR_ST;
10821             case CEE_STELEM_R8:
10822                 lclTyp = TYP_DOUBLE;
10823                 goto ARR_ST;
10824
10825             ARR_ST:
10826
10827                 if (tiVerificationNeeded)
10828                 {
10829                     typeInfo tiArray = impStackTop(2).seTypeInfo;
10830                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
10831                     typeInfo tiValue = impStackTop().seTypeInfo;
10832
10833                     // As per ECMA, the 'index' specified can be either an int32 or a native int.
10834                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10835                     typeInfo arrayElem = typeInfo(lclTyp);
10836 #ifdef _TARGET_64BIT_
10837                     if (opcode == CEE_STELEM_I)
10838                     {
10839                         arrayElem = typeInfo::nativeInt();
10840                     }
10841 #endif // _TARGET_64BIT_
10842                     Verify(tiArray.IsNullObjRef() || typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElem),
10843                            "bad array");
10844
10845                     Verify(tiCompatibleWith(NormaliseForStack(tiValue), arrayElem.NormaliseForStack(), true),
10846                            "bad value");
10847                 }
10848
10849             ARR_ST_POST_VERIFY:
10850                 /* The strict order of evaluation is LHS-operands, RHS-operands,
10851                    range-check, and then assignment. However, codegen currently
10852                    does the range-check before evaluating the RHS-operands. So to
10853                    maintain strict ordering, we spill the stack. */
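                /* For illustration: in "arr[i] = SideEffect()", ECMA ordering requires the call to
                   SideEffect() to execute before the range check on "arr[i]" can throw, so any
                   side-effecting value already on the stack is spilled to a temp first. */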
10854
10855                 if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT)
10856                 {
10857                     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
10858                                                    "Strict ordering of exceptions for Array store"));
10859                 }
10860
10861                 /* Pull the new value from the stack */
10862                 op2 = impPopStack().val;
10863
10864                 /* Pull the index value */
10865                 op1 = impPopStack().val;
10866
10867                 /* Pull the array address */
10868                 op3 = impPopStack().val;
10869
10870                 assertImp(op3->gtType == TYP_REF);
10871                 if (op2->IsVarAddr())
10872                 {
10873                     op2->gtType = TYP_I_IMPL;
10874                 }
10875
10876                 op3 = impCheckForNullPointer(op3);
10877
10878                 // Mark the block as containing an index expression
10879
10880                 if (op3->gtOper == GT_LCL_VAR)
10881                 {
10882                     if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD)
10883                     {
10884                         block->bbFlags |= BBF_HAS_IDX_LEN;
10885                         optMethodFlags |= OMF_HAS_ARRAYREF;
10886                     }
10887                 }
10888
10889                 /* Create the index node */
10890
10891                 op1 = gtNewIndexRef(lclTyp, op3, op1);
10892
10893                 /* Create the assignment node and append it */
10894
10895                 if (lclTyp == TYP_STRUCT)
10896                 {
10897                     assert(stelemClsHnd != DUMMY_INIT(NULL));
10898
10899                     op1->gtIndex.gtStructElemClass = stelemClsHnd;
10900                     op1->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(stelemClsHnd);
10901                 }
10902                 if (varTypeIsStruct(op1))
10903                 {
10904                     op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL);
10905                 }
10906                 else
10907                 {
10908                     op2 = impImplicitR4orR8Cast(op2, op1->TypeGet());
10909                     op1 = gtNewAssignNode(op1, op2);
10910                 }
10911
10912                 /* Mark the expression as containing an assignment */
10913
10914                 op1->gtFlags |= GTF_ASG;
10915
10916                 goto SPILL_APPEND;
10917
10918             case CEE_ADD:
10919                 oper = GT_ADD;
10920                 goto MATH_OP2;
10921
10922             case CEE_ADD_OVF:
10923                 uns = false;
10924                 goto ADD_OVF;
10925             case CEE_ADD_OVF_UN:
10926                 uns = true;
10927                 goto ADD_OVF;
10928
10929             ADD_OVF:
10930                 ovfl     = true;
10931                 callNode = false;
10932                 oper     = GT_ADD;
10933                 goto MATH_OP2_FLAGS;
10934
10935             case CEE_SUB:
10936                 oper = GT_SUB;
10937                 goto MATH_OP2;
10938
10939             case CEE_SUB_OVF:
10940                 uns = false;
10941                 goto SUB_OVF;
10942             case CEE_SUB_OVF_UN:
10943                 uns = true;
10944                 goto SUB_OVF;
10945
10946             SUB_OVF:
10947                 ovfl     = true;
10948                 callNode = false;
10949                 oper     = GT_SUB;
10950                 goto MATH_OP2_FLAGS;
10951
10952             case CEE_MUL:
10953                 oper = GT_MUL;
10954                 goto MATH_MAYBE_CALL_NO_OVF;
10955
10956             case CEE_MUL_OVF:
10957                 uns = false;
10958                 goto MUL_OVF;
10959             case CEE_MUL_OVF_UN:
10960                 uns = true;
10961                 goto MUL_OVF;
10962
10963             MUL_OVF:
10964                 ovfl = true;
10965                 oper = GT_MUL;
10966                 goto MATH_MAYBE_CALL_OVF;
10967
10968             // Other binary math operations
10969
10970             case CEE_DIV:
10971                 oper = GT_DIV;
10972                 goto MATH_MAYBE_CALL_NO_OVF;
10973
10974             case CEE_DIV_UN:
10975                 oper = GT_UDIV;
10976                 goto MATH_MAYBE_CALL_NO_OVF;
10977
10978             case CEE_REM:
10979                 oper = GT_MOD;
10980                 goto MATH_MAYBE_CALL_NO_OVF;
10981
10982             case CEE_REM_UN:
10983                 oper = GT_UMOD;
10984                 goto MATH_MAYBE_CALL_NO_OVF;
10985
10986             MATH_MAYBE_CALL_NO_OVF:
10987                 ovfl = false;
10988             MATH_MAYBE_CALL_OVF:
10989                 // Morpher has some complex logic about when to turn differently
10990                 // typed nodes on different platforms into helper calls. We
10991                 // need to either duplicate that logic here, or just
10992                 // pessimistically make all the nodes large enough to become
10993                 // call nodes.  Since call nodes aren't that much larger and
10994                 // these opcodes are infrequent enough that I chose the latter.
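                // For example (a sketch of the kind of rewrite meant here): on 32-bit targets an
                // overflow-checked 64-bit multiply is typically morphed into a helper call such as
                // CORINFO_HELP_LMUL_OVF, so the node allocated below must be big enough to be
                // rewritten in place as a GT_CALL.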
10995                 callNode = true;
10996                 goto MATH_OP2_FLAGS;
10997
10998             case CEE_AND:
10999                 oper = GT_AND;
11000                 goto MATH_OP2;
11001             case CEE_OR:
11002                 oper = GT_OR;
11003                 goto MATH_OP2;
11004             case CEE_XOR:
11005                 oper = GT_XOR;
11006                 goto MATH_OP2;
11007
11008             MATH_OP2: // For default values of 'ovfl' and 'callNode'
11009
11010                 ovfl     = false;
11011                 callNode = false;
11012
11013             MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set
11014
11015                 /* Pull two values and push back the result */
11016
11017                 if (tiVerificationNeeded)
11018                 {
11019                     const typeInfo& tiOp1 = impStackTop(1).seTypeInfo;
11020                     const typeInfo& tiOp2 = impStackTop().seTypeInfo;
11021
11022                     Verify(tiCompatibleWith(tiOp1, tiOp2, true), "different arg type");
11023                     if (oper == GT_ADD || oper == GT_DIV || oper == GT_SUB || oper == GT_MUL || oper == GT_MOD)
11024                     {
11025                         Verify(tiOp1.IsNumberType(), "not number");
11026                     }
11027                     else
11028                     {
11029                         Verify(tiOp1.IsIntegerType(), "not integer");
11030                     }
11031
11032                     Verify(!ovfl || tiOp1.IsIntegerType(), "not integer");
11033
11034                     tiRetVal = tiOp1;
11035
11036 #ifdef _TARGET_64BIT_
11037                     if (tiOp2.IsNativeIntType())
11038                     {
11039                         tiRetVal = tiOp2;
11040                     }
11041 #endif // _TARGET_64BIT_
11042                 }
11043
11044                 op2 = impPopStack().val;
11045                 op1 = impPopStack().val;
11046
11047 #if !CPU_HAS_FP_SUPPORT
11048                 if (varTypeIsFloating(op1->gtType))
11049                 {
11050                     callNode = true;
11051                 }
11052 #endif
11053                 /* Can't do arithmetic with references */
11054                 assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF);
11055
11056                 // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change it if it's a true byref, only
11057                 // if it is on the stack)
11058                 impBashVarAddrsToI(op1, op2);
11059
11060                 type = impGetByRefResultType(oper, uns, &op1, &op2);
11061
11062                 assert(!ovfl || !varTypeIsFloating(op1->gtType));
11063
11064                 /* Special case: "int+0", "int-0", "int*1", "int/1" */
11065
11066                 if (op2->gtOper == GT_CNS_INT)
11067                 {
11068                     if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) ||
11069                         (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV)))
11070
11071                     {
11072                         impPushOnStack(op1, tiRetVal);
11073                         break;
11074                     }
11075                 }
11076
11077 #if !FEATURE_X87_DOUBLES
11078                 // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand
11079                 //
11080                 if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))
11081                 {
11082                     if (op1->TypeGet() != type)
11083                     {
11084                         // We insert a cast of op1 to 'type'
11085                         op1 = gtNewCastNode(type, op1, type);
11086                     }
11087                     if (op2->TypeGet() != type)
11088                     {
11089                         // We insert a cast of op2 to 'type'
11090                         op2 = gtNewCastNode(type, op2, type);
11091                     }
11092                 }
11093 #endif // !FEATURE_X87_DOUBLES
11094
11095 #if SMALL_TREE_NODES
11096                 if (callNode)
11097                 {
11098                     /* These operators can later be transformed into 'GT_CALL' */
11099
11100                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]);
11101 #ifndef _TARGET_ARM_
11102                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]);
11103                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]);
11104                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]);
11105                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]);
11106 #endif
11107                     // It's tempting to use LargeOpOpcode() here, but this logic is *not* saying
11108                     // that we'll need to transform into a general large node, but rather specifically
11109                     // to a call: by doing it this way, things keep working if there are multiple sizes,
11110                     // and a CALL is no longer the largest.
11111                     // That said, as of now it *is* a large node, so we'll do this with an assert rather
11112                     // than an "if".
11113                     assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE);
11114                     op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true));
11115                 }
11116                 else
11117 #endif // SMALL_TREE_NODES
11118                 {
11119                     op1 = gtNewOperNode(oper, type, op1, op2);
11120                 }
11121
11122                 /* Special case: integer/long division may throw an exception */
11123
11124                 if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow())
11125                 {
11126                     op1->gtFlags |= GTF_EXCEPT;
11127                 }
11128
11129                 if (ovfl)
11130                 {
11131                     assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL);
11132                     if (ovflType != TYP_UNKNOWN)
11133                     {
11134                         op1->gtType = ovflType;
11135                     }
11136                     op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW);
11137                     if (uns)
11138                     {
11139                         op1->gtFlags |= GTF_UNSIGNED;
11140                     }
11141                 }
11142
11143                 impPushOnStack(op1, tiRetVal);
11144                 break;
11145
11146             case CEE_SHL:
11147                 oper = GT_LSH;
11148                 goto CEE_SH_OP2;
11149
11150             case CEE_SHR:
11151                 oper = GT_RSH;
11152                 goto CEE_SH_OP2;
11153             case CEE_SHR_UN:
11154                 oper = GT_RSZ;
11155                 goto CEE_SH_OP2;
11156
11157             CEE_SH_OP2:
11158                 if (tiVerificationNeeded)
11159                 {
11160                     const typeInfo& tiVal   = impStackTop(1).seTypeInfo;
11161                     const typeInfo& tiShift = impStackTop(0).seTypeInfo;
11162                     Verify(tiVal.IsIntegerType() && tiShift.IsType(TI_INT), "Bad shift args");
11163                     tiRetVal = tiVal;
11164                 }
11165                 op2 = impPopStack().val;
11166                 op1 = impPopStack().val; // operand to be shifted
11167                 impBashVarAddrsToI(op1, op2);
11168
11169                 type = genActualType(op1->TypeGet());
11170                 op1  = gtNewOperNode(oper, type, op1, op2);
11171
11172                 impPushOnStack(op1, tiRetVal);
11173                 break;
11174
11175             case CEE_NOT:
11176                 if (tiVerificationNeeded)
11177                 {
11178                     tiRetVal = impStackTop().seTypeInfo;
11179                     Verify(tiRetVal.IsIntegerType(), "bad int value");
11180                 }
11181
11182                 op1 = impPopStack().val;
11183                 impBashVarAddrsToI(op1, nullptr);
11184                 type = genActualType(op1->TypeGet());
11185                 impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal);
11186                 break;
11187
11188             case CEE_CKFINITE:
11189                 if (tiVerificationNeeded)
11190                 {
11191                     tiRetVal = impStackTop().seTypeInfo;
11192                     Verify(tiRetVal.IsType(TI_DOUBLE), "bad R value");
11193                 }
11194                 op1  = impPopStack().val;
11195                 type = op1->TypeGet();
11196                 op1  = gtNewOperNode(GT_CKFINITE, type, op1);
11197                 op1->gtFlags |= GTF_EXCEPT;
11198
11199                 impPushOnStack(op1, tiRetVal);
11200                 break;
11201
11202             case CEE_LEAVE:
11203
11204                 val     = getI4LittleEndian(codeAddr); // jump distance
11205                 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val);
11206                 goto LEAVE;
11207
11208             case CEE_LEAVE_S:
11209                 val     = getI1LittleEndian(codeAddr); // jump distance
11210                 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val);
11211
11212             LEAVE:
11213
11214                 if (compIsForInlining())
11215                 {
11216                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE);
11217                     return;
11218                 }
11219
11220                 JITDUMP(" %04X", jmpAddr);
11221                 if (block->bbJumpKind != BBJ_LEAVE)
11222                 {
11223                     impResetLeaveBlock(block, jmpAddr);
11224                 }
11225
11226                 assert(jmpAddr == block->bbJumpDest->bbCodeOffs);
11227                 impImportLeave(block);
11228                 impNoteBranchOffs();
11229
11230                 break;
11231
11232             case CEE_BR:
11233             case CEE_BR_S:
11234                 jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
11235
11236                 if (compIsForInlining() && jmpDist == 0)
11237                 {
11238                     break; /* NOP */
11239                 }
11240
11241                 impNoteBranchOffs();
11242                 break;
11243
11244             case CEE_BRTRUE:
11245             case CEE_BRTRUE_S:
11246             case CEE_BRFALSE:
11247             case CEE_BRFALSE_S:
11248
11249                 /* Pop the comparand (now there's a neat term) from the stack */
11250                 if (tiVerificationNeeded)
11251                 {
11252                     typeInfo& tiVal = impStackTop().seTypeInfo;
11253                     Verify(tiVal.IsObjRef() || tiVal.IsByRef() || tiVal.IsIntegerType() || tiVal.IsMethod(),
11254                            "bad value");
11255                 }
11256
11257                 op1  = impPopStack().val;
11258                 type = op1->TypeGet();
11259
11260                 // brfalse and brtrue are only allowed on I4, refs, and byrefs.
11261                 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11262                 {
11263                     block->bbJumpKind = BBJ_NONE;
11264
11265                     if (op1->gtFlags & GTF_GLOB_EFFECT)
11266                     {
11267                         op1 = gtUnusedValNode(op1);
11268                         goto SPILL_APPEND;
11269                     }
11270                     else
11271                     {
11272                         break;
11273                     }
11274                 }
11275
11276                 if (op1->OperIsCompare())
11277                 {
11278                     if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S)
11279                     {
11280                         // Flip the sense of the compare
11281
11282                         op1 = gtReverseCond(op1);
11283                     }
11284                 }
11285                 else
11286                 {
11287                     /* We'll compare against an equally-sized integer 0 */
11288                     /* For small types, we always compare against int   */
11289                     op2 = gtNewZeroConNode(genActualType(op1->gtType));
11290
11291                     /* Create the comparison operator and try to fold it */
11292
11293                     oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? GT_NE : GT_EQ;
11294                     op1  = gtNewOperNode(oper, TYP_INT, op1, op2);
11295                 }
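                // For illustration: "brtrue v" is imported as JTRUE(NE(v, 0)) and "brfalse v" as
                // JTRUE(EQ(v, 0)); if v is already a comparison node, only its sense is flipped above
                // (for brfalse), and the GT_JTRUE itself is created at COND_JUMP below.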
11296
11297             // fall through
11298
11299             COND_JUMP:
11300
11301                 /* Fold comparison if we can */
11302
11303                 op1 = gtFoldExpr(op1);
11304
11305                 /* Try to fold the really simple cases like 'iconst *, ifne/ifeq' */
11306                 /* Don't make any blocks unreachable in import only mode */
11307
11308                 if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly())
11309                 {
11310                     /* gtFoldExpr() should prevent this as we don't want to make any blocks
11311                        unreachable under compDbgCode */
11312                     assert(!opts.compDbgCode);
11313
11314                     BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->gtIntCon.gtIconVal ? BBJ_ALWAYS : BBJ_NONE);
11315                     assertImp((block->bbJumpKind == BBJ_COND)            // normal case
11316                               || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the
11317                                                                          // block for the second time
11318
11319                     block->bbJumpKind = foldedJumpKind;
11320 #ifdef DEBUG
11321                     if (verbose)
11322                     {
11323                         if (op1->gtIntCon.gtIconVal)
11324                         {
11325                             printf("\nThe conditional jump becomes an unconditional jump to BB%02u\n",
11326                                    block->bbJumpDest->bbNum);
11327                         }
11328                         else
11329                         {
11330                             printf("\nThe block falls through into the next BB%02u\n", block->bbNext->bbNum);
11331                         }
11332                     }
11333 #endif
11334                     break;
11335                 }
11336
11337                 op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1);
11338
11339                 /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt'
11340                    in impImportBlock(block). For correct line numbers, spill stack. */
11341
11342                 if (opts.compDbgCode && impCurStmtOffs != BAD_IL_OFFSET)
11343                 {
11344                     impSpillStackEnsure(true);
11345                 }
11346
11347                 goto SPILL_APPEND;
11348
11349             case CEE_CEQ:
11350                 oper = GT_EQ;
11351                 uns  = false;
11352                 goto CMP_2_OPs;
11353             case CEE_CGT_UN:
11354                 oper = GT_GT;
11355                 uns  = true;
11356                 goto CMP_2_OPs;
11357             case CEE_CGT:
11358                 oper = GT_GT;
11359                 uns  = false;
11360                 goto CMP_2_OPs;
11361             case CEE_CLT_UN:
11362                 oper = GT_LT;
11363                 uns  = true;
11364                 goto CMP_2_OPs;
11365             case CEE_CLT:
11366                 oper = GT_LT;
11367                 uns  = false;
11368                 goto CMP_2_OPs;
11369
11370             CMP_2_OPs:
11371                 if (tiVerificationNeeded)
11372                 {
11373                     verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
11374                     tiRetVal = typeInfo(TI_INT);
11375                 }
11376
11377                 op2 = impPopStack().val;
11378                 op1 = impPopStack().val;
11379
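                // For illustration: on 64-bit targets, comparing a native int against an int32
                // operand (e.g. an IntPtr local against a small constant) requires widening the
                // 32-bit side to TYP_I_IMPL so that both operands of the relop have the same size.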
11380 #ifdef _TARGET_64BIT_
11381                 if (varTypeIsI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_INT))
11382                 {
11383                     op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11384                 }
11385                 else if (varTypeIsI(op2->TypeGet()) && (genActualType(op1->TypeGet()) == TYP_INT))
11386                 {
11387                     op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11388                 }
11389 #endif // _TARGET_64BIT_
11390
11391                 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
11392                           varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
11393                           varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
11394
11395                 /* Create the comparison node */
11396
11397                 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
11398
11399                 /* TODO: setting both flags when only one is appropriate */
11400                 if (opcode == CEE_CGT_UN || opcode == CEE_CLT_UN)
11401                 {
11402                     op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED;
11403                 }
11404
11405                 impPushOnStack(op1, tiRetVal);
11406                 break;
11407
11408             case CEE_BEQ_S:
11409             case CEE_BEQ:
11410                 oper = GT_EQ;
11411                 goto CMP_2_OPs_AND_BR;
11412
11413             case CEE_BGE_S:
11414             case CEE_BGE:
11415                 oper = GT_GE;
11416                 goto CMP_2_OPs_AND_BR;
11417
11418             case CEE_BGE_UN_S:
11419             case CEE_BGE_UN:
11420                 oper = GT_GE;
11421                 goto CMP_2_OPs_AND_BR_UN;
11422
11423             case CEE_BGT_S:
11424             case CEE_BGT:
11425                 oper = GT_GT;
11426                 goto CMP_2_OPs_AND_BR;
11427
11428             case CEE_BGT_UN_S:
11429             case CEE_BGT_UN:
11430                 oper = GT_GT;
11431                 goto CMP_2_OPs_AND_BR_UN;
11432
11433             case CEE_BLE_S:
11434             case CEE_BLE:
11435                 oper = GT_LE;
11436                 goto CMP_2_OPs_AND_BR;
11437
11438             case CEE_BLE_UN_S:
11439             case CEE_BLE_UN:
11440                 oper = GT_LE;
11441                 goto CMP_2_OPs_AND_BR_UN;
11442
11443             case CEE_BLT_S:
11444             case CEE_BLT:
11445                 oper = GT_LT;
11446                 goto CMP_2_OPs_AND_BR;
11447
11448             case CEE_BLT_UN_S:
11449             case CEE_BLT_UN:
11450                 oper = GT_LT;
11451                 goto CMP_2_OPs_AND_BR_UN;
11452
11453             case CEE_BNE_UN_S:
11454             case CEE_BNE_UN:
11455                 oper = GT_NE;
11456                 goto CMP_2_OPs_AND_BR_UN;
11457
11458             CMP_2_OPs_AND_BR_UN:
11459                 uns       = true;
11460                 unordered = true;
11461                 goto CMP_2_OPs_AND_BR_ALL;
11462             CMP_2_OPs_AND_BR:
11463                 uns       = false;
11464                 unordered = false;
11465                 goto CMP_2_OPs_AND_BR_ALL;
11466             CMP_2_OPs_AND_BR_ALL:
11467
11468                 if (tiVerificationNeeded)
11469                 {
11470                     verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
11471                 }
11472
11473                 /* Pull two values */
11474                 op2 = impPopStack().val;
11475                 op1 = impPopStack().val;
11476
11477 #ifdef _TARGET_64BIT_
11478                 if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT))
11479                 {
11480                     op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11481                 }
11482                 else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT))
11483                 {
11484                     op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11485                 }
11486 #endif // _TARGET_64BIT_
11487
11488                 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
11489                           varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
11490                           varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
11491
11492                 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11493                 {
11494                     block->bbJumpKind = BBJ_NONE;
11495
11496                     if (op1->gtFlags & GTF_GLOB_EFFECT)
11497                     {
11498                         impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11499                                                        "Branch to next Optimization, op1 side effect"));
11500                         impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
11501                     }
11502                     if (op2->gtFlags & GTF_GLOB_EFFECT)
11503                     {
11504                         impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11505                                                        "Branch to next Optimization, op2 side effect"));
11506                         impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
11507                     }
11508
11509 #ifdef DEBUG
11510                     if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT)
11511                     {
11512                         impNoteLastILoffs();
11513                     }
11514 #endif
11515                     break;
11516                 }
11517 #if !FEATURE_X87_DOUBLES
11518                 // We can generate a compare of differently sized floating point op1 and op2;
11519                 // we insert a cast to make their types match
11520                 //
11521                 if (varTypeIsFloating(op1->TypeGet()))
11522                 {
11523                     if (op1->TypeGet() != op2->TypeGet())
11524                     {
11525                         assert(varTypeIsFloating(op2->TypeGet()));
11526
11527                         // say op1=double, op2=float. To avoid loss of precision
11528                         // while comparing, op2 is converted to double and a double
11529                         // comparison is done.
11530                         if (op1->TypeGet() == TYP_DOUBLE)
11531                         {
11532                             // We insert a cast of op2 to TYP_DOUBLE
11533                             op2 = gtNewCastNode(TYP_DOUBLE, op2, TYP_DOUBLE);
11534                         }
11535                         else if (op2->TypeGet() == TYP_DOUBLE)
11536                         {
11537                             // We insert a cast of op1 to TYP_DOUBLE
11538                             op1 = gtNewCastNode(TYP_DOUBLE, op1, TYP_DOUBLE);
11539                         }
11540                     }
11541                 }
11542 #endif // !FEATURE_X87_DOUBLES
11543
11544                 /* Create and append the operator */
11545
11546                 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
11547
11548                 if (uns)
11549                 {
11550                     op1->gtFlags |= GTF_UNSIGNED;
11551                 }
11552
11553                 if (unordered)
11554                 {
11555                     op1->gtFlags |= GTF_RELOP_NAN_UN;
11556                 }
11557
11558                 goto COND_JUMP;
11559
11560             case CEE_SWITCH:
11561                 assert(!compIsForInlining());
11562
11563                 if (tiVerificationNeeded)
11564                 {
11565                     Verify(impStackTop().seTypeInfo.IsType(TI_INT), "Bad switch val");
11566                 }
11567                 /* Pop the switch value off the stack */
11568                 op1 = impPopStack().val;
11569                 assertImp(genActualTypeIsIntOrI(op1->TypeGet()));
11570
11571 #ifdef _TARGET_64BIT_
11572                 // Widen 'op1' on 64-bit targets
11573                 if (op1->TypeGet() != TYP_I_IMPL)
11574                 {
11575                     if (op1->OperGet() == GT_CNS_INT)
11576                     {
11577                         op1->gtType = TYP_I_IMPL;
11578                     }
11579                     else
11580                     {
11581                         op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
11582                     }
11583                 }
11584 #endif // _TARGET_64BIT_
11585                 assert(genActualType(op1->TypeGet()) == TYP_I_IMPL);
11586
11587                 /* We can create a switch node */
11588
11589                 op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1);
11590
11591                 val = (int)getU4LittleEndian(codeAddr);
11592                 codeAddr += 4 + val * 4; // skip over the switch-table
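                // For illustration: the IL 'switch' operand is a 4-byte case count followed by that
                // many 4-byte relative branch offsets; only the count is read here in order to skip
                // the table, since the jump targets were already recorded on the block when the
                // basic blocks were built.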
11593
11594                 goto SPILL_APPEND;
11595
11596             /************************** Casting OPCODES ***************************/
11597
11598             case CEE_CONV_OVF_I1:
11599                 lclTyp = TYP_BYTE;
11600                 goto CONV_OVF;
11601             case CEE_CONV_OVF_I2:
11602                 lclTyp = TYP_SHORT;
11603                 goto CONV_OVF;
11604             case CEE_CONV_OVF_I:
11605                 lclTyp = TYP_I_IMPL;
11606                 goto CONV_OVF;
11607             case CEE_CONV_OVF_I4:
11608                 lclTyp = TYP_INT;
11609                 goto CONV_OVF;
11610             case CEE_CONV_OVF_I8:
11611                 lclTyp = TYP_LONG;
11612                 goto CONV_OVF;
11613
11614             case CEE_CONV_OVF_U1:
11615                 lclTyp = TYP_UBYTE;
11616                 goto CONV_OVF;
11617             case CEE_CONV_OVF_U2:
11618                 lclTyp = TYP_CHAR;
11619                 goto CONV_OVF;
11620             case CEE_CONV_OVF_U:
11621                 lclTyp = TYP_U_IMPL;
11622                 goto CONV_OVF;
11623             case CEE_CONV_OVF_U4:
11624                 lclTyp = TYP_UINT;
11625                 goto CONV_OVF;
11626             case CEE_CONV_OVF_U8:
11627                 lclTyp = TYP_ULONG;
11628                 goto CONV_OVF;
11629
11630             case CEE_CONV_OVF_I1_UN:
11631                 lclTyp = TYP_BYTE;
11632                 goto CONV_OVF_UN;
11633             case CEE_CONV_OVF_I2_UN:
11634                 lclTyp = TYP_SHORT;
11635                 goto CONV_OVF_UN;
11636             case CEE_CONV_OVF_I_UN:
11637                 lclTyp = TYP_I_IMPL;
11638                 goto CONV_OVF_UN;
11639             case CEE_CONV_OVF_I4_UN:
11640                 lclTyp = TYP_INT;
11641                 goto CONV_OVF_UN;
11642             case CEE_CONV_OVF_I8_UN:
11643                 lclTyp = TYP_LONG;
11644                 goto CONV_OVF_UN;
11645
11646             case CEE_CONV_OVF_U1_UN:
11647                 lclTyp = TYP_UBYTE;
11648                 goto CONV_OVF_UN;
11649             case CEE_CONV_OVF_U2_UN:
11650                 lclTyp = TYP_CHAR;
11651                 goto CONV_OVF_UN;
11652             case CEE_CONV_OVF_U_UN:
11653                 lclTyp = TYP_U_IMPL;
11654                 goto CONV_OVF_UN;
11655             case CEE_CONV_OVF_U4_UN:
11656                 lclTyp = TYP_UINT;
11657                 goto CONV_OVF_UN;
11658             case CEE_CONV_OVF_U8_UN:
11659                 lclTyp = TYP_ULONG;
11660                 goto CONV_OVF_UN;
11661
11662             CONV_OVF_UN:
11663                 uns = true;
11664                 goto CONV_OVF_COMMON;
11665             CONV_OVF:
11666                 uns = false;
11667                 goto CONV_OVF_COMMON;
11668
11669             CONV_OVF_COMMON:
11670                 ovfl = true;
11671                 goto _CONV;
11672
11673             case CEE_CONV_I1:
11674                 lclTyp = TYP_BYTE;
11675                 goto CONV;
11676             case CEE_CONV_I2:
11677                 lclTyp = TYP_SHORT;
11678                 goto CONV;
11679             case CEE_CONV_I:
11680                 lclTyp = TYP_I_IMPL;
11681                 goto CONV;
11682             case CEE_CONV_I4:
11683                 lclTyp = TYP_INT;
11684                 goto CONV;
11685             case CEE_CONV_I8:
11686                 lclTyp = TYP_LONG;
11687                 goto CONV;
11688
11689             case CEE_CONV_U1:
11690                 lclTyp = TYP_UBYTE;
11691                 goto CONV;
11692             case CEE_CONV_U2:
11693                 lclTyp = TYP_CHAR;
11694                 goto CONV;
11695 #if (REGSIZE_BYTES == 8)
11696             case CEE_CONV_U:
11697                 lclTyp = TYP_U_IMPL;
11698                 goto CONV_UN;
11699 #else
11700             case CEE_CONV_U:
11701                 lclTyp = TYP_U_IMPL;
11702                 goto CONV;
11703 #endif
11704             case CEE_CONV_U4:
11705                 lclTyp = TYP_UINT;
11706                 goto CONV;
11707             case CEE_CONV_U8:
11708                 lclTyp = TYP_ULONG;
11709                 goto CONV_UN;
11710
11711             case CEE_CONV_R4:
11712                 lclTyp = TYP_FLOAT;
11713                 goto CONV;
11714             case CEE_CONV_R8:
11715                 lclTyp = TYP_DOUBLE;
11716                 goto CONV;
11717
11718             case CEE_CONV_R_UN:
11719                 lclTyp = TYP_DOUBLE;
11720                 goto CONV_UN;
11721
11722             CONV_UN:
11723                 uns  = true;
11724                 ovfl = false;
11725                 goto _CONV;
11726
11727             CONV:
11728                 uns  = false;
11729                 ovfl = false;
11730                 goto _CONV;
11731
11732             _CONV:
11733                 // just check that we have a number on the stack
11734                 if (tiVerificationNeeded)
11735                 {
11736                     const typeInfo& tiVal = impStackTop().seTypeInfo;
11737                     Verify(tiVal.IsNumberType(), "bad arg");
11738
11739 #ifdef _TARGET_64BIT_
11740                     bool isNative = false;
11741
11742                     switch (opcode)
11743                     {
11744                         case CEE_CONV_OVF_I:
11745                         case CEE_CONV_OVF_I_UN:
11746                         case CEE_CONV_I:
11747                         case CEE_CONV_OVF_U:
11748                         case CEE_CONV_OVF_U_UN:
11749                         case CEE_CONV_U:
11750                             isNative = true;
11751                         default:
11752                             // leave 'isNative' = false;
11753                             break;
11754                     }
11755                     if (isNative)
11756                     {
11757                         tiRetVal = typeInfo::nativeInt();
11758                     }
11759                     else
11760 #endif // _TARGET_64BIT_
11761                     {
11762                         tiRetVal = typeInfo(lclTyp).NormaliseForStack();
11763                     }
11764                 }
11765
11766                 // Only conversions from FLOAT or DOUBLE to an integer type,
11767                 // and conversions from ULONG (or LONG on ARM) to DOUBLE, are morphed to calls
11768
11769                 if (varTypeIsFloating(lclTyp))
11770                 {
11771                     callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl
11772 #ifdef _TARGET_64BIT_
11773                                // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK?
11774                                // TYP_BYREF could be used as TYP_I_IMPL which is long.
11775                                // TODO-CQ: remove this when we lower casts long/ulong --> float/double
11776                                // and generate SSE2 code instead of going through helper calls.
11777                                || (impStackTop().val->TypeGet() == TYP_BYREF)
11778 #endif
11779                         ;
11780                 }
11781                 else
11782                 {
11783                     callNode = varTypeIsFloating(impStackTop().val->TypeGet());
11784                 }
11785
11786                 // At this point uns, ovfl, and callNode are all set
11787
11788                 op1 = impPopStack().val;
11789                 impBashVarAddrsToI(op1);
11790
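                // For illustration of the check below: if the value being narrowed is already masked
                // by an AND with a small constant, the cast and/or the mask may be redundant, e.g.
                //     (x & 0x7F) followed by conv.i1  -> drop the cast, the masked value already fits
                //     (x & 0xFF) followed by conv.i1  -> drop the mask, the cast truncates anyway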
11791                 if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND)
11792                 {
11793                     op2 = op1->gtOp.gtOp2;
11794
11795                     if (op2->gtOper == GT_CNS_INT)
11796                     {
11797                         ssize_t ival = op2->gtIntCon.gtIconVal;
11798                         ssize_t mask, umask;
11799
11800                         switch (lclTyp)
11801                         {
11802                             case TYP_BYTE:
11803                             case TYP_UBYTE:
11804                                 mask  = 0x00FF;
11805                                 umask = 0x007F;
11806                                 break;
11807                             case TYP_CHAR:
11808                             case TYP_SHORT:
11809                                 mask  = 0xFFFF;
11810                                 umask = 0x7FFF;
11811                                 break;
11812
11813                             default:
11814                                 assert(!"unexpected type");
11815                                 return;
11816                         }
11817
11818                         if (((ival & umask) == ival) || ((ival & mask) == ival && uns))
11819                         {
11820                             /* Toss the cast, it's a waste of time */
11821
11822                             impPushOnStack(op1, tiRetVal);
11823                             break;
11824                         }
11825                         else if (ival == mask)
11826                         {
11827                             /* Toss the masking, it's a waste of time, since
11828                                we sign-extend from the small value anyway */
11829
11830                             op1 = op1->gtOp.gtOp1;
11831                         }
11832                     }
11833                 }
11834
11835                 /*  The 'op2' sub-operand of a cast is the 'real' type number,
11836                     since the result of a cast to one of the 'small' integer
11837                     types is an integer.
11838                  */
11839
11840                 type = genActualType(lclTyp);
11841
11842 #if SMALL_TREE_NODES
11843                 if (callNode)
11844                 {
11845                     op1 = gtNewCastNodeL(type, op1, lclTyp);
11846                 }
11847                 else
11848 #endif // SMALL_TREE_NODES
11849                 {
11850                     op1 = gtNewCastNode(type, op1, lclTyp);
11851                 }
11852
11853                 if (ovfl)
11854                 {
11855                     op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT);
11856                 }
11857                 if (uns)
11858                 {
11859                     op1->gtFlags |= GTF_UNSIGNED;
11860                 }
11861                 impPushOnStack(op1, tiRetVal);
11862                 break;
11863
11864             case CEE_NEG:
11865                 if (tiVerificationNeeded)
11866                 {
11867                     tiRetVal = impStackTop().seTypeInfo;
11868                     Verify(tiRetVal.IsNumberType(), "Bad arg");
11869                 }
11870
11871                 op1 = impPopStack().val;
11872                 impBashVarAddrsToI(op1, nullptr);
11873                 impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal);
11874                 break;
11875
11876             case CEE_POP:
11877                 if (tiVerificationNeeded)
11878                 {
11879                     impStackTop(0);
11880                 }
11881
11882                 /* Pull the top value from the stack */
11883
11884                 op1 = impPopStack(clsHnd).val;
11885
11886                 /* Get hold of the type of the value being duplicated */
11887
11888                 lclTyp = genActualType(op1->gtType);
11889
11890                 /* Does the value have any side effects? */
11891
11892                 if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode)
11893                 {
11894                     // Since we are throwing away the value, just normalize
11895                     // it to its address.  This is more efficient.
11896
11897                     if (varTypeIsStruct(op1))
11898                     {
11899 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
11900                         // Non-calls, such as obj or ret_expr, have to go through this.
11901                         // Calls with large struct return value have to go through this.
11902                         // Helper calls with small struct return value also have to go
11903                         // through this since they do not follow the Unix calling convention.
11904                         if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd) ||
11905                             op1->AsCall()->gtCallType == CT_HELPER)
11906 #endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
11907                         {
11908                             op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false);
11909                         }
11910                     }
11911
11912                     // If op1 is a non-overflow cast, throw it away since it is useless.
11913                     // Another reason for throwing away the useless cast is in the context of
11914                     // implicit tail calls, where the operand of pop is GT_CAST(GT_CALL(..)):
11915                     // the cast gets added as part of importing GT_CALL, and it gets in the way
11916                     // of the tail call node forms that fgMorphCall() asserts on.
11917                     if ((op1->gtOper == GT_CAST) && !op1->gtOverflow())
11918                     {
11919                         op1 = op1->gtOp.gtOp1;
11920                     }
11921
11922                     // If 'op1' is an expression, create an assignment node.
11923                     // Helps analyses (like CSE) work correctly.
11924
11925                     if (op1->gtOper != GT_CALL)
11926                     {
11927                         op1 = gtUnusedValNode(op1);
11928                     }
11929
11930                     /* Append the value to the tree list */
11931                     goto SPILL_APPEND;
11932                 }
11933
11934                 /* No side effects - just throw the <BEEP> thing away */
11935                 break;
11936
11937             case CEE_DUP:
11938
11939                 if (tiVerificationNeeded)
11940                 {
11941                     // Dup could be the beginning of a delegate creation sequence; remember that.
11942                     delegateCreateStart = codeAddr - 1;
11943                     impStackTop(0);
11944                 }
11945
11946                 // Convert a (dup, stloc) sequence into a (stloc, ldloc) sequence in the following cases:
11947                 // - If this is non-debug code - so that CSE will recognize the two as equal.
11948                 //   This helps eliminate a redundant bounds check in cases such as:
11949                 //       ariba[i+3] += some_value;
11950                 // - If the top of the stack is a non-leaf that may be expensive to clone.
11951
11952                 if (codeAddr < codeEndp)
11953                 {
11954                     OPCODE nextOpcode = (OPCODE)getU1LittleEndian(codeAddr);
11955                     if (impIsAnySTLOC(nextOpcode))
11956                     {
11957                         if (!opts.compDbgCode)
11958                         {
11959                             insertLdloc = true;
11960                             break;
11961                         }
11962                         GenTree* stackTop = impStackTop().val;
11963                         if (!stackTop->IsIntegralConst(0) && !stackTop->IsFPZero() && !stackTop->IsLocal())
11964                         {
11965                             insertLdloc = true;
11966                             break;
11967                         }
11968                     }
11969                 }
11970
11971                 /* Pull the top value from the stack */
11972                 op1 = impPopStack(tiRetVal);
11973
11974                 /* Clone the value */
11975                 op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL,
11976                                    nullptr DEBUGARG("DUP instruction"));
11977
11978                 /* Either the tree started with no global effects, or impCloneExpr
11979                    evaluated the tree to a temp and returned two copies of that
11980                    temp. Either way, neither op1 nor op2 should have side effects.
11981                 */
11982                 assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT));
11983
11984                 /* Push the tree/temp back on the stack */
11985                 impPushOnStack(op1, tiRetVal);
11986
11987                 /* Push the copy on the stack */
11988                 impPushOnStack(op2, tiRetVal);
11989
11990                 break;
11991
11992             case CEE_STIND_I1:
11993                 lclTyp = TYP_BYTE;
11994                 goto STIND;
11995             case CEE_STIND_I2:
11996                 lclTyp = TYP_SHORT;
11997                 goto STIND;
11998             case CEE_STIND_I4:
11999                 lclTyp = TYP_INT;
12000                 goto STIND;
12001             case CEE_STIND_I8:
12002                 lclTyp = TYP_LONG;
12003                 goto STIND;
12004             case CEE_STIND_I:
12005                 lclTyp = TYP_I_IMPL;
12006                 goto STIND;
12007             case CEE_STIND_REF:
12008                 lclTyp = TYP_REF;
12009                 goto STIND;
12010             case CEE_STIND_R4:
12011                 lclTyp = TYP_FLOAT;
12012                 goto STIND;
12013             case CEE_STIND_R8:
12014                 lclTyp = TYP_DOUBLE;
12015                 goto STIND;
12016             STIND:
12017
12018                 if (tiVerificationNeeded)
12019                 {
12020                     typeInfo instrType(lclTyp);
12021 #ifdef _TARGET_64BIT_
12022                     if (opcode == CEE_STIND_I)
12023                     {
12024                         instrType = typeInfo::nativeInt();
12025                     }
12026 #endif // _TARGET_64BIT_
12027                     verVerifySTIND(impStackTop(1).seTypeInfo, impStackTop(0).seTypeInfo, instrType);
12028                 }
12029                 else
12030                 {
12031                     compUnsafeCastUsed = true; // Have to go conservative
12032                 }
12033
12034             STIND_POST_VERIFY:
12035
12036                 op2 = impPopStack().val; // value to store
12037                 op1 = impPopStack().val; // address to store to
12038
12039                 // you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF
12040                 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12041
12042                 impBashVarAddrsToI(op1, op2);
12043
12044                 op2 = impImplicitR4orR8Cast(op2, lclTyp);
12045
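                // For illustration of the casts below: a "stind.i4" whose value is a native int gets
                // an explicit downcast to TYP_INT, and a "stind.i" whose value is an int32 gets an
                // upcast to TYP_I_IMPL, so the store and the stored value end up with matching sizes.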
12046 #ifdef _TARGET_64BIT_
12047                 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
12048                 if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
12049                 {
12050                     op2->gtType = TYP_I_IMPL;
12051                 }
12052                 else
12053                 {
12054                     // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
12055                     //
12056                     if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
12057                     {
12058                         assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12059                         op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
12060                     }
12061                     // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12062                     //
12063                     if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
12064                     {
12065                         assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12066                         op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
12067                     }
12068                 }
12069 #endif // _TARGET_64BIT_
12070
12071                 if (opcode == CEE_STIND_REF)
12072                 {
12073                     // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF
12074                     assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType));
12075                     lclTyp = genActualType(op2->TypeGet());
12076                 }
12077
12078 // Check target type.
12079 #ifdef DEBUG
12080                 if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF)
12081                 {
12082                     if (op2->gtType == TYP_BYREF)
12083                     {
12084                         assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL);
12085                     }
12086                     else if (lclTyp == TYP_BYREF)
12087                     {
12088                         assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType));
12089                     }
12090                 }
12091                 else
12092                 {
12093                     assertImp(genActualType(op2->gtType) == genActualType(lclTyp) ||
12094                               ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) ||
12095                               (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp)));
12096                 }
12097 #endif
12098
12099                 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12100
12101                 // stind could point anywhere, e.g. a boxed class static int
12102                 op1->gtFlags |= GTF_IND_TGTANYWHERE;
12103
12104                 if (prefixFlags & PREFIX_VOLATILE)
12105                 {
12106                     assert(op1->OperGet() == GT_IND);
12107                     op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
12108                     op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12109                     op1->gtFlags |= GTF_IND_VOLATILE;
12110                 }
12111
12112                 if (prefixFlags & PREFIX_UNALIGNED)
12113                 {
12114                     assert(op1->OperGet() == GT_IND);
12115                     op1->gtFlags |= GTF_IND_UNALIGNED;
12116                 }
12117
12118                 op1 = gtNewAssignNode(op1, op2);
12119                 op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
12120
12121                 // Spill side-effects AND global-data-accesses
12122                 if (verCurrentState.esStackDepth > 0)
12123                 {
12124                     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND"));
12125                 }
12126
12127                 goto APPEND;
12128
12129             case CEE_LDIND_I1:
12130                 lclTyp = TYP_BYTE;
12131                 goto LDIND;
12132             case CEE_LDIND_I2:
12133                 lclTyp = TYP_SHORT;
12134                 goto LDIND;
12135             case CEE_LDIND_U4:
12136             case CEE_LDIND_I4:
12137                 lclTyp = TYP_INT;
12138                 goto LDIND;
12139             case CEE_LDIND_I8:
12140                 lclTyp = TYP_LONG;
12141                 goto LDIND;
12142             case CEE_LDIND_REF:
12143                 lclTyp = TYP_REF;
12144                 goto LDIND;
12145             case CEE_LDIND_I:
12146                 lclTyp = TYP_I_IMPL;
12147                 goto LDIND;
12148             case CEE_LDIND_R4:
12149                 lclTyp = TYP_FLOAT;
12150                 goto LDIND;
12151             case CEE_LDIND_R8:
12152                 lclTyp = TYP_DOUBLE;
12153                 goto LDIND;
12154             case CEE_LDIND_U1:
12155                 lclTyp = TYP_UBYTE;
12156                 goto LDIND;
12157             case CEE_LDIND_U2:
12158                 lclTyp = TYP_CHAR;
12159                 goto LDIND;
12160             LDIND:
12161
12162                 if (tiVerificationNeeded)
12163                 {
12164                     typeInfo lclTiType(lclTyp);
12165 #ifdef _TARGET_64BIT_
12166                     if (opcode == CEE_LDIND_I)
12167                     {
12168                         lclTiType = typeInfo::nativeInt();
12169                     }
12170 #endif // _TARGET_64BIT_
12171                     tiRetVal = verVerifyLDIND(impStackTop().seTypeInfo, lclTiType);
12172                     tiRetVal.NormaliseForStack();
12173                 }
12174                 else
12175                 {
12176                     compUnsafeCastUsed = true; // Have to go conservative
12177                 }
12178
12179             LDIND_POST_VERIFY:
12180
12181                 op1 = impPopStack().val; // address to load from
12182                 impBashVarAddrsToI(op1);
12183
12184 #ifdef _TARGET_64BIT_
12185                 // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12186                 //
12187                 if (genActualType(op1->gtType) == TYP_INT)
12188                 {
12189                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12190                     op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
12191                 }
12192 #endif
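                      // Illustrative only (assumed, unverifiable IL): a load such as
                      //     ldc.i4   0x1000
                      //     ldind.i4
                      // leaves a 32-bit int as the address on a 64-bit target; the cast above widens it
                      // to TYP_I_IMPL before the indirection is built.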
12193
12194                 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12195
12196                 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12197
12198                 // ldind could point anywhere, e.g. a boxed class static int
12199                 op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
12200
12201                 if (prefixFlags & PREFIX_VOLATILE)
12202                 {
12203                     assert(op1->OperGet() == GT_IND);
12204                     op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
12205                     op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12206                     op1->gtFlags |= GTF_IND_VOLATILE;
12207                 }
12208
12209                 if (prefixFlags & PREFIX_UNALIGNED)
12210                 {
12211                     assert(op1->OperGet() == GT_IND);
12212                     op1->gtFlags |= GTF_IND_UNALIGNED;
12213                 }
12214
12215                 impPushOnStack(op1, tiRetVal);
12216
12217                 break;
12218
12219             case CEE_UNALIGNED:
12220
12221                 assert(sz == 1);
12222                 val = getU1LittleEndian(codeAddr);
12223                 ++codeAddr;
12224                 JITDUMP(" %u", val);
12225                 if ((val != 1) && (val != 2) && (val != 4))
12226                 {
12227                     BADCODE("Alignment unaligned. must be 1, 2, or 4");
12228                 }
12229
12230                 Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. prefixes");
12231                 prefixFlags |= PREFIX_UNALIGNED;
12232
12233                 impValidateMemoryAccessOpcode(codeAddr, codeEndp, false);
12234
12235             PREFIX:
12236                 opcode = (OPCODE)getU1LittleEndian(codeAddr);
12237                 codeAddr += sizeof(__int8);
12238                 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
12239                 goto DECODE_OPCODE;
12240
12241             case CEE_VOLATILE:
12242
12243                 Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes");
12244                 prefixFlags |= PREFIX_VOLATILE;
12245
12246                 impValidateMemoryAccessOpcode(codeAddr, codeEndp, true);
12247
12248                 assert(sz == 0);
12249                 goto PREFIX;
12250
12251             case CEE_LDFTN:
12252             {
12253                 // Need to do a lookup here so that we perform an access check
12254                 // and do a NOWAY if protections are violated
12255                 _impResolveToken(CORINFO_TOKENKIND_Method);
12256
12257                 JITDUMP(" %08X", resolvedToken.token);
12258
12259                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12260                               addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN)),
12261                               &callInfo);
12262
12263                 // This check really only applies to intrinsic Array.Address methods
12264                 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12265                 {
12266                     NO_WAY("Currently do not support LDFTN of Parameterized functions");
12267                 }
12268
12269                 // Do this before DO_LDFTN since CEE_LDVIRTFN does it on its own.
12270                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12271
12272                 if (tiVerificationNeeded)
12273                 {
12274                     // LDFTN could mark the beginning of a delegate creation sequence; remember that
12275                     delegateCreateStart = codeAddr - 2;
12276
12277                     // check any constraints on the callee's class and type parameters
12278                     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12279                                    "method has unsatisfied class constraints");
12280                     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12281                                                                                 resolvedToken.hMethod),
12282                                    "method has unsatisfied method constraints");
12283
12284                     mflags = callInfo.verMethodFlags;
12285                     Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDFTN on a constructor");
12286                 }
12287
12288             DO_LDFTN:
12289                 op1 = impMethodPointer(&resolvedToken, &callInfo);
12290                 if (compDonotInline())
12291                 {
12292                     return;
12293                 }
12294
12295                 impPushOnStack(op1, typeInfo(resolvedToken.hMethod));
12296
12297                 break;
12298             }
12299
12300             case CEE_LDVIRTFTN:
12301             {
12302                 /* Get the method token */
12303
12304                 _impResolveToken(CORINFO_TOKENKIND_Method);
12305
12306                 JITDUMP(" %08X", resolvedToken.token);
12307
12308                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */,
12309                               addVerifyFlag(combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN),
12310                                                     CORINFO_CALLINFO_CALLVIRT)),
12311                               &callInfo);
12312
12313                 // This check really only applies to intrinsic Array.Address methods
12314                 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12315                 {
12316                     NO_WAY("Currently do not support LDFTN of Parameterized functions");
12317                 }
12318
12319                 mflags = callInfo.methodFlags;
12320
12321                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12322
12323                 if (compIsForInlining())
12324                 {
12325                     if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12326                     {
12327                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL);
12328                         return;
12329                     }
12330                 }
12331
12332                 CORINFO_SIG_INFO& ftnSig = callInfo.sig;
12333
12334                 if (tiVerificationNeeded)
12335                 {
12336
12337                     Verify(ftnSig.hasThis(), "ldvirtftn on a static method");
12338                     Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDVIRTFTN on a constructor");
12339
12340                     // JIT32 verifier rejects verifiable ldvirtftn pattern
12341                     typeInfo declType =
12342                         verMakeTypeInfo(resolvedToken.hClass, true); // Change TI_STRUCT to TI_REF when necessary
12343
12344                     typeInfo arg = impStackTop().seTypeInfo;
12345                     Verify((arg.IsType(TI_REF) || arg.IsType(TI_NULL)) && tiCompatibleWith(arg, declType, true),
12346                            "bad ldvirtftn");
12347
12348                     CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
12349                     if (!(arg.IsType(TI_NULL) || (mflags & CORINFO_FLG_STATIC)))
12350                     {
12351                         instanceClassHnd = arg.GetClassHandleForObjRef();
12352                     }
12353
12354                     // check any constraints on the method's class and type parameters
12355                     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12356                                    "method has unsatisfied class constraints");
12357                     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12358                                                                                 resolvedToken.hMethod),
12359                                    "method has unsatisfied method constraints");
12360
12361                     if (mflags & CORINFO_FLG_PROTECTED)
12362                     {
12363                         Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
12364                                "Accessing protected method through wrong type.");
12365                     }
12366                 }
12367
12368                 /* Get the object-ref */
12369                 op1 = impPopStack().val;
12370                 assertImp(op1->gtType == TYP_REF);
12371
12372                 if (opts.IsReadyToRun())
12373                 {
12374                     if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN)
12375                     {
12376                         if (op1->gtFlags & GTF_SIDE_EFFECT)
12377                         {
12378                             op1 = gtUnusedValNode(op1);
12379                             impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12380                         }
12381                         goto DO_LDFTN;
12382                     }
12383                 }
12384                 else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12385                 {
12386                     if (op1->gtFlags & GTF_SIDE_EFFECT)
12387                     {
12388                         op1 = gtUnusedValNode(op1);
12389                         impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12390                     }
12391                     goto DO_LDFTN;
12392                 }
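                      // Illustrative only: for a sealed, static, or otherwise non-virtual target, e.g.
                      //     ldarg.0
                      //     ldvirtftn instance void SomeSealedClass::M()
                      // the object reference is kept only for its side effects and the sequence is then
                      // handled exactly like ldftn (the DO_LDFTN path above). SomeSealedClass is a
                      // placeholder name.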
12393
12394                 GenTreePtr fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo);
12395                 if (compDonotInline())
12396                 {
12397                     return;
12398                 }
12399
12400                 impPushOnStack(fptr, typeInfo(resolvedToken.hMethod));
12401
12402                 break;
12403             }
12404
12405             case CEE_CONSTRAINED:
12406
12407                 assertImp(sz == sizeof(unsigned));
12408                 impResolveToken(codeAddr, &constrainedResolvedToken, CORINFO_TOKENKIND_Constrained);
12409                 codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually
12410                 JITDUMP(" (%08X) ", constrainedResolvedToken.token);
12411
12412                 Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes");
12413                 prefixFlags |= PREFIX_CONSTRAINED;
12414
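                      // Illustrative only (assumed IL): the shape this prefix guards is
                      //     constrained. !!T
                      //     callvirt   instance string [mscorlib]System.Object::ToString()
                      // anything other than an immediately following callvirt is rejected below.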
12415                 {
12416                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12417                     if (actualOpcode != CEE_CALLVIRT)
12418                     {
12419                         BADCODE("constrained. has to be followed by callvirt");
12420                     }
12421                 }
12422
12423                 goto PREFIX;
12424
12425             case CEE_READONLY:
12426                 JITDUMP(" readonly.");
12427
12428                 Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes");
12429                 prefixFlags |= PREFIX_READONLY;
12430
12431                 {
12432                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12433                     if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode))
12434                     {
12435                         BADCODE("readonly. has to be followed by ldelema or call");
12436                     }
12437                 }
12438
12439                 assert(sz == 0);
12440                 goto PREFIX;
12441
12442             case CEE_TAILCALL:
12443                 JITDUMP(" tail.");
12444
12445                 Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes");
12446                 prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
12447
12448                 {
12449                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12450                     if (!impOpcodeIsCallOpcode(actualOpcode))
12451                     {
12452                         BADCODE("tailcall. has to be followed by call, callvirt or calli");
12453                     }
12454                 }
12455                 assert(sz == 0);
12456                 goto PREFIX;
12457
12458             case CEE_NEWOBJ:
12459
12460                 /* Since we will implicitly insert newObjThisPtr at the start of the
12461                    argument list, spill any GTF_ORDER_SIDEEFF */
12462                 impSpillSpecialSideEff();
12463
12464                 /* NEWOBJ does not respond to TAIL */
12465                 prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT;
12466
12467                 /* NEWOBJ does not respond to CONSTRAINED */
12468                 prefixFlags &= ~PREFIX_CONSTRAINED;
12469
12470 #if COR_JIT_EE_VERSION > 460
12471                 _impResolveToken(CORINFO_TOKENKIND_NewObj);
12472 #else
12473                 _impResolveToken(CORINFO_TOKENKIND_Method);
12474 #endif
12475
12476                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12477                               addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM)),
12478                               &callInfo);
12479
12480                 if (compIsForInlining())
12481                 {
12482                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
12483                     {
12484                         // Check to see if this call violates the boundary.
12485                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
12486                         return;
12487                     }
12488                 }
12489
12490                 mflags = callInfo.methodFlags;
12491
12492                 if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0)
12493                 {
12494                     BADCODE("newobj on static or abstract method");
12495                 }
12496
12497                 // Insert the security callout before any actual code is generated
12498                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12499
12500                 // There are three different cases for new
12501                 // Object size is variable (depends on arguments)
12502                 //      1) Object is an array (arrays treated specially by the EE)
12503                 //      2) Object is some other variable sized object (e.g. String)
12504                 //      3) Class Size can be determined beforehand (normal case)
12505                 // In the first case, we need to call a NEWOBJ helper (multinewarray)
12506                 // in the second case we call the constructor with a '0' this pointer
12507                 // In the third case we alloc the memory, then call the constructor
12508
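                      // Illustrative only (assumed IL) for the three cases above:
                      //     newobj instance void int32[0...,0...]::.ctor(int32, int32)       // 1) array: NEWOBJ (multinewarray) helper
                      //     newobj instance void [mscorlib]System.String::.ctor(char, int32) // 2) variable size: ctor gets a null 'this'
                      //     newobj instance void SomeClass::.ctor()                          // 3) fixed size: alloc, then call ctor
                      // SomeClass is a placeholder name.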
12509                 clsFlags = callInfo.classFlags;
12510                 if (clsFlags & CORINFO_FLG_ARRAY)
12511                 {
12512                     if (tiVerificationNeeded)
12513                     {
12514                         CORINFO_CLASS_HANDLE elemTypeHnd;
12515                         INDEBUG(CorInfoType corType =)
12516                         info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
12517                         assert(!(elemTypeHnd == nullptr && corType == CORINFO_TYPE_VALUECLASS));
12518                         Verify(elemTypeHnd == nullptr ||
12519                                    !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
12520                                "newarr of byref-like objects");
12521                         verVerifyCall(opcode, &resolvedToken, nullptr, ((prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0),
12522                                       ((prefixFlags & PREFIX_READONLY) != 0), delegateCreateStart, codeAddr - 1,
12523                                       &callInfo DEBUGARG(info.compFullName));
12524                     }
12525                     // Arrays need to call the NEWOBJ helper.
12526                     assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE);
12527
12528                     impImportNewObjArray(&resolvedToken, &callInfo);
12529                     if (compDonotInline())
12530                     {
12531                         return;
12532                     }
12533
12534                     callTyp = TYP_REF;
12535                     break;
12536                 }
12537                 // At present this can only be String
12538                 else if (clsFlags & CORINFO_FLG_VAROBJSIZE)
12539                 {
12540                     if (IsTargetAbi(CORINFO_CORERT_ABI))
12541                     {
12542                         // The dummy argument does not exist in CoreRT
12543                         newObjThisPtr = nullptr;
12544                     }
12545                     else
12546                     {
12547                         // This is the case for variable-sized objects that are not
12548                         // arrays.  In this case, call the constructor with a null 'this'
12549                         // pointer
12550                         newObjThisPtr = gtNewIconNode(0, TYP_REF);
12551                     }
12552
12553                     /* Remember that this basic block contains 'new' of an object */
12554                     block->bbFlags |= BBF_HAS_NEWOBJ;
12555                     optMethodFlags |= OMF_HAS_NEWOBJ;
12556                 }
12557                 else
12558                 {
12559                     // This is the normal case where the size of the object is
12560                     // fixed.  Allocate the memory and call the constructor.
12561
12562                     // Note: We cannot add a peep to avoid use of temp here
12563                     // because we don't have enough interference info to detect when
12564                     // sources and destination interfere, e.g.: s = new S(ref);
12565
12566                     // TODO: We should find the correct place to introduce a general
12567                     // reverse copy prop for struct return values from newobj or
12568                     // any function returning structs.
12569
12570                     /* get a temporary for the new object */
12571                     lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp"));
12572
12573                     // In the value class case we only need clsHnd for size calcs.
12574                     //
12575                     // The lookup of the code pointer will be handled by CALL in this case
12576                     if (clsFlags & CORINFO_FLG_VALUECLASS)
12577                     {
12578                         if (compIsForInlining())
12579                         {
12580                             // If value class has GC fields, inform the inliner. It may choose to
12581                             // bail out on the inline.
12582                             DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
12583                             if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
12584                             {
12585                                 compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
12586                                 if (compInlineResult->IsFailure())
12587                                 {
12588                                     return;
12589                                 }
12590
12591                                 // Do further notification in the case where the call site is rare;
12592                                 // some policies do not track the relative hotness of call sites for
12593                                 // "always" inline cases.
12594                                 if (impInlineInfo->iciBlock->isRunRarely())
12595                                 {
12596                                     compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
12597                                     if (compInlineResult->IsFailure())
12598                                     {
12599                                         return;
12600                                     }
12601                                 }
12602                             }
12603                         }
12604
12605                         CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
12606                         unsigned    size   = info.compCompHnd->getClassSize(resolvedToken.hClass);
12607
12608                         if (impIsPrimitive(jitTyp))
12609                         {
12610                             lvaTable[lclNum].lvType = JITtype2varType(jitTyp);
12611                         }
12612                         else
12613                         {
12614                             // The local variable itself is the allocated space.
12615                             // Here we need the unsafe value cls check, since the address of the struct is taken for
12616                             // further use and is potentially exploitable.
12617                             lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */);
12618                         }
12619
12620                         // Append a tree to zero-out the temp
12621                         newObjThisPtr = gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet());
12622
12623                         newObjThisPtr = gtNewBlkOpNode(newObjThisPtr,    // Dest
12624                                                        gtNewIconNode(0), // Value
12625                                                        size,             // Size
12626                                                        false,            // isVolatile
12627                                                        false);           // not copyBlock
12628                         impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
12629
12630                         // Obtain the address of the temp
12631                         newObjThisPtr =
12632                             gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet()));
12633                     }
12634                     else
12635                     {
12636 #ifdef FEATURE_READYTORUN_COMPILER
12637                         if (opts.IsReadyToRun())
12638                         {
12639                             op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
12640                             usingReadyToRunHelper = (op1 != nullptr);
12641                         }
12642
12643                         if (!usingReadyToRunHelper)
12644 #endif
12645                         {
12646                             op1 = impParentClassTokenToHandle(&resolvedToken, nullptr, TRUE);
12647                             if (op1 == nullptr)
12648                             { // compDonotInline()
12649                                 return;
12650                             }
12651
12652                             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
12653                             // and the newfast call with a single call to a dynamic R2R cell that will:
12654                             //      1) Load the context
12655                             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate
12656                             //      stub
12657                             //      3) Allocate and return the new object
12658                             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
12659
12660                             op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd),
12661                                                     resolvedToken.hClass, TYP_REF, op1);
12662                         }
12663
12664                         // Remember that this basic block contains 'new' of an object
12665                         block->bbFlags |= BBF_HAS_NEWOBJ;
12666                         optMethodFlags |= OMF_HAS_NEWOBJ;
12667
12668                         // Append the assignment to the temp/local. Don't need to spill
12669                         // at all as we are just calling an EE-Jit helper which can only
12670                         // cause an (async) OutOfMemoryException.
12671
12672                         // We assign the newly allocated object (by a GT_ALLOCOBJ node)
12673                         // to a temp. Note that the pattern "temp = allocObj" is required
12674                         // by ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes
12675                         // without exhaustive walk over all expressions.
12676
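                              // Illustrative only (assumed tree shape): after the assignment below the
                              // statement looks roughly like
                              //     ASG(LCL_VAR tempN, ALLOCOBJ(clsHnd))
                              // which is the pattern the ObjectAllocator phase scans for.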
12677                         impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE);
12678
12679                         newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF);
12680                     }
12681                 }
12682                 goto CALL;
12683
12684             case CEE_CALLI:
12685
12686                 /* CALLI does not respond to CONSTRAINED */
12687                 prefixFlags &= ~PREFIX_CONSTRAINED;
12688
12689                 if (compIsForInlining())
12690                 {
12691                     // CALLI doesn't have a method handle, so assume the worst.
12692                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
12693                     {
12694                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_CALLI);
12695                         return;
12696                     }
12697                 }
12698
12699             // fall through
12700
12701             case CEE_CALLVIRT:
12702             case CEE_CALL:
12703
12704                 // We can't call getCallInfo on the token from a CALLI, but we need it in
12705                 // many other places.  We unfortunately embed that knowledge here.
12706                 if (opcode != CEE_CALLI)
12707                 {
12708                     _impResolveToken(CORINFO_TOKENKIND_Method);
12709
12710                     eeGetCallInfo(&resolvedToken,
12711                                   (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr,
12712                                   // this is how impImportCall invokes getCallInfo
12713                                   addVerifyFlag(
12714                                       combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS),
12715                                               (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT
12716                                                                        : CORINFO_CALLINFO_NONE)),
12717                                   &callInfo);
12718                 }
12719                 else
12720                 {
12721                     // Suppress uninitialized use warning.
12722                     memset(&resolvedToken, 0, sizeof(resolvedToken));
12723                     memset(&callInfo, 0, sizeof(callInfo));
12724
12725                     resolvedToken.token = getU4LittleEndian(codeAddr);
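                          // Illustrative only (assumed IL): a raw indirect call such as
                          //     ldftn  int32 SomeClass::Add(int32, int32)
                          //     calli  int32(int32, int32)
                          // carries only a stand-alone signature token, so the token is read directly
                          // from the IL stream here instead of going through getCallInfo. SomeClass is
                          // a placeholder name.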
12726                 }
12727
12728             CALL: // memberRef should be set.
12729                 // newObjThisPtr should be set for CEE_NEWOBJ
12730
12731                 JITDUMP(" %08X", resolvedToken.token);
12732                 constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0;
12733
12734                 bool newBBcreatedForTailcallStress;
12735
12736                 newBBcreatedForTailcallStress = false;
12737
12738                 if (compIsForInlining())
12739                 {
12740                     if (compDonotInline())
12741                     {
12742                         return;
12743                     }
12744                     // We rule out inlinees with explicit tail calls in fgMakeBasicBlocks.
12745                     assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0);
12746                 }
12747                 else
12748                 {
12749                     if (compTailCallStress())
12750                     {
12751                         // Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()?
12752                         // Tail call stress only recognizes call+ret patterns and forces them to be
12753                         // explicit tail prefixed calls.  Also fgMakeBasicBlocks() under tail call stress
12754                         // doesn't import the 'ret' opcode following the call into the basic block containing
12755                         // the call; instead it imports it into a new basic block.  Note that fgMakeBasicBlocks()
12756                         // is already checking that there is an opcode following call and hence it is
12757                         // safe here to read next opcode without bounds check.
12758                         newBBcreatedForTailcallStress =
12759                             impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL, (not a CEE_NEWOBJ). So, don't
12760                                                              // make it jump to RET.
12761                             (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET
12762
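                              // Illustrative only (assumed IL): under tail call stress a trailing pair
                              //     call   void SomeClass::M()
                              //     ret
                              // is treated as if the user had written "tail. call", provided the
                              // constraint check below says an explicit tail call would be legal here.
                              // SomeClass::M is a placeholder.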
12763                         if (newBBcreatedForTailcallStress &&
12764                             !(prefixFlags & PREFIX_TAILCALL_EXPLICIT) && // User hasn't set "tail." prefix yet.
12765                             verCheckTailCallConstraint(opcode, &resolvedToken,
12766                                                        constraintCall ? &constrainedResolvedToken : nullptr,
12767                                                        true) // Is it legal to do a tailcall?
12768                             )
12769                         {
12770                             // Stress the tailcall.
12771                             JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)");
12772                             prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
12773                         }
12774                     }
12775
12776                     // Note that when running under tail call stress, a call will be marked as explicit tail prefixed
12777                     // hence will not be considered for implicit tail calling.
12778                     bool isRecursive = (callInfo.hMethod == info.compMethodHnd);
12779                     if (impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive))
12780                     {
12781                         JITDUMP(" (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
12782                         prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
12783                     }
12784                 }
12785
12786                 // Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call).
12787                 explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
12788                 readonlyCall     = (prefixFlags & PREFIX_READONLY) != 0;
12789
12790                 if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ)
12791                 {
12792                     // All calls and delegates need a security callout.
12793                     // For delegates, this is the call to the delegate constructor, not the access check on the
12794                     // LD(virt)FTN.
12795                     impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12796
12797 #if 0 // DevDiv 410397 - This breaks too many obfuscated apps to do this in an in-place release
12798      
12799                 // DevDiv 291703 - we need to check for accessibility between the caller of InitializeArray
12800                 // and the field it is reading, thus it is now unverifiable to not immediately precede with
12801                 // ldtoken <field token>, and we now check accessibility
12802                 if ((callInfo.methodFlags & CORINFO_FLG_INTRINSIC) &&
12803                     (info.compCompHnd->getIntrinsicID(callInfo.hMethod) == CORINFO_INTRINSIC_InitializeArray))
12804                 {
12805                     if (prevOpcode != CEE_LDTOKEN)
12806                     {
12807                         Verify(prevOpcode == CEE_LDTOKEN, "Need ldtoken for InitializeArray");
12808                     }
12809                     else
12810                     {
12811                         assert(lastLoadToken != NULL);
12812                         // Now that we know we have a token, verify that it is accessible for loading
12813                         CORINFO_RESOLVED_TOKEN resolvedLoadField;
12814                         impResolveToken(lastLoadToken, &resolvedLoadField, CORINFO_TOKENKIND_Field);
12815                         eeGetFieldInfo(&resolvedLoadField, CORINFO_ACCESS_INIT_ARRAY, &fieldInfo);
12816                         impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
12817                     }
12818                 }
12819
12820 #endif // DevDiv 410397
12821                 }
12822
12823                 if (tiVerificationNeeded)
12824                 {
12825                     verVerifyCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
12826                                   explicitTailCall, readonlyCall, delegateCreateStart, codeAddr - 1,
12827                                   &callInfo DEBUGARG(info.compFullName));
12828                 }
12829
12830                 // Insert delegate callout here.
12831                 if (opcode == CEE_NEWOBJ && (mflags & CORINFO_FLG_CONSTRUCTOR) && (clsFlags & CORINFO_FLG_DELEGATE))
12832                 {
12833 #ifdef DEBUG
12834                     // We should do this only if verification is enabled
12835                     // If verification is disabled, delegateCreateStart will not be initialized correctly
12836                     if (tiVerificationNeeded)
12837                     {
12838                         mdMemberRef delegateMethodRef = mdMemberRefNil;
12839                         // We should get here only for well formed delegate creation.
12840                         assert(verCheckDelegateCreation(delegateCreateStart, codeAddr - 1, delegateMethodRef));
12841                     }
12842 #endif
12843
12844 #ifdef FEATURE_CORECLR
12845                     // In coreclr the delegate transparency rule needs to be enforced even if verification is disabled
12846                     typeInfo              tiActualFtn          = impStackTop(0).seTypeInfo;
12847                     CORINFO_METHOD_HANDLE delegateMethodHandle = tiActualFtn.GetMethod2();
12848
12849                     impInsertCalloutForDelegate(info.compMethodHnd, delegateMethodHandle, resolvedToken.hClass);
12850 #endif // FEATURE_CORECLR
12851                 }
12852
12853                 callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
12854                                         newObjThisPtr, prefixFlags, &callInfo, opcodeOffs);
12855                 if (compDonotInline())
12856                 {
12857                     return;
12858                 }
12859
12860                 if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we
12861                                                                        // have created a new BB after the "call"
12862                 // instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless.
12863                 {
12864                     assert(!compIsForInlining());
12865                     goto RET;
12866                 }
12867
12868                 break;
12869
12870             case CEE_LDFLD:
12871             case CEE_LDSFLD:
12872             case CEE_LDFLDA:
12873             case CEE_LDSFLDA:
12874             {
12875
12876                 BOOL isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA);
12877                 BOOL isLoadStatic  = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA);
12878
12879                 /* Get the CP_Fieldref index */
12880                 assertImp(sz == sizeof(unsigned));
12881
12882                 _impResolveToken(CORINFO_TOKENKIND_Field);
12883
12884                 JITDUMP(" %08X", resolvedToken.token);
12885
12886                 int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET;
12887
12888                 GenTreePtr           obj     = nullptr;
12889                 typeInfo*            tiObj   = nullptr;
12890                 CORINFO_CLASS_HANDLE objType = nullptr; // used for fields
12891
12892                 if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA)
12893                 {
12894                     tiObj = &impStackTop().seTypeInfo;
12895                     obj   = impPopStack(objType).val;
12896
12897                     if (impIsThis(obj))
12898                     {
12899                         aflags |= CORINFO_ACCESS_THIS;
12900
12901                         // An optimization for Contextful classes:
12902                         // we unwrap the proxy when we have a 'this reference'
12903
12904                         if (info.compUnwrapContextful)
12905                         {
12906                             aflags |= CORINFO_ACCESS_UNWRAP;
12907                         }
12908                     }
12909                 }
12910
12911                 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
12912
12913                 // Figure out the type of the member.  We always call canAccessField, so you always need this
12914                 // handle
12915                 CorInfoType ciType = fieldInfo.fieldType;
12916                 clsHnd             = fieldInfo.structType;
12917
12918                 lclTyp = JITtype2varType(ciType);
12919
12920 #ifdef _TARGET_AMD64_
12921                 noway_assert(varTypeIsIntegralOrI(lclTyp) || varTypeIsFloating(lclTyp) || lclTyp == TYP_STRUCT);
12922 #endif // _TARGET_AMD64_
12923
12924                 if (compIsForInlining())
12925                 {
12926                     switch (fieldInfo.fieldAccessor)
12927                     {
12928                         case CORINFO_FIELD_INSTANCE_HELPER:
12929                         case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
12930                         case CORINFO_FIELD_STATIC_ADDR_HELPER:
12931                         case CORINFO_FIELD_STATIC_TLS:
12932
12933                             compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER);
12934                             return;
12935
12936                         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
12937 #if COR_JIT_EE_VERSION > 460
12938                         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
12939 #endif
12940                             /* We may be able to inline the field accessors in specific instantiations of generic
12941                              * methods */
12942                             compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER);
12943                             return;
12944
12945                         default:
12946                             break;
12947                     }
12948
12949                     if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT &&
12950                         clsHnd)
12951                     {
12952                         if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) &&
12953                             !(info.compFlags & CORINFO_FLG_FORCEINLINE))
12954                         {
12955                             // Loading a static valuetype field usually will cause a JitHelper to be called
12956                             // for the static base. This will bloat the code.
12957                             compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS);
12958
12959                             if (compInlineResult->IsFailure())
12960                             {
12961                                 return;
12962                             }
12963                         }
12964                     }
12965                 }
12966
12967                 tiRetVal = verMakeTypeInfo(ciType, clsHnd);
12968                 if (isLoadAddress)
12969                 {
12970                     tiRetVal.MakeByRef();
12971                 }
12972                 else
12973                 {
12974                     tiRetVal.NormaliseForStack();
12975                 }
12976
12977                 // Perform this check always to ensure that we get field access exceptions even with
12978                 // SkipVerification.
12979                 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
12980
12981                 if (tiVerificationNeeded)
12982                 {
12983                     // You can also pass the unboxed struct to LDFLD
12984                     BOOL bAllowPlainValueTypeAsThis = FALSE;
12985                     if (opcode == CEE_LDFLD && impIsValueType(tiObj))
12986                     {
12987                         bAllowPlainValueTypeAsThis = TRUE;
12988                     }
12989
12990                     verVerifyField(&resolvedToken, fieldInfo, tiObj, isLoadAddress, bAllowPlainValueTypeAsThis);
12991
12992                     // If we're doing this on a heap object or from a 'safe' byref
12993                     // then the result is a safe byref too
12994                     if (isLoadAddress) // load address
12995                     {
12996                         if (fieldInfo.fieldFlags &
12997                             CORINFO_FLG_FIELD_STATIC) // statics marked as safe will have permanent home
12998                         {
12999                             if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_SAFESTATIC_BYREF_RETURN)
13000                             {
13001                                 tiRetVal.SetIsPermanentHomeByRef();
13002                             }
13003                         }
13004                         else if (tiObj->IsObjRef() || tiObj->IsPermanentHomeByRef())
13005                         {
13006                             // ldflda of byref is safe if done on a gc object or on a
13007                             // safe byref
13008                             tiRetVal.SetIsPermanentHomeByRef();
13009                         }
13010                     }
13011                 }
13012                 else
13013                 {
13014                     // tiVerificationNeeded is false.
13015                     // Raise InvalidProgramException if static load accesses non-static field
13016                     if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
13017                     {
13018                         BADCODE("static access on an instance field");
13019                     }
13020                 }
13021
13022                 // We are using ldfld/a on a static field. We allow it, but we need to evaluate any side effects of obj.
13023                 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
13024                 {
13025                     if (obj->gtFlags & GTF_SIDE_EFFECT)
13026                     {
13027                         obj = gtUnusedValNode(obj);
13028                         impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13029                     }
13030                     obj = nullptr;
13031                 }
13032
13033                 /* Preserve 'small' int types */
13034                 if (lclTyp > TYP_INT)
13035                 {
13036                     lclTyp = genActualType(lclTyp);
13037                 }
13038
13039                 bool usesHelper = false;
13040
13041                 switch (fieldInfo.fieldAccessor)
13042                 {
13043                     case CORINFO_FIELD_INSTANCE:
13044 #ifdef FEATURE_READYTORUN_COMPILER
13045                     case CORINFO_FIELD_INSTANCE_WITH_BASE:
13046 #endif
13047                     {
13048                         bool nullcheckNeeded = false;
13049
13050                         obj = impCheckForNullPointer(obj);
13051
13052                         if (isLoadAddress && (obj->gtType == TYP_BYREF) && fgAddrCouldBeNull(obj))
13053                         {
13054                             nullcheckNeeded = true;
13055                         }
13056
13057                         // If the object is a struct, what we really want is
13058                         // for the field to operate on the address of the struct.
13059                         if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj))
13060                         {
13061                             assert(opcode == CEE_LDFLD && objType != nullptr);
13062
13063                             obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true);
13064                         }
13065
13066                         /* Create the data member node */
13067                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset, nullcheckNeeded);
13068
13069 #ifdef FEATURE_READYTORUN_COMPILER
13070                         if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
13071                         {
13072                             op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
13073                         }
13074 #endif
13075
13076                         op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
13077
13078                         if (fgAddrCouldBeNull(obj))
13079                         {
13080                             op1->gtFlags |= GTF_EXCEPT;
13081                         }
13082
13083                         // If gtFldObj is a BYREF then our target is a value class and
13084                         // it could point anywhere, e.g. a boxed class static int
13085                         if (obj->gtType == TYP_BYREF)
13086                         {
13087                             op1->gtFlags |= GTF_IND_TGTANYWHERE;
13088                         }
13089
13090                         DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13091                         if (StructHasOverlappingFields(typeFlags))
13092                         {
13093                             op1->gtField.gtFldMayOverlap = true;
13094                         }
13095
13096                         // wrap it in an address-of operator if necessary
13097                         if (isLoadAddress)
13098                         {
13099                             op1 = gtNewOperNode(GT_ADDR,
13100                                                 (var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1);
13101                         }
13102                         else
13103                         {
13104                             if (compIsForInlining() &&
13105                                 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, obj,
13106                                                                                    impInlineInfo->inlArgInfo))
13107                             {
13108                                 impInlineInfo->thisDereferencedFirst = true;
13109                             }
13110                         }
13111                     }
13112                     break;
13113
13114                     case CORINFO_FIELD_STATIC_TLS:
13115 #ifdef _TARGET_X86_
13116                         // Legacy TLS access is implemented as intrinsic on x86 only
13117
13118                         /* Create the data member node */
13119                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
13120                         op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13121
13122                         if (isLoadAddress)
13123                         {
13124                             op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1);
13125                         }
13126                         break;
13127 #else
13128                         fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13129
13130                         __fallthrough;
13131 #endif
13132
13133                     case CORINFO_FIELD_STATIC_ADDR_HELPER:
13134                     case CORINFO_FIELD_INSTANCE_HELPER:
13135                     case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13136                         op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13137                                                clsHnd, nullptr);
13138                         usesHelper = true;
13139                         break;
13140
13141                     case CORINFO_FIELD_STATIC_ADDRESS:
13142                         // Replace static read-only fields with constant if possible
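                              // Illustrative only (assumed C# source): a read of a field like
                              //     static readonly int s_answer = 42;
                              // can be folded to the constant 42 here once the EE reports the class as
                              // already initialized, provided the field does not live boxed in the GC heap.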
13143                         if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) &&
13144                             !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) &&
13145                             (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp)))
13146                         {
13147                             CorInfoInitClassResult initClassResult =
13148                                 info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd,
13149                                                             impTokenLookupContextHandle);
13150
13151                             if (initClassResult & CORINFO_INITCLASS_INITIALIZED)
13152                             {
13153                                 void** pFldAddr = nullptr;
13154                                 void*  fldAddr =
13155                                     info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr);
13156
13157                                 // We should always be able to access this static's address directly
13158                                 assert(pFldAddr == nullptr);
13159
13160                                 op1 = impImportStaticReadOnlyField(fldAddr, lclTyp);
13161                                 goto FIELD_DONE;
13162                             }
13163                         }
13164
13165                         __fallthrough;
13166
13167                     case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13168                     case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13169                     case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13170 #if COR_JIT_EE_VERSION > 460
13171                     case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13172 #endif
13173                         op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13174                                                          lclTyp);
13175                         break;
13176
13177                     case CORINFO_FIELD_INTRINSIC_ZERO:
13178                     {
13179                         assert(aflags & CORINFO_ACCESS_GET);
13180                         op1 = gtNewIconNode(0, lclTyp);
13181                         goto FIELD_DONE;
13182                     }
13183                     break;
13184
13185                     case CORINFO_FIELD_INTRINSIC_EMPTY_STRING:
13186                     {
13187                         assert(aflags & CORINFO_ACCESS_GET);
13188
13189                         LPVOID         pValue;
13190                         InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue);
13191                         op1                = gtNewStringLiteralNode(iat, pValue);
13192                         goto FIELD_DONE;
13193                     }
13194                     break;
13195
13196                     default:
13197                         assert(!"Unexpected fieldAccessor");
13198                 }
13199
13200                 if (!isLoadAddress)
13201                 {
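                          // Apply any volatile./unaligned. prefix semantics to the load. The GTF_IND_* flags are
                          // only meaningful on direct GT_FIELD/GT_IND/GT_OBJ accesses, so they are not set when
                          // the access was expanded into a helper call above.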
13202
13203                     if (prefixFlags & PREFIX_VOLATILE)
13204                     {
13205                         op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
13206                         op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13207
13208                         if (!usesHelper)
13209                         {
13210                             assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13211                                    (op1->OperGet() == GT_OBJ));
13212                             op1->gtFlags |= GTF_IND_VOLATILE;
13213                         }
13214                     }
13215
13216                     if (prefixFlags & PREFIX_UNALIGNED)
13217                     {
13218                         if (!usesHelper)
13219                         {
13220                             assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13221                                    (op1->OperGet() == GT_OBJ));
13222                             op1->gtFlags |= GTF_IND_UNALIGNED;
13223                         }
13224                     }
13225                 }
13226
13227                 /* Check if the class needs explicit initialization */
13228
13229                 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13230                 {
13231                     GenTreePtr helperNode = impInitClass(&resolvedToken);
13232                     if (compDonotInline())
13233                     {
13234                         return;
13235                     }
13236                     if (helperNode != nullptr)
13237                     {
13238                         op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13239                     }
13240                 }
13241
13242             FIELD_DONE:
13243                 impPushOnStack(op1, tiRetVal);
13244             }
13245             break;
13246
13247             case CEE_STFLD:
13248             case CEE_STSFLD:
13249             {
13250
13251                 BOOL isStoreStatic = (opcode == CEE_STSFLD);
13252
13253                 CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type)
13254
13255                 /* Get the CP_Fieldref index */
13256
13257                 assertImp(sz == sizeof(unsigned));
13258
13259                 _impResolveToken(CORINFO_TOKENKIND_Field);
13260
13261                 JITDUMP(" %08X", resolvedToken.token);
13262
13263                 int        aflags = CORINFO_ACCESS_SET;
13264                 GenTreePtr obj    = nullptr;
13265                 typeInfo*  tiObj  = nullptr;
13266                 typeInfo   tiVal;
13267
13268                 /* Pull the value from the stack */
13269                 op2    = impPopStack(tiVal);
13270                 clsHnd = tiVal.GetClassHandle();
13271
13272                 if (opcode == CEE_STFLD)
13273                 {
13274                     tiObj = &impStackTop().seTypeInfo;
13275                     obj   = impPopStack().val;
13276
13277                     if (impIsThis(obj))
13278                     {
13279                         aflags |= CORINFO_ACCESS_THIS;
13280
13281                         // An optimization for Contextful classes:
13282                         // we unwrap the proxy when we have a 'this reference'
13283
13284                         if (info.compUnwrapContextful)
13285                         {
13286                             aflags |= CORINFO_ACCESS_UNWRAP;
13287                         }
13288                     }
13289                 }
13290
13291                 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
13292
13293                 // Figure out the type of the member.  We always call canAccessField, so you always need this
13294                 // handle
13295                 CorInfoType ciType = fieldInfo.fieldType;
13296                 fieldClsHnd        = fieldInfo.structType;
13297
13298                 lclTyp = JITtype2varType(ciType);
13299
13300                 if (compIsForInlining())
13301                 {
13302                     /* Is this a 'special' (COM) field, a TLS ref static field, a field stored in the GC heap,
13303                      * or a per-instantiation static? */
13304
13305                     switch (fieldInfo.fieldAccessor)
13306                     {
13307                         case CORINFO_FIELD_INSTANCE_HELPER:
13308                         case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13309                         case CORINFO_FIELD_STATIC_ADDR_HELPER:
13310                         case CORINFO_FIELD_STATIC_TLS:
13311
13312                             compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER);
13313                             return;
13314
13315                         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13316 #if COR_JIT_EE_VERSION > 460
13317                         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13318 #endif
13319
13320                             /* We may be able to inline the field accessors in specific instantiations of generic
13321                              * methods */
13322                             compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER);
13323                             return;
13324
13325                         default:
13326                             break;
13327                     }
13328                 }
13329
13330                 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13331
13332                 if (tiVerificationNeeded)
13333                 {
13334                     verVerifyField(&resolvedToken, fieldInfo, tiObj, TRUE);
13335                     typeInfo fieldType = verMakeTypeInfo(ciType, fieldClsHnd);
13336                     Verify(tiCompatibleWith(tiVal, fieldType.NormaliseForStack(), true), "type mismatch");
13337                 }
13338                 else
13339                 {
13340                     // tiVerificationNeeded is false.
13341                     // Raise InvalidProgramException if a static store accesses a non-static field
13342                     if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
13343                     {
13344                         BADCODE("static access on an instance field");
13345                     }
13346                 }
13347
13348                 // We are using stfld on a static field.
13349                 // We allow it, but need to evaluate any side effects of obj
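                      // e.g. (illustrative C#) "GetObj().SomeStaticField = v;": the expression that produced 'obj'
                      // may have side effects (such as a call), so it is wrapped in gtUnusedValNode and appended to
                      // the statement list, and then dropped from the store itself.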
13350                 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
13351                 {
13352                     if (obj->gtFlags & GTF_SIDE_EFFECT)
13353                     {
13354                         obj = gtUnusedValNode(obj);
13355                         impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13356                     }
13357                     obj = nullptr;
13358                 }
13359
13360                 /* Preserve 'small' int types */
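                      // Types smaller than TYP_INT (e.g. TYP_BYTE, TYP_SHORT) are deliberately left as-is so the
                      // store uses the field's exact width; only wider types are normalized to their actual type.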
13361                 if (lclTyp > TYP_INT)
13362                 {
13363                     lclTyp = genActualType(lclTyp);
13364                 }
13365
13366                 switch (fieldInfo.fieldAccessor)
13367                 {
13368                     case CORINFO_FIELD_INSTANCE:
13369 #ifdef FEATURE_READYTORUN_COMPILER
13370                     case CORINFO_FIELD_INSTANCE_WITH_BASE:
13371 #endif
13372                     {
13373                         obj = impCheckForNullPointer(obj);
13374
13375                         /* Create the data member node */
13376                         op1             = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
13377                         DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13378                         if (StructHasOverlappingFields(typeFlags))
13379                         {
13380                             op1->gtField.gtFldMayOverlap = true;
13381                         }
13382
13383 #ifdef FEATURE_READYTORUN_COMPILER
13384                         if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
13385                         {
13386                             op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
13387                         }
13388 #endif
13389
13390                         op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
13391
13392                         if (fgAddrCouldBeNull(obj))
13393                         {
13394                             op1->gtFlags |= GTF_EXCEPT;
13395                         }
13396
13397                         // If gtFldObj is a BYREF then our target is a value class and
13398                         // it could point anywhere, for example a boxed class static int
13399                         if (obj->gtType == TYP_BYREF)
13400                         {
13401                             op1->gtFlags |= GTF_IND_TGTANYWHERE;
13402                         }
13403
13404                         if (compIsForInlining() &&
13405                             impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, obj, impInlineInfo->inlArgInfo))
13406                         {
13407                             impInlineInfo->thisDereferencedFirst = true;
13408                         }
13409                     }
13410                     break;
13411
13412                     case CORINFO_FIELD_STATIC_TLS:
13413 #ifdef _TARGET_X86_
13414                         // Legacy TLS access is implemented as an intrinsic on x86 only
13415
13416                         /* Create the data member node */
13417                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
13418                         op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13419
13420                         break;
13421 #else
13422                         fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13423
13424                         __fallthrough;
13425 #endif
13426
13427                     case CORINFO_FIELD_STATIC_ADDR_HELPER:
13428                     case CORINFO_FIELD_INSTANCE_HELPER:
13429                     case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13430                         op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13431                                                clsHnd, op2);
13432                         goto SPILL_APPEND;
13433
13434                     case CORINFO_FIELD_STATIC_ADDRESS:
13435                     case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13436                     case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13437                     case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13438 #if COR_JIT_EE_VERSION > 460
13439                     case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13440 #endif
13441                         op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13442                                                          lclTyp);
13443                         break;
13444
13445                     default:
13446                         assert(!"Unexpected fieldAccessor");
13447                 }
13448
13449                 // Create the member assignment, unless we have a struct.
13450                 // TODO-1stClassStructs: This could be limited to TYP_STRUCT, to avoid extra copies.
13451                 bool deferStructAssign = varTypeIsStruct(lclTyp);
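                      // For struct-typed fields the assignment is not built here; it is created after the spills
                      // below via impAssignStruct, so struct copy semantics are handled in one place.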
13452
13453                 if (!deferStructAssign)
13454                 {
13455                     if (prefixFlags & PREFIX_VOLATILE)
13456                     {
13457                         assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
13458                         op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
13459                         op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13460                         op1->gtFlags |= GTF_IND_VOLATILE;
13461                     }
13462                     if (prefixFlags & PREFIX_UNALIGNED)
13463                     {
13464                         assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
13465                         op1->gtFlags |= GTF_IND_UNALIGNED;
13466                     }
13467
13468                     /* V4.0 allows assignment of i4 constant values to i8 type vars when the IL verifier is
13469                        bypassed (full trust apps). The reason this works is that the JIT stores an i4 constant
13470                        in the GenTree union during importation and reads from the union as if it were a long
13471                        during code generation. Though this can potentially read garbage, one can get lucky and
13472                        have it work correctly.
13473
13474                        This code pattern is generated by the Dev10 MC++ compiler when storing to fields and
13475                        compiling with the /O2 switch (the default for retail configs in Dev10), and a customer
13476                        app has taken a dependency on it. To be backward compatible, we explicitly add an upward
13477                        cast here so that it always works correctly.
13478
13479                        Note that this is limited to x86 alone, as there is no back compat to be addressed for
13480                        the Arm JIT for V4.0.
13481                     */
13486                     CLANG_FORMAT_COMMENT_ANCHOR;
13487
13488 #ifdef _TARGET_X86_
13489                     if (op1->TypeGet() != op2->TypeGet() && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) &&
13490                         varTypeIsLong(op1->TypeGet()))
13491                     {
13492                         op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
13493                     }
13494 #endif
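                          // e.g. (hypothetical IL) "ldc.i4.0; stfld int64 C::f": op2 is an i4 constant while op1 is
                          // TYP_LONG; on x86 the cast inserted above widens the constant so the store is well-typed.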
13495
13496 #ifdef _TARGET_64BIT_
13497                     // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
13498                     if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
13499                     {
13500                         op2->gtType = TYP_I_IMPL;
13501                     }
13502                     else
13503                     {
13504                         // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
13505                         //
13506                         if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
13507                         {
13508                             op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
13509                         }
13510                         // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
13511                         //
13512                         if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
13513                         {
13514                             op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
13515                         }
13516                     }
13517 #endif
13518
13519 #if !FEATURE_X87_DOUBLES
13520                     // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
13521                     // We insert a cast to the dest 'op1' type
13522                     //
13523                     if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
13524                         varTypeIsFloating(op2->gtType))
13525                     {
13526                         op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
13527                     }
13528 #endif // !FEATURE_X87_DOUBLES
13529
13530                     op1 = gtNewAssignNode(op1, op2);
13531
13532                     /* Mark the expression as containing an assignment */
13533
13534                     op1->gtFlags |= GTF_ASG;
13535                 }
13536
13537                 /* Check if the class needs explicit initialization */
13538
13539                 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13540                 {
13541                     GenTreePtr helperNode = impInitClass(&resolvedToken);
13542                     if (compDonotInline())
13543                     {
13544                         return;
13545                     }
13546                     if (helperNode != nullptr)
13547                     {
13548                         op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13549                     }
13550                 }
13551
13552                 /* stfld can interfere with value classes (consider the sequence
13553                    ldloc, ldloca, ..., stfld, stloc).  We will be conservative and
13554                    spill all value class references from the stack. */
13555
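                      // If the object expression is itself (a byref to) a value class we conservatively spill the
                      // entire evaluation stack; otherwise spilling just the value-class references is sufficient.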
13556                 if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL)))
13557                 {
13558                     assert(tiObj);
13559
13560                     if (impIsValueType(tiObj))
13561                     {
13562                         impSpillEvalStack();
13563                     }
13564                     else
13565                     {
13566                         impSpillValueClasses();
13567                     }
13568                 }
13569
13570                 /* Spill any refs to the same member from the stack */
13571
13572                 impSpillLclRefs((ssize_t)resolvedToken.hField);
13573
13574                 /* stsfld also interferes with indirect accesses (for aliased
13575                    statics) and calls. But we don't need to spill other statics
13576                    as we have explicitly spilled this particular static field. */
13577
13578                 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD"));
13579
13580                 if (deferStructAssign)
13581                 {
13582                     op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL);
13583                 }
13584             }
13585                 goto APPEND;
13586
13587             case CEE_NEWARR:
13588             {
13589
13590                 /* Get the class type index operand */
13591
13592                 _impResolveToken(CORINFO_TOKENKIND_Newarr);
13593
13594                 JITDUMP(" %08X", resolvedToken.token);
13595
13596                 if (!opts.IsReadyToRun())
13597                 {
13598                     // Need to restore array classes before creating array objects on the heap
13599                     op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
13600                     if (op1 == nullptr)
13601                     { // compDonotInline()
13602                         return;
13603                     }
13604                 }
13605
13606                 if (tiVerificationNeeded)
13607                 {
13608                     // As per ECMA, the 'numElems' operand can be either int32 or native int.
13609                     Verify(impStackTop().seTypeInfo.IsIntOrNativeIntType(), "bad bound");
13610
13611                     CORINFO_CLASS_HANDLE elemTypeHnd;
13612                     info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
13613                     Verify(elemTypeHnd == nullptr ||
13614                                !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
13615                            "array of byref-like type");
13616                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13617                 }
13618
13619                 accessAllowedResult =
13620                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13621                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13622
13623                 /* Form the arglist: array class handle, size */
13624                 op2 = impPopStack().val;
13625                 assertImp(genActualTypeIsIntOrI(op2->gtType));
13626
13627 #ifdef FEATURE_READYTORUN_COMPILER
13628                 if (opts.IsReadyToRun())
13629                 {
13630                     op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF,
13631                                                     gtNewArgList(op2));
13632                     usingReadyToRunHelper = (op1 != nullptr);
13633
13634                     if (!usingReadyToRunHelper)
13635                     {
13636                         // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13637                         // and the newarr call with a single call to a dynamic R2R cell that will:
13638                         //      1) Load the context
13639                         //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
13640                         //      3) Allocate the new array
13641                         // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13642
13643                         // Need to restore array classes before creating array objects on the heap
13644                         op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
13645                         if (op1 == nullptr)
13646                         { // compDonotInline()
13647                             return;
13648                         }
13649                     }
13650                 }
13651
13652                 if (!usingReadyToRunHelper)
13653 #endif
13654                 {
13655                     args = gtNewArgList(op1, op2);
13656
13657                     /* Create a call to 'new' */
13658
13659                     // Note that this only works for shared generic code because the same helper is used for all
13660                     // reference array types
13661                     op1 =
13662                         gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, 0, args);
13663                 }
13664
13665                 op1->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass;
13666
13667                 /* Remember that this basic block contains a 'new' of a single-dimensional (SD) array */
13668
13669                 block->bbFlags |= BBF_HAS_NEWARRAY;
13670                 optMethodFlags |= OMF_HAS_NEWARRAY;
13671
13672                 /* Push the result of the call on the stack */
13673
13674                 impPushOnStack(op1, tiRetVal);
13675
13676                 callTyp = TYP_REF;
13677             }
13678             break;
13679
13680             case CEE_LOCALLOC:
13681                 assert(!compIsForInlining());
13682
13683                 if (tiVerificationNeeded)
13684                 {
13685                     Verify(false, "bad opcode");
13686                 }
13687
13688                 // We don't allow locallocs inside handlers
13689                 if (block->hasHndIndex())
13690                 {
13691                     BADCODE("Localloc can't be inside handler");
13692                 }
13693
13694                 /* The FP register may not be back to the original value at the end
13695                    of the method, even if the frame size is 0, as localloc may
13696                    have modified it. So we will HAVE to reset it */
13697
13698                 compLocallocUsed = true;
13699                 setNeedsGSSecurityCookie();
13700
13701                 // Get the size to allocate
13702
13703                 op2 = impPopStack().val;
13704                 assertImp(genActualTypeIsIntOrI(op2->gtType));
13705
13706                 if (verCurrentState.esStackDepth != 0)
13707                 {
13708                     BADCODE("Localloc can only be used when the stack is empty");
13709                 }
13710
13711                 op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2);
13712
13713                 // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd.
13714
13715                 op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE);
13716
13717                 impPushOnStack(op1, tiRetVal);
13718                 break;
13719
13720             case CEE_ISINST:
13721
13722                 /* Get the type token */
13723                 assertImp(sz == sizeof(unsigned));
13724
13725                 _impResolveToken(CORINFO_TOKENKIND_Casting);
13726
13727                 JITDUMP(" %08X", resolvedToken.token);
13728
13729                 if (!opts.IsReadyToRun())
13730                 {
13731                     op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
13732                     if (op2 == nullptr)
13733                     { // compDonotInline()
13734                         return;
13735                     }
13736                 }
13737
13738                 if (tiVerificationNeeded)
13739                 {
13740                     Verify(impStackTop().seTypeInfo.IsObjRef(), "obj reference needed");
13741                     // Even if this is a value class, we know it is boxed.
13742                     tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
13743                 }
13744                 accessAllowedResult =
13745                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13746                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13747
13748                 op1 = impPopStack().val;
13749
13750 #ifdef FEATURE_READYTORUN_COMPILER
13751                 if (opts.IsReadyToRun())
13752                 {
13753                     GenTreePtr opLookup =
13754                         impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF,
13755                                                   gtNewArgList(op1));
13756                     usingReadyToRunHelper = (opLookup != nullptr);
13757                     op1                   = (usingReadyToRunHelper ? opLookup : op1);
13758
13759                     if (!usingReadyToRunHelper)
13760                     {
13761                         // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13762                         // and the isinstanceof_any call with a single call to a dynamic R2R cell that will:
13763                         //      1) Load the context
13764                         //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
13765                         //      3) Perform the 'is instance' check on the input object
13766                         // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13767
13768                         op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
13769                         if (op2 == nullptr)
13770                         { // compDonotInline()
13771                             return;
13772                         }
13773                     }
13774                 }
13775
13776                 if (!usingReadyToRunHelper)
13777 #endif
13778                 {
13779                     op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false);
13780                 }
13781                 if (compDonotInline())
13782                 {
13783                     return;
13784                 }
13785
13786                 impPushOnStack(op1, tiRetVal);
13787
13788                 break;
13789
13790             case CEE_REFANYVAL:
13791
13792                 // get the class handle and make a ICON node out of it
13793
13794                 _impResolveToken(CORINFO_TOKENKIND_Class);
13795
13796                 JITDUMP(" %08X", resolvedToken.token);
13797
13798                 op2 = impTokenToHandle(&resolvedToken);
13799                 if (op2 == nullptr)
13800                 { // compDonotInline()
13801                     return;
13802                 }
13803
13804                 if (tiVerificationNeeded)
13805                 {
13806                     Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
13807                            "need refany");
13808                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass).MakeByRef();
13809                 }
13810
13811                 op1 = impPopStack().val;
13812                 // make certain it is normalized;
13813                 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
13814
13815                 // Call helper GETREFANY(classHandle, op1);
13816                 args = gtNewArgList(op2, op1);
13817                 op1  = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, 0, args);
13818
13819                 impPushOnStack(op1, tiRetVal);
13820                 break;
13821
13822             case CEE_REFANYTYPE:
13823
13824                 if (tiVerificationNeeded)
13825                 {
13826                     Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
13827                            "need refany");
13828                 }
13829
13830                 op1 = impPopStack().val;
13831
13832                 // make certain it is normalized;
13833                 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
13834
13835                 if (op1->gtOper == GT_OBJ)
13836                 {
13837                     // Get the address of the refany
13838                     op1 = op1->gtOp.gtOp1;
13839
13840                     // Fetch the type from the correct slot
13841                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
13842                                         gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL));
13843                     op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1);
13844                 }
13845                 else
13846                 {
13847                     assertImp(op1->gtOper == GT_MKREFANY);
13848
13849                     // The pointer may have side-effects
13850                     if (op1->gtOp.gtOp1->gtFlags & GTF_SIDE_EFFECT)
13851                     {
13852                         impAppendTree(op1->gtOp.gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13853 #ifdef DEBUG
13854                         impNoteLastILoffs();
13855 #endif
13856                     }
13857
13858                     // We already have the class handle
13859                     op1 = op1->gtOp.gtOp2;
13860                 }
13861
13862                 // convert native TypeHandle to RuntimeTypeHandle
13863                 {
13864                     GenTreeArgList* helperArgs = gtNewArgList(op1);
13865
13866                     op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL, TYP_STRUCT, GTF_EXCEPT,
13867                                               helperArgs);
13868
13869                     // The handle struct is returned in a register
13870                     op1->gtCall.gtReturnType = TYP_REF;
13871
13872                     tiRetVal = typeInfo(TI_STRUCT, impGetTypeHandleClass());
13873                 }
13874
13875                 impPushOnStack(op1, tiRetVal);
13876                 break;
13877
13878             case CEE_LDTOKEN:
13879             {
13880                 /* Get the Class index */
13881                 assertImp(sz == sizeof(unsigned));
13882                 lastLoadToken = codeAddr;
13883                 _impResolveToken(CORINFO_TOKENKIND_Ldtoken);
13884
13885                 tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken);
13886
13887                 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
13888                 if (op1 == nullptr)
13889                 { // compDonotInline()
13890                     return;
13891                 }
13892
13893                 helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE;
13894                 assert(resolvedToken.hClass != nullptr);
13895
13896                 if (resolvedToken.hMethod != nullptr)
13897                 {
13898                     helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD;
13899                 }
13900                 else if (resolvedToken.hField != nullptr)
13901                 {
13902                     helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD;
13903                 }
13904
13905                 GenTreeArgList* helperArgs = gtNewArgList(op1);
13906
13907                 op1 = gtNewHelperCallNode(helper, TYP_STRUCT, GTF_EXCEPT, helperArgs);
13908
13909                 // The handle struct is returned in a register
13910                 op1->gtCall.gtReturnType = TYP_REF;
13911
13912                 tiRetVal = verMakeTypeInfo(tokenType);
13913                 impPushOnStack(op1, tiRetVal);
13914             }
13915             break;
13916
13917             case CEE_UNBOX:
13918             case CEE_UNBOX_ANY:
13919             {
13920                 /* Get the Class index */
13921                 assertImp(sz == sizeof(unsigned));
13922
13923                 _impResolveToken(CORINFO_TOKENKIND_Class);
13924
13925                 JITDUMP(" %08X", resolvedToken.token);
13926
13927                 BOOL runtimeLookup;
13928                 op2 = impTokenToHandle(&resolvedToken, &runtimeLookup);
13929                 if (op2 == nullptr)
13930                 { // compDonotInline()
13931                     return;
13932                 }
13933
13934                 // Run this always so we can get access exceptions even with SkipVerification.
13935                 accessAllowedResult =
13936                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13937                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13938
13939                 if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass))
13940                 {
13941                     if (tiVerificationNeeded)
13942                     {
13943                         typeInfo tiUnbox = impStackTop().seTypeInfo;
13944                         Verify(tiUnbox.IsObjRef(), "bad unbox.any arg");
13945                         tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13946                         tiRetVal.NormaliseForStack();
13947                     }
13948                     op1 = impPopStack().val;
13949                     goto CASTCLASS;
13950                 }
13951
13952                 /* Pop the object and create the unbox helper call */
13953                 /* You might think that for UNBOX_ANY we need to push a different */
13954                 /* (non-byref) type, but here we're making the tiRetVal that is used */
13955                 /* for the intermediate pointer which we then transfer onto the OBJ */
13956                 /* instruction.  OBJ then creates the appropriate tiRetVal. */
13957                 if (tiVerificationNeeded)
13958                 {
13959                     typeInfo tiUnbox = impStackTop().seTypeInfo;
13960                     Verify(tiUnbox.IsObjRef(), "Bad unbox arg");
13961
13962                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13963                     Verify(tiRetVal.IsValueClass(), "not value class");
13964                     tiRetVal.MakeByRef();
13965
13966                     // We always come from an objref, so this is safe byref
13967                     tiRetVal.SetIsPermanentHomeByRef();
13968                     tiRetVal.SetIsReadonlyByRef();
13969                 }
13970
13971                 op1 = impPopStack().val;
13972                 assertImp(op1->gtType == TYP_REF);
13973
13974                 helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass);
13975                 assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE);
13976
13977                 // We only want to expand inline the normal UNBOX helper;
13978                 expandInline = (helper == CORINFO_HELP_UNBOX);
13979
13980                 if (expandInline)
13981                 {
13982                     if (compCurBB->isRunRarely())
13983                     {
13984                         expandInline = false; // not worth the code expansion
13985                     }
13986                 }
13987
13988                 if (expandInline)
13989                 {
13990                     // we are doing normal unboxing
13991                     // inline the common case of the unbox helper
13992                     // UNBOX(exp) morphs into
13993                     // clone = pop(exp);
13994                     // ((*clone == typeToken) ? nop : helper(clone, typeToken));
13995                     // push(clone + sizeof(void*))
13996                     //
13997                     GenTreePtr cloneOperand;
13998                     op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
13999                                        nullptr DEBUGARG("inline UNBOX clone1"));
14000                     op1 = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
14001
14002                     GenTreePtr condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2);
14003
14004                     op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
14005                                        nullptr DEBUGARG("inline UNBOX clone2"));
14006                     op2 = impTokenToHandle(&resolvedToken);
14007                     if (op2 == nullptr)
14008                     { // compDonotInline()
14009                         return;
14010                     }
14011                     args = gtNewArgList(op2, op1);
14012                     op1  = gtNewHelperCallNode(helper, TYP_VOID, 0, args);
14013
14014                     op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1);
14015                     op1 = gtNewQmarkNode(TYP_VOID, condBox, op1);
14016                     condBox->gtFlags |= GTF_RELOP_QMARK;
14017
14018                     // QMARK nodes cannot reside on the evaluation stack. Because there
14019                     // may be other trees on the evaluation stack that side-effect the
14020                     // sources of the UNBOX operation we must spill the stack.
14021
14022                     impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
14023
14024                     // Create the address-expression to reference past the object header
14025                     // to the beginning of the value-type. Today this means adjusting
14026                     // past the base of the object's vtable field, which is pointer sized.
14027
14028                     op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
14029                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2);
14030                 }
14031                 else
14032                 {
14033                     unsigned callFlags = (helper == CORINFO_HELP_UNBOX) ? 0 : GTF_EXCEPT;
14034
14035                     // Don't optimize, just call the helper and be done with it
14036                     args = gtNewArgList(op2, op1);
14037                     op1  = gtNewHelperCallNode(helper,
14038                                               (var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT),
14039                                               callFlags, args);
14040                 }
14041
14042                 assert(helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF || // Unbox helper returns a byref.
14043                        helper == CORINFO_HELP_UNBOX_NULLABLE &&
14044                            varTypeIsStruct(op1) // UnboxNullable helper returns a struct.
14045                        );
14046
14047                 /*
14048                   ----------------------------------------------------------------------
14049                   | \ helper  |                         |                              |
14050                   |   \       |                         |                              |
14051                   |     \     | CORINFO_HELP_UNBOX      | CORINFO_HELP_UNBOX_NULLABLE  |
14052                   |       \   | (which returns a BYREF) | (which returns a STRUCT)     |
14053                   | opcode  \ |                         |                              |
14054                   |---------------------------------------------------------------------
14055                   | UNBOX     | push the BYREF          | spill the STRUCT to a local, |
14056                   |           |                         | push the BYREF to this local |
14057                   |---------------------------------------------------------------------
14058                   | UNBOX_ANY | push a GT_OBJ of        | push the STRUCT              |
14059                   |           | the BYREF               | For Linux when the           |
14060                   |           |                         |  struct is returned in two   |
14061                   |           |                         |  registers create a temp     |
14062                   |           |                         |  which address is passed to  |
14063                   |           |                         |  the unbox_nullable helper.  |
14064                   |---------------------------------------------------------------------
14065                 */
14066
14067                 if (opcode == CEE_UNBOX)
14068                 {
14069                     if (helper == CORINFO_HELP_UNBOX_NULLABLE)
14070                     {
14071                         // Unbox nullable helper returns a struct type.
14072                         // We need to spill it to a temp so that we can take its address.
14073                         // Here we need the unsafe value cls check, since the address of the struct is taken to be used
14074                         // further along and may potentially be exploitable.
14075
14076                         unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable"));
14077                         lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14078
14079                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14080                         op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14081                         assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14082
14083                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14084                         op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14085                         op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14086                     }
14087
14088                     assert(op1->gtType == TYP_BYREF);
14089                     assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14090                 }
14091                 else
14092                 {
14093                     assert(opcode == CEE_UNBOX_ANY);
14094
14095                     if (helper == CORINFO_HELP_UNBOX)
14096                     {
14097                         // Normal unbox helper returns a TYP_BYREF.
14098                         impPushOnStack(op1, tiRetVal);
14099                         oper = GT_OBJ;
14100                         goto OBJ;
14101                     }
14102
14103                     assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!");
14104
14105 #if FEATURE_MULTIREG_RET
14106
14107                     if (varTypeIsStruct(op1) && IsMultiRegReturnedType(resolvedToken.hClass))
14108                     {
14109                         // Unbox nullable helper returns a TYP_STRUCT.
14110                         // For the multi-reg case we need to spill it to a temp so that
14111                         // we can pass the address to the unbox_nullable jit helper.
14112
14113                         unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable"));
14114                         lvaTable[tmp].lvIsMultiRegArg = true;
14115                         lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14116
14117                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14118                         op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14119                         assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14120
14121                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14122                         op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14123                         op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14124
14125                         // In this case the return value of the unbox helper is TYP_BYREF.
14126                         // Make sure the right type is placed on the operand type stack.
14127                         impPushOnStack(op1, tiRetVal);
14128
14129                         // Load the struct.
14130                         oper = GT_OBJ;
14131
14132                         assert(op1->gtType == TYP_BYREF);
14133                         assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14134
14135                         goto OBJ;
14136                     }
14137                     else
14138
14139 #endif // FEATURE_MULTIREG_RET
14140
14141                     {
14142                         // If the struct is not returned in registers, it has been materialized in the RetBuf.
14143                         assert(op1->gtType == TYP_STRUCT);
14144                         tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14145                         assert(tiRetVal.IsValueClass());
14146                     }
14147                 }
14148
14149                 impPushOnStack(op1, tiRetVal);
14150             }
14151             break;
14152
14153             case CEE_BOX:
14154             {
14155                 /* Get the Class index */
14156                 assertImp(sz == sizeof(unsigned));
14157
14158                 _impResolveToken(CORINFO_TOKENKIND_Box);
14159
14160                 JITDUMP(" %08X", resolvedToken.token);
14161
14162                 if (tiVerificationNeeded)
14163                 {
14164                     typeInfo tiActual = impStackTop().seTypeInfo;
14165                     typeInfo tiBox    = verMakeTypeInfo(resolvedToken.hClass);
14166
14167                     Verify(verIsBoxable(tiBox), "boxable type expected");
14168
14169                     // check the class constraints of the boxed type in case we are boxing an uninitialized value
14170                     Verify(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
14171                            "boxed type has unsatisfied class constraints");
14172
14173                     Verify(tiCompatibleWith(tiActual, tiBox.NormaliseForStack(), true), "type mismatch");
14174
14175                     // Observation: the following code introduces a boxed value class on the stack, but,
14176                     // according to the ECMA spec, one would simply expect: tiRetVal =
14177                     // typeInfo(TI_REF,impGetObjectClass());
14178
14179                     // Push the result back on the stack;
14180                     // even if clsHnd is a value class we want the TI_REF.
14181                     // We call back to the EE to find out what type we should push (for Nullable<T> we push T).
14182                     tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(resolvedToken.hClass));
14183                 }
14184
14185                 accessAllowedResult =
14186                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14187                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14188
14189                 // Note BOX can be used on things that are not value classes, in which
14190                 // case we get a NOP.  However the verifier's view of the type on the
14191                 // stack changes (in generic code a 'T' becomes a 'boxed T')
14192                 if (!eeIsValueClass(resolvedToken.hClass))
14193                 {
14194                     verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal;
14195                     break;
14196                 }
14197
14198                 // Look ahead for unbox.any
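                      // e.g. the IL sequence "box !!T; unbox.any !!T" (common in generic code) round-trips the
                      // value, so when the type is not a shared instantiation we can leave the value on the stack
                      // and skip both the box and the following unbox.any.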
14199                 if (codeAddr + (sz + 1 + sizeof(mdToken)) <= codeEndp && codeAddr[sz] == CEE_UNBOX_ANY)
14200                 {
14201                     DWORD classAttribs = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
14202                     if (!(classAttribs & CORINFO_FLG_SHAREDINST))
14203                     {
14204                         CORINFO_RESOLVED_TOKEN unboxResolvedToken;
14205
14206                         impResolveToken(codeAddr + (sz + 1), &unboxResolvedToken, CORINFO_TOKENKIND_Class);
14207
14208                         if (unboxResolvedToken.hClass == resolvedToken.hClass)
14209                         {
14210                             // Skip the next unbox.any instruction
14211                             sz += sizeof(mdToken) + 1;
14212                             break;
14213                         }
14214                     }
14215                 }
14216
14217                 impImportAndPushBox(&resolvedToken);
14218                 if (compDonotInline())
14219                 {
14220                     return;
14221                 }
14222             }
14223             break;
14224
14225             case CEE_SIZEOF:
14226
14227                 /* Get the Class index */
14228                 assertImp(sz == sizeof(unsigned));
14229
14230                 _impResolveToken(CORINFO_TOKENKIND_Class);
14231
14232                 JITDUMP(" %08X", resolvedToken.token);
14233
14234                 if (tiVerificationNeeded)
14235                 {
14236                     tiRetVal = typeInfo(TI_INT);
14237                 }
14238
14239                 op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass));
14240                 impPushOnStack(op1, tiRetVal);
14241                 break;
14242
14243             case CEE_CASTCLASS:
14244
14245                 /* Get the Class index */
14246
14247                 assertImp(sz == sizeof(unsigned));
14248
14249                 _impResolveToken(CORINFO_TOKENKIND_Casting);
14250
14251                 JITDUMP(" %08X", resolvedToken.token);
14252
14253                 if (!opts.IsReadyToRun())
14254                 {
14255                     op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14256                     if (op2 == nullptr)
14257                     { // compDonotInline()
14258                         return;
14259                     }
14260                 }
14261
14262                 if (tiVerificationNeeded)
14263                 {
14264                     Verify(impStackTop().seTypeInfo.IsObjRef(), "object ref expected");
14265                     // box it
14266                     tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
14267                 }
14268
14269                 accessAllowedResult =
14270                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14271                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14272
14273                 op1 = impPopStack().val;
14274
14275             /* Pop the address and create the 'checked cast' helper call */
14276
14277             // At this point we expect typeRef to contain the token, op1 to contain the value being cast,
14278             // and op2 to contain code that creates the type handle corresponding to typeRef
14279             CASTCLASS:
14280
14281 #ifdef FEATURE_READYTORUN_COMPILER
14282                 if (opts.IsReadyToRun())
14283                 {
14284                     GenTreePtr opLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST,
14285                                                                     TYP_REF, gtNewArgList(op1));
14286                     usingReadyToRunHelper = (opLookup != nullptr);
14287                     op1                   = (usingReadyToRunHelper ? opLookup : op1);
14288
14289                     if (!usingReadyToRunHelper)
14290                     {
14291                         // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14292                         // and the chkcastany call with a single call to a dynamic R2R cell that will:
14293                         //      1) Load the context
14294                         //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
14295                         //      3) Check the object on the stack for the type-cast
14296                         // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14297
14298                         op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14299                         if (op2 == nullptr)
14300                         { // compDonotInline()
14301                             return;
14302                         }
14303                     }
14304                 }
14305
14306                 if (!usingReadyToRunHelper)
14307 #endif
14308                 {
14309                     op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true);
14310                 }
14311                 if (compDonotInline())
14312                 {
14313                     return;
14314                 }
14315
14316                 /* Push the result back on the stack */
14317                 impPushOnStack(op1, tiRetVal);
14318                 break;
14319
14320             case CEE_THROW:
14321
14322                 if (compIsForInlining())
14323                 {
14324                     // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
14325                     // TODO: Will this be too strict, given that we will inline many basic blocks?
14326                     // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
14327
14328                     /* Do we have just the exception on the stack? */
14329
14330                     if (verCurrentState.esStackDepth != 1)
14331                     {
14332                         /* if not, just don't inline the method */
14333
14334                         compInlineResult->NoteFatal(InlineObservation::CALLEE_THROW_WITH_INVALID_STACK);
14335                         return;
14336                     }
14337                 }
14338
14339                 if (tiVerificationNeeded)
14340                 {
14341                     tiRetVal = impStackTop().seTypeInfo;
14342                     Verify(tiRetVal.IsObjRef(), "object ref expected");
14343                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
14344                     {
14345                         Verify(!tiRetVal.IsThisPtr(), "throw uninitialized this");
14346                     }
14347                 }
14348
14349                 block->bbSetRunRarely(); // any block with a throw is rare
14350                 /* Pop the exception object and create the 'throw' helper call */
14351
14352                 op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, GTF_EXCEPT, gtNewArgList(impPopStack().val));
14353
14354             EVAL_APPEND:
14355                 if (verCurrentState.esStackDepth > 0)
14356                 {
14357                     impEvalSideEffects();
14358                 }
14359
14360                 assert(verCurrentState.esStackDepth == 0);
14361
14362                 goto APPEND;
14363
14364             case CEE_RETHROW:
14365
14366                 assert(!compIsForInlining());
14367
14368                 if (info.compXcptnsCount == 0)
14369                 {
14370                     BADCODE("rethrow outside catch");
14371                 }
14372
14373                 if (tiVerificationNeeded)
14374                 {
14375                     Verify(block->hasHndIndex(), "rethrow outside catch");
14376                     if (block->hasHndIndex())
14377                     {
14378                         EHblkDsc* HBtab = ehGetDsc(block->getHndIndex());
14379                         Verify(!HBtab->HasFinallyOrFaultHandler(), "rethrow in finally or fault");
14380                         if (HBtab->HasFilter())
14381                         {
14382                             // we better be in the handler clause part, not the filter part
14383                             Verify(jitIsBetween(compCurBB->bbCodeOffs, HBtab->ebdHndBegOffs(), HBtab->ebdHndEndOffs()),
14384                                    "rethrow in filter");
14385                         }
14386                     }
14387                 }
14388
14389                 /* Create the 'rethrow' helper call */
14390
14391                 op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID, GTF_EXCEPT);
14392
14393                 goto EVAL_APPEND;
14394
14395             case CEE_INITOBJ:
14396
14397                 assertImp(sz == sizeof(unsigned));
14398
14399                 _impResolveToken(CORINFO_TOKENKIND_Class);
14400
14401                 JITDUMP(" %08X", resolvedToken.token);
14402
14403                 if (tiVerificationNeeded)
14404                 {
14405                     typeInfo tiTo    = impStackTop().seTypeInfo;
14406                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14407
14408                     Verify(tiTo.IsByRef(), "byref expected");
14409                     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
14410
14411                     Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
14412                            "type operand incompatible with type of address");
14413                 }
14414
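                      // initobj is modeled as a block store of zero over the value type at the target address.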
14415                 size = info.compCompHnd->getClassSize(resolvedToken.hClass); // Size
14416                 op2  = gtNewIconNode(0);                                     // Value
14417                 op1  = impPopStack().val;                                    // Dest
14418                 op1  = gtNewBlockVal(op1, size);
14419                 op1  = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
14420                 goto SPILL_APPEND;
14421
14422             case CEE_INITBLK:
14423
14424                 if (tiVerificationNeeded)
14425                 {
14426                     Verify(false, "bad opcode");
14427                 }
14428
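                      // initblk pops (size, value, dest); with a constant size we can use a fixed-size GT_BLK,
                      // otherwise fall back to a GT_DYN_BLK node.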
14429                 op3 = impPopStack().val; // Size
14430                 op2 = impPopStack().val; // Value
14431                 op1 = impPopStack().val; // Dest
14432
14433                 if (op3->IsCnsIntOrI())
14434                 {
14435                     size = (unsigned)op3->AsIntConCommon()->IconValue();
14436                     op1  = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
14437                 }
14438                 else
14439                 {
14440                     op1  = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
14441                     size = 0;
14442                 }
14443                 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
14444
14445                 goto SPILL_APPEND;
14446
14447             case CEE_CPBLK:
14448
14449                 if (tiVerificationNeeded)
14450                 {
14451                     Verify(false, "bad opcode");
14452                 }
14453                 op3 = impPopStack().val; // Size
14454                 op2 = impPopStack().val; // Src
14455                 op1 = impPopStack().val; // Dest
14456
14457                 if (op3->IsCnsIntOrI())
14458                 {
14459                     size = (unsigned)op3->AsIntConCommon()->IconValue();
14460                     op1  = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
14461                 }
14462                 else
14463                 {
14464                     op1  = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
14465                     size = 0;
14466                 }
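                      // If the source address is an address-of node, copy directly from the underlying location;
                      // otherwise read the source through a struct indirection.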
14467                 if (op2->OperGet() == GT_ADDR)
14468                 {
14469                     op2 = op2->gtOp.gtOp1;
14470                 }
14471                 else
14472                 {
14473                     op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2);
14474                 }
14475
14476                 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, true);
14477                 goto SPILL_APPEND;
14478
14479             case CEE_CPOBJ:
14480
14481                 assertImp(sz == sizeof(unsigned));
14482
14483                 _impResolveToken(CORINFO_TOKENKIND_Class);
14484
14485                 JITDUMP(" %08X", resolvedToken.token);
14486
14487                 if (tiVerificationNeeded)
14488                 {
14489                     typeInfo tiFrom  = impStackTop().seTypeInfo;
14490                     typeInfo tiTo    = impStackTop(1).seTypeInfo;
14491                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14492
14493                     Verify(tiFrom.IsByRef(), "expected byref source");
14494                     Verify(tiTo.IsByRef(), "expected byref destination");
14495
14496                     Verify(tiCompatibleWith(tiFrom.DereferenceByRef(), tiInstr, false),
14497                            "type of source address incompatible with type operand");
14498                     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
14499                     Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
14500                            "type operand incompatible with type of destination address");
14501                 }
14502
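                      // A cpobj of a reference type is just a copy of the object reference: load the source
                      // reference and handle it like a stind.ref.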
14503                 if (!eeIsValueClass(resolvedToken.hClass))
14504                 {
14505                     op1 = impPopStack().val; // address to load from
14506
14507                     impBashVarAddrsToI(op1);
14508
14509                     assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
14510
14511                     op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
14512                     op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
14513
14514                     impPushOnStackNoType(op1);
14515                     opcode = CEE_STIND_REF;
14516                     lclTyp = TYP_REF;
14517                     goto STIND_POST_VERIFY;
14518                 }
14519
14520                 op2 = impPopStack().val; // Src
14521                 op1 = impPopStack().val; // Dest
14522                 op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0));
14523                 goto SPILL_APPEND;
14524
14525             case CEE_STOBJ:
14526             {
14527                 assertImp(sz == sizeof(unsigned));
14528
14529                 _impResolveToken(CORINFO_TOKENKIND_Class);
14530
14531                 JITDUMP(" %08X", resolvedToken.token);
14532
14533                 if (eeIsValueClass(resolvedToken.hClass))
14534                 {
14535                     lclTyp = TYP_STRUCT;
14536                 }
14537                 else
14538                 {
14539                     lclTyp = TYP_REF;
14540                 }
14541
14542                 if (tiVerificationNeeded)
14543                 {
14544
14545                     typeInfo tiPtr = impStackTop(1).seTypeInfo;
14546
14547                     // Make sure we have a good looking byref
14548                     Verify(tiPtr.IsByRef(), "pointer not byref");
14549                     Verify(!tiPtr.IsReadonlyByRef(), "write to readonly byref");
14550                     if (!tiPtr.IsByRef() || tiPtr.IsReadonlyByRef())
14551                     {
14552                         compUnsafeCastUsed = true;
14553                     }
14554
14555                     typeInfo ptrVal = DereferenceByRef(tiPtr);
14556                     typeInfo argVal = verMakeTypeInfo(resolvedToken.hClass);
14557
14558                     if (!tiCompatibleWith(impStackTop(0).seTypeInfo, NormaliseForStack(argVal), true))
14559                     {
14560                         Verify(false, "type of value incompatible with type operand");
14561                         compUnsafeCastUsed = true;
14562                     }
14563
14564                     if (!tiCompatibleWith(argVal, ptrVal, false))
14565                     {
14566                         Verify(false, "type operand incompatible with type of address");
14567                         compUnsafeCastUsed = true;
14568                     }
14569                 }
14570                 else
14571                 {
14572                     compUnsafeCastUsed = true;
14573                 }
14574
14575                 if (lclTyp == TYP_REF)
14576                 {
14577                     opcode = CEE_STIND_REF;
14578                     goto STIND_POST_VERIFY;
14579                 }
14580
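                      // A stobj of a primitive value class reduces to an ordinary indirect store of that primitive type.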
14581                 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
14582                 if (impIsPrimitive(jitTyp))
14583                 {
14584                     lclTyp = JITtype2varType(jitTyp);
14585                     goto STIND_POST_VERIFY;
14586                 }
14587
14588                 op2 = impPopStack().val; // Value
14589                 op1 = impPopStack().val; // Ptr
14590
14591                 assertImp(varTypeIsStruct(op2));
14592
14593                 op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14594                 goto SPILL_APPEND;
14595             }
14596
14597             case CEE_MKREFANY:
14598
14599                 assert(!compIsForInlining());
14600
14601                 // Being lazy here. Refanys are tricky in terms of gc tracking.
14602                 // Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany.
14603
14604                 JITDUMP("disabling struct promotion because of mkrefany\n");
14605                 fgNoStructPromotion = true;
14606
14607                 oper = GT_MKREFANY;
14608                 assertImp(sz == sizeof(unsigned));
14609
14610                 _impResolveToken(CORINFO_TOKENKIND_Class);
14611
14612                 JITDUMP(" %08X", resolvedToken.token);
14613
14614                 op2 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
14615                 if (op2 == nullptr)
14616                 { // compDonotInline()
14617                     return;
14618                 }
14619
14620                 if (tiVerificationNeeded)
14621                 {
14622                     typeInfo tiPtr   = impStackTop().seTypeInfo;
14623                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14624
14625                     Verify(!verIsByRefLike(tiInstr), "mkrefany of byref-like class");
14626                     Verify(!tiPtr.IsReadonlyByRef(), "readonly byref used with mkrefany");
14627                     Verify(typeInfo::AreEquivalent(tiPtr.DereferenceByRef(), tiInstr), "type mismatch");
14628                 }
14629
14630                 accessAllowedResult =
14631                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14632                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14633
14634                 op1 = impPopStack().val;
14635
14636                 // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec.
14637                 // But JIT32 allowed it, so we continue to allow it.
14638                 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT);
14639
14640                 // MKREFANY returns a struct.  op2 is the class token.
14641                 op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2);
14642
14643                 impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass()));
14644                 break;
14645
14646             case CEE_LDOBJ:
14647             {
14648                 oper = GT_OBJ;
14649                 assertImp(sz == sizeof(unsigned));
14650
14651                 _impResolveToken(CORINFO_TOKENKIND_Class);
14652
14653                 JITDUMP(" %08X", resolvedToken.token);
14654
14655             OBJ:
14656
14657                 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14658
14659                 if (tiVerificationNeeded)
14660                 {
14661                     typeInfo tiPtr = impStackTop().seTypeInfo;
14662
14663                     // Make sure we have a byref
14664                     if (!tiPtr.IsByRef())
14665                     {
14666                         Verify(false, "pointer not byref");
14667                         compUnsafeCastUsed = true;
14668                     }
14669                     typeInfo tiPtrVal = DereferenceByRef(tiPtr);
14670
14671                     if (!tiCompatibleWith(tiPtrVal, tiRetVal, false))
14672                     {
14673                         Verify(false, "type of address incompatible with type operand");
14674                         compUnsafeCastUsed = true;
14675                     }
14676                     tiRetVal.NormaliseForStack();
14677                 }
14678                 else
14679                 {
14680                     compUnsafeCastUsed = true;
14681                 }
14682
14683                 if (eeIsValueClass(resolvedToken.hClass))
14684                 {
14685                     lclTyp = TYP_STRUCT;
14686                 }
14687                 else
14688                 {
14689                     lclTyp = TYP_REF;
14690                     opcode = CEE_LDIND_REF;
14691                     goto LDIND_POST_VERIFY;
14692                 }
14693
14694                 op1 = impPopStack().val;
14695
14696                 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL);
14697
14698                 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
14699                 if (impIsPrimitive(jitTyp))
14700                 {
14701                     op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1);
14702
14703                     // Could point anywhere, for example a boxed class static int
14704                     op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF;
14705                     assertImp(varTypeIsArithmetic(op1->gtType));
14706                 }
14707                 else
14708                 {
14709                     // OBJ returns a struct; the opcode also takes an inline argument,
14710                     // which is the class token of the loaded obj
14711                     op1 = gtNewObjNode(resolvedToken.hClass, op1);
14712                 }
14713                 op1->gtFlags |= GTF_EXCEPT;
14714
14715                 impPushOnStack(op1, tiRetVal);
14716                 break;
14717             }
14718
14719             case CEE_LDLEN:
14720                 if (tiVerificationNeeded)
14721                 {
14722                     typeInfo tiArray = impStackTop().seTypeInfo;
14723                     Verify(verIsSDArray(tiArray), "bad array");
14724                     tiRetVal = typeInfo(TI_INT);
14725                 }
14726
14727                 op1 = impPopStack().val;
14728                 if (!opts.MinOpts() && !opts.compDbgCode)
14729                 {
14730                     /* Use GT_ARR_LENGTH operator so rng check opts see this */
14731                     GenTreeArrLen* arrLen =
14732                         new (this, GT_ARR_LENGTH) GenTreeArrLen(TYP_INT, op1, offsetof(CORINFO_Array, length));
14733
14734                     /* Mark the block as containing a length expression */
14735
14736                     if (op1->gtOper == GT_LCL_VAR)
14737                     {
14738                         block->bbFlags |= BBF_HAS_IDX_LEN;
14739                     }
14740
14741                     op1 = arrLen;
14742                 }
14743                 else
14744                 {
14745                     /* Create the expression "*(array_addr + ArrLenOffs)" */
14746                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
14747                                         gtNewIconNode(offsetof(CORINFO_Array, length), TYP_I_IMPL));
14748                     op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
14749                     op1->gtFlags |= GTF_IND_ARR_LEN;
14750                 }
14751
14752                 /* An indirection will cause a GPF if the address is null */
14753                 op1->gtFlags |= GTF_EXCEPT;
14754
14755                 /* Push the result back on the stack */
14756                 impPushOnStack(op1, tiRetVal);
14757                 break;
14758
14759             case CEE_BREAK:
14760                 op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID);
14761                 goto SPILL_APPEND;
14762
14763             case CEE_NOP:
14764                 if (opts.compDbgCode)
14765                 {
14766                     op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
14767                     goto SPILL_APPEND;
14768                 }
14769                 break;
14770
14771             /******************************** NYI *******************************/
14772
14773             case 0xCC:
14774                 OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n");
14775
14776             case CEE_ILLEGAL:
14777             case CEE_MACRO_END:
14778
14779             default:
14780                 BADCODE3("unknown opcode", ": %02X", (int)opcode);
14781         }
14782
14783         codeAddr += sz;
14784         prevOpcode = opcode;
14785
14786         prefixFlags = 0;
14787         assert(!insertLdloc || opcode == CEE_DUP);
14788     }
14789
14790     assert(!insertLdloc);
14791
14792     return;
14793 #undef _impResolveToken
14794 }
14795 #ifdef _PREFAST_
14796 #pragma warning(pop)
14797 #endif
14798
14799 // Push a local/argument tree on the operand stack
14800 void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal)
14801 {
14802     tiRetVal.NormaliseForStack();
14803
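          // During constructor verification, a 'this' that is not yet initialized is pushed as an
          // uninitialized object ref.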
14804     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr())
14805     {
14806         tiRetVal.SetUninitialisedObjRef();
14807     }
14808
14809     impPushOnStack(op, tiRetVal);
14810 }
14811
14812 // Load a local/argument on the operand stack
14813 // lclNum is an index into lvaTable *NOT* the arg/lcl index in the IL
14814 void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, typeInfo tiRetVal)
14815 {
14816     var_types lclTyp;
14817
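          // Normalize-on-load locals are read with their exact (small) type; all others use the
          // stack-normalized actual type.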
14818     if (lvaTable[lclNum].lvNormalizeOnLoad())
14819     {
14820         lclTyp = lvaGetRealType(lclNum);
14821     }
14822     else
14823     {
14824         lclTyp = lvaGetActualType(lclNum);
14825     }
14826
14827     impPushVar(gtNewLclvNode(lclNum, lclTyp, offset), tiRetVal);
14828 }
14829
14830 // Load an argument on the operand stack
14831 // Shared by the various CEE_LDARG opcodes
14832 // ilArgNum is the argument index as specified in IL.
14833 // It will be mapped to the correct lvaTable index
14834 void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset)
14835 {
14836     Verify(ilArgNum < info.compILargsCount, "bad arg num");
14837
14838     if (compIsForInlining())
14839     {
14840         if (ilArgNum >= info.compArgsCount)
14841         {
14842             compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER);
14843             return;
14844         }
14845
14846         impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo),
14847                    impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo);
14848     }
14849     else
14850     {
14851         if (ilArgNum >= info.compArgsCount)
14852         {
14853             BADCODE("Bad IL");
14854         }
14855
14856         unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param
14857
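              // Reads of 'this' go through lvaArg0Var, which may be a shadow copy of the incoming 'this' argument.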
14858         if (lclNum == info.compThisArg)
14859         {
14860             lclNum = lvaArg0Var;
14861         }
14862
14863         impLoadVar(lclNum, offset);
14864     }
14865 }
14866
14867 // Load a local on the operand stack
14868 // Shared by the various CEE_LDLOC opcodes
14869 // ilLclNum is the local index as specified in IL.
14870 // It will be mapped to the correct lvaTable index
14871 void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset)
14872 {
14873     if (tiVerificationNeeded)
14874     {
14875         Verify(ilLclNum < info.compMethodInfo->locals.numArgs, "bad loc num");
14876         Verify(info.compInitMem, "initLocals not set");
14877     }
14878
14879     if (compIsForInlining())
14880     {
14881         if (ilLclNum >= info.compMethodInfo->locals.numArgs)
14882         {
14883             compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER);
14884             return;
14885         }
14886
14887         // Get the local type
14888         var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo;
14889
14890         typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo;
14891
14892         /* Have we allocated a temp for this local? */
14893
14894         unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp"));
14895
14896         // All vars of inlined methods should be !lvNormalizeOnLoad()
14897
14898         assert(!lvaTable[lclNum].lvNormalizeOnLoad());
14899         lclTyp = genActualType(lclTyp);
14900
14901         impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal);
14902     }
14903     else
14904     {
14905         if (ilLclNum >= info.compMethodInfo->locals.numArgs)
14906         {
14907             BADCODE("Bad IL");
14908         }
14909
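              // IL locals are laid out after the arguments in lvaTable, so offset the IL local index by the
              // argument count.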
14910         unsigned lclNum = info.compArgsCount + ilLclNum;
14911
14912         impLoadVar(lclNum, offset);
14913     }
14914 }
14915
14916 #ifdef _TARGET_ARM_
14917 /**************************************************************************************
14918  *
14919  *  When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the
14920  *  dst struct, because struct promotion will turn it into a float/double variable while
14921  *  the rhs will be an int/long variable. We don't generate code for assigning an int into
14922  *  a float, but there is nothing that prevents such a tree from being formed. The tree,
14923  *  however, would look like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int))
14924  *
14925  *  tmpNum - the lcl dst variable num that is a struct.
14926  *  src    - the src tree assigned to the dest that is a struct/int (when varargs call.)
14927  *  hClass - the type handle for the struct variable.
14928  *
14929  *  TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play,
14930  *        however, we could do a codegen of transferring from int to float registers
14931  *        (transfer, not a cast.)
14932  *
14933  */
14934 void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTreePtr src, CORINFO_CLASS_HANDLE hClass)
14935 {
14936     if (src->gtOper == GT_CALL && src->gtCall.IsVarargs() && IsHfa(hClass))
14937     {
14938         int       hfaSlots = GetHfaCount(hClass);
14939         var_types hfaType  = GetHfaType(hClass);
14940
14941         // If we have varargs, we morph the method's return type to be "int" at the importer, irrespective
14942         // of its original type (struct/float), because the ABI calls for the return to be in integer registers.
14943         // We don't want struct promotion to replace an expression like this:
14944         //   lclFld_int = callvar_int() into lclFld_float = callvar_int();
14945         // This means an int is getting assigned to a float without a cast. Prevent the promotion.
14946         if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) ||
14947             (hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES))
14948         {
14949             // Make sure this struct type stays as struct so we can receive the call in a struct.
14950             lvaTable[tmpNum].lvIsMultiRegRet = true;
14951         }
14952     }
14953 }
14954 #endif // _TARGET_ARM_
14955
14956 #if FEATURE_MULTIREG_RET
14957 GenTreePtr Compiler::impAssignMultiRegTypeToVar(GenTreePtr op, CORINFO_CLASS_HANDLE hClass)
14958 {
14959     unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return."));
14960     impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_NONE);
14961     GenTreePtr ret = gtNewLclvNode(tmpNum, op->gtType);
14962
14963     // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
14964     ret->gtFlags |= GTF_DONT_CSE;
14965
14966     assert(IsMultiRegReturnedType(hClass));
14967
14968     // Mark the var so that fields are not promoted and stay together.
14969     lvaTable[tmpNum].lvIsMultiRegRet = true;
14970
14971     return ret;
14972 }
14973 #endif // FEATURE_MULTIREG_RET
14974
14975 // do import for a return
14976 // returns false if inlining was aborted
14977 // opcode can be ret or call in the case of a tail.call
14978 bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE& opcode)
14979 {
14980     if (tiVerificationNeeded)
14981     {
14982         verVerifyThisPtrInitialised();
14983
14984         unsigned expectedStack = 0;
14985         if (info.compRetType != TYP_VOID)
14986         {
14987             typeInfo tiVal = impStackTop().seTypeInfo;
14988             typeInfo tiDeclared =
14989                 verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
14990
14991             Verify(!verIsByRefLike(tiDeclared) || verIsSafeToReturnByRef(tiVal), "byref return");
14992
14993             Verify(tiCompatibleWith(tiVal, tiDeclared.NormaliseForStack(), true), "type mismatch");
14994             expectedStack = 1;
14995         }
14996         Verify(verCurrentState.esStackDepth == expectedStack, "stack non-empty on return");
14997     }
14998
14999     GenTree*             op2       = nullptr;
15000     GenTree*             op1       = nullptr;
15001     CORINFO_CLASS_HANDLE retClsHnd = nullptr;
15002
15003     if (info.compRetType != TYP_VOID)
15004     {
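              // Pop the return value; retClsHnd is filled in with the class handle when a struct is returned.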
15005         StackEntry se = impPopStack(retClsHnd);
15006         op2           = se.val;
15007
15008         if (!compIsForInlining())
15009         {
15010             impBashVarAddrsToI(op2);
15011             op2 = impImplicitIorI4Cast(op2, info.compRetType);
15012             op2 = impImplicitR4orR8Cast(op2, info.compRetType);
15013             assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) ||
15014                       ((op2->TypeGet() == TYP_I_IMPL) && (info.compRetType == TYP_BYREF)) ||
15015                       ((op2->TypeGet() == TYP_BYREF) && (info.compRetType == TYP_I_IMPL)) ||
15016                       (varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) ||
15017                       (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType)));
15018
15019 #ifdef DEBUG
15020             if (opts.compGcChecks && info.compRetType == TYP_REF)
15021             {
15022                 // DDB 3483  : JIT Stress: early termination of GC ref's life time in exception code path
15023                 // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with
15024                 // one-return BB.
15025
15026                 assert(op2->gtType == TYP_REF);
15027
15028                 // confirm that the argument is a GC pointer (for debugging (GC stress))
15029                 GenTreeArgList* args = gtNewArgList(op2);
15030                 op2                  = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, 0, args);
15031
15032                 if (verbose)
15033                 {
15034                     printf("\ncompGcChecks tree:\n");
15035                     gtDispTree(op2);
15036                 }
15037             }
15038 #endif
15039         }
15040         else
15041         {
15042             // inlinee's stack should be empty now.
15043             assert(verCurrentState.esStackDepth == 0);
15044
15045 #ifdef DEBUG
15046             if (verbose)
15047             {
15048                 printf("\n\n    Inlinee Return expression (before normalization)  =>\n");
15049                 gtDispTree(op2);
15050             }
15051 #endif
15052
15053             // Make sure the type matches the original call.
15054
15055             var_types returnType       = genActualType(op2->gtType);
15056             var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType;
15057             if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT))
15058             {
15059                 originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass);
15060             }
15061
15062             if (returnType != originalCallType)
15063             {
15064                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH);
15065                 return false;
15066             }
15067
15068             // Below, we are going to set impInlineInfo->retExpr to the tree with the return
15069             // expression. At this point, retExpr could already be set if there are multiple
15070             // return blocks (meaning lvaInlineeReturnSpillTemp != BAD_VAR_NUM) and one of
15071             // the other blocks already set it. If there is only a single return block,
15072             // retExpr shouldn't be set. However, this is not true if we reimport a block
15073             // with a return. In that case, retExpr will be set, then the block will be
15074             // reimported, but retExpr won't get cleared as part of setting the block to
15075             // be reimported. The reimported retExpr value should be the same, so even if
15076             // we don't unconditionally overwrite it, it shouldn't matter.
15077             if (info.compRetNativeType != TYP_STRUCT)
15078             {
15079                 // compRetNativeType is not TYP_STRUCT.
15080                 // This implies it could be either a scalar type or SIMD vector type or
15081                 // a struct type that can be normalized to a scalar type.
15082
15083                 if (varTypeIsStruct(info.compRetType))
15084                 {
15085                     noway_assert(info.compRetBuffArg == BAD_VAR_NUM);
15086                     // adjust the type away from struct to integral
15087                     // and no normalizing
15088                     op2 = impFixupStructReturnType(op2, retClsHnd);
15089                 }
15090                 else
15091                 {
15092                     // Do we have to normalize?
15093                     var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType);
15094                     if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) &&
15095                         fgCastNeeded(op2, fncRealRetType))
15096                     {
15097                         // Small-typed return values are normalized by the callee
15098                         op2 = gtNewCastNode(TYP_INT, op2, fncRealRetType);
15099                     }
15100                 }
15101
15102                 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15103                 {
15104                     assert(info.compRetNativeType != TYP_VOID &&
15105                            (fgMoreThanOneReturnBlock() || impInlineInfo->hasPinnedLocals));
15106
15107                     // This is a bit of a workaround...
15108                     // If we are inlining a call that returns a struct, where the actual "native" return type is
15109                     // not a struct (for example, the struct is composed of exactly one int, and the native
15110                     // return type is thus an int), and the inlinee has multiple return blocks (thus,
15111                     // lvaInlineeReturnSpillTemp is != BAD_VAR_NUM, and is the index of a local var that is set
15112                     // to the *native* return type), and at least one of the return blocks is the result of
15113                     // a call, then we have a problem. The situation is like this (from a failed test case):
15114                     //
15115                     // inliner:
15116                     //      // Note: valuetype plinq_devtests.LazyTests/LIX is a struct with only a single int
15117                     //      call !!0 [mscorlib]System.Threading.LazyInitializer::EnsureInitialized<valuetype
15118                     //      plinq_devtests.LazyTests/LIX>(!!0&, bool&, object&, class [mscorlib]System.Func`1<!!0>)
15119                     //
15120                     // inlinee:
15121                     //      ...
15122                     //      ldobj      !!T                 // this gets bashed to a GT_LCL_FLD, type TYP_INT
15123                     //      ret
15124                     //      ...
15125                     //      call       !!0 System.Threading.LazyInitializer::EnsureInitializedCore<!!0>(!!0&, bool&,
15126                     //      object&, class System.Func`1<!!0>)
15127                     //      ret
15128                     //
15129                     // In the code above, when we call impFixupStructReturnType(), we will change the op2 return type
15130                     // of the inlinee return node, but we don't do that for GT_CALL nodes, which we delay until
15131                     // morphing when we call fgFixupStructReturn(). We do this, apparently, to handle nested
15132                     // inlining properly by leaving the correct type on the GT_CALL node through importing.
15133                     //
15134                     // To fix this, for this case, we temporarily change the GT_CALL node type to the
15135                     // native return type, which is what it will be set to eventually. We generate the
15136                     // assignment to the return temp, using the correct type, and then restore the GT_CALL
15137                     // node type. During morphing, the GT_CALL will get the correct, final, native return type.
15138
15139                     bool restoreType = false;
15140                     if ((op2->OperGet() == GT_CALL) && (info.compRetType == TYP_STRUCT))
15141                     {
15142                         noway_assert(op2->TypeGet() == TYP_STRUCT);
15143                         op2->gtType = info.compRetNativeType;
15144                         restoreType = true;
15145                     }
15146
15147                     impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15148                                      (unsigned)CHECK_SPILL_ALL);
15149
15150                     GenTreePtr tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, op2->TypeGet());
15151
15152                     if (restoreType)
15153                     {
15154                         op2->gtType = TYP_STRUCT; // restore it to what it was
15155                     }
15156
15157                     op2 = tmpOp2;
15158
15159 #ifdef DEBUG
15160                     if (impInlineInfo->retExpr)
15161                     {
15162                         // Some other block(s) have seen the CEE_RET first.
15163                         // Better they spilled to the same temp.
15164                         assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR);
15165                         assert(impInlineInfo->retExpr->gtLclVarCommon.gtLclNum == op2->gtLclVarCommon.gtLclNum);
15166                     }
15167 #endif
15168                 }
15169
15170 #ifdef DEBUG
15171                 if (verbose)
15172                 {
15173                     printf("\n\n    Inlinee Return expression (after normalization) =>\n");
15174                     gtDispTree(op2);
15175                 }
15176 #endif
15177
15178                 // Report the return expression
15179                 impInlineInfo->retExpr = op2;
15180             }
15181             else
15182             {
15183                 // compRetNativeType is TYP_STRUCT.
15184                 // This implies that struct return via RetBuf arg or multi-reg struct return
15185
15186                 GenTreePtr iciCall = impInlineInfo->iciCall;
15187                 assert(iciCall->gtOper == GT_CALL);
15188
15189                 // Assign the inlinee return into a spill temp.
15190                 // spill temp only exists if there are multiple return points
15191                 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15192                 {
15193                     // in this case we have to insert multiple struct copies to the temp
15194                     // and the retexpr is just the temp.
15195                     assert(info.compRetNativeType != TYP_VOID);
15196                     assert(fgMoreThanOneReturnBlock() || impInlineInfo->hasPinnedLocals);
15197
15198                     impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15199                                      (unsigned)CHECK_SPILL_ALL);
15200                 }
15201
15202 #if defined(_TARGET_ARM_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15203 #if defined(_TARGET_ARM_)
15204                 // TODO-ARM64-NYI: HFA
15205                 // TODO-AMD64-Unix and TODO-ARM once the ARM64 functionality is implemented the
15206                 // next ifdefs could be refactored in a single method with the ifdef inside.
15207                 if (IsHfa(retClsHnd))
15208                 {
15209 // Same as !IsHfa but just don't bother with impAssignStructPtr.
15210 #else  // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15211                 ReturnTypeDesc retTypeDesc;
15212                 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15213                 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
15214
15215                 if (retRegCount != 0)
15216                 {
15217                     // If single eightbyte, the return type would have been normalized and there won't be a temp var.
15218                     // This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes -
15219                     // max allowed.)
15220                     assert(retRegCount == MAX_RET_REG_COUNT);
15221                     // Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr.
15222                     CLANG_FORMAT_COMMENT_ANCHOR;
15223 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15224
15225                     if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15226                     {
15227                         if (!impInlineInfo->retExpr)
15228                         {
15229 #if defined(_TARGET_ARM_)
15230                             impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType);
15231 #else  // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15232                             // The inlinee compiler has figured out the type of the temp already. Use it here.
15233                             impInlineInfo->retExpr =
15234                                 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
15235 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15236                         }
15237                     }
15238                     else
15239                     {
15240                         impInlineInfo->retExpr = op2;
15241                     }
15242                 }
15243                 else
15244 #elif defined(_TARGET_ARM64_)
15245                 ReturnTypeDesc retTypeDesc;
15246                 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15247                 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
15248
15249                 if (retRegCount != 0)
15250                 {
15251                     assert(!iciCall->AsCall()->HasRetBufArg());
15252                     assert(retRegCount >= 2);
15253                     if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15254                     {
15255                         if (!impInlineInfo->retExpr)
15256                         {
15257                             // The inlinee compiler has figured out the type of the temp already. Use it here.
15258                             impInlineInfo->retExpr =
15259                                 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
15260                         }
15261                     }
15262                     else
15263                     {
15264                         impInlineInfo->retExpr = op2;
15265                     }
15266                 }
15267                 else
15268 #endif // defined(_TARGET_ARM64_)
15269                 {
15270                     assert(iciCall->AsCall()->HasRetBufArg());
15271                     GenTreePtr dest = gtCloneExpr(iciCall->gtCall.gtCallArgs->gtOp.gtOp1);
15272                     // spill temp only exists if there are multiple return points
15273                     if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15274                     {
15275                         // if this is the first return we have seen set the retExpr
15276                         if (!impInlineInfo->retExpr)
15277                         {
15278                             impInlineInfo->retExpr =
15279                                 impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType),
15280                                                    retClsHnd, (unsigned)CHECK_SPILL_ALL);
15281                         }
15282                     }
15283                     else
15284                     {
15285                         impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
15286                     }
15287                 }
15288             }
15289         }
15290     }
15291
15292     if (compIsForInlining())
15293     {
15294         return true;
15295     }
15296
15297     if (info.compRetType == TYP_VOID)
15298     {
15299         // return void
15300         op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
15301     }
15302     else if (info.compRetBuffArg != BAD_VAR_NUM)
15303     {
15304         // Assign value to return buff (first param)
15305         GenTreePtr retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF, impCurStmtOffs);
15306
15307         op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
15308         impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
15309
15310         // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX).
15311         CLANG_FORMAT_COMMENT_ANCHOR;
15312
15313 #if defined(_TARGET_AMD64_)
15314
15315         // The x64 (System V and Win64) calling conventions require the address of the
15316         // implicit return buffer to be returned explicitly (in RAX).
15317         // Change the return type to be BYREF.
15318         op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
15319 #else  // !defined(_TARGET_AMD64_)
15320         // On non-AMD64 targets the profiler hook requires the address of the implicit RetBuf to be returned
15321         // explicitly (in the return register). In that case the return value of the function is changed to BYREF.
15322         // If the profiler hook is not needed, the return type of the function is TYP_VOID.
15323         if (compIsProfilerHookNeeded())
15324         {
15325             op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
15326         }
15327         else
15328         {
15329             // return void
15330             op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
15331         }
15332 #endif // !defined(_TARGET_AMD64_)
15333     }
15334     else if (varTypeIsStruct(info.compRetType))
15335     {
15336 #if !FEATURE_MULTIREG_RET
15337         // For both ARM architectures the HFA native types are maintained as structs.
15338         // Also, on System V AMD64 the multireg struct returns are left as structs.
15339         noway_assert(info.compRetNativeType != TYP_STRUCT);
15340 #endif
15341         op2 = impFixupStructReturnType(op2, retClsHnd);
15342         // return op2
15343         op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetNativeType), op2);
15344     }
15345     else
15346     {
15347         // return op2
15348         op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2);
15349     }
15350
15351     // We must have imported a tailcall and jumped to RET
15352     if (prefixFlags & PREFIX_TAILCALL)
15353     {
15354 #ifndef _TARGET_AMD64_
15355         // Jit64 compat:
15356         // This cannot be asserted on Amd64 since we permit the following IL pattern:
15357         //      tail.call
15358         //      pop
15359         //      ret
15360         assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode));
15361 #endif
15362
15363         opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES
15364
15365         // impImportCall() would have already appended TYP_VOID calls
15366         if (info.compRetType == TYP_VOID)
15367         {
15368             return true;
15369         }
15370     }
15371
15372     impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
15373 #ifdef DEBUG
15374     // Remember at which BC offset the tree was finished
15375     impNoteLastILoffs();
15376 #endif
15377     return true;
15378 }
15379
15380 /*****************************************************************************
15381  *  Mark the block as unimported.
15382  *  Note that the caller is responsible for calling impImportBlockPending(),
15383  *  with the appropriate stack-state
15384  */
15385
15386 inline void Compiler::impReimportMarkBlock(BasicBlock* block)
15387 {
15388 #ifdef DEBUG
15389     if (verbose && (block->bbFlags & BBF_IMPORTED))
15390     {
15391         printf("\nBB%02u will be reimported\n", block->bbNum);
15392     }
15393 #endif
15394
15395     block->bbFlags &= ~BBF_IMPORTED;
15396 }
15397
15398 /*****************************************************************************
15399  *  Mark the successors of the given block as unimported.
15400  *  Note that the caller is responsible for calling impImportBlockPending()
15401  *  for all the successors, with the appropriate stack-state.
15402  */
15403
15404 void Compiler::impReimportMarkSuccessors(BasicBlock* block)
15405 {
15406     for (unsigned i = 0; i < block->NumSucc(); i++)
15407     {
15408         impReimportMarkBlock(block->GetSucc(i));
15409     }
15410 }
15411
15412 /*****************************************************************************
15413  *
15414  *  Filter wrapper that handles only the verification exception code
15415  *  (SEH_VERIFICATION_EXCEPTION); all other exceptions continue the search.
15416  */
15417
15418 LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
15419 {
15420     if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION)
15421     {
15422         return EXCEPTION_EXECUTE_HANDLER;
15423     }
15424
15425     return EXCEPTION_CONTINUE_SEARCH;
15426 }
15427
15428 void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
15429 {
15430     assert(block->hasTryIndex());
15431     assert(!compIsForInlining());
15432
15433     unsigned  tryIndex = block->getTryIndex();
15434     EHblkDsc* HBtab    = ehGetDsc(tryIndex);
15435
15436     if (isTryStart)
15437     {
15438         assert(block->bbFlags & BBF_TRY_BEG);
15439
15440         // The Stack must be empty
15441         //
15442         if (block->bbStkDepth != 0)
15443         {
15444             BADCODE("Evaluation stack must be empty on entry into a try block");
15445         }
15446     }
15447
15448     // Save the stack contents, we'll need to restore it later
15449     //
15450     SavedStack blockState;
15451     impSaveStackState(&blockState, false);
15452
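          // Walk outward from the innermost try region enclosing this block, setting up the verification
          // entry state for each handler (and filter) and queueing it for importing.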
15453     while (HBtab != nullptr)
15454     {
15455         if (isTryStart)
15456         {
15457             // Are we verifying that an instance constructor properly initializes its 'this' pointer once?
15458             //  We do not allow the 'this' pointer to be uninitialized when entering most kinds of try regions
15459             //
15460             if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
15461             {
15462                 // We trigger an invalid program exception here unless we have a try/fault region.
15463                 //
15464                 if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter())
15465                 {
15466                     BADCODE(
15467                         "The 'this' pointer of an instance constructor is not initialized upon entry to a try region");
15468                 }
15469                 else
15470                 {
15471                     // Allow a try/fault region to proceed.
15472                     assert(HBtab->HasFaultHandler());
15473                 }
15474             }
15475
15476             /* Recursively process the handler block */
15477             BasicBlock* hndBegBB = HBtab->ebdHndBeg;
15478
15479             //  Construct the proper verification stack state
15480             //   either empty or one that contains just
15481             //   the Exception Object that we are dealing with
15482             //
15483             verCurrentState.esStackDepth = 0;
15484
15485             if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp))
15486             {
15487                 CORINFO_CLASS_HANDLE clsHnd;
15488
15489                 if (HBtab->HasFilter())
15490                 {
15491                     clsHnd = impGetObjectClass();
15492                 }
15493                 else
15494                 {
15495                     CORINFO_RESOLVED_TOKEN resolvedToken;
15496
15497                     resolvedToken.tokenContext = impTokenLookupContextHandle;
15498                     resolvedToken.tokenScope   = info.compScopeHnd;
15499                     resolvedToken.token        = HBtab->ebdTyp;
15500                     resolvedToken.tokenType    = CORINFO_TOKENKIND_Class;
15501                     info.compCompHnd->resolveToken(&resolvedToken);
15502
15503                     clsHnd = resolvedToken.hClass;
15504                 }
15505
15506                 // push the catch arg on the stack, spilling to a temp if necessary
15507                 // Note: can update HBtab->ebdHndBeg!
15508                 hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd);
15509             }
15510
15511             // Queue up the handler for importing
15512             //
15513             impImportBlockPending(hndBegBB);
15514
15515             if (HBtab->HasFilter())
15516             {
15517                 /* @VERIFICATION : Ideally the end of filter state should get
15518                    propagated to the catch handler; this is an incompleteness,
15519                    but not a security/compliance issue, since the only
15520                    interesting state is the 'thisInit' state.
15521                    */
15522
15523                 verCurrentState.esStackDepth = 0;
15524
15525                 BasicBlock* filterBB = HBtab->ebdFilter;
15526
15527                 // push the catch arg on the stack, spilling to a temp if necessary
15528                 // Note: can update HBtab->ebdFilter!
15529                 filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass());
15530
15531                 impImportBlockPending(filterBB);
15532             }
15533         }
15534         else if (verTrackObjCtorInitState && HBtab->HasFaultHandler())
15535         {
15536             /* Recursively process the handler block */
15537
15538             verCurrentState.esStackDepth = 0;
15539
15540             // Queue up the fault handler for importing
15541             //
15542             impImportBlockPending(HBtab->ebdHndBeg);
15543         }
15544
15545         // Now process our enclosing try index (if any)
15546         //
15547         tryIndex = HBtab->ebdEnclosingTryIndex;
15548         if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
15549         {
15550             HBtab = nullptr;
15551         }
15552         else
15553         {
15554             HBtab = ehGetDsc(tryIndex);
15555         }
15556     }
15557
15558     // Restore the stack contents
15559     impRestoreStackState(&blockState);
15560 }
15561
15562 //***************************************************************
15563 // Import the instructions for the given basic block.  Perform
15564 // verification, throwing an exception on failure.  Push any successor blocks that are enabled for the first
15565 // time, or whose verification pre-state is changed.
15566
15567 #ifdef _PREFAST_
15568 #pragma warning(push)
15569 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
15570 #endif
15571 void Compiler::impImportBlock(BasicBlock* block)
15572 {
15573     // BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to
15574     // handle them specially. In particular, there is no IL to import for them, but we do need
15575     // to mark them as imported and put their successors on the pending import list.
15576     if (block->bbFlags & BBF_INTERNAL)
15577     {
15578         JITDUMP("Marking BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", block->bbNum);
15579         block->bbFlags |= BBF_IMPORTED;
15580
15581         for (unsigned i = 0; i < block->NumSucc(); i++)
15582         {
15583             impImportBlockPending(block->GetSucc(i));
15584         }
15585
15586         return;
15587     }
15588
15589     bool markImport;
15590
15591     assert(block);
15592
15593     /* Make the block globally available */
15594
15595     compCurBB = block;
15596
15597 #ifdef DEBUG
15598     /* Initialize the debug variables */
15599     impCurOpcName = "unknown";
15600     impCurOpcOffs = block->bbCodeOffs;
15601 #endif
15602
15603     /* Set the current stack state to the merged result */
15604     verResetCurrentState(block, &verCurrentState);
15605
15606     /* Now walk the code and import the IL into GenTrees */
15607
15608     struct FilterVerificationExceptionsParam
15609     {
15610         Compiler*   pThis;
15611         BasicBlock* block;
15612     };
15613     FilterVerificationExceptionsParam param;
15614
15615     param.pThis = this;
15616     param.block = block;
15617
15618     PAL_TRY(FilterVerificationExceptionsParam*, pParam, &param)
15619     {
15620         /* @VERIFICATION : For now, the only state propagation from try
15621            to its handler is the "thisInit" state (the stack is empty at the start of a try).
15622            In general, for state that we track in verification, we need to
15623            model the possibility that an exception might happen at any IL
15624            instruction, so we really need to merge all states that obtain
15625            between IL instructions in a try block into the start states of
15626            all handlers.
15627
15628            However we do not allow the 'this' pointer to be uninitialized when
15629            entering most kinds of try regions (only try/fault regions are allowed to have
15630            an uninitialized this pointer on entry to the try)
15631
15632            Fortunately, the stack is thrown away when an exception
15633            leads to a handler, so we don't have to worry about that.
15634            We DO, however, have to worry about the "thisInit" state.
15635            But only for the try/fault case.
15636
15637            The only allowed transition is from TIS_Uninit to TIS_Init.
15638
15639            So for a try/fault region for the fault handler block
15640            we will merge the start state of the try begin
15641            and the post-state of each block that is part of this try region
15642         */
15643
15644         // merge the start state of the try begin
15645         //
15646         if (pParam->block->bbFlags & BBF_TRY_BEG)
15647         {
15648             pParam->pThis->impVerifyEHBlock(pParam->block, true);
15649         }
15650
15651         pParam->pThis->impImportBlockCode(pParam->block);
15652
15653         // As discussed above:
15654         // merge the post-state of each block that is part of this try region
15655         //
15656         if (pParam->block->hasTryIndex())
15657         {
15658             pParam->pThis->impVerifyEHBlock(pParam->block, false);
15659         }
15660     }
15661     PAL_EXCEPT_FILTER(FilterVerificationExceptions)
15662     {
15663         verHandleVerificationFailure(block DEBUGARG(false));
15664     }
15665     PAL_ENDTRY
15666
15667     if (compDonotInline())
15668     {
15669         return;
15670     }
15671
15672     assert(!compDonotInline());
15673
15674     markImport = false;
15675
15676 SPILLSTACK:
15677
15678     unsigned    baseTmp             = NO_BASE_TMP; // input temps assigned to successor blocks
15679     bool        reimportSpillClique = false;
15680     BasicBlock* tgtBlock            = nullptr;
15681
15682     /* If the stack is non-empty, we might have to spill its contents */
15683
15684     if (verCurrentState.esStackDepth != 0)
15685     {
15686         impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something
15687                                   // on the stack, its lifetime is hard to determine; simply
15688                                   // don't reuse such temps.
15689
15690         GenTreePtr addStmt = nullptr;
15691
15692         /* Do the successors of 'block' have any other predecessors ?
15693            We do not want to do some of the optimizations related to multiRef
15694            if we can reimport blocks */
15695
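        // When reimporting is still possible, seeding multRef with all-ones makes every
        // "multRef > 1" check below take the conservative path, regardless of the real ref counts.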
15696         unsigned multRef = impCanReimport ? unsigned(~0) : 0;
15697
15698         switch (block->bbJumpKind)
15699         {
15700             case BBJ_COND:
15701
15702                 /* Temporarily remove the 'jtrue' from the end of the tree list */
15703
15704                 assert(impTreeLast);
15705                 assert(impTreeLast->gtOper == GT_STMT);
15706                 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_JTRUE);
15707
15708                 addStmt     = impTreeLast;
15709                 impTreeLast = impTreeLast->gtPrev;
15710
15711                 /* Note if the next block has more than one ancestor */
15712
15713                 multRef |= block->bbNext->bbRefs;
15714
15715                 /* Does the next block have temps assigned? */
15716
15717                 baseTmp  = block->bbNext->bbStkTempsIn;
15718                 tgtBlock = block->bbNext;
15719
15720                 if (baseTmp != NO_BASE_TMP)
15721                 {
15722                     break;
15723                 }
15724
15725                 /* Try the target of the jump then */
15726
15727                 multRef |= block->bbJumpDest->bbRefs;
15728                 baseTmp  = block->bbJumpDest->bbStkTempsIn;
15729                 tgtBlock = block->bbJumpDest;
15730                 break;
15731
15732             case BBJ_ALWAYS:
15733                 multRef |= block->bbJumpDest->bbRefs;
15734                 baseTmp  = block->bbJumpDest->bbStkTempsIn;
15735                 tgtBlock = block->bbJumpDest;
15736                 break;
15737
15738             case BBJ_NONE:
15739                 multRef |= block->bbNext->bbRefs;
15740                 baseTmp  = block->bbNext->bbStkTempsIn;
15741                 tgtBlock = block->bbNext;
15742                 break;
15743
15744             case BBJ_SWITCH:
15745
15746                 BasicBlock** jmpTab;
15747                 unsigned     jmpCnt;
15748
15749                 /* Temporarily remove the GT_SWITCH from the end of the tree list */
15750
15751                 assert(impTreeLast);
15752                 assert(impTreeLast->gtOper == GT_STMT);
15753                 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_SWITCH);
15754
15755                 addStmt     = impTreeLast;
15756                 impTreeLast = impTreeLast->gtPrev;
15757
15758                 jmpCnt = block->bbJumpSwt->bbsCount;
15759                 jmpTab = block->bbJumpSwt->bbsDstTab;
15760
15761                 do
15762                 {
15763                     tgtBlock = (*jmpTab);
15764
15765                     multRef |= tgtBlock->bbRefs;
15766
15767                     // Thanks to spill cliques, we should have assigned all or none
15768                     assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn));
15769                     baseTmp = tgtBlock->bbStkTempsIn;
15770                     if (multRef > 1)
15771                     {
15772                         break;
15773                     }
15774                 } while (++jmpTab, --jmpCnt);
15775
15776                 break;
15777
15778             case BBJ_CALLFINALLY:
15779             case BBJ_EHCATCHRET:
15780             case BBJ_RETURN:
15781             case BBJ_EHFINALLYRET:
15782             case BBJ_EHFILTERRET:
15783             case BBJ_THROW:
15784                 NO_WAY("can't have 'unreached' end of BB with non-empty stack");
15785                 break;
15786
15787             default:
15788                 noway_assert(!"Unexpected bbJumpKind");
15789                 break;
15790         }
15791
15792         assert(multRef >= 1);
15793
15794         /* Do we have a base temp number? */
15795
15796         bool newTemps = (baseTmp == NO_BASE_TMP);
15797
15798         if (newTemps)
15799         {
15800             /* Grab enough temps for the whole stack */
15801             baseTmp = impGetSpillTmpBase(block);
15802         }
15803
15804         /* Spill all stack entries into temps */
15805         unsigned level, tempNum;
15806
15807         JITDUMP("\nSpilling stack entries into temps\n");
15808         for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++)
15809         {
15810             GenTreePtr tree = verCurrentState.esStack[level].val;
15811
15812             /* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from
15813                the other. This should merge to a byref in unverifiable code.
15814                However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the
15815                successor would be imported assuming there was a TYP_I_IMPL on
15816                the stack. Thus the value would not get GC-tracked. Hence,
15817                change the temp to TYP_BYREF and reimport the successors.
15818                Note: We should only allow this in unverifiable code.
15819             */
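            // Illustrative sketch (hypothetical IL, not from the original sources):
            //
            //         brtrue.s TAKE_BYREF
            //         ldc.i4.0            // this path leaves the integer 0 on the stack
            //         br.s     JOIN
            //     TAKE_BYREF:
            //         ldloca.s 0          // this path leaves a byref on the stack
            //     JOIN:
            //         ...                 // the shared spill temp must end up TYP_BYREF so the
            //                             // merged value stays GC-tracked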
15820             if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL && !verNeedsVerification())
15821             {
15822                 lvaTable[tempNum].lvType = TYP_BYREF;
15823                 impReimportMarkSuccessors(block);
15824                 markImport = true;
15825             }
15826
15827 #ifdef _TARGET_64BIT_
15828             if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT)
15829             {
15830                 if (tiVerificationNeeded && tgtBlock->bbEntryState != nullptr &&
15831                     (tgtBlock->bbFlags & BBF_FAILED_VERIFICATION) == 0)
15832                 {
15833                     // Merge the current state into the entry state of block;
15834                     // the call to verMergeEntryStates must have changed
15835                     // the entry state of the block by merging the int local var
15836                     // and the native-int stack entry.
15837                     bool changed = false;
15838                     if (verMergeEntryStates(tgtBlock, &changed))
15839                     {
15840                         impRetypeEntryStateTemps(tgtBlock);
15841                         impReimportBlockPending(tgtBlock);
15842                         assert(changed);
15843                     }
15844                     else
15845                     {
15846                         tgtBlock->bbFlags |= BBF_FAILED_VERIFICATION;
15847                         break;
15848                     }
15849                 }
15850
15851                 // Some other block in the spill clique set this to "int", but now we have "native int".
15852                 // Change the type and go back to re-import any blocks that used the wrong type.
15853                 lvaTable[tempNum].lvType = TYP_I_IMPL;
15854                 reimportSpillClique      = true;
15855             }
15856             else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL)
15857             {
15858                 // Spill clique has decided this should be "native int", but this block only pushes an "int".
15859                 // Insert a sign-extension to "native int" so we match the clique.
15860                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
15861             }
15862
15863             // Consider the case where one branch left a 'byref' on the stack and the other leaves
15864             // an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same
15865             // size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64
15866             // behavior instead of asserting and then generating bad code (where we save/restore the
15867             // low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been
15868             // imported already, we need to change the type of the local and reimport the spill clique.
15869             // If the 'byref' side has been imported, we insert a cast from int to 'native int' to match
15870             // the 'byref' size.
15871             if (!tiVerificationNeeded)
15872             {
15873                 if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT)
15874                 {
15875                     // Some other block in the spill clique set this to "int", but now we have "byref".
15876                     // Change the type and go back to re-import any blocks that used the wrong type.
15877                     lvaTable[tempNum].lvType = TYP_BYREF;
15878                     reimportSpillClique      = true;
15879                 }
15880                 else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF)
15881                 {
15882                     // Spill clique has decided this should be "byref", but this block only pushes an "int".
15883                     // Insert a sign-extension to "native int" so we match the clique size.
15884                     verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
15885                 }
15886             }
15887 #endif // _TARGET_64BIT_
15888
15889 #if FEATURE_X87_DOUBLES
15890             // The x87 stack doesn't differentiate between float/double,
15891             // so promoting is no big deal.
15892             // For everybody else, keep it as float until we have a collision and then promote,
15893             // just like for x64's TYP_INT<->TYP_I_IMPL.
15894
15895             if (multRef > 1 && tree->gtType == TYP_FLOAT)
15896             {
15897                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
15898             }
15899
15900 #else // !FEATURE_X87_DOUBLES
15901
15902             if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT)
15903             {
15904                 // Some other block in the spill clique set this to "float", but now we have "double".
15905                 // Change the type and go back to re-import any blocks that used the wrong type.
15906                 lvaTable[tempNum].lvType = TYP_DOUBLE;
15907                 reimportSpillClique      = true;
15908             }
15909             else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE)
15910             {
15911                 // Spill clique has decided this should be "double", but this block only pushes a "float".
15912                 // Insert a cast to "double" so we match the clique.
15913                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
15914             }
15915
15916 #endif // FEATURE_X87_DOUBLES
15917
15918             /* If addStmt has a reference to tempNum (can only happen if we
15919                are spilling to the temps already used by a previous block),
15920                we need to spill addStmt */
15921
15922             if (addStmt && !newTemps && gtHasRef(addStmt->gtStmt.gtStmtExpr, tempNum, false))
15923             {
15924                 GenTreePtr addTree = addStmt->gtStmt.gtStmtExpr;
15925
15926                 if (addTree->gtOper == GT_JTRUE)
15927                 {
15928                     GenTreePtr relOp = addTree->gtOp.gtOp1;
15929                     assert(relOp->OperIsCompare());
15930
15931                     var_types type = genActualType(relOp->gtOp.gtOp1->TypeGet());
15932
15933                     if (gtHasRef(relOp->gtOp.gtOp1, tempNum, false))
15934                     {
15935                         unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1"));
15936                         impAssignTempGen(temp, relOp->gtOp.gtOp1, level);
15937                         type              = genActualType(lvaTable[temp].TypeGet());
15938                         relOp->gtOp.gtOp1 = gtNewLclvNode(temp, type);
15939                     }
15940
15941                     if (gtHasRef(relOp->gtOp.gtOp2, tempNum, false))
15942                     {
15943                         unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2"));
15944                         impAssignTempGen(temp, relOp->gtOp.gtOp2, level);
15945                         type              = genActualType(lvaTable[temp].TypeGet());
15946                         relOp->gtOp.gtOp2 = gtNewLclvNode(temp, type);
15947                     }
15948                 }
15949                 else
15950                 {
15951                     assert(addTree->gtOper == GT_SWITCH && genActualType(addTree->gtOp.gtOp1->gtType) == TYP_I_IMPL);
15952
15953                     unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH"));
15954                     impAssignTempGen(temp, addTree->gtOp.gtOp1, level);
15955                     addTree->gtOp.gtOp1 = gtNewLclvNode(temp, TYP_I_IMPL);
15956                 }
15957             }
15958
15959             /* Spill the stack entry, and replace with the temp */
15960
15961             if (!impSpillStackEntry(level, tempNum
15962 #ifdef DEBUG
15963                                     ,
15964                                     true, "Spill Stack Entry"
15965 #endif
15966                                     ))
15967             {
15968                 if (markImport)
15969                 {
15970                     BADCODE("bad stack state");
15971                 }
15972
15973                 // Oops. Something went wrong when spilling. Bad code.
15974                 verHandleVerificationFailure(block DEBUGARG(true));
15975
15976                 goto SPILLSTACK;
15977             }
15978         }
15979
15980         /* Put back the 'jtrue'/'switch' if we removed it earlier */
15981
15982         if (addStmt)
15983         {
15984             impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE);
15985         }
15986     }
15987
15988     // Some of the append/spill logic works on compCurBB
15989
15990     assert(compCurBB == block);
15991
15992     /* Save the tree list in the block */
15993     impEndTreeList(block);
15994
15995     // impEndTreeList sets BBF_IMPORTED on the block
15996     // We do *NOT* want to set it later than this because
15997     // impReimportSpillClique might clear it if this block is both a
15998     // predecessor and successor in the current spill clique
15999     assert(block->bbFlags & BBF_IMPORTED);
16000
16001     // If we had a int/native int, or float/double collision, we need to re-import
16002     if (reimportSpillClique)
16003     {
16004         // This will re-import all the successors of block (as well as each of their predecessors)
16005         impReimportSpillClique(block);
16006
16007         // For blocks that haven't been imported yet, we still need to mark them as pending import.
16008         for (unsigned i = 0; i < block->NumSucc(); i++)
16009         {
16010             BasicBlock* succ = block->GetSucc(i);
16011             if ((succ->bbFlags & BBF_IMPORTED) == 0)
16012             {
16013                 impImportBlockPending(succ);
16014             }
16015         }
16016     }
16017     else // the normal case
16018     {
16019         // otherwise just import the successors of block
16020
16021         /* Does this block jump to any other blocks? */
16022         for (unsigned i = 0; i < block->NumSucc(); i++)
16023         {
16024             impImportBlockPending(block->GetSucc(i));
16025         }
16026     }
16027 }
16028 #ifdef _PREFAST_
16029 #pragma warning(pop)
16030 #endif
16031
16032 /*****************************************************************************/
16033 //
16034 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16035 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
16036 // impPendingBlockMembers).  Merges the current verification state into the verification state of "block"
16037 // (its "pre-state").
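// (Contrast with impReimportBlockPending below, which re-queues an already-imported block
// without changing its pre-state.)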
16038
16039 void Compiler::impImportBlockPending(BasicBlock* block)
16040 {
16041 #ifdef DEBUG
16042     if (verbose)
16043     {
16044         printf("\nimpImportBlockPending for BB%02u\n", block->bbNum);
16045     }
16046 #endif
16047
16048     // We will add a block to the pending set if it has not already been imported (or needs to be re-imported),
16049     // or if it has, but merging in a predecessor's post-state changes the block's pre-state.
16050     // (When we're doing verification, we always attempt the merge to detect verification errors.)
16051
16052     // If the block has not been imported, add to pending set.
16053     bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0);
16054
16055     // Initialize bbEntryState just the first time we try to add this block to the pending list.
16056     // Just because bbEntryState is NULL doesn't mean the pre-state wasn't previously set;
16057     // we use NULL to indicate the 'common' state to avoid memory allocation.
16058     if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) &&
16059         (impGetPendingBlockMember(block) == 0))
16060     {
16061         verInitBBEntryState(block, &verCurrentState);
16062         assert(block->bbStkDepth == 0);
16063         block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth);
16064         assert(addToPending);
16065         assert(impGetPendingBlockMember(block) == 0);
16066     }
16067     else
16068     {
16069         // The stack should have the same height on entry to the block from all its predecessors.
16070         if (block->bbStkDepth != verCurrentState.esStackDepth)
16071         {
16072 #ifdef DEBUG
16073             char buffer[400];
16074             sprintf_s(buffer, sizeof(buffer),
16075                       "Block at offset %4.4x to %4.4x in %s entered with different stack depths.\n"
16076                       "Previous depth was %d, current depth is %d",
16077                       block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth,
16078                       verCurrentState.esStackDepth);
16079             buffer[400 - 1] = 0;
16080             NO_WAY(buffer);
16081 #else
16082             NO_WAY("Block entered with different stack depths");
16083 #endif
16084         }
16085
16086         // Additionally, if we need to verify, merge the verification state.
16087         if (tiVerificationNeeded)
16088         {
16089             // Merge the current state into the entry state of block; if this does not change the entry state
16090             // by merging, do not add the block to the pending-list.
16091             bool changed = false;
16092             if (!verMergeEntryStates(block, &changed))
16093             {
16094                 block->bbFlags |= BBF_FAILED_VERIFICATION;
16095                 addToPending = true; // We will pop it off, and check the flag set above.
16096             }
16097             else if (changed)
16098             {
16099                 addToPending = true;
16100
16101                 JITDUMP("Adding BB%02u to pending set due to new merge result\n", block->bbNum);
16102             }
16103         }
16104
16105         if (!addToPending)
16106         {
16107             return;
16108         }
16109
16110         if (block->bbStkDepth > 0)
16111         {
16112             // We need to fix the types of any spill temps that might have changed:
16113             //   int->native int, float->double, int->byref, etc.
16114             impRetypeEntryStateTemps(block);
16115         }
16116
16117         // OK, we must add to the pending list, if it's not already in it.
16118         if (impGetPendingBlockMember(block) != 0)
16119         {
16120             return;
16121         }
16122     }
16123
16124     // Get an entry to add to the pending list
16125
16126     PendingDsc* dsc;
16127
16128     if (impPendingFree)
16129     {
16130         // We can reuse one of the freed up dscs.
16131         dsc            = impPendingFree;
16132         impPendingFree = dsc->pdNext;
16133     }
16134     else
16135     {
16136         // We have to create a new dsc
16137         dsc = new (this, CMK_Unknown) PendingDsc;
16138     }
16139
16140     dsc->pdBB                 = block;
16141     dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth;
16142     dsc->pdThisPtrInit        = verCurrentState.thisInitialized;
16143
16144     // Save the stack trees for later
16145
16146     if (verCurrentState.esStackDepth)
16147     {
16148         impSaveStackState(&dsc->pdSavedStack, false);
16149     }
16150
16151     // Add the entry to the pending list
16152
16153     dsc->pdNext    = impPendingList;
16154     impPendingList = dsc;
16155     impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16156
16157     // Various assertions require us now to consider the block as not imported (at least for
16158     // the final time...)
16159     block->bbFlags &= ~BBF_IMPORTED;
16160
16161 #ifdef DEBUG
16162     if (verbose && 0)
16163     {
16164         printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16165     }
16166 #endif
16167 }
16168
16169 /*****************************************************************************/
16170 //
16171 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16172 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
16173 // impPendingBlockMembers).  Does *NOT* change the existing "pre-state" of the block.
16174
16175 void Compiler::impReimportBlockPending(BasicBlock* block)
16176 {
16177     JITDUMP("\nimpReimportBlockPending for BB%02u", block->bbNum);
16178
16179     assert(block->bbFlags & BBF_IMPORTED);
16180
16181     // OK, we must add to the pending list, if it's not already in it.
16182     if (impGetPendingBlockMember(block) != 0)
16183     {
16184         return;
16185     }
16186
16187     // Get an entry to add to the pending list
16188
16189     PendingDsc* dsc;
16190
16191     if (impPendingFree)
16192     {
16193         // We can reuse one of the freed up dscs.
16194         dsc            = impPendingFree;
16195         impPendingFree = dsc->pdNext;
16196     }
16197     else
16198     {
16199         // We have to create a new dsc
16200         dsc = new (this, CMK_ImpStack) PendingDsc;
16201     }
16202
16203     dsc->pdBB = block;
16204
16205     if (block->bbEntryState)
16206     {
16207         dsc->pdThisPtrInit        = block->bbEntryState->thisInitialized;
16208         dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth;
16209         dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack;
16210     }
16211     else
16212     {
16213         dsc->pdThisPtrInit        = TIS_Bottom;
16214         dsc->pdSavedStack.ssDepth = 0;
16215         dsc->pdSavedStack.ssTrees = nullptr;
16216     }
16217
16218     // Add the entry to the pending list
16219
16220     dsc->pdNext    = impPendingList;
16221     impPendingList = dsc;
16222     impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16223
16224     // Various assertions require us now to consider the block as not imported (at least for
16225     // the final time...)
16226     block->bbFlags &= ~BBF_IMPORTED;
16227
16228 #ifdef DEBUG
16229     if (verbose && 0)
16230     {
16231         printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16232     }
16233 #endif
16234 }
16235
16236 void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp)
16237 {
16238     if (comp->impBlockListNodeFreeList == nullptr)
16239     {
16240         return (BlockListNode*)comp->compGetMem(sizeof(BlockListNode), CMK_BasicBlock);
16241     }
16242     else
16243     {
16244         BlockListNode* res             = comp->impBlockListNodeFreeList;
16245         comp->impBlockListNodeFreeList = res->m_next;
16246         return res;
16247     }
16248 }
16249
16250 void Compiler::FreeBlockListNode(Compiler::BlockListNode* node)
16251 {
16252     node->m_next             = impBlockListNodeFreeList;
16253     impBlockListNodeFreeList = node;
16254 }
16255
16256 void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback)
16257 {
16258     bool toDo = true;
16259
16260     noway_assert(!fgComputePredsDone);
16261     if (!fgCheapPredsValid)
16262     {
16263         fgComputeCheapPreds();
16264     }
16265
16266     BlockListNode* succCliqueToDo = nullptr;
16267     BlockListNode* predCliqueToDo = new (this) BlockListNode(block);
16268     while (toDo)
16269     {
16270         toDo = false;
16271         // Look at the successors of every member of the predecessor to-do list.
16272         while (predCliqueToDo != nullptr)
16273         {
16274             BlockListNode* node = predCliqueToDo;
16275             predCliqueToDo      = node->m_next;
16276             BasicBlock* blk     = node->m_blk;
16277             FreeBlockListNode(node);
16278
16279             for (unsigned succNum = 0; succNum < blk->NumSucc(); succNum++)
16280             {
16281                 BasicBlock* succ = blk->GetSucc(succNum);
16282                 // If it's not already in the clique, add it, and also add it
16283                 // as a member of the successor "toDo" set.
16284                 if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0)
16285                 {
16286                     callback->Visit(SpillCliqueSucc, succ);
16287                     impSpillCliqueSetMember(SpillCliqueSucc, succ, 1);
16288                     succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo);
16289                     toDo           = true;
16290                 }
16291             }
16292         }
16293         // Look at the predecessors of every member of the successor to-do list.
16294         while (succCliqueToDo != nullptr)
16295         {
16296             BlockListNode* node = succCliqueToDo;
16297             succCliqueToDo      = node->m_next;
16298             BasicBlock* blk     = node->m_blk;
16299             FreeBlockListNode(node);
16300
16301             for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next)
16302             {
16303                 BasicBlock* predBlock = pred->block;
16304                 // If it's not already in the clique, add it, and also add it
16305                 // as a member of the predecessor "toDo" set.
16306                 if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0)
16307                 {
16308                     callback->Visit(SpillCliquePred, predBlock);
16309                     impSpillCliqueSetMember(SpillCliquePred, predBlock, 1);
16310                     predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo);
16311                     toDo           = true;
16312                 }
16313             }
16314         }
16315     }
16316
16317     // If this fails, it means we didn't walk the spill clique properly and somehow managed
16318     // to miss walking back to include the predecessor we started from.
16319     // The most likely cause: missing or out-of-date bbPreds.
16320     assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0);
16321 }
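
// A small worked example of the fixed point the walk above computes (illustrative only).
// Suppose P0 -> S0, P1 -> S0 and P1 -> S1, where P0 and P1 end with a non-empty stack:
// starting from pred P0 we add S0 (a successor of P0), then P1 (a predecessor of S0),
// then S1 (a successor of P1). {P0, P1} become SpillCliquePred members and {S0, S1}
// SpillCliqueSucc members; a block that both receives and leaves stack contents appears
// on both sides.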
16322
16323 void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
16324 {
16325     if (predOrSucc == SpillCliqueSucc)
16326     {
16327         assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor.
16328         blk->bbStkTempsIn = m_baseTmp;
16329     }
16330     else
16331     {
16332         assert(predOrSucc == SpillCliquePred);
16333         assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor.
16334         blk->bbStkTempsOut = m_baseTmp;
16335     }
16336 }
16337
16338 void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
16339 {
16340     // For Preds we could be a little smarter and just find the existing store
16341     // and re-type it/add a cast, but that is complicated and hopefully very rare, so
16342     // just re-import the whole block (just like we do for successors)
16343
16344     if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0))
16345     {
16346         // If we haven't imported this block and we're not going to (because it isn't on
16347         // the pending list) then just ignore it for now.
16348
16349         // This block has either never been imported (EntryState == NULL) or it failed
16350         // verification. Neither state requires us to force it to be imported now.
16351         assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION));
16352         return;
16353     }
16354
16355     // For successors we have a valid verCurrentState, so just mark them for reimport
16356     // the 'normal' way
16357     // Unlike predecessors, we *DO* need to reimport the current block because the
16358     // initial import had the wrong entry state types.
16359     // Similarly, blocks that are currently on the pending list, still need to call
16360     // impImportBlockPending to fixup their entry state.
16361     if (predOrSucc == SpillCliqueSucc)
16362     {
16363         m_pComp->impReimportMarkBlock(blk);
16364
16365         // Set the current stack state to that of the blk->bbEntryState
16366         m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState);
16367         assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry());
16368
16369         m_pComp->impImportBlockPending(blk);
16370     }
16371     else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0))
16372     {
16373         // As described above, we are only visiting predecessors so they can
16374         // add the appropriate casts, since we have already done that for the current
16375         // block, it does not need to be reimported.
16376         // Nor do we need to reimport blocks that are still pending, but not yet
16377         // imported.
16378         //
16379         // For predecessors, we have no state to seed the EntryState, so we just have
16380         // to assume the existing one is correct.
16381         // If the block is also a successor, it will get the EntryState properly
16382         // updated when it is visited as a successor in the above "if" block.
16383         assert(predOrSucc == SpillCliquePred);
16384         m_pComp->impReimportBlockPending(blk);
16385     }
16386 }
16387
16388 // Re-type the incoming lclVar nodes to match the varDsc.
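// (Called when a spill temp's lvType may have changed after the entry-state trees were saved,
// e.g. int->native int, float->double, or int->byref, so the saved lclVar nodes pick up the
// new type.)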
16389 void Compiler::impRetypeEntryStateTemps(BasicBlock* blk)
16390 {
16391     if (blk->bbEntryState != nullptr)
16392     {
16393         EntryState* es = blk->bbEntryState;
16394         for (unsigned level = 0; level < es->esStackDepth; level++)
16395         {
16396             GenTreePtr tree = es->esStack[level].val;
16397             if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD))
16398             {
16399                 unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
16400                 noway_assert(lclNum < lvaCount);
16401                 LclVarDsc* varDsc              = lvaTable + lclNum;
16402                 es->esStack[level].val->gtType = varDsc->TypeGet();
16403             }
16404         }
16405     }
16406 }
16407
16408 unsigned Compiler::impGetSpillTmpBase(BasicBlock* block)
16409 {
16410     if (block->bbStkTempsOut != NO_BASE_TMP)
16411     {
16412         return block->bbStkTempsOut;
16413     }
16414
16415 #ifdef DEBUG
16416     if (verbose)
16417     {
16418         printf("\n*************** In impGetSpillTmpBase(BB%02u)\n", block->bbNum);
16419     }
16420 #endif // DEBUG
16421
16422     // Otherwise, choose one, and propagate to all members of the spill clique.
16423     // Grab enough temps for the whole stack.
16424     unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries"));
16425     SetSpillTempsBase callback(baseTmp);
16426
16427     // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor
16428     // to one spill clique, and similarly can only be the successor to one spill clique.
16429     impWalkSpillCliqueFromPred(block, &callback);
16430
16431     return baseTmp;
16432 }
16433
16434 void Compiler::impReimportSpillClique(BasicBlock* block)
16435 {
16436 #ifdef DEBUG
16437     if (verbose)
16438     {
16439         printf("\n*************** In impReimportSpillClique(BB%02u)\n", block->bbNum);
16440     }
16441 #endif // DEBUG
16442
16443     // If we get here, it is because this block is already part of a spill clique
16444     // and one predecessor had an outgoing live stack slot of type int, and this
16445     // block has an outgoing live stack slot of type native int.
16446     // We need to reset these before traversal because they have already been set
16447     // by the previous walk to determine all the members of the spill clique.
16448     impInlineRoot()->impSpillCliquePredMembers.Reset();
16449     impInlineRoot()->impSpillCliqueSuccMembers.Reset();
16450
16451     ReimportSpillClique callback(this);
16452
16453     impWalkSpillCliqueFromPred(block, &callback);
16454 }
16455
16456 // Set the pre-state of "block" (which should not have a pre-state allocated) to
16457 // a copy of "srcState", cloning tree pointers as required.
16458 void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState)
16459 {
16460     if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom)
16461     {
16462         block->bbEntryState = nullptr;
16463         return;
16464     }
16465
16466     block->bbEntryState = (EntryState*)compGetMemA(sizeof(EntryState));
16467
16468     // block->bbEntryState.esRefcount = 1;
16469
16470     block->bbEntryState->esStackDepth    = srcState->esStackDepth;
16471     block->bbEntryState->thisInitialized = TIS_Bottom;
16472
16473     if (srcState->esStackDepth > 0)
16474     {
16475         block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]);
16476         unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry);
16477
16478         memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize);
16479         for (unsigned level = 0; level < srcState->esStackDepth; level++)
16480         {
16481             GenTreePtr tree                         = srcState->esStack[level].val;
16482             block->bbEntryState->esStack[level].val = gtCloneExpr(tree);
16483         }
16484     }
16485
16486     if (verTrackObjCtorInitState)
16487     {
16488         verSetThisInit(block, srcState->thisInitialized);
16489     }
16490
16491     return;
16492 }
16493
16494 void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis)
16495 {
16496     assert(tis != TIS_Bottom); // Precondition.
16497     if (block->bbEntryState == nullptr)
16498     {
16499         block->bbEntryState = new (this, CMK_Unknown) EntryState();
16500     }
16501
16502     block->bbEntryState->thisInitialized = tis;
16503 }
16504
16505 /*
16506  * Resets the current state to the state at the start of the basic block
16507  */
16508 void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState)
16509 {
16510
16511     if (block->bbEntryState == nullptr)
16512     {
16513         destState->esStackDepth    = 0;
16514         destState->thisInitialized = TIS_Bottom;
16515         return;
16516     }
16517
16518     destState->esStackDepth = block->bbEntryState->esStackDepth;
16519
16520     if (destState->esStackDepth > 0)
16521     {
16522         unsigned stackSize = destState->esStackDepth * sizeof(StackEntry);
16523
16524         memcpy(destState->esStack, block->bbStackOnEntry(), stackSize);
16525     }
16526
16527     destState->thisInitialized = block->bbThisOnEntry();
16528
16529     return;
16530 }
16531
16532 ThisInitState BasicBlock::bbThisOnEntry()
16533 {
16534     return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom;
16535 }
16536
16537 unsigned BasicBlock::bbStackDepthOnEntry()
16538 {
16539     return (bbEntryState ? bbEntryState->esStackDepth : 0);
16540 }
16541
16542 void BasicBlock::bbSetStack(void* stackBuffer)
16543 {
16544     assert(bbEntryState);
16545     assert(stackBuffer);
16546     bbEntryState->esStack = (StackEntry*)stackBuffer;
16547 }
16548
16549 StackEntry* BasicBlock::bbStackOnEntry()
16550 {
16551     assert(bbEntryState);
16552     return bbEntryState->esStack;
16553 }
16554
16555 void Compiler::verInitCurrentState()
16556 {
16557     verTrackObjCtorInitState        = FALSE;
16558     verCurrentState.thisInitialized = TIS_Bottom;
16559
16560     if (tiVerificationNeeded)
16561     {
16562         // Track this ptr initialization
16563         if (!info.compIsStatic && (info.compFlags & CORINFO_FLG_CONSTRUCTOR) && lvaTable[0].lvVerTypeInfo.IsObjRef())
16564         {
16565             verTrackObjCtorInitState        = TRUE;
16566             verCurrentState.thisInitialized = TIS_Uninit;
16567         }
16568     }
16569
16570     // initialize stack info
16571
16572     verCurrentState.esStackDepth = 0;
16573     assert(verCurrentState.esStack != nullptr);
16574
16575     // copy current state to entry state of first BB
16576     verInitBBEntryState(fgFirstBB, &verCurrentState);
16577 }
16578
16579 Compiler* Compiler::impInlineRoot()
16580 {
16581     if (impInlineInfo == nullptr)
16582     {
16583         return this;
16584     }
16585     else
16586     {
16587         return impInlineInfo->InlineRoot;
16588     }
16589 }
16590
16591 BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk)
16592 {
16593     if (predOrSucc == SpillCliquePred)
16594     {
16595         return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd());
16596     }
16597     else
16598     {
16599         assert(predOrSucc == SpillCliqueSucc);
16600         return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd());
16601     }
16602 }
16603
16604 void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val)
16605 {
16606     if (predOrSucc == SpillCliquePred)
16607     {
16608         impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val);
16609     }
16610     else
16611     {
16612         assert(predOrSucc == SpillCliqueSucc);
16613         impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val);
16614     }
16615 }
16616
16617 /*****************************************************************************
16618  *
16619  *  Convert the instrs ("import") into our internal format (trees). The
16620  *  basic flowgraph has already been constructed and is passed in.
16621  */
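// In outline (a summary of the code below, not a contract): allocate the evaluation stack,
// initialize the verification/entry state and the spill-clique bookkeeping, push the first
// non-internal block onto the pending worklist, then repeatedly pop a pending block, restore
// its saved entry stack state, and import it (which may in turn queue successors or re-queue
// spill-clique members) until the pending list is empty.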
16622
16623 void Compiler::impImport(BasicBlock* method)
16624 {
16625 #ifdef DEBUG
16626     if (verbose)
16627     {
16628         printf("*************** In impImport() for %s\n", info.compFullName);
16629     }
16630 #endif
16631
16632     /* Allocate the stack contents */
16633
16634     if (info.compMaxStack <= sizeof(impSmallStack) / sizeof(impSmallStack[0]))
16635     {
16636         /* Use local variable, don't waste time allocating on the heap */
16637
16638         impStkSize              = sizeof(impSmallStack) / sizeof(impSmallStack[0]);
16639         verCurrentState.esStack = impSmallStack;
16640     }
16641     else
16642     {
16643         impStkSize              = info.compMaxStack;
16644         verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
16645     }
16646
16647     // initialize the entry state at start of method
16648     verInitCurrentState();
16649
16650     // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase).
16651     Compiler* inlineRoot = impInlineRoot();
16652     if (this == inlineRoot) // These are only used on the root of the inlining tree.
16653     {
16654         // We have initialized these previously, but to size 0.  Make them larger.
16655         impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2);
16656         impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2);
16657         impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2);
16658     }
16659     inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2);
16660     inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2);
16661     inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2);
16662     impBlockListNodeFreeList = nullptr;
16663
16664 #ifdef DEBUG
16665     impLastILoffsStmt   = nullptr;
16666     impNestedStackSpill = false;
16667 #endif
16668     impBoxTemp = BAD_VAR_NUM;
16669
16670     impPendingList = impPendingFree = nullptr;
16671
16672     /* Add the entry-point to the worker-list */
16673
16674     // Skip leading internal blocks. There can be one as a leading scratch BB, and more
16675     // from EH normalization.
16676     // NOTE: It might be possible to always just put fgFirstBB on the pending list, and let everything else just fall
16677     // out.
16678     for (; method->bbFlags & BBF_INTERNAL; method = method->bbNext)
16679     {
16680         // Treat these as imported.
16681         assert(method->bbJumpKind == BBJ_NONE); // We assume all the leading ones are fallthrough.
16682         JITDUMP("Marking leading BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", method->bbNum);
16683         method->bbFlags |= BBF_IMPORTED;
16684     }
16685
16686     impImportBlockPending(method);
16687
16688     /* Import blocks in the worker-list until there are no more */
16689
16690     while (impPendingList)
16691     {
16692         /* Remove the entry at the front of the list */
16693
16694         PendingDsc* dsc = impPendingList;
16695         impPendingList  = impPendingList->pdNext;
16696         impSetPendingBlockMember(dsc->pdBB, 0);
16697
16698         /* Restore the stack state */
16699
16700         verCurrentState.thisInitialized = dsc->pdThisPtrInit;
16701         verCurrentState.esStackDepth    = dsc->pdSavedStack.ssDepth;
16702         if (verCurrentState.esStackDepth)
16703         {
16704             impRestoreStackState(&dsc->pdSavedStack);
16705         }
16706
16707         /* Add the entry to the free list for reuse */
16708
16709         dsc->pdNext    = impPendingFree;
16710         impPendingFree = dsc;
16711
16712         /* Now import the block */
16713
16714         if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION)
16715         {
16716
16717 #ifdef _TARGET_64BIT_
16718             // On AMD64, during verification we have to match JIT64 behavior since the VM is very tightly
16719             // coupled with the JIT64 IL Verification logic.  Look inside verHandleVerificationFailure
16720             // method for further explanation on why we raise this exception instead of making the jitted
16721             // code throw the verification exception during execution.
16722             if (tiVerificationNeeded && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
16723             {
16724                 BADCODE("Basic block marked as not verifiable");
16725             }
16726             else
16727 #endif // _TARGET_64BIT_
16728             {
16729                 verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true));
16730                 impEndTreeList(dsc->pdBB);
16731             }
16732         }
16733         else
16734         {
16735             impImportBlock(dsc->pdBB);
16736
16737             if (compDonotInline())
16738             {
16739                 return;
16740             }
16741             if (compIsForImportOnly() && !tiVerificationNeeded)
16742             {
16743                 return;
16744             }
16745         }
16746     }
16747
16748 #ifdef DEBUG
16749     if (verbose && info.compXcptnsCount)
16750     {
16751         printf("\nAfter impImport() added block for try,catch,finally");
16752         fgDispBasicBlocks();
16753         printf("\n");
16754     }
16755
16756     // Used in impImportBlockPending() for STRESS_CHK_REIMPORT
16757     for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
16758     {
16759         block->bbFlags &= ~BBF_VISITED;
16760     }
16761 #endif
16762
16763     assert(!compIsForInlining() || !tiVerificationNeeded);
16764 }
16765
16766 // Checks if a typeinfo (usually stored in the type stack) is a struct.
16767 // The invariant here is that if it's not a ref or a method and has a class handle,
16768 // it's a valuetype.
16769 bool Compiler::impIsValueType(typeInfo* pTypeInfo)
16770 {
16771     if (pTypeInfo && pTypeInfo->IsValueClassWithClsHnd())
16772     {
16773         return true;
16774     }
16775     else
16776     {
16777         return false;
16778     }
16779 }
16780
16781 /*****************************************************************************
16782  *  Check to see if the tree is the address of a local or
16783     the address of a field in a local.
16784
16785     *lclVarTreeOut will contain the GT_LCL_VAR tree when it returns TRUE.
16786
16787  */
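// For example (illustrative tree shapes, not taken from the sources), these return TRUE with
// *lclVarTreeOut set to the LCL_VAR node:
//
//     ADDR(LCL_VAR V02)                       e.g. &local
//     ADDR(FIELD a (ADDR(LCL_VAR V02)))       e.g. &local.a
//
// while the address of a static field (a FIELD whose object is null) returns FALSE.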
16788
16789 BOOL Compiler::impIsAddressInLocal(GenTreePtr tree, GenTreePtr* lclVarTreeOut)
16790 {
16791     if (tree->gtOper != GT_ADDR)
16792     {
16793         return FALSE;
16794     }
16795
16796     GenTreePtr op = tree->gtOp.gtOp1;
16797     while (op->gtOper == GT_FIELD)
16798     {
16799         op = op->gtField.gtFldObj;
16800         if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL.
16801         {
16802             op = op->gtOp.gtOp1;
16803         }
16804         else
16805         {
16806             return false;
16807         }
16808     }
16809
16810     if (op->gtOper == GT_LCL_VAR)
16811     {
16812         *lclVarTreeOut = op;
16813         return TRUE;
16814     }
16815     else
16816     {
16817         return FALSE;
16818     }
16819 }
16820
16821 //------------------------------------------------------------------------
16822 // impMakeDiscretionaryInlineObservations: make observations that help
16823 // determine the profitability of a discretionary inline
16824 //
16825 // Arguments:
16826 //    pInlineInfo -- InlineInfo for the inline, or null for the prejit root
16827 //    inlineResult -- InlineResult accumulating information about this inline
16828 //
16829 // Notes:
16830 //    If inlining or prejitting the root, this method also makes
16831 //    various observations about the method that factor into inline
16832 //    decisions. It sets `compNativeSizeEstimate` as a side effect.
16833
16834 void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult)
16835 {
16836     assert(pInlineInfo != nullptr && compIsForInlining() || // Perform the actual inlining.
16837            pInlineInfo == nullptr && !compIsForInlining()   // Calculate the static inlining hint for ngen.
16838            );
16839
16840     // If we're really inlining, we should just have one result in play.
16841     assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult));
16842
16843     // If this is a "forceinline" method, the JIT probably shouldn't have gone
16844     // to the trouble of estimating the native code size. Even if it did, it
16845     // shouldn't be relying on the result of this method.
16846     assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE);
16847
16848     // Note if the caller contains NEWOBJ or NEWARR.
16849     Compiler* rootCompiler = impInlineRoot();
16850
16851     if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0)
16852     {
16853         inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY);
16854     }
16855
16856     if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0)
16857     {
16858         inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ);
16859     }
16860
16861     bool calleeIsStatic  = (info.compFlags & CORINFO_FLG_STATIC) != 0;
16862     bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0;
16863
16864     if (isSpecialMethod)
16865     {
16866         if (calleeIsStatic)
16867         {
16868             inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR);
16869         }
16870         else
16871         {
16872             inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR);
16873         }
16874     }
16875     else if (!calleeIsStatic)
16876     {
16877         // Callee is an instance method.
16878         //
16879         // Check if the callee has the same 'this' as the root.
16880         if (pInlineInfo != nullptr)
16881         {
16882             GenTreePtr thisArg = pInlineInfo->iciCall->gtCall.gtCallObjp;
16883             assert(thisArg);
16884             bool isSameThis = impIsThis(thisArg);
16885             inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis);
16886         }
16887     }
16888
16889     // Note if the callee's class is a promotable struct
16890     if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0)
16891     {
16892         lvaStructPromotionInfo structPromotionInfo;
16893         lvaCanPromoteStructType(info.compClassHnd, &structPromotionInfo, false);
16894         if (structPromotionInfo.canPromote)
16895         {
16896             inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE);
16897         }
16898     }
16899
16900 #ifdef FEATURE_SIMD
16901
16902     // Note if this method has SIMD args or a SIMD return value
16903     if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn)
16904     {
16905         inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD);
16906     }
16907
16908 #endif // FEATURE_SIMD
16909
16910     // Roughly classify callsite frequency.
16911     InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED;
16912
16913     // If this is a prejit root, or a maximally hot block...
16914     if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->bbWeight >= BB_MAX_WEIGHT))
16915     {
16916         frequency = InlineCallsiteFrequency::HOT;
16917     }
16918     // No training data.  Look for loop-like things.
16919     // We consider a recursive call loop-like.  Do not give the inlining boost to the method itself.
16920     // However, give it to things nearby.
16921     else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) &&
16922              (pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle))
16923     {
16924         frequency = InlineCallsiteFrequency::LOOP;
16925     }
16926     else if ((pInlineInfo->iciBlock->bbFlags & BBF_PROF_WEIGHT) && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT))
16927     {
16928         frequency = InlineCallsiteFrequency::WARM;
16929     }
16930     // Now modify the multiplier based on where we're called from.
16931     else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR))
16932     {
16933         frequency = InlineCallsiteFrequency::RARE;
16934     }
16935     else
16936     {
16937         frequency = InlineCallsiteFrequency::BORING;
16938     }
16939
16940     // Also capture the block weight of the call site.  In the prejit
16941     // root case, assume there's some hot call site for this method.
16942     unsigned weight = 0;
16943
16944     if (pInlineInfo != nullptr)
16945     {
16946         weight = pInlineInfo->iciBlock->bbWeight;
16947     }
16948     else
16949     {
16950         weight = BB_MAX_WEIGHT;
16951     }
16952
16953     inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency));
16954     inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, static_cast<int>(weight));
16955 }
16956
16957 /*****************************************************************************
16958  This method makes STATIC inlining decision based on the IL code.
16959  It should not make any inlining decision based on the context.
16960  If forceInline is true, then the inlining decision should not depend on
16961  performance heuristics (code size, etc.).
16962  */
16963
16964 void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
16965                               CORINFO_METHOD_INFO*  methInfo,
16966                               bool                  forceInline,
16967                               InlineResult*         inlineResult)
16968 {
16969     unsigned codeSize = methInfo->ILCodeSize;
16970
16971     // We shouldn't have made up our minds yet...
16972     assert(!inlineResult->IsDecided());
16973
16974     if (methInfo->EHcount)
16975     {
16976         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH);
16977         return;
16978     }
16979
16980     if ((methInfo->ILCode == nullptr) || (codeSize == 0))
16981     {
16982         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY);
16983         return;
16984     }
16985
16986     // For now we don't inline varargs (import code can't handle it)
16987
16988     if (methInfo->args.isVarArg())
16989     {
16990         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
16991         return;
16992     }
16993
16994     // Reject if it has too many locals.
16995     // This is currently an implementation limit due to fixed-size arrays in the
16996     // inline info, rather than a performance heuristic.
16997
16998     inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs);
16999
17000     if (methInfo->locals.numArgs > MAX_INL_LCLS)
17001     {
17002         inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS);
17003         return;
17004     }
17005
17006     // Make sure there aren't too many arguments.
17007     // This is currently an implementation limit due to fixed-size arrays in the
17008     // inline info, rather than a performance heuristic.
17009
17010     inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs);
17011
17012     if (methInfo->args.numArgs > MAX_INL_ARGS)
17013     {
17014         inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS);
17015         return;
17016     }
17017
17018     // Note force inline state
17019
17020     inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline);
17021
17022     // Note IL code size
17023
17024     inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize);
17025
17026     if (inlineResult->IsFailure())
17027     {
17028         return;
17029     }
17030
17031     // Make sure maxstack is not too big
17032
17033     inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack);
17034
17035     if (inlineResult->IsFailure())
17036     {
17037         return;
17038     }
17039 }
17040
17041 /*****************************************************************************
17042  */
17043
17044 void Compiler::impCheckCanInline(GenTreePtr             call,
17045                                  CORINFO_METHOD_HANDLE  fncHandle,
17046                                  unsigned               methAttr,
17047                                  CORINFO_CONTEXT_HANDLE exactContextHnd,
17048                                  InlineCandidateInfo**  ppInlineCandidateInfo,
17049                                  InlineResult*          inlineResult)
17050 {
17051     // Either the EE or the JIT might throw exceptions below.
17052     // If that happens, just don't inline the method.
17053
17054     struct Param
17055     {
17056         Compiler*              pThis;
17057         GenTreePtr             call;
17058         CORINFO_METHOD_HANDLE  fncHandle;
17059         unsigned               methAttr;
17060         CORINFO_CONTEXT_HANDLE exactContextHnd;
17061         InlineResult*          result;
17062         InlineCandidateInfo**  ppInlineCandidateInfo;
17063     } param = {nullptr};
17064
17065     param.pThis                 = this;
17066     param.call                  = call;
17067     param.fncHandle             = fncHandle;
17068     param.methAttr              = methAttr;
17069     param.exactContextHnd       = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle);
17070     param.result                = inlineResult;
17071     param.ppInlineCandidateInfo = ppInlineCandidateInfo;
17072
17073     bool success = eeRunWithErrorTrap<Param>(
17074         [](Param* pParam) {
17075             DWORD                  dwRestrictions = 0;
17076             CorInfoInitClassResult initClassResult;
17077
17078 #ifdef DEBUG
17079             const char* methodName;
17080             const char* className;
17081             methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className);
17082
17083             if (JitConfig.JitNoInline())
17084             {
17085                 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE);
17086                 goto _exit;
17087             }
17088 #endif
17089
17090             /* Try to get the code address/size for the method */
17091
17092             CORINFO_METHOD_INFO methInfo;
17093             if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo))
17094             {
17095                 pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO);
17096                 goto _exit;
17097             }
17098
17099             bool forceInline;
17100             forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE);
17101
17102             pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result);
17103
17104             if (pParam->result->IsFailure())
17105             {
17106                 assert(pParam->result->IsNever());
17107                 goto _exit;
17108             }
17109
17110             // Speculatively check if initClass() can be done.
17111             // If it can be done, we will try to inline the method. If inlining
17112             // succeeds, then we will do the non-speculative initClass() and commit it.
17113             // If this speculative call to initClass() fails, there is no point
17114             // trying to inline this method.
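            // Editorial note (not from the original source): CORINFO_INITCLASS_DONT_INLINE
            // is the EE's way of saying that the class initialization required by the callee
            // cannot be handled correctly at an inlined call site, so this particular inline
            // attempt is abandoned below.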
17115             initClassResult =
17116                 pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */,
17117                                                            pParam->exactContextHnd /* context */,
17118                                                            TRUE /* speculative */);
17119
17120             if (initClassResult & CORINFO_INITCLASS_DONT_INLINE)
17121             {
17122                 pParam->result->NoteFatal(InlineObservation::CALLSITE_CLASS_INIT_FAILURE_SPEC);
17123                 goto _exit;
17124             }
17125
17126             // Give the EE the final say in whether to inline or not.
17127             // This should be done last since, for verifiable code, this check can be expensive.
17128
17129             /* VM Inline check also ensures that the method is verifiable if needed */
17130             CorInfoInline vmResult;
17131             vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle,
17132                                                                   &dwRestrictions);
17133
17134             if (vmResult == INLINE_FAIL)
17135             {
17136                 pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE);
17137             }
17138             else if (vmResult == INLINE_NEVER)
17139             {
17140                 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE);
17141             }
17142
17143             if (pParam->result->IsFailure())
17144             {
17145                 // Make sure not to report this one.  It was already reported by the VM.
17146                 pParam->result->SetReported();
17147                 goto _exit;
17148             }
17149
17150             // check for unsupported inlining restrictions
17151             assert((dwRestrictions & ~(INLINE_RESPECT_BOUNDARY | INLINE_NO_CALLEE_LDSTR | INLINE_SAME_THIS)) == 0);
17152
17153             if (dwRestrictions & INLINE_SAME_THIS)
17154             {
17155                 GenTreePtr thisArg = pParam->call->gtCall.gtCallObjp;
17156                 assert(thisArg);
17157
17158                 if (!pParam->pThis->impIsThis(thisArg))
17159                 {
17160                     pParam->result->NoteFatal(InlineObservation::CALLSITE_REQUIRES_SAME_THIS);
17161                     goto _exit;
17162                 }
17163             }
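            // Editorial illustration (not from the original source; names are hypothetical):
            // the INLINE_SAME_THIS restriction handled above only permits inlining when the
            // call site passes the caller's own 'this', e.g. 'this.Helper()' passes the
            // impIsThis() check, whereas 'other.Helper()' is rejected with
            // CALLSITE_REQUIRES_SAME_THIS.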
17164
17165             /* Get the method properties */
17166
17167             CORINFO_CLASS_HANDLE clsHandle;
17168             clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle);
17169             unsigned clsAttr;
17170             clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle);
17171
17172             /* Get the return type */
17173
17174             var_types fncRetType;
17175             fncRetType = pParam->call->TypeGet();
17176
17177 #ifdef DEBUG
17178             var_types fncRealRetType;
17179             fncRealRetType = JITtype2varType(methInfo.args.retType);
17180
17181             assert((genActualType(fncRealRetType) == genActualType(fncRetType)) ||
17182                    // <BUGNUM> VSW 288602 </BUGNUM>
17183                    // In case of IJW, we allow assigning a native pointer to a BYREF.
17184                    (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) ||
17185                    (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT)));
17186 #endif
17187
17188             //
17189             // Allocate an InlineCandidateInfo structure
17190             //
17191             InlineCandidateInfo* pInfo;
17192             pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo;
17193
17194             pInfo->dwRestrictions  = dwRestrictions;
17195             pInfo->methInfo        = methInfo;
17196             pInfo->methAttr        = pParam->methAttr;
17197             pInfo->clsHandle       = clsHandle;
17198             pInfo->clsAttr         = clsAttr;
17199             pInfo->fncRetType      = fncRetType;
17200             pInfo->exactContextHnd = pParam->exactContextHnd;
17201             pInfo->ilCallerHandle  = pParam->pThis->info.compMethodHnd;
17202             pInfo->initClassResult = initClassResult;
17203
17204             *(pParam->ppInlineCandidateInfo) = pInfo;
17205
17206         _exit:;
17207         },
17208         &param);
17209     if (!success)
17210     {
17211         param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR);
17212     }
17213 }
17214
17215 void Compiler::impInlineRecordArgInfo(InlineInfo*   pInlineInfo,
17216                                       GenTreePtr    curArgVal,
17217                                       unsigned      argNum,
17218                                       InlineResult* inlineResult)
17219 {
17220     InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum];
17221
17222     if (curArgVal->gtOper == GT_MKREFANY)
17223     {
17224         inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY);
17225         return;
17226     }
17227
17228     inlCurArgInfo->argNode = curArgVal;
17229
17230     GenTreePtr lclVarTree;
17231     if (impIsAddressInLocal(curArgVal, &lclVarTree) && varTypeIsStruct(lclVarTree))
17232     {
17233         inlCurArgInfo->argIsByRefToStructLocal = true;
17234 #ifdef FEATURE_SIMD
17235         if (lvaTable[lclVarTree->AsLclVarCommon()->gtLclNum].lvSIMDType)
17236         {
17237             pInlineInfo->hasSIMDTypeArgLocalOrReturn = true;
17238         }
17239 #endif // FEATURE_SIMD
17240     }
17241
17242     if (curArgVal->gtFlags & GTF_ALL_EFFECT)
17243     {
17244         inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0;
17245         inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0;
17246     }
17247
17248     if (curArgVal->gtOper == GT_LCL_VAR)
17249     {
17250         inlCurArgInfo->argIsLclVar = true;
17251
17252         /* Remember the "original" argument number */
17253         curArgVal->gtLclVar.gtLclILoffs = argNum;
17254     }
17255
17256     if ((curArgVal->OperKind() & GTK_CONST) ||
17257         ((curArgVal->gtOper == GT_ADDR) && (curArgVal->gtOp.gtOp1->gtOper == GT_LCL_VAR)))
17258     {
17259         inlCurArgInfo->argIsInvariant = true;
17260         if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->gtIntCon.gtIconVal == 0))
17261         {
17262             /* Abort, but do not mark as not inlinable */
17263             inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS);
17264             return;
17265         }
17266     }
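    // Editorial illustration (hypothetical example, not from the original source): if the
    // 'this' argument folds to the constant null (a GT_CNS_INT of 0), say the caller emitted
    // a direct call on a null constant, the check above abandons this inline attempt with
    // CALLSITE_ARG_HAS_NULL_THIS; note that the callee itself is not marked as never-inlinable.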
17267
17268     if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal))
17269     {
17270         inlCurArgInfo->argHasLdargaOp = true;
17271     }
17272
17273 #ifdef DEBUG
17274     if (verbose)
17275     {
17276         if (inlCurArgInfo->argIsThis)
17277         {
17278             printf("thisArg:");
17279         }
17280         else
17281         {
17282             printf("\nArgument #%u:", argNum);
17283         }
17284         if (inlCurArgInfo->argIsLclVar)
17285         {
17286             printf(" is a local var");
17287         }
17288         if (inlCurArgInfo->argIsInvariant)
17289         {
17290             printf(" is a constant");
17291         }
17292         if (inlCurArgInfo->argHasGlobRef)
17293         {
17294             printf(" has global refs");
17295         }
17296         if (inlCurArgInfo->argHasSideEff)
17297         {
17298             printf(" has side effects");
17299         }
17300         if (inlCurArgInfo->argHasLdargaOp)
17301         {
17302             printf(" has ldarga effect");
17303         }
17304         if (inlCurArgInfo->argHasStargOp)
17305         {
17306             printf(" has starg effect");
17307         }
17308         if (inlCurArgInfo->argIsByRefToStructLocal)
17309         {
17310             printf(" is byref to a struct local");
17311         }
17312
17313         printf("\n");
17314         gtDispTree(curArgVal);
17315         printf("\n");
17316     }
17317 #endif
17318 }
17319
17320 /*****************************************************************************
17321  * Initializes the argument and local-variable information for the inline candidate.
17322  */
17323
17324 void Compiler::impInlineInitVars(InlineInfo* pInlineInfo)
17325 {
17326     assert(!compIsForInlining());
17327
17328     GenTreePtr           call         = pInlineInfo->iciCall;
17329     CORINFO_METHOD_INFO* methInfo     = &pInlineInfo->inlineCandidateInfo->methInfo;
17330     unsigned             clsAttr      = pInlineInfo->inlineCandidateInfo->clsAttr;
17331     InlArgInfo*          inlArgInfo   = pInlineInfo->inlArgInfo;
17332     InlLclVarInfo*       lclVarInfo   = pInlineInfo->lclVarInfo;
17333     InlineResult*        inlineResult = pInlineInfo->inlineResult;
17334
17335     const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo);
17336
17337     /* Init the argument struct */
17338
17339     memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0]));
17340
17341     /* Get hold of the 'this' pointer and the argument list proper */
17342
17343     GenTreePtr thisArg = call->gtCall.gtCallObjp;
17344     GenTreePtr argList = call->gtCall.gtCallArgs;
17345     unsigned   argCnt  = 0; // Count of the arguments
17346
17347     assert((methInfo->args.hasThis()) == (thisArg != nullptr));
17348
17349     if (thisArg)
17350     {
17351         inlArgInfo[0].argIsThis = true;
17352
17353         impInlineRecordArgInfo(pInlineInfo, thisArg, argCnt, inlineResult);
17354
17355         if (inlineResult->IsFailure())
17356         {
17357             return;
17358         }
17359
17360         /* Increment the argument count */
17361         argCnt++;
17362     }
17363
17364     /* Record some information about each of the arguments */
17365     bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0;
17366
17367 #if USER_ARGS_COME_LAST
17368     unsigned typeCtxtArg = thisArg ? 1 : 0;
17369 #else  // USER_ARGS_COME_LAST
17370     unsigned typeCtxtArg = methInfo->args.totalILArgs();
17371 #endif // USER_ARGS_COME_LAST
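    // Editorial note (not from the original source): the "type context" argument is the hidden
    // parameter added when CORINFO_CALLCONV_PARAMTYPE is set, e.g. for shared generic code
    // where a runtime handle describing the instantiation is passed explicitly. Its position
    // differs by target (right after 'this' versus after all user arguments), which is what
    // the #if above accounts for; the loop below simply skips it.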
17372
17373     for (GenTreePtr argTmp = argList; argTmp; argTmp = argTmp->gtOp.gtOp2)
17374     {
17375         if (argTmp == argList && hasRetBuffArg)
17376         {
17377             continue;
17378         }
17379
17380         // Ignore the type context argument
17381         if (hasTypeCtxtArg && (argCnt == typeCtxtArg))
17382         {
17383             typeCtxtArg = 0xFFFFFFFF;
17384             continue;
17385         }
17386
17387         assert(argTmp->gtOper == GT_LIST);
17388         GenTreePtr argVal = argTmp->gtOp.gtOp1;
17389
17390         impInlineRecordArgInfo(pInlineInfo, argVal, argCnt, inlineResult);
17391
17392         if (inlineResult->IsFailure())
17393         {
17394             return;
17395         }
17396
17397         /* Increment the argument count */
17398         argCnt++;
17399     }
17400
17401     /* Make sure we got the arg number right */
17402     assert(argCnt == methInfo->args.totalILArgs());
17403
17404 #ifdef FEATURE_SIMD
17405     bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn;
17406 #endif // FEATURE_SIMD
17407
17408     /* We have typeless opcodes, get type information from the signature */
17409
17410     if (thisArg)
17411     {
17412         var_types sigType;
17413
17414         if (clsAttr & CORINFO_FLG_VALUECLASS)
17415         {
17416             sigType = TYP_BYREF;
17417         }
17418         else
17419         {
17420             sigType = TYP_REF;
17421         }
17422
17423         lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle);
17424         lclVarInfo[0].lclHasLdlocaOp = false;
17425
17426 #ifdef FEATURE_SIMD
17427         // We always want to check isSIMDClass, since we want to set foundSIMDType (to increase
17428         // the inlining multiplier) for anything in that assembly.
17429         // But we only need to normalize it if it is a TYP_STRUCT
17430         // (which we need to do even if we have already set foundSIMDType).
17431         if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
17432         {
17433             if (sigType == TYP_STRUCT)
17434             {
17435                 sigType = impNormStructType(lclVarInfo[0].lclVerTypeInfo.GetClassHandle());
17436             }
17437             foundSIMDType = true;
17438         }
17439 #endif // FEATURE_SIMD
17440         lclVarInfo[0].lclTypeInfo = sigType;
17441
17442         assert(varTypeIsGC(thisArg->gtType) ||   // "this" is managed
17443                (thisArg->gtType == TYP_I_IMPL && // "this" is unmanaged but the method's class doesn't care
17444                 (clsAttr & CORINFO_FLG_VALUECLASS)));
17445
17446         if (genActualType(thisArg->gtType) != genActualType(sigType))
17447         {
17448             if (sigType == TYP_REF)
17449             {
17450                 /* The argument cannot be bashed into a ref (see bug 750871) */
17451                 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF);
17452                 return;
17453             }
17454
17455             /* This can only happen with byrefs <-> ints/shorts */
17456
17457             assert(genActualType(sigType) == TYP_I_IMPL || sigType == TYP_BYREF);
17458             assert(genActualType(thisArg->gtType) == TYP_I_IMPL || thisArg->gtType == TYP_BYREF);
17459
17460             if (sigType == TYP_BYREF)
17461             {
17462                 lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17463             }
17464             else if (thisArg->gtType == TYP_BYREF)
17465             {
17466                 assert(sigType == TYP_I_IMPL);
17467
17468                 /* If possible change the BYREF to an int */
17469                 if (thisArg->IsVarAddr())
17470                 {
17471                     thisArg->gtType              = TYP_I_IMPL;
17472                     lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17473                 }
17474                 else
17475                 {
17476                     /* Arguments 'int <- byref' cannot be bashed */
17477                     inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
17478                     return;
17479                 }
17480             }
17481         }
17482     }
17483
17484     /* Init the types of the arguments and make sure the types
17485      * from the trees match the types in the signature */
17486
17487     CORINFO_ARG_LIST_HANDLE argLst;
17488     argLst = methInfo->args.args;
17489
17490     unsigned i;
17491     for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst))
17492     {
17493         var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args);
17494
17495         lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst);
17496
17497 #ifdef FEATURE_SIMD
17498         if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
17499         {
17500             // If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've
17501             // found a SIMD type, even if this may not be a type we recognize (the assumption is that
17502             // it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier).
17503             foundSIMDType = true;
17504             if (sigType == TYP_STRUCT)
17505             {
17506                 var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle());
17507                 sigType              = structType;
17508             }
17509         }
17510 #endif // FEATURE_SIMD
17511
17512         lclVarInfo[i].lclTypeInfo    = sigType;
17513         lclVarInfo[i].lclHasLdlocaOp = false;
17514
17515         /* Does the tree type match the signature type? */
17516
17517         GenTreePtr inlArgNode = inlArgInfo[i].argNode;
17518
17519         if (sigType != inlArgNode->gtType)
17520         {
17521             /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints,
17522                but in bad IL cases with caller-callee signature mismatches we can see other types.
17523                Intentionally reject the mismatched cases (rather than asserting) so the jit stays
17524                robust when encountering bad IL. */
17525
17526             bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) ||
17527                                         (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) ||
17528                                         (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType));
17529
17530             if (!isPlausibleTypeMatch)
17531             {
17532                 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE);
17533                 return;
17534             }
17535
17536             /* Is it a narrowing or widening cast?
17537              * Widening casts are ok since the value computed is already
17538              * normalized to an int (on the IL stack) */
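            // Editorial illustration (not from the original source): an 'int'-typed tree being
            // passed to a parameter declared as 'short' or 'byte' falls into the narrowing case
            // below, where a cast to the small signature type is inserted (and folded right away
            // if the argument is a constant).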
17539
17540             if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType))
17541             {
17542                 if (sigType == TYP_BYREF)
17543                 {
17544                     lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17545                 }
17546                 else if (inlArgNode->gtType == TYP_BYREF)
17547                 {
17548                     assert(varTypeIsIntOrI(sigType));
17549
17550                     /* If possible bash the BYREF to an int */
17551                     if (inlArgNode->IsVarAddr())
17552                     {
17553                         inlArgNode->gtType           = TYP_I_IMPL;
17554                         lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17555                     }
17556                     else
17557                     {
17558                         /* Arguments 'int <- byref' cannot be changed */
17559                         inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
17560                         return;
17561                     }
17562                 }
17563                 else if (genTypeSize(sigType) < EA_PTRSIZE)
17564                 {
17565                     /* Narrowing cast */
17566
17567                     if (inlArgNode->gtOper == GT_LCL_VAR &&
17568                         !lvaTable[inlArgNode->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad() &&
17569                         sigType == lvaGetRealType(inlArgNode->gtLclVarCommon.gtLclNum))
17570                     {
17571                         /* We don't need to insert a cast here as the variable
17572                            was assigned a normalized value of the right type */
17573
17574                         continue;
17575                     }
17576
17577                     inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(TYP_INT, inlArgNode, sigType);
17578
17579                     inlArgInfo[i].argIsLclVar = false;
17580
17581                     /* Try to fold the node in case we have constant arguments */
17582
17583                     if (inlArgInfo[i].argIsInvariant)
17584                     {
17585                         inlArgNode            = gtFoldExprConst(inlArgNode);
17586                         inlArgInfo[i].argNode = inlArgNode;
17587                         assert(inlArgNode->OperIsConst());
17588                     }
17589                 }
17590 #ifdef _TARGET_64BIT_
17591                 else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType))
17592                 {
17593                     // This should only happen for int -> native int widening
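                    // Editorial illustration (not from the original source): e.g. an 'int'
                    // argument supplied for a parameter declared as native int (IntPtr in C#)
                    // is widened here with an explicit cast to the pointer-sized type.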
17594                     inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(genActualType(sigType), inlArgNode, sigType);
17595
17596                     inlArgInfo[i].argIsLclVar = false;
17597
17598                     /* Try to fold the node in case we have constant arguments */
17599
17600                     if (inlArgInfo[i].argIsInvariant)
17601                     {
17602                         inlArgNode            = gtFoldExprConst(inlArgNode);
17603                         inlArgInfo[i].argNode = inlArgNode;
17604                         assert(inlArgNode->OperIsConst());
17605                     }
17606                 }
17607 #endif // _TARGET_64BIT_
17608             }
17609         }
17610     }
17611
17612     /* Init the types of the local variables */
17613
17614     CORINFO_ARG_LIST_HANDLE localsSig;
17615     localsSig = methInfo->locals.args;
17616
17617     for (i = 0; i < methInfo->locals.numArgs; i++)
17618     {
17619         bool      isPinned;
17620         var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned);
17621
17622         lclVarInfo[i + argCnt].lclHasLdlocaOp = false;
17623         lclVarInfo[i + argCnt].lclIsPinned    = isPinned;
17624         lclVarInfo[i + argCnt].lclTypeInfo    = type;
17625
17626         if (isPinned)
17627         {
17628             // Pinned locals may cause inlines to fail.
17629             inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS);
17630             if (inlineResult->IsFailure())
17631             {
17632                 return;
17633             }
17634         }
17635
17636         lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig);
17637
17638         // If this local is a struct type with GC fields, inform the inliner. It may choose to bail
17639         // out on the inline.
17640         if (type == TYP_STRUCT)
17641         {
17642             CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle();
17643             DWORD                typeFlags = info.compCompHnd->getClassAttribs(lclHandle);
17644             if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
17645             {
17646                 inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
17647                 if (inlineResult->IsFailure())
17648                 {
17649                     return;
17650                 }
17651
17652                 // Do further notification in the case where the call site is rare; some policies do
17653                 // not track the relative hotness of call sites for "always" inline cases.
17654                 if (pInlineInfo->iciBlock->isRunRarely())
17655                 {
17656                     inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
17657                     if (inlineResult->IsFailure())
17658                     {
17659
17660                         return;
17661                     }
17662                 }
17663             }
17664         }
17665
17666         localsSig = info.compCompHnd->getArgNext(localsSig);
17667
17668 #ifdef FEATURE_SIMD
17669         if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo)))
17670         {
17671             foundSIMDType = true;
17672             if (featureSIMD && type == TYP_STRUCT)
17673             {
17674                 var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle());
17675                 lclVarInfo[i + argCnt].lclTypeInfo = structType;
17676             }
17677         }
17678 #endif // FEATURE_SIMD
17679     }
17680
17681 #ifdef FEATURE_SIMD
17682     if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDClass(call->AsCall()->gtRetClsHnd))
17683     {
17684         foundSIMDType = true;
17685     }
17686     pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType;
17687 #endif // FEATURE_SIMD
17688 }
17689
17690 unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason))
17691 {
17692     assert(compIsForInlining());
17693
17694     unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum];
17695
17696     if (tmpNum == BAD_VAR_NUM)
17697     {
17698         var_types lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
17699
17700         // The lifetime of this local might span multiple BBs.
17701         // So it is a long lifetime local.
17702         impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason));
17703
17704         lvaTable[tmpNum].lvType = lclTyp;
17705         if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclHasLdlocaOp)
17706         {
17707             lvaTable[tmpNum].lvHasLdAddrOp = 1;
17708         }
17709
17710         if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclIsPinned)
17711         {
17712             lvaTable[tmpNum].lvPinned = 1;
17713
17714             if (!impInlineInfo->hasPinnedLocals)
17715             {
17716                 // If the inlinee returns a value, use a spill temp
17717                 // for the return value to ensure that even in case
17718                 // where the return expression refers to one of the
17719                 // pinned locals, we can unpin the local right after
17720                 // the inlined method body.
17721                 if ((info.compRetNativeType != TYP_VOID) && (lvaInlineeReturnSpillTemp == BAD_VAR_NUM))
17722                 {
17723                     lvaInlineeReturnSpillTemp =
17724                         lvaGrabTemp(false DEBUGARG("Inline candidate pinned local return spill temp"));
17725                     lvaTable[lvaInlineeReturnSpillTemp].lvType = info.compRetNativeType;
17726                 }
17727             }
17728
17729             impInlineInfo->hasPinnedLocals = true;
17730         }
17731
17732         if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo.IsStruct())
17733         {
17734             if (varTypeIsStruct(lclTyp))
17735             {
17736                 lvaSetStruct(tmpNum,
17737                              impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo.GetClassHandle(),
17738                              true /* unsafe value cls check */);
17739             }
17740             else
17741             {
17742                 // This is a wrapped primitive. Make sure the verification state knows that.
17743                 lvaTable[tmpNum].lvVerTypeInfo =
17744                     impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo;
17745             }
17746         }
17747     }
17748
17749     return tmpNum;
17750 }
17751
17752 // Returns the GenTree (usually a GT_LCL_VAR) representing an argument of the inlined method.
17753 // Only use this method for the arguments of the inlinee method.
17754 // !!! Do not use it for the locals of the inlinee method. !!!
17755
17756 GenTreePtr Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo)
17757 {
17758     /* Get the argument type */
17759     var_types lclTyp = lclVarInfo[lclNum].lclTypeInfo;
17760
17761     GenTreePtr op1 = nullptr;
17762
17763     // constant or address of local
17764     if (inlArgInfo[lclNum].argIsInvariant && !inlArgInfo[lclNum].argHasLdargaOp && !inlArgInfo[lclNum].argHasStargOp)
17765     {
17766         /* Clone the constant. Note that we cannot directly use argNode
17767         in the trees even if inlArgInfo[lclNum].argIsUsed==false as this
17768         would introduce aliasing between inlArgInfo[].argNode and
17769         impInlineExpr. Then gtFoldExpr() could change it, causing further
17770         references to the argument to work off of the bashed copy. */
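        // Editorial illustration (not from the original source): if the call site passes the
        // literal 42 and the inlinee reads that parameter twice, each use gets its own clone of
        // the constant node; otherwise a later gtFoldExpr() on one use could rewrite the shared
        // node and silently change what the other use sees.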
17771
17772         op1 = gtCloneExpr(inlArgInfo[lclNum].argNode);
17773         PREFIX_ASSUME(op1 != nullptr);
17774         inlArgInfo[lclNum].argTmpNum = (unsigned)-1; // illegal temp
17775     }
17776     else if (inlArgInfo[lclNum].argIsLclVar && !inlArgInfo[lclNum].argHasLdargaOp && !inlArgInfo[lclNum].argHasStargOp)
17777     {
17778         /* Argument is a local variable (of the caller)
17779          * Can we re-use the passed argument node? */
17780
17781         op1                          = inlArgInfo[lclNum].argNode;
17782         inlArgInfo[lclNum].argTmpNum = op1->gtLclVarCommon.gtLclNum;
17783
17784         if (inlArgInfo[lclNum].argIsUsed)
17785         {
17786             assert(op1->gtOper == GT_LCL_VAR);
17787             assert(lclNum == op1->gtLclVar.gtLclILoffs);
17788
17789             if (!lvaTable[op1->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad())
17790             {
17791                 lclTyp = genActualType(lclTyp);
17792             }
17793
17794             /* Create a new lcl var node - remember the argument lclNum */
17795             op1 = gtNewLclvNode(op1->gtLclVarCommon.gtLclNum, lclTyp, op1->gtLclVar.gtLclILoffs);
17796         }
17797     }
17798     else if (inlArgInfo[lclNum].argIsByRefToStructLocal && !inlArgInfo[lclNum].argHasStargOp)
17799     {
17800         /* Argument is a by-ref address to a struct, a normed struct, or its field.
17801            In these cases, don't spill the byref to a local, simply clone the tree and use it.
17802            This way we will increase the chance for this byref to be optimized away by
17803            a subsequent "dereference" operation.
17804
17805            From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree
17806            (in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal.
17807            For example, if the caller is:
17808                 ldloca.s   V_1  // V_1 is a local struct
17809                 call       void Test.ILPart::RunLdargaOnPointerArg(int32*)
17810            and the callee being inlined has:
17811                 .method public static void  RunLdargaOnPointerArg(int32* ptrToInts) cil managed
17812                     ldarga.s   ptrToInts
17813                     call       void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**)
17814            then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll
17815            soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR.
17816         */
17817         assert(inlArgInfo[lclNum].argNode->TypeGet() == TYP_BYREF ||
17818                inlArgInfo[lclNum].argNode->TypeGet() == TYP_I_IMPL);
17819         op1 = gtCloneExpr(inlArgInfo[lclNum].argNode);
17820     }
17821     else
17822     {
17823         /* Argument is a complex expression - it must be evaluated into a temp */
17824
17825         if (inlArgInfo[lclNum].argHasTmp)
17826         {
17827             assert(inlArgInfo[lclNum].argIsUsed);
17828             assert(inlArgInfo[lclNum].argTmpNum < lvaCount);
17829
17830             /* Create a new lcl var node - remember the argument lclNum */
17831             op1 = gtNewLclvNode(inlArgInfo[lclNum].argTmpNum, genActualType(lclTyp));
17832
17833             /* This is the second or later use of this argument,
17834             so we have to use the temp (instead of the actual arg) */
17835             inlArgInfo[lclNum].argBashTmpNode = nullptr;
17836         }
17837         else
17838         {
17839             /* First time use */
17840             assert(inlArgInfo[lclNum].argIsUsed == false);
17841
17842             /* Reserve a temp for the expression.
17843             * Use a large size node as we may change it later */
17844
17845             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg"));
17846
17847             lvaTable[tmpNum].lvType = lclTyp;
17848             assert(lvaTable[tmpNum].lvAddrExposed == 0);
17849             if (inlArgInfo[lclNum].argHasLdargaOp)
17850             {
17851                 lvaTable[tmpNum].lvHasLdAddrOp = 1;
17852             }
17853
17854             if (lclVarInfo[lclNum].lclVerTypeInfo.IsStruct())
17855             {
17856                 if (varTypeIsStruct(lclTyp))
17857                 {
17858                     lvaSetStruct(tmpNum, impInlineInfo->lclVarInfo[lclNum].lclVerTypeInfo.GetClassHandle(),
17859                                  true /* unsafe value cls check */);
17860                 }
17861                 else
17862                 {
17863                     // This is a wrapped primitive. Make sure the verification state knows that.
17864                     lvaTable[tmpNum].lvVerTypeInfo = impInlineInfo->lclVarInfo[lclNum].lclVerTypeInfo;
17865                 }
17866             }
17867
17868             inlArgInfo[lclNum].argHasTmp = true;
17869             inlArgInfo[lclNum].argTmpNum = tmpNum;
17870
17871             // If we require strict exception order, then arguments must
17872             // be evaluated in sequence before the body of the inlined method.
17873             // So we need to evaluate them to a temp.
17874             // Also, if arguments have global references, we need to
17875             // evaluate them to a temp before the inlined body as the
17876             // inlined body may be modifying the global ref.
17877             // TODO-1stClassStructs: We currently do not reuse an existing lclVar
17878             // if it is a struct, because it requires some additional handling.
17879
17880             if (!varTypeIsStruct(lclTyp) && (!inlArgInfo[lclNum].argHasSideEff) && (!inlArgInfo[lclNum].argHasGlobRef))
17881             {
17882                 /* Get a *LARGE* LCL_VAR node */
17883                 op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp), lclNum);
17884
17885                 /* Record op1 as the very first use of this argument.
17886                 If there are no further uses of the arg, we may be
17887                 able to use the actual arg node instead of the temp.
17888                 If we do see any further uses, we will clear this. */
17889                 inlArgInfo[lclNum].argBashTmpNode = op1;
17890             }
17891             else
17892             {
17893                 /* Get a small LCL_VAR node */
17894                 op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp));
17895                 /* No bashing of this argument */
17896                 inlArgInfo[lclNum].argBashTmpNode = nullptr;
17897             }
17898         }
17899     }
17900
17901     /* Mark the argument as used */
17902
17903     inlArgInfo[lclNum].argIsUsed = true;
17904
17905     return op1;
17906 }
17907
17908 /******************************************************************************
17909  Is this the original "this" argument to the call being inlined?
17910
17911  Note that we do not inline methods with "starg 0", and so we do not need to
17912  worry about the "this" argument being reassigned.
17913 */
17914
17915 BOOL Compiler::impInlineIsThis(GenTreePtr tree, InlArgInfo* inlArgInfo)
17916 {
17917     assert(compIsForInlining());
17918     return (tree->gtOper == GT_LCL_VAR && tree->gtLclVarCommon.gtLclNum == inlArgInfo[0].argTmpNum);
17919 }
17920
17921 //-----------------------------------------------------------------------------
17922 // This function checks if a dereference in the inlinee can guarantee that
17923 // the "this" is non-NULL.
17924 // If we haven't hit a branch or a side effect, and we are dereferencing
17925 // from 'this' to access a field or make a GTF_CALL_NULLCHECK call,
17926 // then we can avoid a separate null pointer check.
17927 //
17928 // "additionalTreesToBeEvaluatedBefore"
17929 // is the set of pending trees that have not yet been added to the statement list,
17930 // and which have been removed from verCurrentState.esStack[]
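// Editorial illustration (not from the original source; the method is hypothetical): for an
// inlinee such as
//     int GetX() { return this.x; }
// the first thing the body does is dereference 'this', with no intervening branches or side
// effects, so the separate null pointer check on 'this' can be skipped.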
17931
17932 BOOL Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTreePtr  additionalTreesToBeEvaluatedBefore,
17933                                                                   GenTreePtr  variableBeingDereferenced,
17934                                                                   InlArgInfo* inlArgInfo)
17935 {
17936     assert(compIsForInlining());
17937     assert(opts.OptEnabled(CLFLG_INLINING));
17938
17939     BasicBlock* block = compCurBB;
17940
17941     GenTreePtr stmt;
17942     GenTreePtr expr;
17943
17944     if (block != fgFirstBB)
17945     {
17946         return FALSE;
17947     }
17948
17949     if (!impInlineIsThis(variableBeingDereferenced, inlArgInfo))
17950     {
17951         return FALSE;
17952     }
17953
17954     if (additionalTreesToBeEvaluatedBefore &&
17955         GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTreesToBeEvaluatedBefore->gtFlags))
17956     {
17957         return FALSE;
17958     }
17959
17960     for (stmt = impTreeList->gtNext; stmt; stmt = stmt->gtNext)
17961     {
17962         expr = stmt->gtStmt.gtStmtExpr;
17963
17964         if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags))
17965         {
17966             return FALSE;
17967         }
17968     }
17969
17970     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
17971     {
17972         unsigned stackTreeFlags = verCurrentState.esStack[level].val->gtFlags;
17973         if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags))
17974         {
17975             return FALSE;
17976         }
17977     }
17978
17979     return TRUE;
17980 }
17981
17982 /******************************************************************************/
17983 // Check the inlining eligibility of this GT_CALL node.
17984 // Mark GTF_CALL_INLINE_CANDIDATE on the GT_CALL node
17985
17986 // Todo: find a way to record the failure reasons in the IR (or
17987 // otherwise build tree context) so when we do the inlining pass we
17988 // can capture these reasons
17989
17990 void Compiler::impMarkInlineCandidate(GenTreePtr             callNode,
17991                                       CORINFO_CONTEXT_HANDLE exactContextHnd,
17992                                       CORINFO_CALL_INFO*     callInfo)
17993 {
17994     // Let the strategy know there's another call
17995     impInlineRoot()->m_inlineStrategy->NoteCall();
17996
17997     if (!opts.OptEnabled(CLFLG_INLINING))
17998     {
17999         /* XXX Mon 8/18/2008
18000          * This assert is misleading.  The caller does not ensure that we have CLFLG_INLINING set before
18001          * calling impMarkInlineCandidate.  However, if this assert trips it means that we're an inlinee and
18002          * CLFLG_MINOPT is set.  That doesn't make a lot of sense.  If you hit this assert, work back and
18003          * figure out why we did not set MAXOPT for this compile.
18004          */
18005         assert(!compIsForInlining());
18006         return;
18007     }
18008
18009     if (compIsForImportOnly())
18010     {
18011         // Don't bother creating the inline candidate during verification.
18012         // Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification
18013         // that leads to the creation of multiple instances of Compiler.
18014         return;
18015     }
18016
18017     GenTreeCall* call = callNode->AsCall();
18018     InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate");
18019
18020     // Don't inline if not optimizing root method
18021     if (opts.compDbgCode)
18022     {
18023         inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN);
18024         return;
18025     }
18026
18027     // Don't inline if inlining into root method is disabled.
18028     if (InlineStrategy::IsNoInline(info.compCompHnd, info.compMethodHnd))
18029     {
18030         inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE);
18031         return;
18032     }
18033
18034     // Inlining candidate determination needs to honor only the IL tail prefix.
18035     // Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive).
18036     if (call->IsTailPrefixedCall())
18037     {
18038         inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX);
18039         return;
18040     }
18041
18042     // Tail recursion elimination takes precedence over inlining.
18043     // TODO: We may want to do some of the additional checks from fgMorphCall
18044     // here to reduce the chance we don't inline a call that won't be optimized
18045     // as a fast tail call or turned into a loop.
18046     if (gtIsRecursiveCall(call) && call->IsImplicitTailCall())
18047     {
18048         inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL);
18049         return;
18050     }
18051
18052     if ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT)
18053     {
18054         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT);
18055         return;
18056     }
18057
18058     /* Ignore helper calls */
18059
18060     if (call->gtCallType == CT_HELPER)
18061     {
18062         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER);
18063         return;
18064     }
18065
18066     /* Ignore indirect calls */
18067     if (call->gtCallType == CT_INDIRECT)
18068     {
18069         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED);
18070         return;
18071     }
18072
18073     /* I removed the check for BBJ_THROW.  BBJ_THROW is usually marked as rarely run.  This more or less
18074      * restricts the inliner to non-expanding inlines.  I removed the check to allow for non-expanding
18075      * inlining in throw blocks.  I should consider the same thing for catch and filter regions. */
18076
18077     CORINFO_METHOD_HANDLE fncHandle = call->gtCallMethHnd;
18078     unsigned              methAttr;
18079
18080     // Reuse method flags from the original callInfo if possible
18081     if (fncHandle == callInfo->hMethod)
18082     {
18083         methAttr = callInfo->methodFlags;
18084     }
18085     else
18086     {
18087         methAttr = info.compCompHnd->getMethodAttribs(fncHandle);
18088     }
18089
18090 #ifdef DEBUG
18091     if (compStressCompile(STRESS_FORCE_INLINE, 0))
18092     {
18093         methAttr |= CORINFO_FLG_FORCEINLINE;
18094     }
18095 #endif
18096
18097     // Check for COMPlus_AggressiveInlining
18098     if (compDoAggressiveInlining)
18099     {
18100         methAttr |= CORINFO_FLG_FORCEINLINE;
18101     }
18102
18103     if (!(methAttr & CORINFO_FLG_FORCEINLINE))
18104     {
18105         /* Don't bother inlining call sites in blocks that are in the catch handler or filter regions */
18106         if (bbInCatchHandlerILRange(compCurBB))
18107         {
18108 #ifdef DEBUG
18109             if (verbose)
18110             {
18111                 printf("\nWill not inline blocks that are in the catch handler region\n");
18112             }
18113
18114 #endif
18115
18116             inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH);
18117             return;
18118         }
18119
18120         if (bbInFilterILRange(compCurBB))
18121         {
18122 #ifdef DEBUG
18123             if (verbose)
18124             {
18125                 printf("\nWill not inline blocks that are in the filter region\n");
18126             }
18127 #endif
18128
18129             inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER);
18130             return;
18131         }
18132     }
18133
18134     /* If the caller's stack frame is marked, then we can't do any inlining. Period. */
18135
18136     if (opts.compNeedSecurityCheck)
18137     {
18138         inlineResult.NoteFatal(InlineObservation::CALLER_NEEDS_SECURITY_CHECK);
18139         return;
18140     }
18141
18142     /* Check if we tried to inline this method before */
18143
18144     if (methAttr & CORINFO_FLG_DONT_INLINE)
18145     {
18146         inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE);
18147         return;
18148     }
18149
18150     /* Cannot inline synchronized methods */
18151
18152     if (methAttr & CORINFO_FLG_SYNCH)
18153     {
18154         inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED);
18155         return;
18156     }
18157
18158     /* Do not inline if callee needs security checks (since they would then mark the wrong frame) */
18159
18160     if (methAttr & CORINFO_FLG_SECURITYCHECK)
18161     {
18162         inlineResult.NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
18163         return;
18164     }
18165
18166     InlineCandidateInfo* inlineCandidateInfo = nullptr;
18167     impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult);
18168
18169     if (inlineResult.IsFailure())
18170     {
18171         return;
18172     }
18173
18174     // The old value should be NULL
18175     assert(call->gtInlineCandidateInfo == nullptr);
18176
18177     call->gtInlineCandidateInfo = inlineCandidateInfo;
18178
18179     // Mark the call node as inline candidate.
18180     call->gtFlags |= GTF_CALL_INLINE_CANDIDATE;
18181
18182     // Let the strategy know there's another candidate.
18183     impInlineRoot()->m_inlineStrategy->NoteCandidate();
18184
18185     // Since we're not actually inlining yet, and this call site is
18186     // still just an inline candidate, there's nothing to report.
18187     inlineResult.SetReported();
18188 }
18189
18190 /******************************************************************************/
18191 // Returns true if the given intrinsic will be implemented by target-specific
18192 // instructions
18193
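// Editorial illustration (not from the original source): on AMD64, CORINFO_INTRINSIC_Sqrt can be
// emitted as an SSE2 square-root instruction (e.g. sqrtsd), so this predicate returns true for it,
// while Math.Sin has no direct instruction there and is left to a System.Math call (see
// IsIntrinsicImplementedByUserCall below).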
18194 bool Compiler::IsTargetIntrinsic(CorInfoIntrinsics intrinsicId)
18195 {
18196 #if defined(_TARGET_AMD64_) || (defined(_TARGET_X86_) && !defined(LEGACY_BACKEND))
18197     switch (intrinsicId)
18198     {
18199         // Amd64 only has SSE2 instructions to directly compute sqrt/abs.
18200         //
18201         // TODO: Because the x86 backend only targets SSE for floating-point code,
18202         //       it does not treat Sine, Cosine, or Round as intrinsics (JIT32
18203         //       implemented those intrinsics as x87 instructions). If this poses
18204         //       a CQ problem, it may be necessary to change the implementation of
18205         //       the helper calls to decrease call overhead or switch back to the
18206         //       x87 instructions. This is tracked by #7097.
18207         case CORINFO_INTRINSIC_Sqrt:
18208         case CORINFO_INTRINSIC_Abs:
18209             return true;
18210
18211         default:
18212             return false;
18213     }
18214 #elif defined(_TARGET_ARM64_)
18215     switch (intrinsicId)
18216     {
18217         case CORINFO_INTRINSIC_Sqrt:
18218         case CORINFO_INTRINSIC_Abs:
18219         case CORINFO_INTRINSIC_Round:
18220             return true;
18221
18222         default:
18223             return false;
18224     }
18225 #elif defined(_TARGET_ARM_)
18226     switch (intrinsicId)
18227     {
18228         case CORINFO_INTRINSIC_Sqrt:
18229         case CORINFO_INTRINSIC_Abs:
18230         case CORINFO_INTRINSIC_Round:
18231             return true;
18232
18233         default:
18234             return false;
18235     }
18236 #elif defined(_TARGET_X86_)
18237     switch (intrinsicId)
18238     {
18239         case CORINFO_INTRINSIC_Sin:
18240         case CORINFO_INTRINSIC_Cos:
18241         case CORINFO_INTRINSIC_Sqrt:
18242         case CORINFO_INTRINSIC_Abs:
18243         case CORINFO_INTRINSIC_Round:
18244             return true;
18245
18246         default:
18247             return false;
18248     }
18249 #else
18250     // TODO: This portion of the logic is not implemented for other architectures.
18251     // The reason for returning true is that on all other architectures the only
18252     // intrinsics enabled are target intrinsics.
18253     return true;
18254 #endif //_TARGET_AMD64_
18255 }
18256
18257 /******************************************************************************/
18258 // Returns true if the given intrinsic will be implemented by calling System.Math
18259 // methods.
18260
18261 bool Compiler::IsIntrinsicImplementedByUserCall(CorInfoIntrinsics intrinsicId)
18262 {
18263     // Currently, if a math intrinsic is not implemented by target-specific
18264     // instructions, it will be implemented by a System.Math call. In the
18265     // future, if we turn to implementing some of them with helper calls,
18266     // this predicate needs to be revisited.
18267     return !IsTargetIntrinsic(intrinsicId);
18268 }
18269
18270 bool Compiler::IsMathIntrinsic(CorInfoIntrinsics intrinsicId)
18271 {
18272     switch (intrinsicId)
18273     {
18274         case CORINFO_INTRINSIC_Sin:
18275         case CORINFO_INTRINSIC_Sqrt:
18276         case CORINFO_INTRINSIC_Abs:
18277         case CORINFO_INTRINSIC_Cos:
18278         case CORINFO_INTRINSIC_Round:
18279         case CORINFO_INTRINSIC_Cosh:
18280         case CORINFO_INTRINSIC_Sinh:
18281         case CORINFO_INTRINSIC_Tan:
18282         case CORINFO_INTRINSIC_Tanh:
18283         case CORINFO_INTRINSIC_Asin:
18284         case CORINFO_INTRINSIC_Acos:
18285         case CORINFO_INTRINSIC_Atan:
18286         case CORINFO_INTRINSIC_Atan2:
18287         case CORINFO_INTRINSIC_Log10:
18288         case CORINFO_INTRINSIC_Pow:
18289         case CORINFO_INTRINSIC_Exp:
18290         case CORINFO_INTRINSIC_Ceiling:
18291         case CORINFO_INTRINSIC_Floor:
18292             return true;
18293         default:
18294             return false;
18295     }
18296 }
18297
18298 bool Compiler::IsMathIntrinsic(GenTreePtr tree)
18299 {
18300     return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->gtIntrinsic.gtIntrinsicId);
18301 }
18302 /*****************************************************************************/