1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
4
5 /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
6 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
7 XX                                                                           XX
8 XX                           Importer                                        XX
9 XX                                                                           XX
10 XX   Imports the given method and converts it to semantic trees              XX
11 XX                                                                           XX
12 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
13 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
14 */
15
16 #include "jitpch.h"
17 #ifdef _MSC_VER
18 #pragma hdrstop
19 #endif
20
21 #include "corexcep.h"
22
23 #define Verify(cond, msg)                                                                                              \
24     do                                                                                                                 \
25     {                                                                                                                  \
26         if (!(cond))                                                                                                   \
27         {                                                                                                              \
28             verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
29         }                                                                                                              \
30     } while (0)
31
32 #define VerifyOrReturn(cond, msg)                                                                                      \
33     do                                                                                                                 \
34     {                                                                                                                  \
35         if (!(cond))                                                                                                   \
36         {                                                                                                              \
37             verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
38             return;                                                                                                    \
39         }                                                                                                              \
40     } while (0)
41
42 #define VerifyOrReturnSpeculative(cond, msg, speculative)                                                              \
43     do                                                                                                                 \
44     {                                                                                                                  \
45         if (speculative)                                                                                               \
46         {                                                                                                              \
47             if (!(cond))                                                                                               \
48             {                                                                                                          \
49                 return false;                                                                                          \
50             }                                                                                                          \
51         }                                                                                                              \
52         else                                                                                                           \
53         {                                                                                                              \
54             if (!(cond))                                                                                               \
55             {                                                                                                          \
56                 verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                   \
57                 return false;                                                                                          \
58             }                                                                                                          \
59         }                                                                                                              \
60     } while (0)
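// The three macros above drive the importer's IL verification checks:
//   Verify(cond, msg)                    - raise a verification exception if 'cond' fails.
//   VerifyOrReturn(cond, msg)            - as above, but also return from the (void) caller on failure.
//   VerifyOrReturnSpeculative(cond, ...) - return false on failure; the exception is raised only
//                                          when the check is not speculative.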
61
62 /*****************************************************************************/
63
64 void Compiler::impInit()
65 {
66
67 #ifdef DEBUG
68     impTreeList        = nullptr;
69     impTreeLast        = nullptr;
70     impInlinedCodeSize = 0;
71 #endif
72 }
73
74 /*****************************************************************************
75  *
76  *  Pushes the given tree on the stack.
77  */
78
79 void Compiler::impPushOnStack(GenTreePtr tree, typeInfo ti)
80 {
81     /* Check for overflow. If inlining, we may be using a bigger stack */
82
83     if ((verCurrentState.esStackDepth >= info.compMaxStack) &&
84         (verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0)))
85     {
86         BADCODE("stack overflow");
87     }
88
89 #ifdef DEBUG
90     // If we are pushing a struct, make certain we know the precise type!
91     if (tree->TypeGet() == TYP_STRUCT)
92     {
93         assert(ti.IsType(TI_STRUCT));
94         CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle();
95         assert(clsHnd != NO_CLASS_HANDLE);
96     }
97
98     if (tiVerificationNeeded && !ti.IsDead())
99     {
100         assert(typeInfo::AreEquivalent(NormaliseForStack(ti), ti)); // types are normalized
101
102         // The ti type is consistent with the tree type.
103         //
104
105         // On 64-bit systems, nodes whose "proper" type is "native int" get labeled TYP_LONG.
106         // In the verification type system, we always transform "native int" to "TI_INT".
107         // Ideally, we would keep track of which nodes labeled "TYP_LONG" are really "native int", but
108         // attempts to do that have proved too difficult.  Instead, we'll assume that in checks like this,
109         // when there's a mismatch, it's because of this reason -- the typeInfo::AreEquivalentModuloNativeInt
110         // method used in the last disjunct allows exactly this mismatch.
111         assert(ti.IsDead() || ti.IsByRef() && (tree->TypeGet() == TYP_I_IMPL || tree->TypeGet() == TYP_BYREF) ||
112                ti.IsUnboxedGenericTypeVar() && tree->TypeGet() == TYP_REF ||
113                ti.IsObjRef() && tree->TypeGet() == TYP_REF || ti.IsMethod() && tree->TypeGet() == TYP_I_IMPL ||
114                ti.IsType(TI_STRUCT) && tree->TypeGet() != TYP_REF ||
115                typeInfo::AreEquivalentModuloNativeInt(NormaliseForStack(ti),
116                                                       NormaliseForStack(typeInfo(tree->TypeGet()))));
117
118         // If it is a struct type, make certain we normalized the primitive types
119         assert(!ti.IsType(TI_STRUCT) ||
120                info.compCompHnd->getTypeForPrimitiveValueClass(ti.GetClassHandle()) == CORINFO_TYPE_UNDEF);
121     }
122
123 #if VERBOSE_VERIFY
124     if (VERBOSE && tiVerificationNeeded)
125     {
126         printf("\n");
127         printf(TI_DUMP_PADDING);
128         printf("About to push to stack: ");
129         ti.Dump();
130     }
131 #endif // VERBOSE_VERIFY
132
133 #endif // DEBUG
134
135     verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti;
136     verCurrentState.esStack[verCurrentState.esStackDepth++].val      = tree;
137
138     if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
139     {
140         compLongUsed = true;
141     }
142     else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
143     {
144         compFloatingPointUsed = true;
145     }
146 }
147
148 /******************************************************************************/
149 // Used in the inliner, where we can assume type-safe code. Please don't use in the importer!!
150 inline void Compiler::impPushOnStackNoType(GenTreePtr tree)
151 {
152     assert(verCurrentState.esStackDepth < impStkSize);
153     INDEBUG(verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = typeInfo());
154     verCurrentState.esStack[verCurrentState.esStackDepth++].val              = tree;
155
156     if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
157     {
158         compLongUsed = true;
159     }
160     else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
161     {
162         compFloatingPointUsed = true;
163     }
164 }
165
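// Pushes a null object reference (an integer constant 0 of type TYP_REF, verification type TI_NULL)
// onto the importer's evaluation stack.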
166 inline void Compiler::impPushNullObjRefOnStack()
167 {
168     impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL));
169 }
170
171 // This method gets called when we run into unverifiable code
172 // (and we are verifying the method)
173
174 inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file)
175                                                           DEBUGARG(unsigned line))
176 {
177     // Remember that the code is not verifiable
178     // Note that the method may yet pass canSkipMethodVerification(),
179     // and so the presence of unverifiable code may not be an issue.
180     tiIsVerifiableCode = FALSE;
181
182 #ifdef DEBUG
183     const char* tail = strrchr(file, '\\');
184     if (tail)
185     {
186         file = tail + 1;
187     }
188
189     if (JitConfig.JitBreakOnUnsafeCode())
190     {
191         assert(!"Unsafe code detected");
192     }
193 #endif
194
195     JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
196             msg, info.compFullName, impCurOpcName, impCurOpcOffs));
197
198     if (verNeedsVerification() || compIsForImportOnly())
199     {
200         JITLOG((LL_ERROR, "Verification failure:  %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
201                 msg, info.compFullName, impCurOpcName, impCurOpcOffs));
202         verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line));
203     }
204 }
205
206 inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file)
207                                                                     DEBUGARG(unsigned line))
208 {
209     JITLOG((LL_ERROR, "Verification failure:  %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
210             msg, info.compFullName, impCurOpcName, impCurOpcOffs));
211
212 #ifdef DEBUG
213     //    BreakIfDebuggerPresent();
214     if (getBreakOnBadCode())
215     {
216         assert(!"Typechecking error");
217     }
218 #endif
219
220     RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr);
221     UNREACHABLE();
222 }
223
224 // Helper function that tells us whether the IL instruction at the given address
225 // consumes an address from the top of the stack. We use it to avoid marking
226 // locals as address-taken (lvAddrTaken) unnecessarily.
227 bool Compiler::impILConsumesAddr(const BYTE* codeAddr, CORINFO_METHOD_HANDLE fncHandle, CORINFO_MODULE_HANDLE scpHandle)
228 {
229     assert(!compIsForInlining());
230
231     OPCODE opcode;
232
233     opcode = (OPCODE)getU1LittleEndian(codeAddr);
234
235     switch (opcode)
236     {
237         // case CEE_LDFLDA: We're taking this one out as if you have a sequence
238         // like
239         //
240         //          ldloca.0
241         //          ldflda whatever
242         //
243         // of a primitive-like struct, you end up after morphing with the address of a local
244         // that's not marked as addrtaken, which is wrong. Also ldflda is usually used
245         // for structs that contain other structs, which isn't a case we handle very
246         // well now for other reasons.
247
248         case CEE_LDFLD:
249         {
250             // We won't collapse small fields. This is probably not the right place to have this
251             // check, but we're only using the function for this purpose, and it is easy to factor
252             // out if we need to do so.
253
254             CORINFO_RESOLVED_TOKEN resolvedToken;
255             impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field);
256
257             CORINFO_CLASS_HANDLE clsHnd;
258             var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField, &clsHnd));
259
260             // Preserve 'small' int types
261             if (lclTyp > TYP_INT)
262             {
263                 lclTyp = genActualType(lclTyp);
264             }
265
266             if (varTypeIsSmall(lclTyp))
267             {
268                 return false;
269             }
270
271             return true;
272         }
273         default:
274             break;
275     }
276
277     return false;
278 }
279
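// Reads the 4-byte metadata token at 'addr' and resolves it as the given token kind.
// When verification is needed, a resolution failure is reported through Verify rather
// than by calling resolveToken directly.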
280 void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind)
281 {
282     pResolvedToken->tokenContext = impTokenLookupContextHandle;
283     pResolvedToken->tokenScope   = info.compScopeHnd;
284     pResolvedToken->token        = getU4LittleEndian(addr);
285     pResolvedToken->tokenType    = kind;
286
287     if (!tiVerificationNeeded)
288     {
289         info.compCompHnd->resolveToken(pResolvedToken);
290     }
291     else
292     {
293         Verify(eeTryResolveToken(pResolvedToken), "Token resolution failed");
294     }
295 }
296
297 /*****************************************************************************
298  *
299  *  Pop one tree from the stack.
300  */
301
302 StackEntry Compiler::impPopStack()
303 {
304     if (verCurrentState.esStackDepth == 0)
305     {
306         BADCODE("stack underflow");
307     }
308
309 #ifdef DEBUG
310 #if VERBOSE_VERIFY
311     if (VERBOSE && tiVerificationNeeded)
312     {
313         JITDUMP("\n");
314         printf(TI_DUMP_PADDING);
315         printf("About to pop from the stack: ");
316         const typeInfo& ti = verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo;
317         ti.Dump();
318     }
319 #endif // VERBOSE_VERIFY
320 #endif // DEBUG
321
322     return verCurrentState.esStack[--verCurrentState.esStackDepth];
323 }
324
325 StackEntry Compiler::impPopStack(CORINFO_CLASS_HANDLE& structType)
326 {
327     StackEntry ret = impPopStack();
328     structType     = verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo.GetClassHandle();
329     return (ret);
330 }
331
332 GenTreePtr Compiler::impPopStack(typeInfo& ti)
333 {
334     StackEntry ret = impPopStack();
335     ti             = ret.seTypeInfo;
336     return (ret.val);
337 }
338
339 /*****************************************************************************
340  *
341  *  Peek at the n'th (0-based) tree from the top of the stack.
342  */
343
344 StackEntry& Compiler::impStackTop(unsigned n)
345 {
346     if (verCurrentState.esStackDepth <= n)
347     {
348         BADCODE("stack underflow");
349     }
350
351     return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1];
352 }
353 /*****************************************************************************
354  *  Some of the trees are spilled specially. While unspilling them, or
355  *  making a copy, these need to be handled specially. The function
356  *  enumerates the operators possible after spilling.
357  */
358
359 #ifdef DEBUG // only used in asserts
360 static bool impValidSpilledStackEntry(GenTreePtr tree)
361 {
362     if (tree->gtOper == GT_LCL_VAR)
363     {
364         return true;
365     }
366
367     if (tree->OperIsConst())
368     {
369         return true;
370     }
371
372     return false;
373 }
374 #endif
375
376 /*****************************************************************************
377  *
378  *  The following logic is used to save/restore stack contents.
379  *  If 'copy' is true, then we make a copy of the trees on the stack. These
380  *  have to all be cloneable/spilled values.
381  */
382
383 void Compiler::impSaveStackState(SavedStack* savePtr, bool copy)
384 {
385     savePtr->ssDepth = verCurrentState.esStackDepth;
386
387     if (verCurrentState.esStackDepth)
388     {
389         savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth];
390         size_t saveSize  = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees);
391
392         if (copy)
393         {
394             StackEntry* table = savePtr->ssTrees;
395
396             /* Make a fresh copy of all the stack entries */
397
398             for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++)
399             {
400                 table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo;
401                 GenTreePtr tree   = verCurrentState.esStack[level].val;
402
403                 assert(impValidSpilledStackEntry(tree));
404
405                 switch (tree->gtOper)
406                 {
407                     case GT_CNS_INT:
408                     case GT_CNS_LNG:
409                     case GT_CNS_DBL:
410                     case GT_CNS_STR:
411                     case GT_LCL_VAR:
412                         table->val = gtCloneExpr(tree);
413                         break;
414
415                     default:
416                         assert(!"Bad oper - Not covered by impValidSpilledStackEntry()");
417                         break;
418                 }
419             }
420         }
421         else
422         {
423             memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize);
424         }
425     }
426 }
427
428 void Compiler::impRestoreStackState(SavedStack* savePtr)
429 {
430     verCurrentState.esStackDepth = savePtr->ssDepth;
431
432     if (verCurrentState.esStackDepth)
433     {
434         memcpy(verCurrentState.esStack, savePtr->ssTrees,
435                verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack));
436     }
437 }
438
439 /*****************************************************************************
440  *
441  *  Get the tree list started for a new basic block.
442  */
443 inline void Compiler::impBeginTreeList()
444 {
445     assert(impTreeList == nullptr && impTreeLast == nullptr);
446
447     impTreeList = impTreeLast = new (this, GT_BEG_STMTS) GenTree(GT_BEG_STMTS, TYP_VOID);
448 }
449
450 /*****************************************************************************
451  *
452  *  Store the given start and end stmt in the given basic block. This is
453  *  mostly called by impEndTreeList(BasicBlock *block). It is called
454  *  directly only for handling CEE_LEAVEs out of finally-protected try blocks.
455  */
456
457 inline void Compiler::impEndTreeList(BasicBlock* block, GenTreePtr firstStmt, GenTreePtr lastStmt)
458 {
459     assert(firstStmt->gtOper == GT_STMT);
460     assert(lastStmt->gtOper == GT_STMT);
461
462     /* Make the list circular, so that we can easily walk it backwards */
463
464     firstStmt->gtPrev = lastStmt;
465
466     /* Store the tree list in the basic block */
467
468     block->bbTreeList = firstStmt;
469
470     /* The block should not already be marked as imported */
471     assert((block->bbFlags & BBF_IMPORTED) == 0);
472
473     block->bbFlags |= BBF_IMPORTED;
474 }
475
476 /*****************************************************************************
477  *
478  *  Store the current tree list in the given basic block.
479  */
480
481 inline void Compiler::impEndTreeList(BasicBlock* block)
482 {
483     assert(impTreeList->gtOper == GT_BEG_STMTS);
484
485     GenTreePtr firstTree = impTreeList->gtNext;
486
487     if (!firstTree)
488     {
489         /* The block should not already be marked as imported */
490         assert((block->bbFlags & BBF_IMPORTED) == 0);
491
492         // Empty block. Just mark it as imported
493         block->bbFlags |= BBF_IMPORTED;
494     }
495     else
496     {
497         // Ignore the GT_BEG_STMTS
498         assert(firstTree->gtPrev == impTreeList);
499
500         impEndTreeList(block, firstTree, impTreeLast);
501     }
502
503 #ifdef DEBUG
504     if (impLastILoffsStmt != nullptr)
505     {
506         impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
507         impLastILoffsStmt                          = nullptr;
508     }
509
510     impTreeList = impTreeLast = nullptr;
511 #endif
512 }
513
514 /*****************************************************************************
515  *
516  *  Check that storing the given tree doesn't mess up the semantic order. Note
517  *  that this has only limited value as we can only check [0..chkLevel).
518  */
519
520 inline void Compiler::impAppendStmtCheck(GenTreePtr stmt, unsigned chkLevel)
521 {
522 #ifndef DEBUG
523     return;
524 #else
525     assert(stmt->gtOper == GT_STMT);
526
527     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
528     {
529         chkLevel = verCurrentState.esStackDepth;
530     }
531
532     if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE)
533     {
534         return;
535     }
536
537     GenTreePtr tree = stmt->gtStmt.gtStmtExpr;
538
539     // Calls can only be appended if there are no GTF_GLOB_EFFECT trees on the stack
540
541     if (tree->gtFlags & GTF_CALL)
542     {
543         for (unsigned level = 0; level < chkLevel; level++)
544         {
545             assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0);
546         }
547     }
548
549     if (tree->gtOper == GT_ASG)
550     {
551         // For an assignment to a local variable, all references to that
552         // variable have to be spilled. If it is aliased, all calls and
553         // indirect accesses have to be spilled
554
555         if (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR)
556         {
557             unsigned lclNum = tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
558             for (unsigned level = 0; level < chkLevel; level++)
559             {
560                 assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum, false));
561                 assert(!lvaTable[lclNum].lvAddrExposed ||
562                        (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0);
563             }
564         }
565
566         // If the access may be to global memory, all side effects have to be spilled.
567
568         else if (tree->gtOp.gtOp1->gtFlags & GTF_GLOB_REF)
569         {
570             for (unsigned level = 0; level < chkLevel; level++)
571             {
572                 assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0);
573             }
574         }
575     }
576 #endif
577 }
578
579 /*****************************************************************************
580  *
581  *  Append the given GT_STMT node to the current block's tree list.
582  *  [0..chkLevel) is the portion of the stack which we will check for
583  *    interference with stmt and spill if needed.
584  */
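//  Note: besides an actual stack depth, chkLevel may be one of the special values handled
//  below: CHECK_SPILL_ALL (check the whole current stack) or CHECK_SPILL_NONE (skip the
//  interference check entirely).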
585
586 inline void Compiler::impAppendStmt(GenTreePtr stmt, unsigned chkLevel)
587 {
588     assert(stmt->gtOper == GT_STMT);
589     noway_assert(impTreeLast != nullptr);
590
591     /* If the statement being appended has any side-effects, check the stack
592        to see if anything needs to be spilled to preserve correct ordering. */
593
594     GenTreePtr expr  = stmt->gtStmt.gtStmtExpr;
595     unsigned   flags = expr->gtFlags & GTF_GLOB_EFFECT;
596
597     // Assignments to (unaliased) locals don't count as side-effects, as
598     // we handle them specially using impSpillLclRefs(). Temp locals should
599     // be fine too.
600
601     if ((expr->gtOper == GT_ASG) && (expr->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
602         !(expr->gtOp.gtOp1->gtFlags & GTF_GLOB_REF) && !gtHasLocalsWithAddrOp(expr->gtOp.gtOp2))
603     {
604         unsigned op2Flags = expr->gtOp.gtOp2->gtFlags & GTF_GLOB_EFFECT;
605         assert(flags == (op2Flags | GTF_ASG));
606         flags = op2Flags;
607     }
608
609     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
610     {
611         chkLevel = verCurrentState.esStackDepth;
612     }
613
614     if (chkLevel && chkLevel != (unsigned)CHECK_SPILL_NONE)
615     {
616         assert(chkLevel <= verCurrentState.esStackDepth);
617
618         if (flags)
619         {
620             // If there is a call, we have to spill global refs
621             bool spillGlobEffects = (flags & GTF_CALL) ? true : false;
622
623             if (expr->gtOper == GT_ASG)
624             {
625                 GenTree* lhs = expr->gtGetOp1();
626                 // If we are assigning to a global ref, we have to spill global refs on stack.
627                 // TODO-1stClassStructs: Previously, spillGlobEffects was set to true for
628                 // GT_INITBLK and GT_COPYBLK, but this is overly conservative, and should be
629                 // revisited. (Note that it was NOT set to true for GT_COPYOBJ.)
630                 if (!expr->OperIsBlkOp())
631                 {
632                     // If we are assigning to a global ref, we have to spill global refs on stack
633                     if ((lhs->gtFlags & GTF_GLOB_REF) != 0)
634                     {
635                         spillGlobEffects = true;
636                     }
637                 }
638                 else if ((lhs->OperIsBlk() && !lhs->AsBlk()->HasGCPtr()) ||
639                          ((lhs->OperGet() == GT_LCL_VAR) &&
640                           (lvaTable[lhs->AsLclVarCommon()->gtLclNum].lvStructGcCount == 0)))
641                 {
642                     spillGlobEffects = true;
643                 }
644             }
645
646             impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt"));
647         }
648         else
649         {
650             impSpillSpecialSideEff();
651         }
652     }
653
654     impAppendStmtCheck(stmt, chkLevel);
655
656     /* Point 'prev' at the previous node, so that we can walk backwards */
657
658     stmt->gtPrev = impTreeLast;
659
660     /* Append the expression statement to the list */
661
662     impTreeLast->gtNext = stmt;
663     impTreeLast         = stmt;
664
665 #ifdef FEATURE_SIMD
666     impMarkContiguousSIMDFieldAssignments(stmt);
667 #endif
668
669     /* Once we set impCurStmtOffs in an appended tree, we are ready to
670        report the following offsets. So reset impCurStmtOffs */
671
672     if (impTreeLast->gtStmt.gtStmtILoffsx == impCurStmtOffs)
673     {
674         impCurStmtOffsSet(BAD_IL_OFFSET);
675     }
676
677 #ifdef DEBUG
678     if (impLastILoffsStmt == nullptr)
679     {
680         impLastILoffsStmt = stmt;
681     }
682
683     if (verbose)
684     {
685         printf("\n\n");
686         gtDispTree(stmt);
687     }
688 #endif
689 }
690
691 /*****************************************************************************
692  *
693  *  Insert the given GT_STMT "stmt" before GT_STMT "stmtBefore"
694  */
695
696 inline void Compiler::impInsertStmtBefore(GenTreePtr stmt, GenTreePtr stmtBefore)
697 {
698     assert(stmt->gtOper == GT_STMT);
699     assert(stmtBefore->gtOper == GT_STMT);
700
701     GenTreePtr stmtPrev = stmtBefore->gtPrev;
702     stmt->gtPrev        = stmtPrev;
703     stmt->gtNext        = stmtBefore;
704     stmtPrev->gtNext    = stmt;
705     stmtBefore->gtPrev  = stmt;
706 }
707
708 /*****************************************************************************
709  *
710  *  Append the given expression tree to the current block's tree list.
711  *  Return the newly created statement.
712  */
713
714 GenTreePtr Compiler::impAppendTree(GenTreePtr tree, unsigned chkLevel, IL_OFFSETX offset)
715 {
716     assert(tree);
717
718     /* Allocate an 'expression statement' node */
719
720     GenTreePtr expr = gtNewStmt(tree, offset);
721
722     /* Append the statement to the current block's stmt list */
723
724     impAppendStmt(expr, chkLevel);
725
726     return expr;
727 }
728
729 /*****************************************************************************
730  *
731  *  Insert the given expression tree before GT_STMT "stmtBefore"
732  */
733
734 void Compiler::impInsertTreeBefore(GenTreePtr tree, IL_OFFSETX offset, GenTreePtr stmtBefore)
735 {
736     assert(stmtBefore->gtOper == GT_STMT);
737
738     /* Allocate an 'expression statement' node */
739
740     GenTreePtr expr = gtNewStmt(tree, offset);
741
742     /* Insert the statement before 'stmtBefore' */
743
744     impInsertStmtBefore(expr, stmtBefore);
745 }
746
747 /*****************************************************************************
748  *
749  *  Append an assignment of the given value to a temp to the current tree list.
750  *  curLevel is the stack level for which the spill to the temp is being done.
751  */
752
753 void Compiler::impAssignTempGen(unsigned    tmp,
754                                 GenTreePtr  val,
755                                 unsigned    curLevel,
756                                 GenTreePtr* pAfterStmt, /* = NULL */
757                                 IL_OFFSETX  ilOffset,   /* = BAD_IL_OFFSET */
758                                 BasicBlock* block       /* = NULL */
759                                 )
760 {
761     GenTreePtr asg = gtNewTempAssign(tmp, val);
762
763     if (!asg->IsNothingNode())
764     {
765         if (pAfterStmt)
766         {
767             GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
768             *pAfterStmt        = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
769         }
770         else
771         {
772             impAppendTree(asg, curLevel, impCurStmtOffs);
773         }
774     }
775 }
776
777 /*****************************************************************************
778  * Same as above, but handles the value-class case too.
779  */
780
781 void Compiler::impAssignTempGen(unsigned             tmpNum,
782                                 GenTreePtr           val,
783                                 CORINFO_CLASS_HANDLE structType,
784                                 unsigned             curLevel,
785                                 GenTreePtr*          pAfterStmt, /* = NULL */
786                                 IL_OFFSETX           ilOffset,   /* = BAD_IL_OFFSET */
787                                 BasicBlock*          block       /* = NULL */
788                                 )
789 {
790     GenTreePtr asg;
791
792     if (varTypeIsStruct(val))
793     {
794         assert(tmpNum < lvaCount);
795         assert(structType != NO_CLASS_HANDLE);
796
797         // If the method is non-verifiable, this assert may not hold, so we relax it
798         // when verification is turned on: any block that tries to use the temp
799         // would have failed verification anyway.
800         var_types varType = lvaTable[tmpNum].lvType;
801         assert(tiVerificationNeeded || varType == TYP_UNDEF || varTypeIsStruct(varType));
802         lvaSetStruct(tmpNum, structType, false);
803
804         // Now, set the type of the struct value. Note that lvaSetStruct may modify the type
805         // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType)
806         // that has been passed in for the value being assigned to the temp, in which case we
807         // need to set 'val' to that same type.
808         // Note also that if we always normalized the types of any node that might be a struct
809         // type, this would not be necessary - but that requires additional JIT/EE interface
810         // calls that may not actually be required - e.g. if we only access a field of a struct.
811
812         val->gtType = lvaTable[tmpNum].lvType;
813
814         GenTreePtr dst = gtNewLclvNode(tmpNum, val->gtType);
815         asg            = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, block);
816     }
817     else
818     {
819         asg = gtNewTempAssign(tmpNum, val);
820     }
821
822     if (!asg->IsNothingNode())
823     {
824         if (pAfterStmt)
825         {
826             GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
827             *pAfterStmt        = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
828         }
829         else
830         {
831             impAppendTree(asg, curLevel, impCurStmtOffs);
832         }
833     }
834 }
835
836 /*****************************************************************************
837  *
838  *  Pop the given number of values from the stack and return a list node with
839  *  their values.
840  *  The 'prefixTree' argument may optionally contain an argument
841  *  list that is prepended to the list returned from this function.
842  *
843  *  The notion of prepended is a bit misleading in that the list is backwards
844  *  from the way I would expect: The first element popped is at the end of
845  *  the returned list, and prefixTree is 'before' that, meaning closer to
846  *  the end of the list.  To get to prefixTree, you have to walk to the
847  *  end of the list.
848  *
849  *  For ARG_ORDER_R2L prefixTree is only used to insert extra arguments, as
850  *  such we reverse its meaning such that returnValue has a reversed
851  *  prefixTree at the head of the list.
852  */
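// To illustrate with hypothetical entries: popping three values e1 (popped first), e2, e3 with a
// prefixTree of [p1 -> p2] yields:
//   ARG_ORDER_L2R: e3 -> e2 -> e1 -> p1 -> p2   (prefixTree at the tail of the returned list)
//   ARG_ORDER_R2L: p2 -> p1 -> e3 -> e2 -> e1   (reversed prefixTree at the head of the returned list)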
853
854 GenTreeArgList* Compiler::impPopList(unsigned          count,
855                                      unsigned*         flagsPtr,
856                                      CORINFO_SIG_INFO* sig,
857                                      GenTreeArgList*   prefixTree)
858 {
859     assert(sig == nullptr || count == sig->numArgs);
860
861     unsigned             flags = 0;
862     CORINFO_CLASS_HANDLE structType;
863     GenTreeArgList*      treeList;
864
865     if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
866     {
867         treeList = nullptr;
868     }
869     else
870     { // ARG_ORDER_L2R
871         treeList = prefixTree;
872     }
873
874     while (count--)
875     {
876         StackEntry se   = impPopStack();
877         typeInfo   ti   = se.seTypeInfo;
878         GenTreePtr temp = se.val;
879
880         if (varTypeIsStruct(temp))
881         {
882             // Morph trees that aren't already OBJs or MKREFANY to be OBJs
883             assert(ti.IsType(TI_STRUCT));
884             structType = ti.GetClassHandleForValueClass();
885             temp       = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL);
886         }
887
888         /* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */
889         flags |= temp->gtFlags;
890         treeList = gtNewListNode(temp, treeList);
891     }
892
893     *flagsPtr = flags;
894
895     if (sig != nullptr)
896     {
897         if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
898             sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR)
899         {
900             // Make sure that all valuetypes (including enums) that we push are loaded.
901             // This is to guarantee that if a GC is triggered from the prestub of this method,
902             // all valuetypes in the method signature are already loaded.
903             // We need to be able to find the size of the valuetypes, but we cannot
904             // do a class-load from within GC.
905             info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass);
906         }
907
908         CORINFO_ARG_LIST_HANDLE argLst = sig->args;
909         CORINFO_CLASS_HANDLE    argClass;
910         CORINFO_CLASS_HANDLE    argRealClass;
911         GenTreeArgList*         args;
912         unsigned                sigSize;
913
914         for (args = treeList, count = sig->numArgs; count > 0; args = args->Rest(), count--)
915         {
916             PREFIX_ASSUME(args != nullptr);
917
918             CorInfoType corType = strip(info.compCompHnd->getArgType(sig, argLst, &argClass));
919
920             // insert implied casts (from float to double or double to float)
921
922             if (corType == CORINFO_TYPE_DOUBLE && args->Current()->TypeGet() == TYP_FLOAT)
923             {
924                 args->Current() = gtNewCastNode(TYP_DOUBLE, args->Current(), TYP_DOUBLE);
925             }
926             else if (corType == CORINFO_TYPE_FLOAT && args->Current()->TypeGet() == TYP_DOUBLE)
927             {
928                 args->Current() = gtNewCastNode(TYP_FLOAT, args->Current(), TYP_FLOAT);
929             }
930
931             // insert any widening or narrowing casts for backwards compatibility
932
933             args->Current() = impImplicitIorI4Cast(args->Current(), JITtype2varType(corType));
934
935             if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR &&
936                 corType != CORINFO_TYPE_VAR && (argRealClass = info.compCompHnd->getArgClass(sig, argLst)) != nullptr)
937             {
938                 // Everett MC++ could generate IL with mismatched valuetypes. It used to work with the Everett JIT,
939                 // but it stopped working in Whidbey when we started passing simple valuetypes as underlying
940                 // primitive types.
941                 // We will try to adjust for this case here to avoid breaking customers code (see VSW 485789 for
942                 // details).
943                 if (corType == CORINFO_TYPE_VALUECLASS && !varTypeIsStruct(args->Current()))
944                 {
945                     args->Current() = impNormStructVal(args->Current(), argRealClass, (unsigned)CHECK_SPILL_ALL, true);
946                 }
947
948                 // Make sure that all valuetypes (including enums) that we push are loaded.
949                 // This is to guarantee that if a GC is triggered from the prestub of this method,
950                 // all valuetypes in the method signature are already loaded.
951                 // We need to be able to find the size of the valuetypes, but we cannot
952                 // do a class-load from within GC.
953                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass);
954             }
955
956             argLst = info.compCompHnd->getArgNext(argLst);
957         }
958     }
959
960     if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
961     {
962         // Prepend the prefixTree
963
964         // Simple in-place reversal to place treeList
965         // at the end of a reversed prefixTree
966         while (prefixTree != nullptr)
967         {
968             GenTreeArgList* next = prefixTree->Rest();
969             prefixTree->Rest()   = treeList;
970             treeList             = prefixTree;
971             prefixTree           = next;
972         }
973     }
974     return treeList;
975 }
976
977 /*****************************************************************************
978  *
979  *  Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.)
980  *  The first "skipReverseCount" items are not reversed.
981  */
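// For illustration with hypothetical entries: if impPopList returns e3 -> e2 -> e1 (e1 was on top of
// the stack and popped first), then skipReverseCount == 0 reverses the whole list to e1 -> e2 -> e3,
// while skipReverseCount == 1 keeps e3 in place and reverses the rest, giving e3 -> e1 -> e2.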
982
983 GenTreeArgList* Compiler::impPopRevList(unsigned          count,
984                                         unsigned*         flagsPtr,
985                                         CORINFO_SIG_INFO* sig,
986                                         unsigned          skipReverseCount)
987
988 {
989     assert(skipReverseCount <= count);
990
991     GenTreeArgList* list = impPopList(count, flagsPtr, sig);
992
993     // reverse the list
994     if (list == nullptr || skipReverseCount == count)
995     {
996         return list;
997     }
998
999     GenTreeArgList* ptr          = nullptr; // Initialized to the first node that needs to be reversed
1000     GenTreeArgList* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed
1001
1002     if (skipReverseCount == 0)
1003     {
1004         ptr = list;
1005     }
1006     else
1007     {
1008         lastSkipNode = list;
1009         // Get to the first node that needs to be reversed
1010         for (unsigned i = 0; i < skipReverseCount - 1; i++)
1011         {
1012             lastSkipNode = lastSkipNode->Rest();
1013         }
1014
1015         PREFIX_ASSUME(lastSkipNode != nullptr);
1016         ptr = lastSkipNode->Rest();
1017     }
1018
1019     GenTreeArgList* reversedList = nullptr;
1020
1021     do
1022     {
1023         GenTreeArgList* tmp = ptr->Rest();
1024         ptr->Rest()         = reversedList;
1025         reversedList        = ptr;
1026         ptr                 = tmp;
1027     } while (ptr != nullptr);
1028
1029     if (skipReverseCount)
1030     {
1031         lastSkipNode->Rest() = reversedList;
1032         return list;
1033     }
1034     else
1035     {
1036         return reversedList;
1037     }
1038 }
1039
1040 /*****************************************************************************
1041    Assign (copy) the structure from 'src' to 'dest'.  The structure is a value
1042    class of type 'clsHnd'.  It returns the tree that should be appended to the
1043    statement list that represents the assignment.
1044    Temp assignments may be appended to impTreeList if spilling is necessary.
1045    curLevel is the stack level for which a spill may be done.
1046  */
1047
1048 GenTreePtr Compiler::impAssignStruct(GenTreePtr           dest,
1049                                      GenTreePtr           src,
1050                                      CORINFO_CLASS_HANDLE structHnd,
1051                                      unsigned             curLevel,
1052                                      GenTreePtr*          pAfterStmt, /* = NULL */
1053                                      BasicBlock*          block       /* = NULL */
1054                                      )
1055 {
1056     assert(varTypeIsStruct(dest));
1057
1058     while (dest->gtOper == GT_COMMA)
1059     {
1060         assert(varTypeIsStruct(dest->gtOp.gtOp2)); // Second thing is the struct
1061
1062         // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree.
1063         if (pAfterStmt)
1064         {
1065             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(dest->gtOp.gtOp1, impCurStmtOffs));
1066         }
1067         else
1068         {
1069             impAppendTree(dest->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1070         }
1071
1072         // set dest to the second thing
1073         dest = dest->gtOp.gtOp2;
1074     }
1075
1076     assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD ||
1077            dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX);
1078
1079     if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR &&
1080         src->gtLclVarCommon.gtLclNum == dest->gtLclVarCommon.gtLclNum)
1081     {
1082         // Make this a NOP
1083         return gtNewNothingNode();
1084     }
1085
1086     // TODO-1stClassStructs: Avoid creating an address if it is not needed,
1087     // or re-creating a Blk node if it is.
1088     GenTreePtr destAddr;
1089
1090     if (dest->gtOper == GT_IND || dest->OperIsBlk())
1091     {
1092         destAddr = dest->gtOp.gtOp1;
1093     }
1094     else
1095     {
1096         destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
1097     }
1098
1099     return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, block));
1100 }
1101
1102 /*****************************************************************************/
1103
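// Assigns (copies) the struct value 'src' to the location addressed by 'destAddr' and returns the
// assignment tree to be appended by the caller. Several source forms get special handling below:
// calls that return via a hidden return buffer, GT_RET_EXPR, GT_MKREFANY, and GT_COMMA.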
1104 GenTreePtr Compiler::impAssignStructPtr(GenTreePtr           destAddr,
1105                                         GenTreePtr           src,
1106                                         CORINFO_CLASS_HANDLE structHnd,
1107                                         unsigned             curLevel,
1108                                         GenTreePtr*          pAfterStmt, /* = NULL */
1109                                         BasicBlock*          block       /* = NULL */
1110                                         )
1111 {
1112     var_types  destType;
1113     GenTreePtr dest      = nullptr;
1114     unsigned   destFlags = 0;
1115
1116 #if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1117     assert(varTypeIsStruct(src) || (src->gtOper == GT_ADDR && src->TypeGet() == TYP_BYREF));
1118     // TODO-ARM-BUG: Does ARM need this?
1119     // TODO-ARM64-BUG: Does ARM64 need this?
1120     assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1121            src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1122            src->gtOper == GT_COMMA || src->gtOper == GT_ADDR ||
1123            (src->TypeGet() != TYP_STRUCT && (GenTree::OperIsSIMD(src->gtOper) || src->gtOper == GT_LCL_FLD)));
1124 #else  // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1125     assert(varTypeIsStruct(src));
1126
1127     assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1128            src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1129            src->gtOper == GT_COMMA ||
1130            (src->TypeGet() != TYP_STRUCT && (GenTree::OperIsSIMD(src->gtOper) || src->gtOper == GT_LCL_FLD)));
1131 #endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1132     if (destAddr->OperGet() == GT_ADDR)
1133     {
1134         GenTree* destNode = destAddr->gtGetOp1();
1135         // If the actual destination is a local (for non-LEGACY_BACKEND), or already a block node, or is a node that
1136         // will be morphed, don't insert an OBJ(ADDR).
1137         if (destNode->gtOper == GT_INDEX || destNode->OperIsBlk()
1138 #ifndef LEGACY_BACKEND
1139             || ((destNode->OperGet() == GT_LCL_VAR) && (destNode->TypeGet() == src->TypeGet()))
1140 #endif // !LEGACY_BACKEND
1141                 )
1142         {
1143             dest = destNode;
1144         }
1145         destType = destNode->TypeGet();
1146     }
1147     else
1148     {
1149         destType = src->TypeGet();
1150     }
1151
1152     var_types asgType = src->TypeGet();
1153
1154     if (src->gtOper == GT_CALL)
1155     {
1156         if (src->AsCall()->TreatAsHasRetBufArg(this))
1157         {
1158             // Case of call returning a struct via hidden retbuf arg
1159
1160             // insert the return value buffer into the argument list as first byref parameter
1161             src->gtCall.gtCallArgs = gtNewListNode(destAddr, src->gtCall.gtCallArgs);
1162
1163             // now returns void, not a struct
1164             src->gtType = TYP_VOID;
1165
1166             // return the morphed call node
1167             return src;
1168         }
1169         else
1170         {
1171             // Case of call returning a struct in one or more registers.
1172
1173             var_types returnType = (var_types)src->gtCall.gtReturnType;
1174
1175             // We won't use a return buffer, so change the type of src->gtType to 'returnType'
1176             src->gtType = genActualType(returnType);
1177
1178             // First we try to change this to "LclVar/LclFld = call"
1179             //
1180             if ((destAddr->gtOper == GT_ADDR) && (destAddr->gtOp.gtOp1->gtOper == GT_LCL_VAR))
1181             {
1182                 // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD.
1183                 // That is, the IR will be of the form lclVar = call for multi-reg return
1184                 //
1185                 GenTreePtr lcl = destAddr->gtOp.gtOp1;
1186                 if (src->AsCall()->HasMultiRegRetVal())
1187                 {
1188                     // Mark the struct LclVar as used in a MultiReg return context
1189                     //  which currently makes it non promotable.
1190                     // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1191                     // handle multireg returns.
1192                     lcl->gtFlags |= GTF_DONT_CSE;
1193                     lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1194                 }
1195                 else // The call result is not a multireg return
1196                 {
1197                     // We change this to a GT_LCL_FLD (from a GT_ADDR of a GT_LCL_VAR)
1198                     lcl->ChangeOper(GT_LCL_FLD);
1199                     fgLclFldAssign(lcl->gtLclVarCommon.gtLclNum);
1200                 }
1201
1202                 lcl->gtType = src->gtType;
1203                 asgType     = src->gtType;
1204                 dest        = lcl;
1205
1206 #if defined(_TARGET_ARM_)
1207                 // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case,
1208                 // but that method has not been updated to include ARM.
1209                 impMarkLclDstNotPromotable(lcl->gtLclVarCommon.gtLclNum, src, structHnd);
1210                 lcl->gtFlags |= GTF_DONT_CSE;
1211 #elif defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1212                 // Not allowed for FEATURE_CORECLR, which is the only SKU available for System V OSs.
1213                 assert(!src->gtCall.IsVarargs() && "varargs not allowed for System V OSs.");
1214
1215                 // Make the struct non promotable. The eightbytes could contain multiple fields.
1216                 // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1217                 // handle multireg returns.
1218                 // TODO-Cleanup: Why is this needed here? This seems that it will set this even for
1219                 // non-multireg returns.
1220                 lcl->gtFlags |= GTF_DONT_CSE;
1221                 lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1222 #endif
1223             }
1224             else // we don't have a GT_ADDR of a GT_LCL_VAR
1225             {
1226                 // !!! The destination could be on stack. !!!
1227                 // This flag will let us choose the correct write barrier.
1228                 asgType   = returnType;
1229                 destFlags = GTF_IND_TGTANYWHERE;
1230             }
1231         }
1232     }
1233     else if (src->gtOper == GT_RET_EXPR)
1234     {
1235         GenTreePtr call = src->gtRetExpr.gtInlineCandidate;
1236         noway_assert(call->gtOper == GT_CALL);
1237
1238         if (call->AsCall()->HasRetBufArg())
1239         {
1240             // insert the return value buffer into the argument list as first byref parameter
1241             call->gtCall.gtCallArgs = gtNewListNode(destAddr, call->gtCall.gtCallArgs);
1242
1243             // now returns void, not a struct
1244             src->gtType  = TYP_VOID;
1245             call->gtType = TYP_VOID;
1246
1247             // We already have appended the write to 'dest' GT_CALL's args
1248             // So now we just return an empty node (pruning the GT_RET_EXPR)
1249             return src;
1250         }
1251         else
1252         {
1253             // Case of inline method returning a struct in one or more registers.
1254             //
1255             var_types returnType = (var_types)call->gtCall.gtReturnType;
1256
1257             // We won't need a return buffer
1258             asgType      = returnType;
1259             src->gtType  = genActualType(returnType);
1260             call->gtType = src->gtType;
1261
1262             // If we've changed the type, and it no longer matches a local destination,
1263             // we must use an indirection.
1264             if ((dest != nullptr) && (dest->OperGet() == GT_LCL_VAR) && (dest->TypeGet() != asgType))
1265             {
1266                 dest = nullptr;
1267             }
1268
1269             // !!! The destination could be on stack. !!!
1270             // This flag will let us choose the correct write barrier.
1271             destFlags = GTF_IND_TGTANYWHERE;
1272         }
1273     }
1274     else if (src->OperIsBlk())
1275     {
1276         asgType = impNormStructType(structHnd);
1277         if (src->gtOper == GT_OBJ)
1278         {
1279             assert(src->gtObj.gtClass == structHnd);
1280         }
1281     }
1282     else if (src->gtOper == GT_INDEX)
1283     {
1284         asgType = impNormStructType(structHnd);
1285         assert(src->gtIndex.gtStructElemClass == structHnd);
1286     }
1287     else if (src->gtOper == GT_MKREFANY)
1288     {
1289         // Since we are assigning the result of a GT_MKREFANY,
1290         // "destAddr" must point to a refany.
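        // A refany has two pointer-sized fields: the data pointer (at offset 0, as asserted below)
        // and the type. We build one store for each: the data-pointer store is appended here, and
        // the type store is returned for the caller to append.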
1291
1292         GenTreePtr destAddrClone;
1293         destAddr =
1294             impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment"));
1295
1296         assert(offsetof(CORINFO_RefAny, dataPtr) == 0);
1297         assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF);
1298         GetZeroOffsetFieldMap()->Set(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));
1299         GenTreePtr     ptrSlot         = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr);
1300         GenTreeIntCon* typeFieldOffset = gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL);
1301         typeFieldOffset->gtFieldSeq    = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField());
1302         GenTreePtr typeSlot =
1303             gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset));
1304
1305         // append the assign of the pointer value
1306         GenTreePtr asg = gtNewAssignNode(ptrSlot, src->gtOp.gtOp1);
1307         if (pAfterStmt)
1308         {
1309             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(asg, impCurStmtOffs));
1310         }
1311         else
1312         {
1313             impAppendTree(asg, curLevel, impCurStmtOffs);
1314         }
1315
1316         // return the assign of the type value, to be appended
1317         return gtNewAssignNode(typeSlot, src->gtOp.gtOp2);
1318     }
1319     else if (src->gtOper == GT_COMMA)
1320     {
1321         // The second thing is the struct or its address.
1322         assert(varTypeIsStruct(src->gtOp.gtOp2) || src->gtOp.gtOp2->gtType == TYP_BYREF);
1323         if (pAfterStmt)
1324         {
1325             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(src->gtOp.gtOp1, impCurStmtOffs));
1326         }
1327         else
1328         {
1329             impAppendTree(src->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1330         }
1331
1332         // Evaluate the second thing using recursion.
1333         return impAssignStructPtr(destAddr, src->gtOp.gtOp2, structHnd, curLevel, pAfterStmt, block);
1334     }
1335     else if (src->IsLocal())
1336     {
1337         asgType = src->TypeGet();
1338     }
1339     else if (asgType == TYP_STRUCT)
1340     {
1341         asgType     = impNormStructType(structHnd);
1342         src->gtType = asgType;
1343 #ifdef LEGACY_BACKEND
1344         if (asgType == TYP_STRUCT)
1345         {
1346             GenTree* srcAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, src);
1347             src              = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
1348         }
1349 #endif
1350     }
1351     if (dest == nullptr)
1352     {
1353         // TODO-1stClassStructs: We shouldn't really need a block node as the destination
1354         // if this is a known struct type.
1355         if (asgType == TYP_STRUCT)
1356         {
1357             dest = gtNewObjNode(structHnd, destAddr);
1358             gtSetObjGcInfo(dest->AsObj());
1359             // Although an obj as a call argument was always assumed to be a globRef
1360             // (which is itself overly conservative), that is not true of the operands
1361             // of a block assignment.
1362             dest->gtFlags &= ~GTF_GLOB_REF;
1363             dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF);
1364         }
1365         else if (varTypeIsStruct(asgType))
1366         {
1367             dest = new (this, GT_BLK) GenTreeBlk(GT_BLK, asgType, destAddr, genTypeSize(asgType));
1368         }
1369         else
1370         {
1371             dest = gtNewOperNode(GT_IND, asgType, destAddr);
1372         }
1373     }
1374     else
1375     {
1376         dest->gtType = asgType;
1377     }
1378
1379     dest->gtFlags |= destFlags;
1380     destFlags = dest->gtFlags;
1381
1382     // return an assignment node, to be appended
1383     GenTree* asgNode = gtNewAssignNode(dest, src);
1384     gtBlockOpInit(asgNode, dest, src, false);
1385
1386     // TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs
1387     // of assignments.
1388     if ((destFlags & GTF_DONT_CSE) == 0)
1389     {
1390         dest->gtFlags &= ~(GTF_DONT_CSE);
1391     }
1392     return asgNode;
1393 }
1394
1395 /*****************************************************************************
1396    Given a struct value, and the class handle for that structure, return
1397    the expression for the address for that structure value.
1398
1399    willDeref - does the caller guarantee that it will dereference the pointer?
1400 */
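/* Illustrative sketch (a hedged addition, not part of the original source): for a GT_OBJ
   whose address the caller will dereference, the existing address operand is reused; a
   struct-returning call must first be spilled to a temp:

       GenTreePtr addr = impGetStructAddr(structVal, structHnd, curLevel, true);
       // structVal == OBJ(addrExpr)   -->  addr == addrExpr
       // structVal == CALL(...)       -->  "tmp = CALL(...)" is appended, addr == ADDR(LCL_VAR tmp)
*/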
1401
1402 GenTreePtr Compiler::impGetStructAddr(GenTreePtr           structVal,
1403                                       CORINFO_CLASS_HANDLE structHnd,
1404                                       unsigned             curLevel,
1405                                       bool                 willDeref)
1406 {
1407     assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd));
1408
1409     var_types type = structVal->TypeGet();
1410
1411     genTreeOps oper = structVal->gtOper;
1412
1413     if (oper == GT_OBJ && willDeref)
1414     {
1415         assert(structVal->gtObj.gtClass == structHnd);
1416         return (structVal->gtObj.Addr());
1417     }
1418     else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY)
1419     {
1420         unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1421
1422         impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1423
1424         // The 'return value' is now the temp itself
1425
1426         type            = genActualType(lvaTable[tmpNum].TypeGet());
1427         GenTreePtr temp = gtNewLclvNode(tmpNum, type);
1428         temp            = gtNewOperNode(GT_ADDR, TYP_BYREF, temp);
1429         return temp;
1430     }
1431     else if (oper == GT_COMMA)
1432     {
1433         assert(structVal->gtOp.gtOp2->gtType == type); // The second operand is the struct
1434
1435         GenTreePtr oldTreeLast = impTreeLast;
1436         structVal->gtOp.gtOp2  = impGetStructAddr(structVal->gtOp.gtOp2, structHnd, curLevel, willDeref);
1437         structVal->gtType      = TYP_BYREF;
1438
1439         if (oldTreeLast != impTreeLast)
1440         {
1441             // Some temp assignment statement was placed on the statement list
1442             // for Op2, but that would be out of order with op1, so we need to
1443             // spill op1 onto the statement list after whatever was last
1444             // before we recursed on Op2 (i.e. before whatever Op2 appended).
1445             impInsertTreeBefore(structVal->gtOp.gtOp1, impCurStmtOffs, oldTreeLast->gtNext);
1446             structVal->gtOp.gtOp1 = gtNewNothingNode();
1447         }
1448
1449         return (structVal);
1450     }
1451
1452     return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1453 }
1454
1455 //------------------------------------------------------------------------
1456 // impNormStructType: Given a (known to be) struct class handle structHnd, normalize its type,
1457 //                    and optionally determine the GC layout of the struct.
1458 //
1459 // Arguments:
1460 //    structHnd       - The class handle for the struct type of interest.
1461 //    gcLayout        - (optional, default nullptr) - a BYTE pointer, allocated by the caller,
1462 //                      into which the gcLayout will be written.
1463 //    pNumGCVars      - (optional, default nullptr) - if non-null, a pointer to an unsigned,
1464 //                      which will be set to the number of GC fields in the struct.
1465 //    pSimdBaseType   - (optional, default nullptr) - if non-null, and the struct is a SIMD
1466 //                      type, set to the SIMD base type
1467 //
1468 // Return Value:
1469 //    The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*).
1470 //    The gcLayout will be returned using the pointers provided by the caller, if non-null.
1471 //    It may also modify the compFloatingPointUsed flag if the type is a SIMD type.
1472 //
1473 // Assumptions:
1474 //    The caller must set gcLayout to nullptr OR ensure that it is large enough
1475 //    (see ICorStaticInfo::getClassGClayout in corinfo.h).
1476 //
1477 // Notes:
1478 //    Normalizing the type involves examining the struct type to determine if it should
1479 //    be modified to one that is handled specially by the JIT, possibly being a candidate
1480 //    for full enregistration, e.g. TYP_SIMD16.
1481
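// A hedged usage sketch (the buffer size constant below is hypothetical): a caller that
// wants both the normalized type and the GC layout might write
//
//     BYTE      gcLayout[kMaxGcSlots]; // hypothetical size; must satisfy getClassGClayout's requirements
//     unsigned  numGCVars = 0;
//     var_types type      = impNormStructType(structHnd, gcLayout, &numGCVars);
//
// For example, a 16-byte SIMD struct (e.g. System.Numerics.Vector4) normalizes to TYP_SIMD16
// when FEATURE_SIMD is enabled, while a struct containing GC references stays TYP_STRUCT.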
1482 var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
1483                                       BYTE*                gcLayout,
1484                                       unsigned*            pNumGCVars,
1485                                       var_types*           pSimdBaseType)
1486 {
1487     assert(structHnd != NO_CLASS_HANDLE);
1488
1489     const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd);
1490     var_types   structType  = TYP_STRUCT;
1491
1492     // On CoreCLR the check for GC pointers includes a "may" to account for the special
1493     // ByRef-like span structs; CORINFO_FLG_CONTAINS_STACK_PTR is the relevant bit.
1494     // When that bit is set, the struct contains a ByRef that could be either a GC pointer
1495     // or a native pointer.
1496     const bool mayContainGCPtrs =
1497         ((structFlags & CORINFO_FLG_CONTAINS_STACK_PTR) != 0 || ((structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0));
1498
1499 #ifdef FEATURE_SIMD
1500     // Check to see if this is a SIMD type.
1501     if (featureSIMD && !mayContainGCPtrs)
1502     {
1503         unsigned originalSize = info.compCompHnd->getClassSize(structHnd);
1504
1505         if ((originalSize >= minSIMDStructBytes()) && (originalSize <= maxSIMDStructBytes()))
1506         {
1507             unsigned int sizeBytes;
1508             var_types    simdBaseType = getBaseTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
1509             if (simdBaseType != TYP_UNKNOWN)
1510             {
1511                 assert(sizeBytes == originalSize);
1512                 structType = getSIMDTypeForSize(sizeBytes);
1513                 if (pSimdBaseType != nullptr)
1514                 {
1515                     *pSimdBaseType = simdBaseType;
1516                 }
1517                 // Also indicate that we use floating point registers.
1518                 compFloatingPointUsed = true;
1519             }
1520         }
1521     }
1522 #endif // FEATURE_SIMD
1523
1524     // Fetch GC layout info if requested
1525     if (gcLayout != nullptr)
1526     {
1527         unsigned numGCVars = info.compCompHnd->getClassGClayout(structHnd, gcLayout);
1528
1529         // Verify that the quick test up above via the class attributes gave a
1530         // safe view of the type's GCness.
1531         //
1532         // Note there are cases where mayContainGCPtrs is true but getClassGClayout
1533         // does not report any gc fields.
1534
1535         assert(mayContainGCPtrs || (numGCVars == 0));
1536
1537         if (pNumGCVars != nullptr)
1538         {
1539             *pNumGCVars = numGCVars;
1540         }
1541     }
1542     else
1543     {
1544         // Can't safely ask for number of GC pointers without also
1545         // asking for layout.
1546         assert(pNumGCVars == nullptr);
1547     }
1548
1549     return structType;
1550 }
1551
1552 //****************************************************************************
1553 //  Given TYP_STRUCT value 'structVal', make sure it is 'canonical', that is
1554 //  it is either an OBJ or a MKREFANY node, or a node (e.g. GT_INDEX) that will be morphed.
1555 //
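//  Illustrative normalizations (a hedged sketch based on the cases handled below):
//      LCL_VAR<struct> V01           -->  OBJ(ADDR(LCL_VAR V01))
//      FIELD<struct>                 -->  OBJ(ADDR(FIELD))
//      CALL<struct> / RET_EXPR       -->  "tmp = CALL(...)" appended, then OBJ(ADDR(LCL_VAR tmp))
//      COMMA(sideEffect, OBJ(addr))  -->  OBJ(COMMA(sideEffect, addr))  // comma sunk below the OBJ
//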
1556 GenTreePtr Compiler::impNormStructVal(GenTreePtr           structVal,
1557                                       CORINFO_CLASS_HANDLE structHnd,
1558                                       unsigned             curLevel,
1559                                       bool                 forceNormalization /*=false*/)
1560 {
1561     assert(forceNormalization || varTypeIsStruct(structVal));
1562     assert(structHnd != NO_CLASS_HANDLE);
1563     var_types structType = structVal->TypeGet();
1564     bool      makeTemp   = false;
1565     if (structType == TYP_STRUCT)
1566     {
1567         structType = impNormStructType(structHnd);
1568     }
1569     bool                 alreadyNormalized = false;
1570     GenTreeLclVarCommon* structLcl         = nullptr;
1571
1572     genTreeOps oper = structVal->OperGet();
1573     switch (oper)
1574     {
1575         // GT_RETURN and GT_MKREFANY don't capture the handle.
1576         case GT_RETURN:
1577             break;
1578         case GT_MKREFANY:
1579             alreadyNormalized = true;
1580             break;
1581
1582         case GT_CALL:
1583             structVal->gtCall.gtRetClsHnd = structHnd;
1584             makeTemp                      = true;
1585             break;
1586
1587         case GT_RET_EXPR:
1588             structVal->gtRetExpr.gtRetClsHnd = structHnd;
1589             makeTemp                         = true;
1590             break;
1591
1592         case GT_ARGPLACE:
1593             structVal->gtArgPlace.gtArgPlaceClsHnd = structHnd;
1594             break;
1595
1596         case GT_INDEX:
1597             // This will be transformed to an OBJ later.
1598             alreadyNormalized                    = true;
1599             structVal->gtIndex.gtStructElemClass = structHnd;
1600             structVal->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(structHnd);
1601             break;
1602
1603         case GT_FIELD:
1604             // Wrap it in a GT_OBJ.
1605             structVal->gtType = structType;
1606             structVal         = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1607             break;
1608
1609         case GT_LCL_VAR:
1610         case GT_LCL_FLD:
1611             structLcl = structVal->AsLclVarCommon();
1612             // Wrap it in a GT_OBJ.
1613             structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1614             __fallthrough;
1615
1616         case GT_OBJ:
1617         case GT_BLK:
1618         case GT_DYN_BLK:
1619         case GT_ASG:
1620             // These should already have the appropriate type.
1621             assert(structVal->gtType == structType);
1622             alreadyNormalized = true;
1623             break;
1624
1625         case GT_IND:
1626             assert(structVal->gtType == structType);
1627             structVal         = gtNewObjNode(structHnd, structVal->gtGetOp1());
1628             alreadyNormalized = true;
1629             break;
1630
1631 #ifdef FEATURE_SIMD
1632         case GT_SIMD:
1633             assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
1634             break;
1635 #endif // FEATURE_SIMD
1636
1637         case GT_COMMA:
1638         {
1639             // The second operand could be a block node, a GT_SIMD node, or a GT_COMMA node.
1640             GenTree* blockNode = structVal->gtOp.gtOp2;
1641             assert(blockNode->gtType == structType);
1642
1643             // Is this GT_COMMA(op1, GT_COMMA())?
1644             GenTree* parent = structVal;
1645             if (blockNode->OperGet() == GT_COMMA)
1646             {
1647                 // Find the last node in the comma chain.
1648                 do
1649                 {
1650                     assert(blockNode->gtType == structType);
1651                     parent    = blockNode;
1652                     blockNode = blockNode->gtOp.gtOp2;
1653                 } while (blockNode->OperGet() == GT_COMMA);
1654             }
1655
1656 #ifdef FEATURE_SIMD
1657             if (blockNode->OperGet() == GT_SIMD)
1658             {
1659                 parent->gtOp.gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization);
1660                 alreadyNormalized  = true;
1661             }
1662             else
1663 #endif
1664             {
1665                 assert(blockNode->OperIsBlk());
1666
1667                 // Sink the GT_COMMA below the blockNode addr.
1668                 // That is GT_COMMA(op1, op2=blockNode) is transformed into
1669                 // blockNode(GT_COMMA(TYP_BYREF, op1, op2's op1)).
1670                 //
1671                 // In case of a chained GT_COMMA case, we sink the last
1672                 // GT_COMMA below the blockNode addr.
1673                 GenTree* blockNodeAddr = blockNode->gtOp.gtOp1;
1674                 assert(blockNodeAddr->gtType == TYP_BYREF);
1675                 GenTree* commaNode    = parent;
1676                 commaNode->gtType     = TYP_BYREF;
1677                 commaNode->gtOp.gtOp2 = blockNodeAddr;
1678                 blockNode->gtOp.gtOp1 = commaNode;
1679                 if (parent == structVal)
1680                 {
1681                     structVal = blockNode;
1682                 }
1683                 alreadyNormalized = true;
1684             }
1685         }
1686         break;
1687
1688         default:
1689             assert(!"Unexpected node in impNormStructVal()");
1690             break;
1691     }
1692     structVal->gtType  = structType;
1693     GenTree* structObj = structVal;
1694
1695     if (!alreadyNormalized || forceNormalization)
1696     {
1697         if (makeTemp)
1698         {
1699             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1700
1701             impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1702
1703             // The structVal is now the temp itself
1704
1705             structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon();
1706             // TODO-1stClassStructs: Avoid always wrapping in GT_OBJ.
1707             structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structLcl));
1708         }
1709         else if (varTypeIsStruct(structType) && !structVal->OperIsBlk())
1710         {
1711             // Wrap it in a GT_OBJ
1712             structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1713         }
1714     }
1715
1716     if (structLcl != nullptr)
1717     {
1718         // An OBJ on an ADDR(LCL_VAR) can never raise an exception
1719         // so we don't set GTF_EXCEPT here.
1720         if (!lvaIsImplicitByRefLocal(structLcl->gtLclNum))
1721         {
1722             structObj->gtFlags &= ~GTF_GLOB_REF;
1723         }
1724     }
1725     else
1726     {
1727         // In general an OBJ is an indirection and could raise an exception.
1728         structObj->gtFlags |= GTF_EXCEPT;
1729     }
1730     return (structObj);
1731 }
1732
1733 /******************************************************************************/
1734 // Given a type token, generate code that will evaluate to the correct
1735 // handle representation of that token (type handle, field handle, or method handle)
1736 //
1737 // For most cases, the handle is determined at compile-time, and the code
1738 // generated is simply an embedded handle.
1739 //
1740 // Run-time lookup is required if the enclosing method is shared between instantiations
1741 // and the token refers to formal type parameters whose instantiation is not known
1742 // at compile-time.
1743 //
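// Illustrative sketch (hedged; 'resolvedToken' stands for a hypothetical, already-resolved token):
//
//     GenTreePtr hnd = impTokenToHandle(&resolvedToken);
//     // no runtime lookup needed:  hnd is an embedded handle constant (gtNewIconEmbHndNode)
//     // shared generic code:       hnd is the dictionary-lookup tree built by impRuntimeLookupToTree
//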
1744 GenTreePtr Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1745                                       BOOL*                   pRuntimeLookup /* = NULL */,
1746                                       BOOL                    mustRestoreHandle /* = FALSE */,
1747                                       BOOL                    importParent /* = FALSE */)
1748 {
1749     assert(!fgGlobalMorph);
1750
1751     CORINFO_GENERICHANDLE_RESULT embedInfo;
1752     info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo);
1753
1754     if (pRuntimeLookup)
1755     {
1756         *pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup;
1757     }
1758
1759     if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup)
1760     {
1761         switch (embedInfo.handleType)
1762         {
1763             case CORINFO_HANDLETYPE_CLASS:
1764                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle);
1765                 break;
1766
1767             case CORINFO_HANDLETYPE_METHOD:
1768                 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle);
1769                 break;
1770
1771             case CORINFO_HANDLETYPE_FIELD:
1772                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
1773                     info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle));
1774                 break;
1775
1776             default:
1777                 break;
1778         }
1779     }
1780
1781     return impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
1782                            embedInfo.compileTimeHandle);
1783 }
1784
1785 GenTreePtr Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1786                                      CORINFO_LOOKUP*         pLookup,
1787                                      unsigned                handleFlags,
1788                                      void*                   compileTimeHandle)
1789 {
1790     if (!pLookup->lookupKind.needsRuntimeLookup)
1791     {
1792         // No runtime lookup is required.
1793         // Access is direct or memory-indirect (of a fixed address) reference
1794
1795         CORINFO_GENERIC_HANDLE handle       = nullptr;
1796         void*                  pIndirection = nullptr;
1797         assert(pLookup->constLookup.accessType != IAT_PPVALUE);
1798
1799         if (pLookup->constLookup.accessType == IAT_VALUE)
1800         {
1801             handle = pLookup->constLookup.handle;
1802         }
1803         else if (pLookup->constLookup.accessType == IAT_PVALUE)
1804         {
1805             pIndirection = pLookup->constLookup.addr;
1806         }
1807         return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, 0, nullptr, compileTimeHandle);
1808     }
1809     else if (compIsForInlining())
1810     {
1811         // Don't import runtime lookups when inlining
1812         // Inlining has to be aborted in such a case
1813         compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1814         return nullptr;
1815     }
1816     else
1817     {
1818         // Need to use dictionary-based access which depends on the typeContext
1819         // which is only available at runtime, not at compile-time.
1820
1821         return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle);
1822     }
1823 }
1824
1825 #ifdef FEATURE_READYTORUN_COMPILER
1826 GenTreePtr Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup,
1827                                                unsigned              handleFlags,
1828                                                void*                 compileTimeHandle)
1829 {
1830     CORINFO_GENERIC_HANDLE handle       = nullptr;
1831     void*                  pIndirection = nullptr;
1832     assert(pLookup->accessType != IAT_PPVALUE);
1833
1834     if (pLookup->accessType == IAT_VALUE)
1835     {
1836         handle = pLookup->handle;
1837     }
1838     else if (pLookup->accessType == IAT_PVALUE)
1839     {
1840         pIndirection = pLookup->addr;
1841     }
1842     return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, 0, nullptr, compileTimeHandle);
1843 }
1844
1845 GenTreePtr Compiler::impReadyToRunHelperToTree(
1846     CORINFO_RESOLVED_TOKEN* pResolvedToken,
1847     CorInfoHelpFunc         helper,
1848     var_types               type,
1849     GenTreeArgList*         args /* =NULL*/,
1850     CORINFO_LOOKUP_KIND*    pGenericLookupKind /* =NULL. Only used with generics */)
1851 {
1852     CORINFO_CONST_LOOKUP lookup;
1853 #if COR_JIT_EE_VERSION > 460
1854     if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup))
1855     {
1856         return nullptr;
1857     }
1858 #else
1859     info.compCompHnd->getReadyToRunHelper(pResolvedToken, helper, &lookup);
1860 #endif
1861
1862     GenTreePtr op1 = gtNewHelperCallNode(helper, type, GTF_EXCEPT, args);
1863
1864     op1->gtCall.setEntryPoint(lookup);
1865
1866     return op1;
1867 }
1868 #endif
1869
1870 GenTreePtr Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
1871 {
1872     GenTreePtr op1 = nullptr;
1873
1874     switch (pCallInfo->kind)
1875     {
1876         case CORINFO_CALL:
1877             op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod);
1878
1879 #ifdef FEATURE_READYTORUN_COMPILER
1880             if (opts.IsReadyToRun())
1881             {
1882                 op1->gtFptrVal.gtEntryPoint          = pCallInfo->codePointerLookup.constLookup;
1883                 op1->gtFptrVal.gtLdftnResolvedToken  = new (this, CMK_Unknown) CORINFO_RESOLVED_TOKEN;
1884                 *op1->gtFptrVal.gtLdftnResolvedToken = *pResolvedToken;
1885             }
1886             else
1887             {
1888                 op1->gtFptrVal.gtEntryPoint.addr = nullptr;
1889             }
1890 #endif
1891             break;
1892
1893         case CORINFO_CALL_CODE_POINTER:
1894             if (compIsForInlining())
1895             {
1896                 // Don't import runtime lookups when inlining
1897                 // Inlining has to be aborted in such a case
1898                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1899                 return nullptr;
1900             }
1901
1902             op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod);
1903             break;
1904
1905         default:
1906             noway_assert(!"unknown call kind");
1907             break;
1908     }
1909
1910     return op1;
1911 }
1912
1913 //------------------------------------------------------------------------
1914 // getRuntimeContextTree: find pointer to context for runtime lookup.
1915 //
1916 // Arguments:
1917 //    kind - lookup kind.
1918 //
1919 // Return Value:
1920 //    Return GenTree pointer to generic shared context.
1921 //
1922 // Notes:
1923 //    Reports that the generic context is used.
1924
1925 GenTreePtr Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind)
1926 {
1927     GenTreePtr ctxTree = nullptr;
1928
1929     // Collectible types require that, for shared generic code, any use of the generic context parameter
1930     // is reported. (This is a conservative approach; in some cases, particularly when the context
1931     // parameter is 'this', we could avoid the eager reporting logic.)
1932     lvaGenericsContextUsed = true;
1933
1934     if (kind == CORINFO_LOOKUP_THISOBJ)
1935     {
1936         // this Object
1937         ctxTree = gtNewLclvNode(info.compThisArg, TYP_REF);
1938
1939         // Vtable pointer of this object
1940         ctxTree = gtNewOperNode(GT_IND, TYP_I_IMPL, ctxTree);
1941         ctxTree->gtFlags |= GTF_EXCEPT; // Null-pointer exception
1942         ctxTree->gtFlags |= GTF_IND_INVARIANT;
1943     }
1944     else
1945     {
1946         assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM);
1947
1948         ctxTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); // Exact method descriptor as passed in as last arg
1949     }
1950     return ctxTree;
1951 }
1952
1953 /*****************************************************************************/
1954 /* Import a dictionary lookup to access a handle in code shared between
1955    generic instantiations.
1956    The lookup depends on the typeContext which is only available at
1957    runtime, and not at compile-time.
1958    pLookup->token1 and pLookup->token2 specify the handle that is needed.
1959    The cases are:
1960
1961    1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the
1962       instantiation-specific handle, and the tokens to lookup the handle.
1963    2. pLookup->indirections != CORINFO_USEHELPER :
1964       2a. pLookup->testForNull == false : Dereference the instantiation-specific handle
1965           to get the handle.
1966       2b. pLookup->testForNull == true : Dereference the instantiation-specific handle.
1967           If it is non-NULL, it is the handle required. Else, call a helper
1968           to lookup the handle.
1969  */
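/* Illustrative tree shape for case 2b (a hedged simplification of the code below):

       slot   = ctx, plus pRuntimeLookup->offsets[i] adds and an IND per extra indirection
       handle = IND(slot)
       QMARK( NE(handle, 0),
              COLON( nothing,                              ; handle already valid
                     CALL helper(ctx, signature) ) )       ; null => ask the helper
       ...the result is spilled to a temp and a LCL_VAR of that temp is returned.

   Case 1 emits only the helper call; case 2a emits only the indirection chain. */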
1970
1971 GenTreePtr Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1972                                             CORINFO_LOOKUP*         pLookup,
1973                                             void*                   compileTimeHandle)
1974 {
1975
1976     // This method can only be called from the importer instance of the Compiler.
1977     // In other words, it cannot be called by the Compiler instance created for the inlinee.
1978     assert(!compIsForInlining());
1979
1980     GenTreePtr ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);
1981
1982 #ifdef FEATURE_READYTORUN_COMPILER
1983     if (opts.IsReadyToRun())
1984     {
1985         return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
1986                                          gtNewArgList(ctxTree), &pLookup->lookupKind);
1987     }
1988 #endif
1989
1990     CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;
1991     // It's available only via the run-time helper function
1992     if (pRuntimeLookup->indirections == CORINFO_USEHELPER)
1993     {
1994         GenTreeArgList* helperArgs =
1995             gtNewArgList(ctxTree, gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, 0,
1996                                                       nullptr, compileTimeHandle));
1997
1998         return gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, GTF_EXCEPT, helperArgs);
1999     }
2000
2001     // Slot pointer
2002     GenTreePtr slotPtrTree = ctxTree;
2003
2004     if (pRuntimeLookup->testForNull)
2005     {
2006         slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2007                                    nullptr DEBUGARG("impRuntimeLookup slot"));
2008     }
2009
2010     // Apply repeated indirections
2011     for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
2012     {
2013         if (i != 0)
2014         {
2015             slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2016             slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2017             slotPtrTree->gtFlags |= GTF_IND_INVARIANT;
2018         }
2019         if (pRuntimeLookup->offsets[i] != 0)
2020         {
2021             slotPtrTree =
2022                 gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
2023         }
2024     }
2025
2026     // No null test required
2027     if (!pRuntimeLookup->testForNull)
2028     {
2029         if (pRuntimeLookup->indirections == 0)
2030         {
2031             return slotPtrTree;
2032         }
2033
2034         slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2035         slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2036
2037         if (!pRuntimeLookup->testForFixup)
2038         {
2039             return slotPtrTree;
2040         }
2041
2042         impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0"));
2043
2044         GenTreePtr op1 = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2045                                       nullptr DEBUGARG("impRuntimeLookup test"));
2046         op1 = impImplicitIorI4Cast(op1, TYP_INT); // downcast the pointer to a TYP_INT on 64-bit targets
2047
2048         // Use a GT_AND to check for the lowest bit and indirect if it is set
2049         GenTreePtr testTree = gtNewOperNode(GT_AND, TYP_INT, op1, gtNewIconNode(1));
2050         GenTreePtr relop    = gtNewOperNode(GT_EQ, TYP_INT, testTree, gtNewIconNode(0));
2051         relop->gtFlags |= GTF_RELOP_QMARK;
2052
2053         op1 = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2054                            nullptr DEBUGARG("impRuntimeLookup indir"));
2055         op1 = gtNewOperNode(GT_ADD, TYP_I_IMPL, op1, gtNewIconNode(-1, TYP_I_IMPL)); // subtract 1 from the pointer
2056         GenTreePtr indirTree = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
2057         GenTreePtr colon     = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL, slotPtrTree, indirTree);
2058
2059         GenTreePtr qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2060
2061         unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark0"));
2062         impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2063         return gtNewLclvNode(tmp, TYP_I_IMPL);
2064     }
2065
2066     assert(pRuntimeLookup->indirections != 0);
2067
2068     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1"));
2069
2070     // Extract the handle
2071     GenTreePtr handle = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2072     handle->gtFlags |= GTF_IND_NONFAULTING;
2073
2074     GenTreePtr handleCopy = impCloneExpr(handle, &handle, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2075                                          nullptr DEBUGARG("impRuntimeLookup typehandle"));
2076
2077     // Call to helper
2078     GenTreeArgList* helperArgs =
2079         gtNewArgList(ctxTree, gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, 0, nullptr,
2080                                                   compileTimeHandle));
2081     GenTreePtr helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, GTF_EXCEPT, helperArgs);
2082
2083     // Check for null and possibly call helper
2084     GenTreePtr relop = gtNewOperNode(GT_NE, TYP_INT, handle, gtNewIconNode(0, TYP_I_IMPL));
2085     relop->gtFlags |= GTF_RELOP_QMARK;
2086
2087     GenTreePtr colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL,
2088                                                          gtNewNothingNode(), // do nothing if nonnull
2089                                                          helperCall);
2090
2091     GenTreePtr qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2092
2093     unsigned tmp;
2094     if (handleCopy->IsLocal())
2095     {
2096         tmp = handleCopy->gtLclVarCommon.gtLclNum;
2097     }
2098     else
2099     {
2100         tmp = lvaGrabTemp(true DEBUGARG("spilling QMark1"));
2101     }
2102
2103     impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2104     return gtNewLclvNode(tmp, TYP_I_IMPL);
2105 }
2106
2107 /******************************************************************************
2108  *  Spills the stack at verCurrentState.esStack[level] and replaces it with a temp.
2109  *  If tnum!=BAD_VAR_NUM, the temp var used to replace the tree is tnum,
2110  *     else, grab a new temp.
2111  *  For structs (which can be pushed on the stack using obj, etc),
2112  *  special handling is needed
2113  */
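/* Illustrative before/after (hedged; V02, V05 and "foo" are hypothetical):

       before:  verCurrentState.esStack[level].val == ADD(LCL_VAR V02, CALL foo)
       after:   appended statement:  V05 = ADD(LCL_VAR V02, CALL foo)
                verCurrentState.esStack[level].val == LCL_VAR V05
*/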
2114
2115 struct RecursiveGuard
2116 {
2117 public:
2118     RecursiveGuard()
2119     {
2120         m_pAddress = nullptr;
2121     }
2122
2123     ~RecursiveGuard()
2124     {
2125         if (m_pAddress)
2126         {
2127             *m_pAddress = false;
2128         }
2129     }
2130
2131     void Init(bool* pAddress, bool bInitialize)
2132     {
2133         assert(pAddress && *pAddress == false && "Recursive guard violation");
2134         m_pAddress = pAddress;
2135
2136         if (bInitialize)
2137         {
2138             *m_pAddress = true;
2139         }
2140     }
2141
2142 protected:
2143     bool* m_pAddress;
2144 };
2145
2146 bool Compiler::impSpillStackEntry(unsigned level,
2147                                   unsigned tnum
2148 #ifdef DEBUG
2149                                   ,
2150                                   bool        bAssertOnRecursion,
2151                                   const char* reason
2152 #endif
2153                                   )
2154 {
2155
2156 #ifdef DEBUG
2157     RecursiveGuard guard;
2158     guard.Init(&impNestedStackSpill, bAssertOnRecursion);
2159 #endif
2160
2161     GenTreePtr tree = verCurrentState.esStack[level].val;
2162
2163     /* Allocate a temp if we haven't been asked to use a particular one */
2164
2165     if (tiVerificationNeeded)
2166     {
2167         // Ignore bad temp requests (they will happen with bad code and will be
2168         // caught when importing the destination block)
2169         if ((tnum != BAD_VAR_NUM && tnum >= lvaCount) && verNeedsVerification())
2170         {
2171             return false;
2172         }
2173     }
2174     else
2175     {
2176         if (tnum != BAD_VAR_NUM && (tnum >= lvaCount))
2177         {
2178             return false;
2179         }
2180     }
2181
2182     if (tnum == BAD_VAR_NUM)
2183     {
2184         tnum = lvaGrabTemp(true DEBUGARG(reason));
2185     }
2186     else if (tiVerificationNeeded && lvaTable[tnum].TypeGet() != TYP_UNDEF)
2187     {
2188         // if verification is needed and tnum's type is incompatible with
2189         // the type on the stack, we grab a new temp. This is safe since
2190         // we will throw a verification exception in the dest block.
2191
2192         var_types valTyp = tree->TypeGet();
2193         var_types dstTyp = lvaTable[tnum].TypeGet();
2194
2195         // if the two types are different, we return. This will only happen with bad code and will
2196         // be caught when importing the destination block. We still allow int/byref and float/double differences.
2197         if ((genActualType(valTyp) != genActualType(dstTyp)) &&
2198             !(
2199 #ifndef _TARGET_64BIT_
2200                 (valTyp == TYP_I_IMPL && dstTyp == TYP_BYREF) || (valTyp == TYP_BYREF && dstTyp == TYP_I_IMPL) ||
2201 #endif // !_TARGET_64BIT_
2202                 (varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp))))
2203         {
2204             if (verNeedsVerification())
2205             {
2206                 return false;
2207             }
2208         }
2209     }
2210
2211     /* Assign the spilled entry to the temp */
2212     impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level);
2213
2214     // The tree type may be modified by impAssignTempGen, so use the type of the lclVar.
2215     var_types  type                    = genActualType(lvaTable[tnum].TypeGet());
2216     GenTreePtr temp                    = gtNewLclvNode(tnum, type);
2217     verCurrentState.esStack[level].val = temp;
2218
2219     return true;
2220 }
2221
2222 /*****************************************************************************
2223  *
2224  *  Ensure that the stack has only spilled values
2225  */
2226
2227 void Compiler::impSpillStackEnsure(bool spillLeaves)
2228 {
2229     assert(!spillLeaves || opts.compDbgCode);
2230
2231     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2232     {
2233         GenTreePtr tree = verCurrentState.esStack[level].val;
2234
2235         if (!spillLeaves && tree->OperIsLeaf())
2236         {
2237             continue;
2238         }
2239
2240         // Temps introduced by the importer itself don't need to be spilled
2241
2242         bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) && (tree->gtLclVarCommon.gtLclNum >= info.compLocalsCount);
2243
2244         if (isTempLcl)
2245         {
2246             continue;
2247         }
2248
2249         impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure"));
2250     }
2251 }
2252
2253 void Compiler::impSpillEvalStack()
2254 {
2255     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2256     {
2257         impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack"));
2258     }
2259 }
2260
2261 /*****************************************************************************
2262  *
2263  *  If the stack contains any trees with side effects in them, assign those
2264  *  trees to temps and append the assignments to the statement list.
2265  *  On return the stack is guaranteed to be empty.
2266  */
2267
2268 inline void Compiler::impEvalSideEffects()
2269 {
2270     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects"));
2271     verCurrentState.esStackDepth = 0;
2272 }
2273
2274 /*****************************************************************************
2275  *
2276  *  If the stack contains any trees with side effects in them, assign those
2277  *  trees to temps and replace them on the stack with refs to their temps.
2278  *  [0..chkLevel) is the portion of the stack which will be checked and spilled.
2279  */
2280
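/* A hedged usage sketch: callers typically spill pending side effects before appending a
   tree that must not be reordered with them, e.g.

       impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("before append"));
       impAppendTree(tree, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);

   (The reason string is illustrative only.) */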
2281 inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason))
2282 {
2283     assert(chkLevel != (unsigned)CHECK_SPILL_NONE);
2284
2285     /* Before we make any appends to the tree list we must spill the
2286      * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */
2287
2288     impSpillSpecialSideEff();
2289
2290     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
2291     {
2292         chkLevel = verCurrentState.esStackDepth;
2293     }
2294
2295     assert(chkLevel <= verCurrentState.esStackDepth);
2296
2297     unsigned spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT;
2298
2299     for (unsigned i = 0; i < chkLevel; i++)
2300     {
2301         GenTreePtr tree = verCurrentState.esStack[i].val;
2302
2303         GenTreePtr lclVarTree;
2304
2305         if ((tree->gtFlags & spillFlags) != 0 ||
2306             (spillGlobEffects &&                        // Only consider the following when spillGlobEffects == TRUE
2307              !impIsAddressInLocal(tree, &lclVarTree) && // No need to spill the GT_ADDR node on a local.
2308              gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or
2309                                            // lvAddrTaken flag.
2310         {
2311             impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason));
2312         }
2313     }
2314 }
2315
2316 /*****************************************************************************
2317  *
2318  *  If the stack contains any trees with special side effects in them, assign
2319  *  those trees to temps and replace them on the stack with refs to their temps.
2320  */
2321
2322 inline void Compiler::impSpillSpecialSideEff()
2323 {
2324     // Only exception objects need to be carefully handled
2325
2326     if (!compCurBB->bbCatchTyp)
2327     {
2328         return;
2329     }
2330
2331     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2332     {
2333         GenTreePtr tree = verCurrentState.esStack[level].val;
2334         // Make sure we spill if there is an exception object anywhere in the subtree.
2335         if (gtHasCatchArg(tree))
2336         {
2337             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff"));
2338         }
2339     }
2340 }
2341
2342 /*****************************************************************************
2343  *
2344  *  Spill all stack references to value classes (TYP_STRUCT nodes)
2345  */
2346
2347 void Compiler::impSpillValueClasses()
2348 {
2349     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2350     {
2351         GenTreePtr tree = verCurrentState.esStack[level].val;
2352
2353         if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT)
2354         {
2355             // Tree walk was aborted, which means that we found a
2356             // value class on the stack.  Need to spill that
2357             // stack entry.
2358
2359             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses"));
2360         }
2361     }
2362 }
2363
2364 /*****************************************************************************
2365  *
2366  *  Callback that checks if a tree node is TYP_STRUCT
2367  */
2368
2369 Compiler::fgWalkResult Compiler::impFindValueClasses(GenTreePtr* pTree, fgWalkData* data)
2370 {
2371     fgWalkResult walkResult = WALK_CONTINUE;
2372
2373     if ((*pTree)->gtType == TYP_STRUCT)
2374     {
2375         // Abort the walk and indicate that we found a value class
2376
2377         walkResult = WALK_ABORT;
2378     }
2379
2380     return walkResult;
2381 }
2382
2383 /*****************************************************************************
2384  *
2385  *  If the stack contains any trees with references to local #lclNum, assign
2386  *  those trees to temps and replace their place on the stack with refs to
2387  *  their temps.
2388  */
2389
2390 void Compiler::impSpillLclRefs(ssize_t lclNum)
2391 {
2392     /* Before we make any appends to the tree list we must spill the
2393      * "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */
2394
2395     impSpillSpecialSideEff();
2396
2397     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2398     {
2399         GenTreePtr tree = verCurrentState.esStack[level].val;
2400
2401         /* If the tree may throw an exception, and the block has a handler,
2402            then we need to spill assignments to the local if the local is
2403            live on entry to the handler.
2404            Just spill 'em all without considering the liveness */
2405
2406         bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT));
2407
2408         /* Skip the tree if it doesn't have an affected reference,
2409            unless xcptnCaught */
2410
2411         if (xcptnCaught || gtHasRef(tree, lclNum, false))
2412         {
2413             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs"));
2414         }
2415     }
2416 }
2417
2418 /*****************************************************************************
2419  *
2420  *  Push catch arg onto the stack.
2421  *  If there are jumps to the beginning of the handler, insert basic block
2422  *  and spill catch arg to a temp. Update the handler block if necessary.
2423  *
2424  *  Returns the basic block of the actual handler.
2425  */
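/* Illustrative shape (hedged) when the handler start has multiple jumps to it: a spill
   block is inserted in front of the handler, and the handler then sees a local instead of
   the raw GT_CATCH_ARG:

       newBlk:  tmp = GT_CATCH_ARG        // tmp is a fresh TYP_REF temp
       hndBlk:  stack on entry == [ LCL_VAR tmp ]
*/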
2426
2427 BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd)
2428 {
2429     // Do not inject the basic block twice on reimport. This should be
2430     // hit only under JIT stress. See if the block is the one we injected.
2431     // Note that EH canonicalization can inject internal blocks here. We might
2432     // be able to re-use such a block (but we don't, right now).
2433     if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET)) ==
2434         (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET))
2435     {
2436         GenTreePtr tree = hndBlk->bbTreeList;
2437
2438         if (tree != nullptr && tree->gtOper == GT_STMT)
2439         {
2440             tree = tree->gtStmt.gtStmtExpr;
2441             assert(tree != nullptr);
2442
2443             if ((tree->gtOper == GT_ASG) && (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
2444                 (tree->gtOp.gtOp2->gtOper == GT_CATCH_ARG))
2445             {
2446                 tree = gtNewLclvNode(tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum, TYP_REF);
2447
2448                 impPushOnStack(tree, typeInfo(TI_REF, clsHnd));
2449
2450                 return hndBlk->bbNext;
2451             }
2452         }
2453
2454         // If we get here, it must have been some other kind of internal block. It's possible that
2455         // someone prepended something to our injected block, but that's unlikely.
2456     }
2457
2458     /* Push the exception address value on the stack */
2459     GenTreePtr arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF);
2460
2461     /* Mark the node as having a side-effect - i.e. cannot be
2462      * moved around since it is tied to a fixed location (EAX) */
2463     arg->gtFlags |= GTF_ORDER_SIDEEFF;
2464
2465     /* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */
2466     if (hndBlk->bbRefs > 1 || compStressCompile(STRESS_CATCH_ARG, 5))
2467     {
2468         if (hndBlk->bbRefs == 1)
2469         {
2470             hndBlk->bbRefs++;
2471         }
2472
2473         /* Create extra basic block for the spill */
2474         BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true);
2475         newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET;
2476         newBlk->setBBWeight(hndBlk->bbWeight);
2477         newBlk->bbCodeOffs = hndBlk->bbCodeOffs;
2478
2479         /* Account for the new link we are about to create */
2480         hndBlk->bbRefs++;
2481
2482         /* Spill into a temp */
2483         unsigned tempNum         = lvaGrabTemp(false DEBUGARG("SpillCatchArg"));
2484         lvaTable[tempNum].lvType = TYP_REF;
2485         arg                      = gtNewTempAssign(tempNum, arg);
2486
2487         hndBlk->bbStkTempsIn = tempNum;
2488
2489         /* Report the debug info. impImportBlockCode won't treat
2490          * the actual handler as an exception block and thus won't do it for us. */
2491         if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
2492         {
2493             impCurStmtOffs = newBlk->bbCodeOffs | IL_OFFSETX_STKBIT;
2494             arg            = gtNewStmt(arg, impCurStmtOffs);
2495         }
2496
2497         fgInsertStmtAtEnd(newBlk, arg);
2498
2499         arg = gtNewLclvNode(tempNum, TYP_REF);
2500     }
2501
2502     impPushOnStack(arg, typeInfo(TI_REF, clsHnd));
2503
2504     return hndBlk;
2505 }
2506
2507 /*****************************************************************************
2508  *
2509  *  Given a tree, clone it. *pClone is set to the cloned tree.
2510  *  Returns the original tree if the cloning was easy,
2511  *   else returns the temp to which the tree had to be spilled to.
2512  *  If the tree has side-effects, it will be spilled to a temp.
2513  */
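/* A hedged usage sketch ('addr' stands for any previously built tree): to use a value twice
   without duplicating side effects, clone it and keep both trees:

       GenTreePtr addrUse2;
       GenTreePtr addrUse1 = impCloneExpr(addr, &addrUse2, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
                                          nullptr DEBUGARG("example clone"));
       // side-effect free:  addrUse1 == addr and addrUse2 is a fresh copy
       // otherwise:         "tmp = addr" is appended and both uses become LCL_VARs of tmp
*/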
2514
2515 GenTreePtr Compiler::impCloneExpr(GenTreePtr           tree,
2516                                   GenTreePtr*          pClone,
2517                                   CORINFO_CLASS_HANDLE structHnd,
2518                                   unsigned             curLevel,
2519                                   GenTreePtr* pAfterStmt DEBUGARG(const char* reason))
2520 {
2521     if (!(tree->gtFlags & GTF_GLOB_EFFECT))
2522     {
2523         GenTreePtr clone = gtClone(tree, true);
2524
2525         if (clone)
2526         {
2527             *pClone = clone;
2528             return tree;
2529         }
2530     }
2531
2532     /* Store the operand in a temp and return the temp */
2533
2534     unsigned temp = lvaGrabTemp(true DEBUGARG(reason));
2535
2536     // impAssignTempGen() may change tree->gtType to TYP_VOID for calls which
2537     // return a struct type. It also may modify the struct type to a more
2538     // specialized type (e.g. a SIMD type).  So we will get the type from
2539     // the lclVar AFTER calling impAssignTempGen().
2540
2541     impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtOffs);
2542     var_types type = genActualType(lvaTable[temp].TypeGet());
2543
2544     *pClone = gtNewLclvNode(temp, type);
2545     return gtNewLclvNode(temp, type);
2546 }
2547
2548 /*****************************************************************************
2549  * Remember the IL offset (including stack-empty info) for the trees we will
2550  * generate now.
2551  */
2552
2553 inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs)
2554 {
2555     if (compIsForInlining())
2556     {
2557         GenTreePtr callStmt = impInlineInfo->iciStmt;
2558         assert(callStmt->gtOper == GT_STMT);
2559         impCurStmtOffs = callStmt->gtStmt.gtStmtILoffsx;
2560     }
2561     else
2562     {
2563         assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2564         IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2565         impCurStmtOffs    = offs | stkBit;
2566     }
2567 }
2568
2569 /*****************************************************************************
2570  * Returns current IL offset with stack-empty and call-instruction info incorporated
2571  */
2572 inline IL_OFFSETX Compiler::impCurILOffset(IL_OFFSET offs, bool callInstruction)
2573 {
2574     if (compIsForInlining())
2575     {
2576         return BAD_IL_OFFSET;
2577     }
2578     else
2579     {
2580         assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2581         IL_OFFSETX stkBit             = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2582         IL_OFFSETX callInstructionBit = callInstruction ? IL_OFFSETX_CALLINSTRUCTIONBIT : 0;
2583         return offs | stkBit | callInstructionBit;
2584     }
2585 }
2586
2587 /*****************************************************************************
2588  *
2589  *  Remember the instr offset for the statements
2590  *
2591  *  When we do impAppendTree(tree), we can't set tree->gtStmtLastILoffs to
2592  *  impCurOpcOffs, if the append was done because of a partial stack spill,
2593  *  as some of the trees corresponding to code up to impCurOpcOffs might
2594  *  still be sitting on the stack.
2595  *  So we delay marking of gtStmtLastILoffs until impNoteLastILoffs().
2596  *  This should be called when an opcode finally/explicitly causes
2597  *  impAppendTree(tree) to be called (as opposed to being called because of
2598  *  a spill caused by the opcode)
2599  */
2600
2601 #ifdef DEBUG
2602
2603 void Compiler::impNoteLastILoffs()
2604 {
2605     if (impLastILoffsStmt == nullptr)
2606     {
2607         // We should have added a statement for the current basic block
2608         // Is this assert correct ?
2609
2610         assert(impTreeLast);
2611         assert(impTreeLast->gtOper == GT_STMT);
2612
2613         impTreeLast->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2614     }
2615     else
2616     {
2617         impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2618         impLastILoffsStmt                          = nullptr;
2619     }
2620 }
2621
2622 #endif // DEBUG
2623
2624 /*****************************************************************************
2625  * We don't create any GenTree (excluding spills) for a branch.
2626  * For debugging info, we need a placeholder so that we can note
2627  * the IL offset in gtStmt.gtStmtOffs. So append an empty statement.
2628  */
2629
2630 void Compiler::impNoteBranchOffs()
2631 {
2632     if (opts.compDbgCode)
2633     {
2634         impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
2635     }
2636 }
2637
2638 /*****************************************************************************
2639  * Locate the next stmt boundary for which we need to record info.
2640  * We will have to spill the stack at such boundaries if it is not
2641  * already empty.
2642  * Returns the next stmt boundary (after the start of the block)
2643  */
2644
2645 unsigned Compiler::impInitBlockLineInfo()
2646 {
2647     /* Assume the block does not correspond with any IL offset. This prevents
2648        us from reporting extra offsets. Extra mappings can cause confusing
2649        stepping, especially if the extra mapping is a jump-target, and the
2650        debugger does not ignore extra mappings, but instead rewinds to the
2651        nearest known offset */
2652
2653     impCurStmtOffsSet(BAD_IL_OFFSET);
2654
2655     if (compIsForInlining())
2656     {
2657         return ~0;
2658     }
2659
2660     IL_OFFSET blockOffs = compCurBB->bbCodeOffs;
2661
2662     if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES))
2663     {
2664         impCurStmtOffsSet(blockOffs);
2665     }
2666
2667     if (false && (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES))
2668     {
2669         impCurStmtOffsSet(blockOffs);
2670     }
2671
2672     /* Always report IL offset 0 or some tests get confused.
2673        Probably a good idea anyway */
2674
2675     if (blockOffs == 0)
2676     {
2677         impCurStmtOffsSet(blockOffs);
2678     }
2679
2680     if (!info.compStmtOffsetsCount)
2681     {
2682         return ~0;
2683     }
2684
2685     /* Find the lowest explicit stmt boundary within the block */
2686
2687     /* Start looking at an entry that is based on our instr offset */
2688
2689     unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize;
2690
2691     if (index >= info.compStmtOffsetsCount)
2692     {
2693         index = info.compStmtOffsetsCount - 1;
2694     }
2695
2696     /* If we've guessed too far, back up */
2697
2698     while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs)
2699     {
2700         index--;
2701     }
2702
2703     /* If we guessed short, advance ahead */
2704
2705     while (info.compStmtOffsets[index] < blockOffs)
2706     {
2707         index++;
2708
2709         if (index == info.compStmtOffsetsCount)
2710         {
2711             return info.compStmtOffsetsCount;
2712         }
2713     }
2714
2715     assert(index < info.compStmtOffsetsCount);
2716
2717     if (info.compStmtOffsets[index] == blockOffs)
2718     {
2719         /* There is an explicit boundary for the start of this basic block.
2720            So we will start with bbCodeOffs. Else we will wait until we
2721            get to the next explicit boundary */
2722
2723         impCurStmtOffsSet(blockOffs);
2724
2725         index++;
2726     }
2727
2728     return index;
2729 }
2730
2731 /*****************************************************************************/
2732
2733 static inline bool impOpcodeIsCallOpcode(OPCODE opcode)
2734 {
2735     switch (opcode)
2736     {
2737         case CEE_CALL:
2738         case CEE_CALLI:
2739         case CEE_CALLVIRT:
2740             return true;
2741
2742         default:
2743             return false;
2744     }
2745 }
2746
2747 /*****************************************************************************/
2748
2749 static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode)
2750 {
2751     switch (opcode)
2752     {
2753         case CEE_CALL:
2754         case CEE_CALLI:
2755         case CEE_CALLVIRT:
2756         case CEE_JMP:
2757         case CEE_NEWOBJ:
2758         case CEE_NEWARR:
2759             return true;
2760
2761         default:
2762             return false;
2763     }
2764 }
2765
2766 /*****************************************************************************/
2767
2768 // One might think it is worth caching these values, but results indicate
2769 // that it isn't.
2770 // In addition, caching them causes SuperPMI to be unable to completely
2771 // encapsulate an individual method context.
2772 CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass()
2773 {
2774     CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
2775     assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr);
2776     return refAnyClass;
2777 }
2778
2779 CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass()
2780 {
2781     CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE);
2782     assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr);
2783     return typeHandleClass;
2784 }
2785
2786 CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle()
2787 {
2788     CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE);
2789     assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr);
2790     return argIteratorClass;
2791 }
2792
2793 CORINFO_CLASS_HANDLE Compiler::impGetStringClass()
2794 {
2795     CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING);
2796     assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr);
2797     return stringClass;
2798 }
2799
2800 CORINFO_CLASS_HANDLE Compiler::impGetObjectClass()
2801 {
2802     CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT);
2803     assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr);
2804     return objectClass;
2805 }
2806
2807 /*****************************************************************************
2808  *  "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we
2809  *  set its type to TYP_BYREF when we create it. We know if it can be
2810  *  changed to TYP_I_IMPL only at the point where we use it
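 *  (For example, when "&localVar" feeds integer arithmetic or is stored into a
 *  native int location, the use site calls this to retype the address node
 *  from TYP_BYREF to TYP_I_IMPL.)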
2811  */
2812
2813 /* static */
2814 void Compiler::impBashVarAddrsToI(GenTreePtr tree1, GenTreePtr tree2)
2815 {
2816     if (tree1->IsVarAddr())
2817     {
2818         tree1->gtType = TYP_I_IMPL;
2819     }
2820
2821     if (tree2 && tree2->IsVarAddr())
2822     {
2823         tree2->gtType = TYP_I_IMPL;
2824     }
2825 }
2826
2827 /*****************************************************************************
2828  *  TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want
2829  *  to make that an explicit cast in our trees, so any implicit casts that
2830  *  exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are
2831  *  turned into explicit casts here.
2832  *  We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0)
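 *  For example, on a 64-bit target an int32 operand used where a native int is
 *  expected (say, in pointer arithmetic) gets an explicit TYP_INT -> TYP_I_IMPL
 *  cast inserted here.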
2833  */
2834
2835 GenTreePtr Compiler::impImplicitIorI4Cast(GenTreePtr tree, var_types dstTyp)
2836 {
2837     var_types currType   = genActualType(tree->gtType);
2838     var_types wantedType = genActualType(dstTyp);
2839
2840     if (wantedType != currType)
2841     {
2842         // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
2843         if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp))
2844         {
2845             if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->gtIntCon.gtIconVal == 0)))
2846             {
2847                 tree->gtType = TYP_I_IMPL;
2848             }
2849         }
2850 #ifdef _TARGET_64BIT_
2851         else if (varTypeIsI(wantedType) && (currType == TYP_INT))
2852         {
2853             // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF
2854             tree = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
2855         }
2856         else if ((wantedType == TYP_INT) && varTypeIsI(currType))
2857         {
2858             // Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT
2859             tree = gtNewCastNode(TYP_INT, tree, TYP_INT);
2860         }
2861 #endif // _TARGET_64BIT_
2862     }
2863
2864     return tree;
2865 }
2866
2867 /*****************************************************************************
2868  *  TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases,
2869  *  but we want to make that an explicit cast in our trees, so any implicit casts
2870  *  that exist in the IL are turned into explicit casts here.
2871  */
2872
2873 GenTreePtr Compiler::impImplicitR4orR8Cast(GenTreePtr tree, var_types dstTyp)
2874 {
2875 #ifndef LEGACY_BACKEND
2876     if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType))
2877     {
2878         tree = gtNewCastNode(dstTyp, tree, dstTyp);
2879     }
2880 #endif // !LEGACY_BACKEND
2881
2882     return tree;
2883 }
2884
2885 //------------------------------------------------------------------------
2886 // impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray
2887 //    with a GT_COPYBLK node.
2888 //
2889 // Arguments:
2890 //    sig - The InitializeArray signature.
2891 //
2892 // Return Value:
2893 //    A pointer to the newly created GT_COPYBLK node if the replacement succeeds or
2894 //    nullptr otherwise.
2895 //
2896 // Notes:
2897 //    The function recognizes the following IL pattern:
2898 //      ldc <length> or a list of ldc <lower bound>/<length>
2899 //      newarr or newobj
2900 //      dup
2901 //      ldtoken <field handle>
2902 //      call InitializeArray
2903 //    The lower bounds need not be constant except when the array rank is 1.
2904 //    The function recognizes all kinds of arrays thus enabling a small runtime
2905 //    such as CoreRT to skip providing an implementation for InitializeArray.
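//    As an illustrative (not exhaustive) example, a C# array initializer such as
//      static readonly int[] Data = { 10, 20, 30, 40 };
//    is commonly compiled to the pattern above, with the raw element data kept
//    in an RVA-backed field referenced by the ldtoken.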
2906
2907 GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
2908 {
2909     assert(sig->numArgs == 2);
2910
2911     GenTreePtr fieldTokenNode = impStackTop(0).val;
2912     GenTreePtr arrayLocalNode = impStackTop(1).val;
2913
2914     //
2915     // Verify that the field token is known and valid.  Note that it's also
2916     // possible for the token to come from reflection, in which case we cannot do
2917     // the optimization and must therefore revert to calling the helper.  You can
2918     // see an example of this in bvt\DynIL\initarray2.exe (in Main).
2919     //
2920
2921     // Check to see if the ldtoken helper call is what we see here.
2922     if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->gtCall.gtCallType != CT_HELPER) ||
2923         (fieldTokenNode->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
2924     {
2925         return nullptr;
2926     }
2927
2928     // Strip helper call away
2929     fieldTokenNode = fieldTokenNode->gtCall.gtCallArgs->Current();
2930
2931     if (fieldTokenNode->gtOper == GT_IND)
2932     {
2933         fieldTokenNode = fieldTokenNode->gtOp.gtOp1;
2934     }
2935
2936     // Check for constant
2937     if (fieldTokenNode->gtOper != GT_CNS_INT)
2938     {
2939         return nullptr;
2940     }
2941
2942     CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->gtIntCon.gtCompileTimeHandle;
2943     if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
2944     {
2945         return nullptr;
2946     }
2947
2948     //
2949     // We need to get the number of elements in the array and the size of each element.
2950     // We verify that the newarr statement is exactly what we expect it to be.
2951     // If it's not then we just return NULL and we don't optimize this call
2952     //
2953
2954     //
2955     // It is possible that we don't have any statements in the block yet
2956     //
2957     if (impTreeLast->gtOper != GT_STMT)
2958     {
2959         assert(impTreeLast->gtOper == GT_BEG_STMTS);
2960         return nullptr;
2961     }
2962
2963     //
2964     // We start by looking at the last statement, making sure it's an assignment, and
2965     // that the target of the assignment is the array passed to InitializeArray.
2966     //
2967     GenTreePtr arrayAssignment = impTreeLast->gtStmt.gtStmtExpr;
2968     if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->gtOp.gtOp1->gtOper != GT_LCL_VAR) ||
2969         (arrayLocalNode->gtOper != GT_LCL_VAR) ||
2970         (arrayAssignment->gtOp.gtOp1->gtLclVarCommon.gtLclNum != arrayLocalNode->gtLclVarCommon.gtLclNum))
2971     {
2972         return nullptr;
2973     }
2974
2975     //
2976     // Make sure that the object being assigned is a helper call.
2977     //
2978
2979     GenTreePtr newArrayCall = arrayAssignment->gtOp.gtOp2;
2980     if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->gtCall.gtCallType != CT_HELPER))
2981     {
2982         return nullptr;
2983     }
2984
2985     //
2986     // Verify that it is one of the new array helpers.
2987     //
2988
2989     bool isMDArray = false;
2990
2991     if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) &&
2992         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) &&
2993         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) &&
2994         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8)
2995 #ifdef FEATURE_READYTORUN_COMPILER
2996         && newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)
2997 #endif
2998             )
2999     {
3000 #if COR_JIT_EE_VERSION > 460
3001         if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR_NONVARARG))
3002         {
3003             return nullptr;
3004         }
3005
3006         isMDArray = true;
3007 #endif
3008     }
3009
3010     CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->gtCall.compileTimeHelperArgumentHandle;
3011
3012     //
3013     // Make sure we found a compile time handle to the array
3014     //
3015
3016     if (!arrayClsHnd)
3017     {
3018         return nullptr;
3019     }
3020
3021     unsigned rank = 0;
3022     S_UINT32 numElements;
3023
3024     if (isMDArray)
3025     {
3026         rank = info.compCompHnd->getArrayRank(arrayClsHnd);
3027
3028         if (rank == 0)
3029         {
3030             return nullptr;
3031         }
3032
3033         GenTreeArgList* tokenArg = newArrayCall->gtCall.gtCallArgs;
3034         assert(tokenArg != nullptr);
3035         GenTreeArgList* numArgsArg = tokenArg->Rest();
3036         assert(numArgsArg != nullptr);
3037         GenTreeArgList* argsArg = numArgsArg->Rest();
3038         assert(argsArg != nullptr);
3039
3040         //
3041         // The number of arguments should be a constant between 1 and 64. The rank can't be 0
3042         // so at least one length must be present and the rank can't exceed 32 so there can
3043         // be at most 64 arguments - 32 lengths and 32 lower bounds.
3044         //
3045
3046         if ((!numArgsArg->Current()->IsCnsIntOrI()) || (numArgsArg->Current()->AsIntCon()->IconValue() < 1) ||
3047             (numArgsArg->Current()->AsIntCon()->IconValue() > 64))
3048         {
3049             return nullptr;
3050         }
3051
3052         unsigned numArgs = static_cast<unsigned>(numArgsArg->Current()->AsIntCon()->IconValue());
3053         bool     lowerBoundsSpecified;
3054
3055         if (numArgs == rank * 2)
3056         {
3057             lowerBoundsSpecified = true;
3058         }
3059         else if (numArgs == rank)
3060         {
3061             lowerBoundsSpecified = false;
3062
3063             //
3064             // If the rank is 1 and a lower bound isn't specified then the runtime creates
3065             // a SDArray. Note that even if a lower bound is specified it can be 0 and then
3066             // we get a SDArray as well, see the for loop below.
3067             //
3068
3069             if (rank == 1)
3070             {
3071                 isMDArray = false;
3072             }
3073         }
3074         else
3075         {
3076             return nullptr;
3077         }
3078
3079         //
3080         // The rank is known to be at least 1 so we can start with numElements being 1
3081         // to avoid the need to special case the first dimension.
3082         //
3083
3084         numElements = S_UINT32(1);
3085
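        //
        // The importer stores the newobj arguments into the lvaNewObjArrayArgs temp, so
        // argsArg is expected to be a comma chain of field initializations (one 4 byte
        // slot per argument) that ends with the address of the temp:
        //
        //   COMMA(ASG(IND(ADD(ADDR(argsLcl), 0)), arg0),
        //     COMMA(ASG(IND(ADD(ADDR(argsLcl), 4)), arg1),
        //       ...
        //         ADDR(argsLcl)))
        //
        // The Match helpers below recognize exactly this shape.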
3086         struct Match
3087         {
3088             static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3089             {
3090                 return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) &&
3091                        IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3092             }
3093
3094             static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3095             {
3096                 return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) &&
3097                        (tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) &&
3098                        IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3099             }
3100
3101             static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs)
3102             {
3103                 return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) &&
3104                        (tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs);
3105             }
3106
3107             static bool IsComma(GenTree* tree)
3108             {
3109                 return (tree != nullptr) && (tree->OperGet() == GT_COMMA);
3110             }
3111         };
3112
3113         unsigned argIndex = 0;
3114         GenTree* comma;
3115
3116         for (comma = argsArg->Current(); Match::IsComma(comma); comma = comma->gtGetOp2())
3117         {
3118             if (lowerBoundsSpecified)
3119             {
3120                 //
3121                 // In general lower bounds can be ignored because they're not needed to
3122                 // calculate the total number of elements. But for single dimensional arrays
3123                 // we need to know if the lower bound is 0 because in this case the runtime
3124                 // creates a SDArray and this affects the way the array data offset is calculated.
3125                 //
3126
3127                 if (rank == 1)
3128                 {
3129                     GenTree* lowerBoundAssign = comma->gtGetOp1();
3130                     assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs));
3131                     GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2();
3132
3133                     if (lowerBoundNode->IsIntegralConst(0))
3134                     {
3135                         isMDArray = false;
3136                     }
3137                 }
3138
3139                 comma = comma->gtGetOp2();
3140                 argIndex++;
3141             }
3142
3143             GenTree* lengthNodeAssign = comma->gtGetOp1();
3144             assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs));
3145             GenTree* lengthNode = lengthNodeAssign->gtGetOp2();
3146
3147             if (!lengthNode->IsCnsIntOrI())
3148             {
3149                 return nullptr;
3150             }
3151
3152             numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue());
3153             argIndex++;
3154         }
3155
3156         assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs));
3157
3158         if (argIndex != numArgs)
3159         {
3160             return nullptr;
3161         }
3162     }
3163     else
3164     {
3165         //
3166         // Make sure there are exactly two arguments:  the array class and
3167         // the number of elements.
3168         //
3169
3170         GenTreePtr arrayLengthNode;
3171
3172         GenTreeArgList* args = newArrayCall->gtCall.gtCallArgs;
3173 #ifdef FEATURE_READYTORUN_COMPILER
3174         if (newArrayCall->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1))
3175         {
3176             // Array length is 1st argument for readytorun helper
3177             arrayLengthNode = args->Current();
3178         }
3179         else
3180 #endif
3181         {
3182             // Array length is 2nd argument for regular helper
3183             arrayLengthNode = args->Rest()->Current();
3184         }
3185
3186         //
3187         // Make sure that the number of elements looks valid.
3188         //
3189         if (arrayLengthNode->gtOper != GT_CNS_INT)
3190         {
3191             return nullptr;
3192         }
3193
3194         numElements = S_SIZE_T(arrayLengthNode->gtIntCon.gtIconVal);
3195
3196         if (!info.compCompHnd->isSDArray(arrayClsHnd))
3197         {
3198             return nullptr;
3199         }
3200     }
3201
3202     CORINFO_CLASS_HANDLE elemClsHnd;
3203     var_types            elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd));
3204
3205     //
3206     // Note that genTypeSize will return zero for non primitive types, which is exactly
3207     // what we want (size will then be 0, and we will catch this in the conditional below).
3208     // Note that we don't expect this to fail for valid binaries, so we assert in the
3209     // non-verification case (the verification case should not assert but rather correctly
3210     // handle bad binaries).  This assert is not guarding any specific invariant, but rather
3211     // saying that we don't expect this to happen, and if it is hit, we need to investigate
3212     // why.
3213     //
3214
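    // Use overflow-checked arithmetic (S_UINT32) so that elemSize * numElements cannot
    // silently wrap; an overflowed size is rejected just below.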
3215     S_UINT32 elemSize(genTypeSize(elementType));
3216     S_UINT32 size = elemSize * S_UINT32(numElements);
3217
3218     if (size.IsOverflow())
3219     {
3220         return nullptr;
3221     }
3222
3223     if ((size.Value() == 0) || (varTypeIsGC(elementType)))
3224     {
3225         assert(verNeedsVerification());
3226         return nullptr;
3227     }
3228
3229     void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value());
3230     if (!initData)
3231     {
3232         return nullptr;
3233     }
3234
3235     //
3236     // At this point we are ready to commit to implementing the InitializeArray
3237     // intrinsic using a struct assignment.  Pop the arguments from the stack and
3238     // return the struct assignment node.
3239     //
3240
3241     impPopStack();
3242     impPopStack();
3243
3244     const unsigned blkSize = size.Value();
3245     GenTreePtr     dst;
3246
3247     if (isMDArray)
3248     {
3249         unsigned dataOffset = eeGetMDArrayDataOffset(elementType, rank);
3250
3251         dst = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
3252     }
3253     else
3254     {
3255         dst = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewIndexRef(elementType, arrayLocalNode, gtNewIconNode(0)));
3256     }
3257     GenTreePtr blk     = gtNewBlockVal(dst, blkSize);
3258     GenTreePtr srcAddr = gtNewIconHandleNode((size_t)initData, GTF_ICON_STATIC_HDL);
3259     GenTreePtr src     = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
3260
3261     return gtNewBlkOpNode(blk,     // dst
3262                           src,     // src
3263                           blkSize, // size
3264                           false,   // volatil
3265                           true);   // copyBlock
3266 }
3267
3268 /*****************************************************************************/
3269 // Returns the GenTree that should be used to do the intrinsic instead of the call.
3270 // Returns NULL if an intrinsic cannot be used
3271
3272 GenTreePtr Compiler::impIntrinsic(GenTreePtr            newobjThis,
3273                                   CORINFO_CLASS_HANDLE  clsHnd,
3274                                   CORINFO_METHOD_HANDLE method,
3275                                   CORINFO_SIG_INFO*     sig,
3276                                   int                   memberRef,
3277                                   bool                  readonlyCall,
3278                                   bool                  tailCall,
3279                                   CorInfoIntrinsics*    pIntrinsicID)
3280 {
3281     bool mustExpand = false;
3282 #if COR_JIT_EE_VERSION > 460
3283     CorInfoIntrinsics intrinsicID = info.compCompHnd->getIntrinsicID(method, &mustExpand);
3284 #else
3285     CorInfoIntrinsics intrinsicID                                      = info.compCompHnd->getIntrinsicID(method);
3286 #endif
3287     *pIntrinsicID = intrinsicID;
3288
3289 #ifndef _TARGET_ARM_
3290     genTreeOps interlockedOperator;
3291 #endif
3292
3293     if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContext)
3294     {
3295         // must be done regardless of DbgCode and MinOpts
3296         return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL);
3297     }
3298 #ifdef _TARGET_64BIT_
3299     if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr)
3300     {
3301         // must be done regardless of DbgCode and MinOpts
3302         return gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL));
3303     }
3304 #else
3305     assert(intrinsicID != CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr);
3306 #endif
3307
3308     GenTreePtr retNode = nullptr;
3309
3310     //
3311     // We disable the inlining of intrinsics for MinOpts.
3312     //
3313     if (!mustExpand && (opts.compDbgCode || opts.MinOpts()))
3314     {
3315         *pIntrinsicID = CORINFO_INTRINSIC_Illegal;
3316         return retNode;
3317     }
3318
3319     // Currently we don't have CORINFO_INTRINSIC_Exp because it does not
3320     // seem to work properly for Infinity values, and we don't do
3321     // CORINFO_INTRINSIC_Pow because it needs a helper which we currently don't have.
3322
3323     var_types callType = JITtype2varType(sig->retType);
3324
3325     /* First do the intrinsics which are always smaller than a call */
3326
3327     switch (intrinsicID)
3328     {
3329         GenTreePtr op1, op2;
3330
3331         case CORINFO_INTRINSIC_Sin:
3332         case CORINFO_INTRINSIC_Sqrt:
3333         case CORINFO_INTRINSIC_Abs:
3334         case CORINFO_INTRINSIC_Cos:
3335         case CORINFO_INTRINSIC_Round:
3336         case CORINFO_INTRINSIC_Cosh:
3337         case CORINFO_INTRINSIC_Sinh:
3338         case CORINFO_INTRINSIC_Tan:
3339         case CORINFO_INTRINSIC_Tanh:
3340         case CORINFO_INTRINSIC_Asin:
3341         case CORINFO_INTRINSIC_Acos:
3342         case CORINFO_INTRINSIC_Atan:
3343         case CORINFO_INTRINSIC_Atan2:
3344         case CORINFO_INTRINSIC_Log10:
3345         case CORINFO_INTRINSIC_Pow:
3346         case CORINFO_INTRINSIC_Exp:
3347         case CORINFO_INTRINSIC_Ceiling:
3348         case CORINFO_INTRINSIC_Floor:
3349
3350             // These are math intrinsics
3351
3352             assert(callType != TYP_STRUCT);
3353
3354             op1 = nullptr;
3355
3356 #if defined(LEGACY_BACKEND)
3357             if (IsTargetIntrinsic(intrinsicID))
3358 #elif !defined(_TARGET_X86_)
3359             // Intrinsics that are not implemented directly by target instructions will
3360             // be re-materialized as user calls in the rationalizer. For prefixed tail calls,
3361             // don't do this optimization, because
3362             //  a) of back-compatibility concerns on desktop .NET 4.6 / 4.6.1, and
3363             //  b) it would be a non-trivial task, or too late, to re-materialize a surviving
3364             //     tail-prefixed GT_INTRINSIC as a tail call in the rationalizer.
3365             if (!IsIntrinsicImplementedByUserCall(intrinsicID) || !tailCall)
3366 #else
3367             // On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation
3368             // of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad
3369             // code generation for certain EH constructs.
3370             if (!IsIntrinsicImplementedByUserCall(intrinsicID))
3371 #endif
3372             {
3373                 switch (sig->numArgs)
3374                 {
3375                     case 1:
3376                         op1 = impPopStack().val;
3377
3378 #if FEATURE_X87_DOUBLES
3379
3380                         // X87 stack doesn't differentiate between float/double
3381                         // so it doesn't need a cast, but everybody else does
3382                         // Just double check it is at least a FP type
3383                         noway_assert(varTypeIsFloating(op1));
3384
3385 #else // FEATURE_X87_DOUBLES
3386
3387                         if (op1->TypeGet() != callType)
3388                         {
3389                             op1 = gtNewCastNode(callType, op1, callType);
3390                         }
3391
3392 #endif // FEATURE_X87_DOUBLES
3393
3394                         op1 = new (this, GT_INTRINSIC)
3395                             GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3396                         break;
3397
3398                     case 2:
3399                         op2 = impPopStack().val;
3400                         op1 = impPopStack().val;
3401
3402 #if FEATURE_X87_DOUBLES
3403
3404                         // X87 stack doesn't differentiate between float/double
3405                         // so it doesn't need a cast, but everybody else does
3406                         // Just double check it is at least a FP type
3407                         noway_assert(varTypeIsFloating(op2));
3408                         noway_assert(varTypeIsFloating(op1));
3409
3410 #else // FEATURE_X87_DOUBLES
3411
3412                         if (op2->TypeGet() != callType)
3413                         {
3414                             op2 = gtNewCastNode(callType, op2, callType);
3415                         }
3416                         if (op1->TypeGet() != callType)
3417                         {
3418                             op1 = gtNewCastNode(callType, op1, callType);
3419                         }
3420
3421 #endif // FEATURE_X87_DOUBLES
3422
3423                         op1 = new (this, GT_INTRINSIC)
3424                             GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicID, method);
3425                         break;
3426
3427                     default:
3428                         NO_WAY("Unsupported number of args for Math Intrinsic");
3429                 }
3430
3431 #ifndef LEGACY_BACKEND
3432                 if (IsIntrinsicImplementedByUserCall(intrinsicID))
3433                 {
3434                     op1->gtFlags |= GTF_CALL;
3435                 }
3436 #endif
3437             }
3438
3439             retNode = op1;
3440             break;
3441
3442 #ifdef _TARGET_XARCH_
3443         // TODO-ARM-CQ: reenable treating Interlocked operation as intrinsic
3444         case CORINFO_INTRINSIC_InterlockedAdd32:
3445             interlockedOperator = GT_LOCKADD;
3446             goto InterlockedBinOpCommon;
3447         case CORINFO_INTRINSIC_InterlockedXAdd32:
3448             interlockedOperator = GT_XADD;
3449             goto InterlockedBinOpCommon;
3450         case CORINFO_INTRINSIC_InterlockedXchg32:
3451             interlockedOperator = GT_XCHG;
3452             goto InterlockedBinOpCommon;
3453
3454 #ifdef _TARGET_AMD64_
3455         case CORINFO_INTRINSIC_InterlockedAdd64:
3456             interlockedOperator = GT_LOCKADD;
3457             goto InterlockedBinOpCommon;
3458         case CORINFO_INTRINSIC_InterlockedXAdd64:
3459             interlockedOperator = GT_XADD;
3460             goto InterlockedBinOpCommon;
3461         case CORINFO_INTRINSIC_InterlockedXchg64:
3462             interlockedOperator = GT_XCHG;
3463             goto InterlockedBinOpCommon;
3464 #endif // _TARGET_AMD64_
3465
3466         InterlockedBinOpCommon:
3467             assert(callType != TYP_STRUCT);
3468             assert(sig->numArgs == 2);
3469
3470             op2 = impPopStack().val;
3471             op1 = impPopStack().val;
3472
3473             // This creates:
3474             //   val
3475             // XAdd
3476             //   addr
3477             //     field (for example)
3478             //
3479             // In the case where the first argument is the address of a local, we might
3480             // want to make this *not* make the var address-taken -- but atomic instructions
3481             // on a local are probably pretty useless anyway, so we probably don't care.
3482
3483             op1 = gtNewOperNode(interlockedOperator, genActualType(callType), op1, op2);
3484             op1->gtFlags |= GTF_GLOB_EFFECT;
3485             retNode = op1;
3486             break;
3487 #endif // _TARGET_XARCH_
3488
3489         case CORINFO_INTRINSIC_MemoryBarrier:
3490
3491             assert(sig->numArgs == 0);
3492
3493             op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
3494             op1->gtFlags |= GTF_GLOB_EFFECT;
3495             retNode = op1;
3496             break;
3497
3498 #ifdef _TARGET_XARCH_
3499         // TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic
3500         case CORINFO_INTRINSIC_InterlockedCmpXchg32:
3501 #ifdef _TARGET_AMD64_
3502         case CORINFO_INTRINSIC_InterlockedCmpXchg64:
3503 #endif
3504         {
3505             assert(callType != TYP_STRUCT);
3506             assert(sig->numArgs == 3);
3507             GenTreePtr op3;
3508
3509             op3 = impPopStack().val; // comparand
3510             op2 = impPopStack().val; // value
3511             op1 = impPopStack().val; // location
3512
3513             GenTreePtr node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3);
3514
3515             node->gtCmpXchg.gtOpLocation->gtFlags |= GTF_DONT_CSE;
3516             retNode = node;
3517             break;
3518         }
3519 #endif
3520
3521         case CORINFO_INTRINSIC_StringLength:
3522             op1 = impPopStack().val;
3523             if (!opts.MinOpts() && !opts.compDbgCode)
3524             {
3525                 GenTreeArrLen* arrLen =
3526                     new (this, GT_ARR_LENGTH) GenTreeArrLen(TYP_INT, op1, offsetof(CORINFO_String, stringLen));
3527                 op1 = arrLen;
3528             }
3529             else
3530             {
3531                 /* Create the expression "*(str_addr + stringLengthOffset)" */
3532                 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
3533                                     gtNewIconNode(offsetof(CORINFO_String, stringLen), TYP_I_IMPL));
3534                 op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
3535             }
3536             retNode = op1;
3537             break;
3538
3539         case CORINFO_INTRINSIC_StringGetChar:
3540             op2 = impPopStack().val;
3541             op1 = impPopStack().val;
3542             op1 = gtNewIndexRef(TYP_CHAR, op1, op2);
3543             op1->gtFlags |= GTF_INX_STRING_LAYOUT;
3544             retNode = op1;
3545             break;
3546
3547         case CORINFO_INTRINSIC_InitializeArray:
3548             retNode = impInitializeArrayIntrinsic(sig);
3549             break;
3550
3551         case CORINFO_INTRINSIC_Array_Address:
3552         case CORINFO_INTRINSIC_Array_Get:
3553         case CORINFO_INTRINSIC_Array_Set:
3554             retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, intrinsicID);
3555             break;
3556
3557         case CORINFO_INTRINSIC_GetTypeFromHandle:
3558             op1 = impStackTop(0).val;
3559             if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3560                 gtIsTypeHandleToRuntimeTypeHelper(op1))
3561             {
3562                 op1 = impPopStack().val;
3563                 // Change call to return RuntimeType directly.
3564                 op1->gtType = TYP_REF;
3565                 retNode     = op1;
3566             }
3567             // Call the regular function.
3568             break;
3569
3570         case CORINFO_INTRINSIC_RTH_GetValueInternal:
3571             op1 = impStackTop(0).val;
3572             if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3573                 gtIsTypeHandleToRuntimeTypeHelper(op1))
3574             {
3575                 // Old tree
3576                 // Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle
3577                 //
3578                 // New tree
3579                 // TreeToGetNativeTypeHandle
3580
3581                 // Remove call to helper and return the native TypeHandle pointer that was the parameter
3582                 // to that helper.
3583
3584                 op1 = impPopStack().val;
3585
3586                 // Get native TypeHandle argument to old helper
3587                 op1 = op1->gtCall.gtCallArgs;
3588                 assert(op1->OperIsList());
3589                 assert(op1->gtOp.gtOp2 == nullptr);
3590                 op1     = op1->gtOp.gtOp1;
3591                 retNode = op1;
3592             }
3593             // Call the regular function.
3594             break;
3595
3596 #ifndef LEGACY_BACKEND
3597         case CORINFO_INTRINSIC_Object_GetType:
3598
3599             op1 = impPopStack().val;
3600             op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3601
3602             // Set the CALL flag to indicate that the operator is implemented by a call.
3603             // Set also the EXCEPTION flag because the native implementation of
3604             // CORINFO_INTRINSIC_Object_GetType intrinsic can throw NullReferenceException.
3605             op1->gtFlags |= (GTF_CALL | GTF_EXCEPT);
3606             retNode = op1;
3607             break;
3608 #endif
3609         // Implement ByReference Ctor.  This wraps the assignment of the ref into a byref-like field
3610         // in a value type.  The canonical example of this is Span<T>. In effect this is just a
3611         // substitution.  The parameter byref will be assigned into the newly allocated object.
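        // (For example, Span<T>'s constructor effectively does "new ByReference<T>(ref value)";
        // the incoming byref is stored straight into the struct's single pointer-sized field and
        // no call is emitted.)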
3612         case CORINFO_INTRINSIC_ByReference_Ctor:
3613         {
3614             // Remove call to constructor and directly assign the byref passed
3615             // to the call to the first slot of the ByReference struct.
3616             op1                                    = impPopStack().val;
3617             GenTreePtr           thisptr           = newobjThis;
3618             CORINFO_FIELD_HANDLE fldHnd            = info.compCompHnd->getFieldInClass(clsHnd, 0);
3619             GenTreePtr           field             = gtNewFieldRef(TYP_BYREF, fldHnd, thisptr, 0, false);
3620             GenTreePtr           assign            = gtNewAssignNode(field, op1);
3621             GenTreePtr           byReferenceStruct = gtCloneExpr(thisptr->gtGetOp1());
3622             assert(byReferenceStruct != nullptr);
3623             impPushOnStack(byReferenceStruct, typeInfo(TI_STRUCT, clsHnd));
3624             retNode = assign;
3625             break;
3626         }
3627         // Implement ptr value getter for ByReference struct.
3628         case CORINFO_INTRINSIC_ByReference_Value:
3629         {
3630             op1                         = impPopStack().val;
3631             CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
3632             GenTreePtr           field  = gtNewFieldRef(TYP_BYREF, fldHnd, op1, 0, false);
3633             retNode                     = field;
3634             break;
3635         }
3636         default:
3637             /* Unknown intrinsic */
3638             break;
3639     }
3640
3641     if (mustExpand)
3642     {
3643         if (retNode == nullptr)
3644         {
3645             NO_WAY("JIT must expand the intrinsic!");
3646         }
3647     }
3648
3649     return retNode;
3650 }
3651
3652 /*****************************************************************************/
3653
3654 GenTreePtr Compiler::impArrayAccessIntrinsic(
3655     CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, CorInfoIntrinsics intrinsicID)
3656 {
3657     /* If we are generating SMALL_CODE, we don't want to use intrinsics for
3658        the following, as it generates fatter code.
3659     */
3660
3661     if (compCodeOpt() == SMALL_CODE)
3662     {
3663         return nullptr;
3664     }
3665
3666     /* These intrinsics generate fatter (but faster) code and are only
3667        done if we don't need SMALL_CODE */
3668
3669     unsigned rank = (intrinsicID == CORINFO_INTRINSIC_Array_Set) ? (sig->numArgs - 1) : sig->numArgs;
3670
3671     // The rank 1 case is special because it has to handle two array formats (SZ arrays
3672     // and rank-1 MD arrays with bounds), so we simply don't handle that case.
3673     if (rank > GT_ARR_MAX_RANK || rank <= 1)
3674     {
3675         return nullptr;
3676     }
3677
3678     CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr;
3679     var_types            elemType      = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd));
3680
3681     // For the ref case, we will only be able to inline if the types match
3682     // (the verifier checks for this; we don't care for the nonverified case) and the
3683     // type is final (so we don't need to do the cast).
3684     if ((intrinsicID != CORINFO_INTRINSIC_Array_Get) && !readonlyCall && varTypeIsGC(elemType))
3685     {
3686         // Get the call site signature
3687         CORINFO_SIG_INFO LocalSig;
3688         eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig);
3689         assert(LocalSig.hasThis());
3690
3691         CORINFO_CLASS_HANDLE actualElemClsHnd;
3692
3693         if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3694         {
3695             // Fetch the last argument, the one that indicates the type we are setting.
3696             CORINFO_ARG_LIST_HANDLE argType = LocalSig.args;
3697             for (unsigned r = 0; r < rank; r++)
3698             {
3699                 argType = info.compCompHnd->getArgNext(argType);
3700             }
3701
3702             typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType);
3703             actualElemClsHnd = argInfo.GetClassHandle();
3704         }
3705         else
3706         {
3707             assert(intrinsicID == CORINFO_INTRINSIC_Array_Address);
3708
3709             // Fetch the return type
3710             typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass);
3711             assert(retInfo.IsByRef());
3712             actualElemClsHnd = retInfo.GetClassHandle();
3713         }
3714
3715         // if it's not final, we can't do the optimization
3716         if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL))
3717         {
3718             return nullptr;
3719         }
3720     }
3721
3722     unsigned arrayElemSize;
3723     if (elemType == TYP_STRUCT)
3724     {
3725         assert(arrElemClsHnd);
3726
3727         arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd);
3728     }
3729     else
3730     {
3731         arrayElemSize = genTypeSize(elemType);
3732     }
3733
3734     if ((unsigned char)arrayElemSize != arrayElemSize)
3735     {
3736         // arrayElemSize would be truncated as an unsigned char.
3737         // This means the array element is too large. Don't do the optimization.
3738         return nullptr;
3739     }
3740
3741     GenTreePtr val = nullptr;
3742
3743     if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3744     {
3745         // Assignment of a struct is more work, and there are more gets than sets.
3746         if (elemType == TYP_STRUCT)
3747         {
3748             return nullptr;
3749         }
3750
3751         val = impPopStack().val;
3752         assert(genActualType(elemType) == genActualType(val->gtType) ||
3753                (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) ||
3754                (elemType == TYP_INT && val->gtType == TYP_BYREF) ||
3755                (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT));
3756     }
3757
3758     noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK);
3759
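    // Pop the index expressions: the last index pushed is on top of the stack,
    // so fill inds[] from back to front.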
3760     GenTreePtr inds[GT_ARR_MAX_RANK];
3761     for (unsigned k = rank; k > 0; k--)
3762     {
3763         inds[k - 1] = impPopStack().val;
3764     }
3765
3766     GenTreePtr arr = impPopStack().val;
3767     assert(arr->gtType == TYP_REF);
3768
3769     GenTreePtr arrElem =
3770         new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank),
3771                                                static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]);
3772
3773     if (intrinsicID != CORINFO_INTRINSIC_Array_Address)
3774     {
3775         arrElem = gtNewOperNode(GT_IND, elemType, arrElem);
3776     }
3777
3778     if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3779     {
3780         assert(val != nullptr);
3781         return gtNewAssignNode(arrElem, val);
3782     }
3783     else
3784     {
3785         return arrElem;
3786     }
3787 }
3788
3789 BOOL Compiler::verMergeEntryStates(BasicBlock* block, bool* changed)
3790 {
3791     unsigned i;
3792
3793     // do some basic checks first
3794     if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth)
3795     {
3796         return FALSE;
3797     }
3798
3799     if (verCurrentState.esStackDepth > 0)
3800     {
3801         // merge stack types
3802         StackEntry* parentStack = block->bbStackOnEntry();
3803         StackEntry* childStack  = verCurrentState.esStack;
3804
3805         for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++)
3806         {
3807             if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == FALSE)
3808             {
3809                 return FALSE;
3810             }
3811         }
3812     }
3813
3814     // merge initialization status of this ptr
3815
3816     if (verTrackObjCtorInitState)
3817     {
3818         // If we're tracking the CtorInitState, then it must not be unknown in the current state.
3819         assert(verCurrentState.thisInitialized != TIS_Bottom);
3820
3821         // If the successor block's thisInit state is unknown, copy it from the current state.
3822         if (block->bbThisOnEntry() == TIS_Bottom)
3823         {
3824             *changed = true;
3825             verSetThisInit(block, verCurrentState.thisInitialized);
3826         }
3827         else if (verCurrentState.thisInitialized != block->bbThisOnEntry())
3828         {
3829             if (block->bbThisOnEntry() != TIS_Top)
3830             {
3831                 *changed = true;
3832                 verSetThisInit(block, TIS_Top);
3833
3834                 if (block->bbFlags & BBF_FAILED_VERIFICATION)
3835                 {
3836                     // The block is bad. Control can flow through the block to any handler that catches the
3837                     // verification exception, but the importer ignores bad blocks and therefore won't model
3838                     // this flow in the normal way. To complete the merge into the bad block, the new state
3839                     // needs to be manually pushed to the handlers that may be reached after the verification
3840                     // exception occurs.
3841                     //
3842                     // Usually, the new state was already propagated to the relevant handlers while processing
3843                     // the predecessors of the bad block. The exception is when the bad block is at the start
3844                     // of a try region, meaning it is protected by additional handlers that do not protect its
3845                     // predecessors.
3846                     //
3847                     if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0))
3848                     {
3849                         // Push TIS_Top to the handlers that protect the bad block. Note that this can cause
3850                         // recursive calls back into this code path (if successors of the current bad block are
3851                         // also bad blocks).
3852                         //
3853                         ThisInitState origTIS           = verCurrentState.thisInitialized;
3854                         verCurrentState.thisInitialized = TIS_Top;
3855                         impVerifyEHBlock(block, true);
3856                         verCurrentState.thisInitialized = origTIS;
3857                     }
3858                 }
3859             }
3860         }
3861     }
3862     else
3863     {
3864         assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom);
3865     }
3866
3867     return TRUE;
3868 }
3869
3870 /*****************************************************************************
3871  * 'logMsg' is true if a log message needs to be logged; false if the caller has
3872  *   already logged it (presumably in a more detailed fashion than done here)
3873  * 'bVerificationException' is true for a verification exception, false for a
3874  *   "call unauthorized by host" exception.
3875  */
3876
3877 void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg))
3878 {
3879     block->bbJumpKind = BBJ_THROW;
3880     block->bbFlags |= BBF_FAILED_VERIFICATION;
3881
3882     impCurStmtOffsSet(block->bbCodeOffs);
3883
3884 #ifdef DEBUG
3885     // we need this since BeginTreeList asserts otherwise
3886     impTreeList = impTreeLast = nullptr;
3887     block->bbFlags &= ~BBF_IMPORTED;
3888
3889     if (logMsg)
3890     {
3891         JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName,
3892                 block->bbCodeOffs, block->bbCodeOffsEnd));
3893         if (verbose)
3894         {
3895             printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs);
3896         }
3897     }
3898
3899     if (JitConfig.DebugBreakOnVerificationFailure())
3900     {
3901         DebugBreak();
3902     }
3903 #endif
3904
3905     impBeginTreeList();
3906
3907     // if the stack is non-empty evaluate all the side-effects
3908     if (verCurrentState.esStackDepth > 0)
3909     {
3910         impEvalSideEffects();
3911     }
3912     assert(verCurrentState.esStackDepth == 0);
3913
3914     GenTreePtr op1 = gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, GTF_EXCEPT,
3915                                          gtNewArgList(gtNewIconNode(block->bbCodeOffs)));
3916     // verCurrentState.esStackDepth = 0;
3917     impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
3918
3919     // The inliner is not able to handle methods that require a throw block, so
3920     // make sure this method never gets inlined.
3921     info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE);
3922 }
3923
3924 /*****************************************************************************
3925  *
3926  */
3927 void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg))
3928
3929 {
3930     // In AMD64, for historical reasons involving design limitations of JIT64, the VM has a
3931     // slightly different mechanism in which it calls the JIT to perform IL verification:
3932     // in the case of transparent methods the VM calls for a predicate IsVerifiable()
3933     // that consists of calling the JIT with the IMPORT_ONLY flag and with the IL verify flag on.
3934     // If the JIT determines the method is not verifiable, it should raise the exception to the VM and let
3935     // it bubble up until reported by the runtime.  Currently in RyuJIT, this method doesn't bubble
3936     // up the exception; instead it embeds a throw inside the offending basic block and lets the
3937     // method fail at runtime when the jitted code executes.
3938     //
3939     // For AMD64 we don't want this behavior when the JIT has been called only for verification (i.e.
3940     // with the IMPORT_ONLY and IL Verification flag set) because this won't actually generate code,
3941     // just try to find out whether to fail this method before even actually jitting it.  So, in case
3942     // we detect these two conditions, instead of generating a throw statement inside the offending
3943     // basic block, we immediately fail to JIT and notify the VM to make the IsVerifiable() predicate
3944     // to return false and make RyuJIT behave the same way JIT64 does.
3945     //
3946     // The rationale behind this workaround is to avoid modifying the VM and maintain compatibility between JIT64 and
3947     // RyuJIT for the time being until we completely replace JIT64.
3948     // TODO-ARM64-Cleanup:  We probably want to actually modify the VM in the future to avoid the unnecessary two passes.
3949
3950     // In AMD64 we must make sure we're behaving the same way as JIT64, meaning we should only raise the verification
3951     // exception if we are only importing and verifying.  The method verNeedsVerification() can also modify the
3952     // tiVerificationNeeded flag in the case it determines it can 'skip verification' during importation and defer it
3953     // to a runtime check. That's why we must assert one or the other (since the flag tiVerificationNeeded can
3954     // be turned off during importation).
3955     CLANG_FORMAT_COMMENT_ANCHOR;
3956
3957 #ifdef _TARGET_64BIT_
3958
3959 #ifdef DEBUG
3960     bool canSkipVerificationResult =
3961         info.compCompHnd->canSkipMethodVerification(info.compMethodHnd) != CORINFO_VERIFICATION_CANNOT_SKIP;
3962     assert(tiVerificationNeeded || canSkipVerificationResult);
3963 #endif // DEBUG
3964
3965     // Add the non verifiable flag to the compiler
3966     if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
3967     {
3968         tiIsVerifiableCode = FALSE;
3969     }
3970 #endif //_TARGET_64BIT_
3971     verResetCurrentState(block, &verCurrentState);
3972     verConvertBBToThrowVerificationException(block DEBUGARG(logMsg));
3973
3974 #ifdef DEBUG
3975     impNoteLastILoffs(); // Remember at which BC offset the tree was finished
3976 #endif                   // DEBUG
3977 }
3978
3979 /******************************************************************************/
3980 typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd)
3981 {
3982     assert(ciType < CORINFO_TYPE_COUNT);
3983
3984     typeInfo tiResult;
3985     switch (ciType)
3986     {
3987         case CORINFO_TYPE_STRING:
3988         case CORINFO_TYPE_CLASS:
3989             tiResult = verMakeTypeInfo(clsHnd);
3990             if (!tiResult.IsType(TI_REF))
3991             { // type must be consistent with element type
3992                 return typeInfo();
3993             }
3994             break;
3995
3996 #ifdef _TARGET_64BIT_
3997         case CORINFO_TYPE_NATIVEINT:
3998         case CORINFO_TYPE_NATIVEUINT:
3999             if (clsHnd)
4000             {
4001                 // If we have more precise information, use it
4002                 return verMakeTypeInfo(clsHnd);
4003             }
4004             else
4005             {
4006                 return typeInfo::nativeInt();
4007             }
4008             break;
4009 #endif // _TARGET_64BIT_
4010
4011         case CORINFO_TYPE_VALUECLASS:
4012         case CORINFO_TYPE_REFANY:
4013             tiResult = verMakeTypeInfo(clsHnd);
4014             // type must be consistent with element type;
4015             if (!tiResult.IsValueClass())
4016             {
4017                 return typeInfo();
4018             }
4019             break;
4020         case CORINFO_TYPE_VAR:
4021             return verMakeTypeInfo(clsHnd);
4022
4023         case CORINFO_TYPE_PTR: // for now, pointers are treated as an error
4024         case CORINFO_TYPE_VOID:
4025             return typeInfo();
4026             break;
4027
4028         case CORINFO_TYPE_BYREF:
4029         {
4030             CORINFO_CLASS_HANDLE childClassHandle;
4031             CorInfoType          childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle);
4032             return ByRef(verMakeTypeInfo(childType, childClassHandle));
4033         }
4034         break;
4035
4036         default:
4037             if (clsHnd)
4038             { // If we have more precise information, use it
4039                 return typeInfo(TI_STRUCT, clsHnd);
4040             }
4041             else
4042             {
4043                 return typeInfo(JITtype2tiType(ciType));
4044             }
4045     }
4046     return tiResult;
4047 }
4048
4049 /******************************************************************************/
4050
4051 typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */)
4052 {
4053     if (clsHnd == nullptr)
4054     {
4055         return typeInfo();
4056     }
4057
4058     // Byrefs should only occur in method and local signatures, which are accessed
4059     // using ICorClassInfo and ICorClassInfo.getChildType.
4060     // So findClass() and getClassAttribs() should not be called for byrefs
4061
4062     if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF)
4063     {
4064         assert(!"Did findClass() return a Byref?");
4065         return typeInfo();
4066     }
4067
4068     unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd);
4069
4070     if (attribs & CORINFO_FLG_VALUECLASS)
4071     {
4072         CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd);
4073
4074         // Meta-data validation should ensure that CORINFO_TYPE_BYREF does
4075         // not occur here, so we may want to change this to an assert instead.
4076         if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR)
4077         {
4078             return typeInfo();
4079         }
4080
4081 #ifdef _TARGET_64BIT_
4082         if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT)
4083         {
4084             return typeInfo::nativeInt();
4085         }
4086 #endif // _TARGET_64BIT_
4087
4088         if (t != CORINFO_TYPE_UNDEF)
4089         {
4090             return (typeInfo(JITtype2tiType(t)));
4091         }
4092         else if (bashStructToRef)
4093         {
4094             return (typeInfo(TI_REF, clsHnd));
4095         }
4096         else
4097         {
4098             return (typeInfo(TI_STRUCT, clsHnd));
4099         }
4100     }
4101     else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE)
4102     {
4103         // See comment in _typeInfo.h for why we do it this way.
4104         return (typeInfo(TI_REF, clsHnd, true));
4105     }
4106     else
4107     {
4108         return (typeInfo(TI_REF, clsHnd));
4109     }
4110 }
4111
4112 /******************************************************************************/
4113 BOOL Compiler::verIsSDArray(typeInfo ti)
4114 {
4115     if (ti.IsNullObjRef())
4116     { // nulls are SD arrays
4117         return TRUE;
4118     }
4119
4120     if (!ti.IsType(TI_REF))
4121     {
4122         return FALSE;
4123     }
4124
4125     if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef()))
4126     {
4127         return FALSE;
4128     }
4129     return TRUE;
4130 }
4131
4132 /******************************************************************************/
4133 /* Given 'arrayObjectType' which is an array type, fetch the element type. */
4134 /* Returns an error type if anything goes wrong */
4135
4136 typeInfo Compiler::verGetArrayElemType(typeInfo arrayObjectType)
4137 {
4138     assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case
4139
4140     if (!verIsSDArray(arrayObjectType))
4141     {
4142         return typeInfo();
4143     }
4144
4145     CORINFO_CLASS_HANDLE childClassHandle = nullptr;
4146     CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle);
4147
4148     return verMakeTypeInfo(ciType, childClassHandle);
4149 }
4150
4151 /*****************************************************************************
4152  */
4153 typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args)
4154 {
4155     CORINFO_CLASS_HANDLE classHandle;
4156     CorInfoType          ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle));
4157
4158     var_types type = JITtype2varType(ciType);
4159     if (varTypeIsGC(type))
4160     {
4161         // For efficiency, getArgType only returns something in classHandle for
4162         // value types.  For other types that have additional type info, you
4163         // have to call back explicitly
4164         classHandle = info.compCompHnd->getArgClass(sig, args);
4165         if (!classHandle)
4166         {
4167             NO_WAY("Could not figure out Class specified in argument or local signature");
4168         }
4169     }
4170
4171     return verMakeTypeInfo(ciType, classHandle);
4172 }
4173
4174 /*****************************************************************************/
4175
4176 // This does the expensive check to figure out whether the method
4177 // needs to be verified. It is called only when we fail verification,
4178 // just before throwing the verification exception.
4179
4180 BOOL Compiler::verNeedsVerification()
4181 {
4182     // If we have previously determined that verification is NOT needed
4183     // (for example in Compiler::compCompile), that means verification is really not needed.
4184     // Return the same decision we made before.
4185     // (Note: This literally means that tiVerificationNeeded can never go from 0 to 1.)
4186
4187     if (!tiVerificationNeeded)
4188     {
4189         return tiVerificationNeeded;
4190     }
4191
4192     assert(tiVerificationNeeded);
4193
4194     // Ok, we haven't concluded that verification is NOT needed. Consult the EE now to
4195     // obtain the answer.
4196     CorInfoCanSkipVerificationResult canSkipVerificationResult =
4197         info.compCompHnd->canSkipMethodVerification(info.compMethodHnd);
4198
4199     // canSkipVerification will return one of the following values:
4200     //    CORINFO_VERIFICATION_CANNOT_SKIP = 0,       // Cannot skip verification during jit time.
4201     //    CORINFO_VERIFICATION_CAN_SKIP = 1,          // Can skip verification during jit time.
4202     //    CORINFO_VERIFICATION_RUNTIME_CHECK = 2,     // Skip verification during jit time,
4203     //     but need to insert a callout to the VM to ask during runtime
4204     //     whether to skip verification or not.
         //    CORINFO_VERIFICATION_DONT_JIT = 3,          // Verification is not possible; the method is
         //     rejected via badCode() below instead of being jitted.
4205
4206     // Set tiRuntimeCalloutNeeded if canSkipVerification() instructs us to insert a callout for runtime check
4207     if (canSkipVerificationResult == CORINFO_VERIFICATION_RUNTIME_CHECK)
4208     {
4209         tiRuntimeCalloutNeeded = true;
4210     }
4211
4212     if (canSkipVerificationResult == CORINFO_VERIFICATION_DONT_JIT)
4213     {
4214         // Dev10 706080 - Testers don't like the assert, so just silence it
4215         // by not using the macros that invoke debugAssert.
4216         badCode();
4217     }
4218
4219     // When tiVerificationNeeded is true, JIT will do the verification during JIT time.
4220     // The following line means we will NOT do jit time verification if canSkipVerification
4221     // returns CORINFO_VERIFICATION_CAN_SKIP or CORINFO_VERIFICATION_RUNTIME_CHECK.
4222     tiVerificationNeeded = (canSkipVerificationResult == CORINFO_VERIFICATION_CANNOT_SKIP);
4223     return tiVerificationNeeded;
4224 }
4225
4226 BOOL Compiler::verIsByRefLike(const typeInfo& ti)
4227 {
4228     if (ti.IsByRef())
4229     {
4230         return TRUE;
4231     }
4232     if (!ti.IsType(TI_STRUCT))
4233     {
4234         return FALSE;
4235     }
4236     return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR;
4237 }
4238
4239 BOOL Compiler::verIsSafeToReturnByRef(const typeInfo& ti)
4240 {
4241     if (ti.IsPermanentHomeByRef())
4242     {
4243         return TRUE;
4244     }
4245     else
4246     {
4247         return FALSE;
4248     }
4249 }
4250
4251 BOOL Compiler::verIsBoxable(const typeInfo& ti)
4252 {
4253     return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables
4254             || ti.IsUnboxedGenericTypeVar() ||
4255             (ti.IsType(TI_STRUCT) &&
4256              // exclude byreflike structs
4257              !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR)));
4258 }
4259
4260 // Is it a boxed value type?
4261 bool Compiler::verIsBoxedValueType(typeInfo ti)
4262 {
4263     if (ti.GetType() == TI_REF)
4264     {
4265         CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef();
4266         return !!eeIsValueClass(clsHnd);
4267     }
4268     else
4269     {
4270         return false;
4271     }
4272 }
4273
4274 /*****************************************************************************
4275  *
4276  *  Check if a TailCall is legal.
4277  */
4278
4279 bool Compiler::verCheckTailCallConstraint(
4280     OPCODE                  opcode,
4281     CORINFO_RESOLVED_TOKEN* pResolvedToken,
4282     CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter?
4283     bool                    speculative                // If true, won't throw if verification fails. Instead it will
4284                                                        // return false to the caller.
4285                                                        // If false, it will throw.
4286     )
4287 {
4288     DWORD            mflags;
4289     CORINFO_SIG_INFO sig;
4290     unsigned int     popCount = 0; // we can't pop the stack since impImportCall needs it, so
4291                                    // this counter is used to keep track of how many items have been
4292                                    // virtually popped
4293
4294     CORINFO_METHOD_HANDLE methodHnd       = nullptr;
4295     CORINFO_CLASS_HANDLE  methodClassHnd  = nullptr;
4296     unsigned              methodClassFlgs = 0;
4297
4298     assert(impOpcodeIsCallOpcode(opcode));
4299
4300     if (compIsForInlining())
4301     {
4302         return false;
4303     }
4304
4305     // for calli, VerifyOrReturn that this is not a virtual method
4306     if (opcode == CEE_CALLI)
4307     {
4308         /* Get the call sig */
4309         eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4310
4311         // We don't know the target method, so we have to infer the flags, or
4312         // assume the worst-case.
4313         mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
4314     }
4315     else
4316     {
4317         methodHnd = pResolvedToken->hMethod;
4318
4319         mflags = info.compCompHnd->getMethodAttribs(methodHnd);
4320
4321         // When verifying generic code we pair the method handle with its
4322         // owning class to get the exact method signature.
4323         methodClassHnd = pResolvedToken->hClass;
4324         assert(methodClassHnd);
4325
4326         eeGetMethodSig(methodHnd, &sig, methodClassHnd);
4327
4328         // opcode specific check
4329         methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd);
4330     }
4331
4332     // We must have got the methodClassHnd if opcode is not CEE_CALLI
4333     assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI);
4334
4335     if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4336     {
4337         eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4338     }
4339
4340     // check compatibility of the arguments
4341     unsigned int argCount;
4342     argCount = sig.numArgs;
4343     CORINFO_ARG_LIST_HANDLE args;
4344     args = sig.args;
4345     while (argCount--)
4346     {
4347         typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack();
4348
4349         // check that the argument is not a byref for tailcalls
4350         VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative);
4351
4352         // For unsafe code, we might have parameters containing pointer to the stack location.
4353         // Disallow the tailcall for this kind.
4354         CORINFO_CLASS_HANDLE classHandle;
4355         CorInfoType          ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle));
4356         VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative);
4357
4358         args = info.compCompHnd->getArgNext(args);
4359     }
4360
4361     // update popCount
4362     popCount += sig.numArgs;
4363
4364     // check for 'this' which is on non-static methods, not called via NEWOBJ
4365     if (!(mflags & CORINFO_FLG_STATIC))
4366     {
4367         // Always update the popCount.
4368         // This is crucial for the stack calculation to be correct.
4369         typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4370         popCount++;
4371
4372         if (opcode == CEE_CALLI)
4373         {
4374             // For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object
4375             // on the stack.
4376             if (tiThis.IsValueClass())
4377             {
4378                 tiThis.MakeByRef();
4379             }
4380             VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative);
4381         }
4382         else
4383         {
4384             // Check type compatibility of the this argument
4385             typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd);
4386             if (tiDeclaredThis.IsValueClass())
4387             {
4388                 tiDeclaredThis.MakeByRef();
4389             }
4390
4391             VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative);
4392         }
4393     }
4394
4395     // Tail calls on constrained calls should be illegal too:
4396     // when instantiated at a value type, a constrained call may pass the address of a stack allocated value
4397     VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative);
4398
4399     // Get the exact view of the signature for an array method
4400     if (sig.retType != CORINFO_TYPE_VOID)
4401     {
4402         if (methodClassFlgs & CORINFO_FLG_ARRAY)
4403         {
4404             assert(opcode != CEE_CALLI);
4405             eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4406         }
4407     }
4408
4409     typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass);
4410     typeInfo tiCallerRetType =
4411         verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
4412
4413     // void return type gets morphed into the error type, so we have to treat it specially here
4414     if (sig.retType == CORINFO_TYPE_VOID)
4415     {
4416         VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch",
4417                                   speculative);
4418     }
4419     else
4420     {
4421         VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType),
4422                                                    NormaliseForStack(tiCallerRetType), true),
4423                                   "tailcall return mismatch", speculative);
4424     }
4425
4426     // for tailcall, stack must be empty
4427     VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative);
4428
4429     return true; // Yes, tailcall is legal
4430 }
4431
4432 /*****************************************************************************
4433  *
4434  *  Checks the IL verification rules for the call
4435  */
4436
4437 void Compiler::verVerifyCall(OPCODE                  opcode,
4438                              CORINFO_RESOLVED_TOKEN* pResolvedToken,
4439                              CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
4440                              bool                    tailCall,
4441                              bool                    readonlyCall,
4442                              const BYTE*             delegateCreateStart,
4443                              const BYTE*             codeAddr,
4444                              CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName))
4445 {
4446     DWORD             mflags;
4447     CORINFO_SIG_INFO* sig      = nullptr;
4448     unsigned int      popCount = 0; // we can't pop the stack since impImportCall needs it, so
4449                                     // this counter is used to keep track of how many items have been
4450                                     // virtually popped
4451
4452     // for calli, VerifyOrReturn that this is not a virtual method
4453     if (opcode == CEE_CALLI)
4454     {
4455         Verify(false, "Calli not verifiable");
4456         return;
4457     }
4458
4459     //<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item.
4460     mflags = callInfo->verMethodFlags;
4461
4462     sig = &callInfo->verSig;
4463
4464     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4465     {
4466         eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
4467     }
4468
4469     // opcode specific check
4470     unsigned methodClassFlgs = callInfo->classFlags;
4471     switch (opcode)
4472     {
4473         case CEE_CALLVIRT:
4474             // cannot do callvirt on valuetypes
4475             VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class");
4476             VerifyOrReturn(sig->hasThis(), "CallVirt on static method");
4477             break;
4478
4479         case CEE_NEWOBJ:
4480         {
4481             assert(!tailCall); // Importer should not allow this
4482             VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC),
4483                            "newobj must be on instance");
4484
4485             if (methodClassFlgs & CORINFO_FLG_DELEGATE)
4486             {
4487                 VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor");
4488                 typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
4489                 typeInfo tiDeclaredFtn =
4490                     verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack();
4491                 VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type");
4492
4493                 assert(popCount == 0);
4494                 typeInfo tiActualObj = impStackTop(1).seTypeInfo;
4495                 typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
4496
4497                 VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg");
4498                 VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch");
4499                 VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF),
4500                                "delegate object type mismatch");
4501
4502                 CORINFO_CLASS_HANDLE objTypeHandle =
4503                     tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef();
4504
4505                 // the method signature must be compatible with the delegate's invoke method
4506
4507                 // check that for virtual functions, the type of the object used to get the
4508                 // ftn ptr is the same as the type of the object passed to the delegate ctor.
4509                 // since this is a bit of work to determine in general, we pattern match stylized
4510                 // code sequences
4511
4512                 // the delegate creation code check, which used to be done later, is now done here
4513                 // so we can read delegateMethodRef directly
4514                 // from the preceding LDFTN or CEE_LDVIRTFTN instruction sequence;
4515                 // we then use it in our call to isCompatibleDelegate().
4516
4517                 mdMemberRef delegateMethodRef = mdMemberRefNil;
4518                 VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef),
4519                                "must create delegates with certain IL");
4520
4521                 CORINFO_RESOLVED_TOKEN delegateResolvedToken;
4522                 delegateResolvedToken.tokenContext = impTokenLookupContextHandle;
4523                 delegateResolvedToken.tokenScope   = info.compScopeHnd;
4524                 delegateResolvedToken.token        = delegateMethodRef;
4525                 delegateResolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
4526                 info.compCompHnd->resolveToken(&delegateResolvedToken);
4527
4528                 CORINFO_CALL_INFO delegateCallInfo;
4529                 eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */,
4530                               addVerifyFlag(CORINFO_CALLINFO_SECURITYCHECKS), &delegateCallInfo);
4531
4532                 BOOL isOpenDelegate = FALSE;
4533                 VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass,
4534                                                                       tiActualFtn.GetMethod(), pResolvedToken->hClass,
4535                                                                       &isOpenDelegate),
4536                                "function incompatible with delegate");
4537
4538                 // check the constraints on the target method
4539                 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass),
4540                                "delegate target has unsatisfied class constraints");
4541                 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass,
4542                                                                             tiActualFtn.GetMethod()),
4543                                "delegate target has unsatisfied method constraints");
4544
4545                 // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch)
4546                 // for additional verification rules for delegates
4547                 CORINFO_METHOD_HANDLE actualMethodHandle  = tiActualFtn.GetMethod();
4548                 DWORD                 actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle);
4549                 if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
4550                 {
4551
4552                     if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0)
4553 #ifdef DEBUG
4554                         && StrictCheckForNonVirtualCallToVirtualMethod()
4555 #endif
4556                             )
4557                     {
4558                         if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
4559                         {
4560                             VerifyOrReturn(tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly() ||
4561                                                verIsBoxedValueType(tiActualObj),
4562                                            "The 'this' parameter to the call must be either the calling method's "
4563                                            "'this' parameter or "
4564                                            "a boxed value type.");
4565                         }
4566                     }
4567                 }
4568
4569                 if (actualMethodAttribs & CORINFO_FLG_PROTECTED)
4570                 {
4571                     BOOL targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC;
4572
4573                     Verify(targetIsStatic || !isOpenDelegate,
4574                            "Unverifiable creation of an open instance delegate for a protected member.");
4575
4576                     CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic)
4577                                                                 ? info.compClassHnd
4578                                                                 : tiActualObj.GetClassHandleForObjRef();
4579
4580                     // In the case of protected methods, it is a requirement that the 'this'
4581                     // pointer be a subclass of the current context.  Perform this check.
4582                     Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
4583                            "Accessing protected method through wrong type.");
4584                 }
4585                 goto DONE_ARGS;
4586             }
4587         }
4588         // fall thru to default checks
4589         default:
4590             VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract");
4591     }
4592     VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)),
4593                    "can only newobj a delegate constructor");
4594
4595     // check compatibility of the arguments
4596     unsigned int argCount;
4597     argCount = sig->numArgs;
4598     CORINFO_ARG_LIST_HANDLE args;
4599     args = sig->args;
4600     while (argCount--)
4601     {
4602         typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo;
4603
4604         typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack();
4605         VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch");
4606
4607         args = info.compCompHnd->getArgNext(args);
4608     }
4609
4610 DONE_ARGS:
4611
4612     // update popCount
4613     popCount += sig->numArgs;
4614
4615     // check for 'this', which is present on non-static methods not called via NEWOBJ
4616     CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
4617     if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ))
4618     {
4619         typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4620         popCount++;
4621
4622         // If it is null, we assume we can access it (since it will AV shortly)
4623         // If it is anything but a reference class, there is no hierarchy, so
4624         // again, we don't need the precise instance class to compute 'protected' access
4625         if (tiThis.IsType(TI_REF))
4626         {
4627             instanceClassHnd = tiThis.GetClassHandleForObjRef();
4628         }
4629
4630         // Check type compatibility of the this argument
4631         typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass);
4632         if (tiDeclaredThis.IsValueClass())
4633         {
4634             tiDeclaredThis.MakeByRef();
4635         }
4636
4637         // If this is a call to the base class .ctor, set thisPtr Init for
4638         // this block.
4639         if (mflags & CORINFO_FLG_CONSTRUCTOR)
4640         {
4641             if (verTrackObjCtorInitState && tiThis.IsThisPtr() &&
4642                 verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass))
4643             {
4644                 assert(verCurrentState.thisInitialized !=
4645                        TIS_Bottom); // This should never be the case just from the logic of the verifier.
4646                 VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit,
4647                                "Call to base class constructor when 'this' is possibly initialized");
4648                 // Otherwise, 'this' is now initialized.
4649                 verCurrentState.thisInitialized = TIS_Init;
4650                 tiThis.SetInitialisedObjRef();
4651             }
4652             else
4653             {
4654                 // We allow direct calls to value type constructors
4655                 // NB: we have to check that the contents of tiThis is a value type, otherwise we could use a
4656                 // constrained callvirt to illegally re-enter a .ctor on a value of reference type.
4657                 VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(),
4658                                "Bad call to a constructor");
4659             }
4660         }
4661
4662         if (pConstrainedResolvedToken != nullptr)
4663         {
4664             VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call");
4665
4666             typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass);
4667
4668             // We just dereference this and test for equality
4669             tiThis.DereferenceByRef();
4670             VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint),
4671                            "this type mismatch with constrained type operand");
4672
4673             // Now pretend the this type is the boxed constrained type, for the sake of subsequent checks
4674             tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass);
4675         }
4676
4677         // To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too
4678         if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef())
4679         {
4680             tiDeclaredThis.SetIsReadonlyByRef();
4681         }
4682
4683         VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch");
4684
4685         if (tiThis.IsByRef())
4686         {
4687             // Find the actual type where the method exists (as opposed to what is declared
4688             // in the metadata). This is to prevent passing a byref as the "this" argument
4689             // while calling methods like System.ValueType.GetHashCode() which expect boxed objects.
4690
4691             CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod);
4692             VerifyOrReturn(eeIsValueClass(actualClassHnd),
4693                            "Call to base type of valuetype (which is never a valuetype)");
4694         }
4695
4696         // Rules for non-virtual call to a non-final virtual method:
4697
4698         // Define:
4699         // The "this" pointer is considered to be "possibly written" if
4700         //   1. Its address has been taken (LDARGA 0) anywhere in the method.
4701         //   (or)
4702         //   2. It has been stored to (STARG.0) anywhere in the method.
4703
4704         // A non-virtual call to a non-final virtual method is only allowed if
4705         //   1. The this pointer passed to the callee is an instance of a boxed value type.
4706         //   (or)
4707         //   2. The this pointer passed to the callee is the current method's this pointer.
4708         //      (and) The current method's this pointer is not "possibly written".
4709
4710         // Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to
4711         // virtual methods.  (Luckily this does not affect .ctors, since they are not virtual).
4712         // This is stronger than is strictly needed, but implementing a laxer rule is significantly
4713         // harder and more error prone.
4714
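             // For example (illustrative IL only): a C# "base.Foo()" call, where Foo is virtual,
             // compiles to "ldarg.0; call instance void Base::Foo()". That call is accepted here
             // only if arg 0 is never stored to or address-taken anywhere in the calling method
             // (or the 'this' being passed is a boxed value type).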
4715         if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0)
4716 #ifdef DEBUG
4717             && StrictCheckForNonVirtualCallToVirtualMethod()
4718 #endif
4719                 )
4720         {
4721             if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
4722             {
4723                 VerifyOrReturn(
4724                     tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly() || verIsBoxedValueType(tiThis),
4725                     "The 'this' parameter to the call must be either the calling method's 'this' parameter or "
4726                     "a boxed value type.");
4727             }
4728         }
4729     }
4730
4731     // check any constraints on the callee's class and type parameters
4732     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass),
4733                    "method has unsatisfied class constraints");
4734     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod),
4735                    "method has unsatisfied method constraints");
4736
4737     if (mflags & CORINFO_FLG_PROTECTED)
4738     {
4739         VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
4740                        "Can't access protected method");
4741     }
4742
4743     // Get the exact view of the signature for an array method
4744     if (sig->retType != CORINFO_TYPE_VOID)
4745     {
4746         eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass);
4747     }
4748
4749     // "readonly." prefixed calls only allowed for the Address operation on arrays.
4750     // The methods supported by array types are under the control of the EE
4751     // so we can trust that only the Address operation returns a byref.
4752     if (readonlyCall)
4753     {
4754         typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass);
4755         VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(),
4756                        "unexpected use of readonly prefix");
4757     }
4758
4759     // Verify the tailcall
4760     if (tailCall)
4761     {
4762         verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false);
4763     }
4764 }
4765
4766 /*****************************************************************************
4767  *  Checks that a delegate creation is done using the following pattern:
4768  *     dup
4769  *     ldvirtftn targetMemberRef
4770  *  OR
4771  *     ldftn targetMemberRef
4772  *
4773  * 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if
4774  *  not in this basic block)
4775  *
4776  *  targetMemberRef is read from the code sequence.
4777  *  targetMemberRef is validated iff verificationNeeded.
4778  */
4779
4780 BOOL Compiler::verCheckDelegateCreation(const BYTE*  delegateCreateStart,
4781                                         const BYTE*  codeAddr,
4782                                         mdMemberRef& targetMemberRef)
4783 {
4784     if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
4785     {
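             // ldftn is a two-byte opcode (FE 06) followed by a 4-byte metadata token,
             // so the token starts at offset 2 from the start of the instruction.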
4786         targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]);
4787         return TRUE;
4788     }
4789     else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr))
4790     {
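             // dup (one byte) followed by ldvirtftn (two-byte opcode FE 07) puts the
             // 4-byte metadata token at offset 3 from the dup that starts the sequence.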
4791         targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]);
4792         return TRUE;
4793     }
4794
4795     return FALSE;
4796 }
4797
4798 typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType)
4799 {
4800     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
4801     typeInfo ptrVal     = verVerifyLDIND(tiTo, instrType);
4802     typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack();
4803     if (!tiCompatibleWith(value, normPtrVal, true))
4804     {
4805         Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch");
4806         compUnsafeCastUsed = true;
4807     }
4808     return ptrVal;
4809 }
4810
4811 typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType)
4812 {
4813     assert(!instrType.IsStruct());
4814
4815     typeInfo ptrVal;
4816     if (ptr.IsByRef())
4817     {
4818         ptrVal = DereferenceByRef(ptr);
4819         if (instrType.IsObjRef() && !ptrVal.IsObjRef())
4820         {
4821             Verify(false, "bad pointer");
4822             compUnsafeCastUsed = true;
4823         }
4824         else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal))
4825         {
4826             Verify(false, "pointer not consistent with instr");
4827             compUnsafeCastUsed = true;
4828         }
4829     }
4830     else
4831     {
4832         Verify(false, "pointer not byref");
4833         compUnsafeCastUsed = true;
4834     }
4835
4836     return ptrVal;
4837 }
4838
4839 // Verify that the field is used properly.  'tiThis' is NULL for statics,
4840 // 'fieldFlags' is the field's attributes, and mutator is TRUE if it is a
4841 // ld*flda or a st*fld.
4842 // 'enclosingClass' is given if we are accessing a field in some specific type.
4843
4844 void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN*   pResolvedToken,
4845                               const CORINFO_FIELD_INFO& fieldInfo,
4846                               const typeInfo*           tiThis,
4847                               BOOL                      mutator,
4848                               BOOL                      allowPlainStructAsThis)
4849 {
4850     CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass;
4851     unsigned             fieldFlags     = fieldInfo.fieldFlags;
4852     CORINFO_CLASS_HANDLE instanceClass =
4853         info.compClassHnd; // for statics, we imagine the instance is the current class.
4854
4855     bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0);
4856     if (mutator)
4857     {
4858         Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA based static");
4859         if ((fieldFlags & CORINFO_FLG_FIELD_FINAL))
4860         {
4861             Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd &&
4862                        info.compIsStatic == isStaticField,
4863                    "bad use of initonly field (set or address taken)");
4864         }
4865     }
4866
4867     if (tiThis == nullptr)
4868     {
4869         Verify(isStaticField, "used static opcode with non-static field");
4870     }
4871     else
4872     {
4873         typeInfo tThis = *tiThis;
4874
4875         if (allowPlainStructAsThis && tThis.IsValueClass())
4876         {
4877             tThis.MakeByRef();
4878         }
4879
4880         // If it is null, we assume we can access it (since it will AV shortly)
4881         // If it is anything but a reference class, there is no hierarchy, so
4882         // again, we don't need the precise instance class to compute 'protected' access
4883         if (tiThis->IsType(TI_REF))
4884         {
4885             instanceClass = tiThis->GetClassHandleForObjRef();
4886         }
4887
4888         // Note that even if the field is static, we require that the this pointer
4889         // satisfy the same constraints as a non-static field.  This happens to
4890         // be simpler and seems reasonable.
4891         typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass);
4892         if (tiDeclaredThis.IsValueClass())
4893         {
4894             tiDeclaredThis.MakeByRef();
4895
4896             // we allow read-only tThis, on any field access (even stores!), because if the
4897             // class implementor wants to prohibit stores he should make the field private.
4898             // we do this by setting the read-only bit on the type we compare tThis to.
4899             tiDeclaredThis.SetIsReadonlyByRef();
4900         }
4901         else if (verTrackObjCtorInitState && tThis.IsThisPtr())
4902         {
4903             // Any field access is legal on "uninitialized" this pointers.
4904             // The easiest way to implement this is to simply set the
4905             // initialized bit for the duration of the type check on the
4906             // field access only.  It does not change the state of the "this"
4907             // for the function as a whole. Note that the "tThis" is a copy
4908             // of the original "this" type (*tiThis) passed in.
4909             tThis.SetInitialisedObjRef();
4910         }
4911
4912         Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch");
4913     }
4914
4915     // Presently the JIT does not check that we don't store or take the address of init-only fields
4916     // since we cannot guarantee their immutability and it is not a security issue.
4917
4918     // check any constraints on the fields's class --- accessing the field might cause a class constructor to run.
4919     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass),
4920                    "field has unsatisfied class constraints");
4921     if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED)
4922     {
4923         Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass),
4924                "Accessing protected method through wrong type.");
4925     }
4926 }
4927
4928 void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode)
4929 {
4930     if (tiOp1.IsNumberType())
4931     {
4932 #ifdef _TARGET_64BIT_
4933         Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch");
4934 #else  // _TARGET_64BIT
4935         // [10/17/2013] Consider changing this: to put on my verification lawyer hat,
4936         // this is non-conforming to the ECMA Spec: types don't have to be equivalent,
4937         // but compatible, since we can coalesce native int with int32 (see section III.1.5).
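             // For example, on 32-bit targets a CEQ of a native int against an int32 is
             // rejected here, even though III.1.5 would treat the two as compatible.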
4938         Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch");
4939 #endif // !_TARGET_64BIT_
4940     }
4941     else if (tiOp1.IsObjRef())
4942     {
4943         switch (opcode)
4944         {
4945             case CEE_BEQ_S:
4946             case CEE_BEQ:
4947             case CEE_BNE_UN_S:
4948             case CEE_BNE_UN:
4949             case CEE_CEQ:
4950             case CEE_CGT_UN:
4951                 break;
4952             default:
4953                 Verify(FALSE, "Cond not allowed on object types");
4954         }
4955         Verify(tiOp2.IsObjRef(), "Cond type mismatch");
4956     }
4957     else if (tiOp1.IsByRef())
4958     {
4959         Verify(tiOp2.IsByRef(), "Cond type mismatch");
4960     }
4961     else
4962     {
4963         Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch");
4964     }
4965 }
4966
4967 void Compiler::verVerifyThisPtrInitialised()
4968 {
4969     if (verTrackObjCtorInitState)
4970     {
4971         Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized");
4972     }
4973 }
4974
4975 BOOL Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target)
4976 {
4977     // Either target == context, in this case calling an alternate .ctor
4978     // Or target is the immediate parent of context
4979
4980     return ((target == context) || (target == info.compCompHnd->getParentType(context)));
4981 }
4982
4983 GenTreePtr Compiler::impImportLdvirtftn(GenTreePtr              thisPtr,
4984                                         CORINFO_RESOLVED_TOKEN* pResolvedToken,
4985                                         CORINFO_CALL_INFO*      pCallInfo)
4986 {
4987     if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE))
4988     {
4989         NO_WAY("Virtual call to a function added via EnC is not supported");
4990     }
4991
4992 #ifdef FEATURE_READYTORUN_COMPILER
4993     if (opts.IsReadyToRun())
4994     {
4995         if (!pCallInfo->exactContextNeedsRuntimeLookup)
4996         {
4997             GenTreeCall* call = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, GTF_EXCEPT,
4998                                                     gtNewArgList(thisPtr));
4999
5000             call->setEntryPoint(pCallInfo->codePointerLookup.constLookup);
5001
5002             return call;
5003         }
5004
5005         // We need a runtime lookup. CoreRT has a ReadyToRun helper for that too.
5006         if (IsTargetAbi(CORINFO_CORERT_ABI))
5007         {
5008             GenTreePtr ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind);
5009
5010             return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
5011                                              gtNewArgList(ctxTree), &pCallInfo->codePointerLookup.lookupKind);
5012         }
5013     }
5014 #endif
5015
5016     // Get the exact descriptor for the static callsite
5017     GenTreePtr exactTypeDesc = impParentClassTokenToHandle(pResolvedToken);
5018     if (exactTypeDesc == nullptr)
5019     { // compDonotInline()
5020         return nullptr;
5021     }
5022
5023     GenTreePtr exactMethodDesc = impTokenToHandle(pResolvedToken);
5024     if (exactMethodDesc == nullptr)
5025     { // compDonotInline()
5026         return nullptr;
5027     }
5028
5029     GenTreeArgList* helpArgs = gtNewArgList(exactMethodDesc);
5030
5031     helpArgs = gtNewListNode(exactTypeDesc, helpArgs);
5032
5033     helpArgs = gtNewListNode(thisPtr, helpArgs);
5034
5035     // Call helper function.  This gets the target address of the final destination callsite.
5036
5037     return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, GTF_EXCEPT, helpArgs);
5038 }
5039
5040 /*****************************************************************************
5041  *
5042  *  Build and import a box node
5043  */
5044
5045 void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
5046 {
5047     // Get the tree for the type handle for the boxed object.  In the case
5048     // of shared generic code or ngen'd code this might be an embedded
5049     // computation.
5050     // Note we can only do it if the class constructor has been called;
5051     // we can always do it on primitive types.
5052
5053     GenTreePtr op1 = nullptr;
5054     GenTreePtr op2 = nullptr;
5055     var_types  lclTyp;
5056
5057     impSpillSpecialSideEff();
5058
5059     // Now get the expression to box from the stack.
5060     CORINFO_CLASS_HANDLE operCls;
5061     GenTreePtr           exprToBox = impPopStack(operCls).val;
5062
5063     CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
5064     if (boxHelper == CORINFO_HELP_BOX)
5065     {
5066         // we are doing 'normal' boxing.  This means that we can inline the box operation
5067         // Box(expr) gets morphed into
5068         // temp = new(clsHnd)
5069         // cpobj(temp+4, expr, clsHnd)
5070         // push temp
5071         // The code paths differ slightly below for structs and primitives because
5072         // "cpobj" differs in these cases.  In one case you get
5073         //    impAssignStructPtr(temp+4, expr, clsHnd)
5074         // and the other you get
5075         //    *(temp+4) = expr
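             //
             // For example (illustrative only), boxing an int32 'x' produces roughly:
             //    boxTemp = <allocation helper chosen by the EE for clsHnd>
             //    *(boxTemp + sizeof(void*)) = x    // payload copied just past the method table pointer
             //    push boxTemp
             // (the "+4" above is really "+ sizeof(void*)", as the code below shows)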
5076
5077         if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM)
5078         {
5079             impBoxTemp = lvaGrabTemp(true DEBUGARG("Box Helper"));
5080         }
5081
5082         // The box temp needs to stay in use until this box expression is appended to
5083         // some other node.  We approximate this by keeping it alive until
5084         // the opcode stack becomes empty.
5085         impBoxTempInUse = true;
5086
5087 #ifdef FEATURE_READYTORUN_COMPILER
5088         bool usingReadyToRunHelper = false;
5089
5090         if (opts.IsReadyToRun())
5091         {
5092             op1                   = impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
5093             usingReadyToRunHelper = (op1 != nullptr);
5094         }
5095
5096         if (!usingReadyToRunHelper)
5097 #endif
5098         {
5099             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
5100             // and the newfast call with a single call to a dynamic R2R cell that will:
5101             //      1) Load the context
5102             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
5103             //      3) Allocate and return the new object for boxing
5104             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
5105
5106             // Ensure that the value class is restored
5107             op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5108             if (op2 == nullptr)
5109             { // compDonotInline()
5110                 return;
5111             }
5112
5113             op1 = gtNewHelperCallNode(info.compCompHnd->getNewHelper(pResolvedToken, info.compMethodHnd), TYP_REF, 0,
5114                                       gtNewArgList(op2));
5115         }
5116
5117         /* Remember that this basic block contains 'new' of an array */
5118         compCurBB->bbFlags |= BBF_HAS_NEWOBJ;
5119
5120         GenTreePtr asg = gtNewTempAssign(impBoxTemp, op1);
5121
5122         GenTreePtr asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
5123
5124         op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
5125         op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
5126         op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2);
5127
5128         if (varTypeIsStruct(exprToBox))
5129         {
5130             assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls));
5131             op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL);
5132         }
5133         else
5134         {
5135             lclTyp = exprToBox->TypeGet();
5136             if (lclTyp == TYP_BYREF)
5137             {
5138                 lclTyp = TYP_I_IMPL;
5139             }
5140             CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass);
5141             if (impIsPrimitive(jitType))
5142             {
5143                 lclTyp = JITtype2varType(jitType);
5144             }
5145             assert(genActualType(exprToBox->TypeGet()) == genActualType(lclTyp) ||
5146                    varTypeIsFloating(lclTyp) == varTypeIsFloating(exprToBox->TypeGet()));
5147             var_types srcTyp = exprToBox->TypeGet();
5148             var_types dstTyp = lclTyp;
5149
5150             if (srcTyp != dstTyp)
5151             {
5152                 assert((varTypeIsFloating(srcTyp) && varTypeIsFloating(dstTyp)) ||
5153                        (varTypeIsIntegral(srcTyp) && varTypeIsIntegral(dstTyp)));
5154                 exprToBox = gtNewCastNode(dstTyp, exprToBox, dstTyp);
5155             }
5156             op1 = gtNewAssignNode(gtNewOperNode(GT_IND, lclTyp, op1), exprToBox);
5157         }
5158
5159         op2 = gtNewLclvNode(impBoxTemp, TYP_REF);
5160         op1 = gtNewOperNode(GT_COMMA, TYP_REF, op1, op2);
5161
5162         // Record that this is a "box" node.
5163         op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt);
5164
5165         // If it is a value class, mark the "box" node.  We can use this information
5166         // to optimise several cases:
5167         //    "box(x) == null" --> false
5168         //    "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod"
5169         //    "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod"
5170
5171         op1->gtFlags |= GTF_BOX_VALUE;
5172         assert(op1->IsBoxedValue());
5173         assert(asg->gtOper == GT_ASG);
5174     }
5175     else
5176     {
5177         // Don't optimize, just call the helper and be done with it
5178
5179         // Ensure that the value class is restored
5180         op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5181         if (op2 == nullptr)
5182         { // compDonotInline()
5183             return;
5184         }
5185
5186         GenTreeArgList* args = gtNewArgList(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true));
5187         op1                  = gtNewHelperCallNode(boxHelper, TYP_REF, GTF_EXCEPT, args);
5188     }
5189
5190     /* Push the result back on the stack, */
5191     /* even if clsHnd is a value class we want the TI_REF */
5192     typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass));
5193     impPushOnStack(op1, tiRetVal);
5194 }
5195
5196 //------------------------------------------------------------------------
5197 // impImportNewObjArray: Build and import `new` of multi-dimensional array
5198 //
5199 // Arguments:
5200 //    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5201 //                     by a call to CEEInfo::resolveToken().
5202 //    pCallInfo - The CORINFO_CALL_INFO that has been initialized
5203 //                by a call to CEEInfo::getCallInfo().
5204 //
5205 // Assumptions:
5206 //    The multi-dimensional array constructor arguments (array dimensions) are
5207 //    pushed on the IL stack on entry to this method.
5208 //
5209 // Notes:
5210 //    Multi-dimensional array constructors are imported as calls to a JIT
5211 //    helper, not as regular calls.
5212
5213 void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
5214 {
5215     GenTreePtr classHandle = impParentClassTokenToHandle(pResolvedToken);
5216     if (classHandle == nullptr)
5217     { // compDonotInline()
5218         return;
5219     }
5220
5221     assert(pCallInfo->sig.numArgs);
5222
5223     GenTreePtr      node;
5224     GenTreeArgList* args;
5225
5226     //
5227     // There are two different JIT helpers that can be used to allocate
5228     // multi-dimensional arrays:
5229     //
5230     // - CORINFO_HELP_NEW_MDARR - takes the array dimensions as varargs.
5231     //      This variant is deprecated. It should be eventually removed.
5232     //
5233     // - CORINFO_HELP_NEW_MDARR_NONVARARG - takes the array dimensions as
5234     //      pointer to block of int32s. This variant is more portable.
5235     //
5236     // The non-varargs helper is enabled for CoreRT only for now. Enabling this
5237     // unconditionally would require ReadyToRun version bump.
5238     //
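         // For example (shape only, assuming a two-dimensional "new int[2,3]"), the non-varargs
         // path below produces roughly
         //     CORINFO_HELP_NEW_MDARR_NONVARARG(arrayClsHnd, 2, &lvaNewObjArrayArgs)
         // with the two dimension values stored into the lvaNewObjArrayArgs block first.
         //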
5239     CLANG_FORMAT_COMMENT_ANCHOR;
5240
5241 #if COR_JIT_EE_VERSION > 460
5242     if (!opts.IsReadyToRun() || IsTargetAbi(CORINFO_CORERT_ABI))
5243     {
5244         LclVarDsc* newObjArrayArgsVar;
5245
5246         // Reuse the temp used to pass the array dimensions to avoid bloating
5247         // the stack frame in case there are multiple calls to multi-dim array
5248         // constructors within a single method.
5249         if (lvaNewObjArrayArgs == BAD_VAR_NUM)
5250         {
5251             lvaNewObjArrayArgs                       = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs"));
5252             lvaTable[lvaNewObjArrayArgs].lvType      = TYP_BLK;
5253             lvaTable[lvaNewObjArrayArgs].lvExactSize = 0;
5254         }
5255
5256         // Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers
5257         // for our call to CORINFO_HELP_NEW_MDARR_NONVARARG.
5258         lvaTable[lvaNewObjArrayArgs].lvExactSize =
5259             max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32));
5260
5261         // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects
5262         // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments
5263         // to one allocation at a time.
5264         impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray"));
5265
5266         //
5267         // The arguments of the CORINFO_HELP_NEW_MDARR_NONVARARG helper are:
5268         //  - Array class handle
5269         //  - Number of dimension arguments
5270         //  - Pointer to block of int32 dimensions - address  of lvaNewObjArrayArgs temp.
5271         //
5272
5273         node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5274         node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node);
5275
5276         // Pop dimension arguments from the stack one at a time and store them
5277         // into the lvaNewObjArrayArgs temp.
5278         for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--)
5279         {
5280             GenTreePtr arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT);
5281
5282             GenTreePtr dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5283             dest            = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest);
5284             dest            = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest,
5285                                  new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i));
5286             dest = gtNewOperNode(GT_IND, TYP_INT, dest);
5287
5288             node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node);
5289         }
5290
5291         args = gtNewArgList(node);
5292
5293         // pass number of arguments to the helper
5294         args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5295
5296         args = gtNewListNode(classHandle, args);
5297
5298         node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR_NONVARARG, TYP_REF, 0, args);
5299     }
5300     else
5301 #endif
5302     {
5303         //
5304         // The varargs helper needs the type and method handles as last
5305         // and last-1 params (this is a cdecl call, so args will be
5306         // pushed in reverse order on the CPU stack)
5307         //
5308
5309         args = gtNewArgList(classHandle);
5310
5311         // pass number of arguments to the helper
5312         args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5313
5314         unsigned argFlags = 0;
5315         args              = impPopList(pCallInfo->sig.numArgs, &argFlags, &pCallInfo->sig, args);
5316
5317         node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, 0, args);
5318
5319         // varargs, so we pop the arguments
5320         node->gtFlags |= GTF_CALL_POP_ARGS;
5321
5322 #ifdef DEBUG
5323         // At the present time we don't track Caller pop arguments
5324         // that have GC references in them
5325         for (GenTreeArgList* temp = args; temp; temp = temp->Rest())
5326         {
5327             assert(temp->Current()->gtType != TYP_REF);
5328         }
5329 #endif
5330     }
5331
5332     node->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5333     node->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass;
5334
5335     // Remember that this basic block contains 'new' of a md array
5336     compCurBB->bbFlags |= BBF_HAS_NEWARRAY;
5337
5338     impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass));
5339 }
5340
5341 GenTreePtr Compiler::impTransformThis(GenTreePtr              thisPtr,
5342                                       CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
5343                                       CORINFO_THIS_TRANSFORM  transform)
5344 {
5345     switch (transform)
5346     {
5347         case CORINFO_DEREF_THIS:
5348         {
5349             GenTreePtr obj = thisPtr;
5350
5351             // This does an LDIND on the obj, which should be a byref pointing to a ref
5352             impBashVarAddrsToI(obj);
5353             assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF);
5354             CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5355
5356             obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj);
5357             // ldind could point anywhere, example a boxed class static int
5358             obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
5359
5360             return obj;
5361         }
5362
5363         case CORINFO_BOX_THIS:
5364         {
5365             // Constraint calls where there might be no
5366             // unboxed entry point require us to implement the call via helper.
5367             // These only occur when a possible target of the call
5368             // may have inherited an implementation of an interface
5369             // method from System.Object or System.ValueType.  The EE does not provide us with
5370             // "unboxed" versions of these methods.
5371
5372             GenTreePtr obj = thisPtr;
5373
5374             assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL);
5375             obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj);
5376             obj->gtFlags |= GTF_EXCEPT;
5377
5378             CorInfoType jitTyp  = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5379             var_types   objType = JITtype2varType(jitTyp);
5380             if (impIsPrimitive(jitTyp))
5381             {
5382                 if (obj->OperIsBlk())
5383                 {
5384                     obj->ChangeOperUnchecked(GT_IND);
5385
5386                     // Obj could point anywhere, example a boxed class static int
5387                     obj->gtFlags |= GTF_IND_TGTANYWHERE;
5388                     obj->gtOp.gtOp2 = nullptr; // must be zero for tree walkers
5389                 }
5390
5391                 obj->gtType = JITtype2varType(jitTyp);
5392                 assert(varTypeIsArithmetic(obj->gtType));
5393             }
5394
5395             // This pushes on the dereferenced byref
5396             // This is then used immediately to box.
5397             impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack());
5398
5399             // This pops off the byref-to-a-value-type remaining on the stack and
5400             // replaces it with a boxed object.
5401             // This is then used as the object to the virtual call immediately below.
5402             impImportAndPushBox(pConstrainedResolvedToken);
5403             if (compDonotInline())
5404             {
5405                 return nullptr;
5406             }
5407
5408             obj = impPopStack().val;
5409             return obj;
5410         }
5411         case CORINFO_NO_THIS_TRANSFORM:
5412         default:
5413             return thisPtr;
5414     }
5415 }
5416
5417 //------------------------------------------------------------------------
5418 // impCanPInvokeInline: check whether PInvoke inlining should be enabled in the current method.
5419 //
5420 // Return Value:
5421 //    true if PInvoke inlining should be enabled in current method, false otherwise
5422 //
5423 // Notes:
5424 //    Checks a number of ambient conditions where we could pinvoke but choose not to
5425
5426 bool Compiler::impCanPInvokeInline()
5427 {
5428     return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (compCodeOpt() != SMALL_CODE) &&
5429            (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
5430         ;
5431 }
5432
5433 //------------------------------------------------------------------------
5434 // impCanPInvokeInlineCallSite: basic legality checks using information
5435 // from a call to see if the call qualifies as an inline pinvoke.
5436 //
5437 // Arguments:
5438 //    block      - block containing the call, or for inlinees, block
5439 //                 containing the call being inlined
5440 //
5441 // Return Value:
5442 //    true if this call can legally qualify as an inline pinvoke, false otherwise
5443 //
5444 // Notes:
5445 //    For runtimes that support exception handling interop there are
5446 //    restrictions on using inline pinvoke in handler regions.
5447 //
5448 //    * We have to disable pinvoke inlining inside of filters because
5449 //    in case the main execution (i.e. in the try block) is inside
5450 //    unmanaged code, we cannot reuse the inlined stub (we still need
5451 //    the original state until we are in the catch handler)
5452 //
5453 //    * We disable pinvoke inlining inside handlers since the GSCookie
5454 //    is in the inlined Frame (see
5455 //    CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but
5456 //    this would not protect framelets/return-address of handlers.
5457 //
5458 //    These restrictions are currently also in place for CoreCLR but
5459 //    can be relaxed when coreclr/#8459 is addressed.
5460
5461 bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block)
5462 {
5463     if (block->hasHndIndex())
5464     {
5465         return false;
5466     }
5467
5468     // The remaining limitations do not apply to CoreRT
5469     if (IsTargetAbi(CORINFO_CORERT_ABI))
5470     {
5471         return true;
5472     }
5473
5474 #ifdef _TARGET_AMD64_
5475     // On x64, we disable pinvoke inlining inside of try regions.
5476     // Here is the comment from JIT64 explaining why:
5477     //
5478     //   [VSWhidbey: 611015] - because the jitted code links in the
5479     //   Frame (instead of the stub) we rely on the Frame not being
5480     //   'active' until inside the stub.  This normally happens by the
5481     //   stub setting the return address pointer in the Frame object
5482     //   inside the stub.  On a normal return, the return address
5483     //   pointer is zeroed out so the Frame can be safely re-used, but
5484     //   if an exception occurs, nobody zeros out the return address
5485     //   pointer.  Thus if we re-used the Frame object, it would go
5486     //   'active' as soon as we link it into the Frame chain.
5487     //
5488     //   Technically we only need to disable PInvoke inlining if we're
5489     //   in a handler or if we're in a try body with a catch or
5490     //   filter/except where other non-handler code in this method
5491     //   might run and try to re-use the dirty Frame object.
5492     //
5493     //   A desktop test case where this seems to matter is
5494     //   jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe
5495     if (block->hasTryIndex())
5496     {
5497         return false;
5498     }
5499 #endif // _TARGET_AMD64_
5500
5501     return true;
5502 }
5503
5504 //------------------------------------------------------------------------
5505 // impCheckForPInvokeCall: examine a call to see if it is a pinvoke and, if so,
5506 // whether it can be expressed as an inline pinvoke.
5507 //
5508 // Arguments:
5509 //    call       - tree for the call
5510 //    methHnd    - handle for the method being called (may be null)
5511 //    sig        - signature of the method being called
5512 //    mflags     - method flags for the method being called
5513 //    block      - block containing the call, or for inlinees, block
5514 //                 containing the call being inlined
5515 //
5516 // Notes:
5517 //   Sets GTF_CALL_M_PINVOKE on the call for pinvokes.
5518 //
5519 //   Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the
5520 //   call passes a combination of legality and profitability checks.
5521 //
5522 //   If GTF_CALL_UNMANAGED is set, increments info.compCallUnmanaged
5523
5524 void Compiler::impCheckForPInvokeCall(
5525     GenTreePtr call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block)
5526 {
5527     CorInfoUnmanagedCallConv unmanagedCallConv;
5528
5529     // If VM flagged it as Pinvoke, flag the call node accordingly
5530     if ((mflags & CORINFO_FLG_PINVOKE) != 0)
5531     {
5532         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_PINVOKE;
5533     }
5534
5535     if (methHnd)
5536     {
5537         if ((mflags & CORINFO_FLG_PINVOKE) == 0 || (mflags & CORINFO_FLG_NOSECURITYWRAP) == 0)
5538         {
5539             return;
5540         }
5541
5542         unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd);
5543     }
5544     else
5545     {
5546         CorInfoCallConv callConv = CorInfoCallConv(sig->callConv & CORINFO_CALLCONV_MASK);
5547         if (callConv == CORINFO_CALLCONV_NATIVEVARARG)
5548         {
5549             // Used by the IL Stubs.
5550             callConv = CORINFO_CALLCONV_C;
5551         }
5552         static_assert_no_msg((unsigned)CORINFO_CALLCONV_C == (unsigned)CORINFO_UNMANAGED_CALLCONV_C);
5553         static_assert_no_msg((unsigned)CORINFO_CALLCONV_STDCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_STDCALL);
5554         static_assert_no_msg((unsigned)CORINFO_CALLCONV_THISCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_THISCALL);
5555         unmanagedCallConv = CorInfoUnmanagedCallConv(callConv);
5556
5557         assert(!call->gtCall.gtCallCookie);
5558     }
5559
5560     if (unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_C && unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_STDCALL &&
5561         unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_THISCALL)
5562     {
5563         return;
5564     }
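    // This is an unmanaged (native) call; count it for this method.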
5565     optNativeCallCount++;
5566
5567     if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && methHnd == nullptr)
5568     {
5569         // PInvoke CALLI in IL stubs must be inlined
5570     }
5571     else
5572     {
5573         // Check legality
5574         if (!impCanPInvokeInlineCallSite(block))
5575         {
5576             return;
5577         }
5578
5579         // PInvoke CALL in IL stubs must be inlined on CoreRT. Skip the ambient condition checks and
5580         // profitability checks.
5581         if (!(opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && IsTargetAbi(CORINFO_CORERT_ABI)))
5582         {
5583             if (!impCanPInvokeInline())
5584             {
5585                 return;
5586             }
5587
5588             // Size-speed tradeoff: don't use inline pinvoke at rarely
5589             // executed call sites.  The non-inline version is more
5590             // compact.
5591             if (block->isRunRarely())
5592             {
5593                 return;
5594             }
5595         }
5596
5597         // The expensive check should be last
5598         if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig))
5599         {
5600             return;
5601         }
5602     }
5603
5604     JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s", info.compFullName));
5605
5606     call->gtFlags |= GTF_CALL_UNMANAGED;
5607     info.compCallUnmanaged++;
5608
5609     // AMD64 convention is the same for native and managed
5610     if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_C)
5611     {
5612         call->gtFlags |= GTF_CALL_POP_ARGS;
5613     }
5614
5615     if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_THISCALL)
5616     {
5617         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL;
5618     }
5619 }
5620
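//------------------------------------------------------------------------
// impImportIndirectCall: create the call node for a CALLI (indirect) call.
//
// The function pointer is popped from the stack (spilling it to a temp first
// unless it is already a simple local) and wrapped in an indirect call node
// whose return type comes from the call site signature. The argument list is
// left empty here for the caller to fill in.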
5621 GenTreePtr Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, IL_OFFSETX ilOffset)
5622 {
5623     var_types callRetTyp = JITtype2varType(sig->retType);
5624
5625     /* The function pointer is on top of the stack - It may be a
5626      * complex expression. As it is evaluated after the args,
5627      * it may cause registered args to be spilled. Simply spill it.
5628      */
5629
5630     // Ignore this trivial case.
5631     if (impStackTop().val->gtOper != GT_LCL_VAR)
5632     {
5633         impSpillStackEntry(verCurrentState.esStackDepth - 1,
5634                            BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall"));
5635     }
5636
5637     /* Get the function pointer */
5638
5639     GenTreePtr fptr = impPopStack().val;
5640     assert(genActualType(fptr->gtType) == TYP_I_IMPL);
5641
5642 #ifdef DEBUG
5643     // This temporary must never be converted to a double in stress mode,
5644     // because that can introduce a call to the cast helper after the
5645     // arguments have already been evaluated.
5646
5647     if (fptr->OperGet() == GT_LCL_VAR)
5648     {
5649         lvaTable[fptr->gtLclVarCommon.gtLclNum].lvKeepType = 1;
5650     }
5651 #endif
5652
5653     /* Create the call node */
5654
5655     GenTreePtr call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
5656
5657     call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
5658
5659     return call;
5660 }
5661
5662 /*****************************************************************************/
5663
5664 void Compiler::impPopArgsForUnmanagedCall(GenTreePtr call, CORINFO_SIG_INFO* sig)
5665 {
5666     assert(call->gtFlags & GTF_CALL_UNMANAGED);
5667
5668     /* Since we push the arguments in reverse order (i.e. right -> left),
5669      * spill any side effects from the stack.
5670      *
5671      * OBS: If there is only one side effect we do not need to spill it,
5672      *      thus we have to spill all side effects except the last one.
5673      */
5674
5675     unsigned lastLevelWithSideEffects = UINT_MAX;
5676
5677     unsigned argsToReverse = sig->numArgs;
5678
5679     // For "thiscall", the first argument goes in a register. Since its
5680     // order does not need to be changed, we do not need to spill it
5681
5682     if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
5683     {
5684         assert(argsToReverse);
5685         argsToReverse--;
5686     }
5687
5688 #ifndef _TARGET_X86_
5689     // Don't reverse args on ARM or x64 - the first four args are always placed in regs in order
5690     argsToReverse = 0;
5691 #endif
5692
5693     for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++)
5694     {
5695         if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF)
5696         {
5697             assert(lastLevelWithSideEffects == UINT_MAX);
5698
5699             impSpillStackEntry(level,
5700                                BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect"));
5701         }
5702         else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT)
5703         {
5704             if (lastLevelWithSideEffects != UINT_MAX)
5705             {
5706                 /* We had a previous side effect - must spill it */
5707                 impSpillStackEntry(lastLevelWithSideEffects,
5708                                    BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect"));
5709
5710                 /* Record the level for the current side effect in case we will spill it */
5711                 lastLevelWithSideEffects = level;
5712             }
5713             else
5714             {
5715                 /* This is the first side effect encountered - record its level */
5716
5717                 lastLevelWithSideEffects = level;
5718             }
5719         }
5720     }
5721
5722     /* The argument list is now "clean" - no out-of-order side effects
5723      * Pop the argument list in reverse order */
5724
5725     unsigned   argFlags = 0;
5726     GenTreePtr args     = call->gtCall.gtCallArgs =
5727         impPopRevList(sig->numArgs, &argFlags, sig, sig->numArgs - argsToReverse);
5728
5729     if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
5730     {
5731         GenTreePtr thisPtr = args->Current();
5732         impBashVarAddrsToI(thisPtr);
5733         assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF);
5734     }
5735
5736     if (args)
5737     {
5738         call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5739     }
5740 }
5741
5742 //------------------------------------------------------------------------
5743 // impInitClass: Build a node to initialize the class before accessing the
5744 //               field if necessary
5745 //
5746 // Arguments:
5747 //    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5748 //                     by a call to CEEInfo::resolveToken().
5749 //
5750 // Return Value: If needed, a pointer to the node that will perform the class
5751 //               initialization.  Otherwise, nullptr.
5752 //
5753
5754 GenTreePtr Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken)
5755 {
5756     CorInfoInitClassResult initClassResult =
5757         info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle);
5758
5759     if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0)
5760     {
5761         return nullptr;
5762     }
5763     BOOL runtimeLookup;
5764
5765     GenTreePtr node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup);
5766
5767     if (node == nullptr)
5768     {
5769         assert(compDonotInline());
5770         return nullptr;
5771     }
5772
5773     if (runtimeLookup)
5774     {
5775         node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, 0, gtNewArgList(node));
5776     }
5777     else
5778     {
5779         // Call the shared non-GC static helper, as it's the fastest
5780         node = fgGetSharedCCtor(pResolvedToken->hClass);
5781     }
5782
5783     return node;
5784 }
5785
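//------------------------------------------------------------------------
// impImportStaticReadOnlyField: create a constant node holding the current
// value of a static read-only field, read directly from the given field
// address using the given field type.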
5786 GenTreePtr Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp)
5787 {
5788     GenTreePtr op1 = nullptr;
5789
5790     switch (lclTyp)
5791     {
5792         int     ival;
5793         __int64 lval;
5794         double  dval;
5795
5796         case TYP_BOOL:
5797             ival = *((bool*)fldAddr);
5798             goto IVAL_COMMON;
5799
5800         case TYP_BYTE:
5801             ival = *((signed char*)fldAddr);
5802             goto IVAL_COMMON;
5803
5804         case TYP_UBYTE:
5805             ival = *((unsigned char*)fldAddr);
5806             goto IVAL_COMMON;
5807
5808         case TYP_SHORT:
5809             ival = *((short*)fldAddr);
5810             goto IVAL_COMMON;
5811
5812         case TYP_CHAR:
5813         case TYP_USHORT:
5814             ival = *((unsigned short*)fldAddr);
5815             goto IVAL_COMMON;
5816
5817         case TYP_UINT:
5818         case TYP_INT:
5819             ival = *((int*)fldAddr);
5820         IVAL_COMMON:
5821             op1 = gtNewIconNode(ival);
5822             break;
5823
5824         case TYP_LONG:
5825         case TYP_ULONG:
5826             lval = *((__int64*)fldAddr);
5827             op1  = gtNewLconNode(lval);
5828             break;
5829
5830         case TYP_FLOAT:
5831             dval = *((float*)fldAddr);
5832             op1  = gtNewDconNode(dval);
5833 #if !FEATURE_X87_DOUBLES
5834             // The X87 stack doesn't differentiate between float/double,
5835             // so R4 is treated as R8 there, but everybody else does differentiate
5836             op1->gtType = TYP_FLOAT;
5837 #endif // FEATURE_X87_DOUBLES
5838             break;
5839
5840         case TYP_DOUBLE:
5841             dval = *((double*)fldAddr);
5842             op1  = gtNewDconNode(dval);
5843             break;
5844
5845         default:
5846             assert(!"Unexpected lclTyp");
5847             break;
5848     }
5849
5850     return op1;
5851 }
5852
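//------------------------------------------------------------------------
// impImportStaticFieldAccess: build the tree for a static field access,
// using the access scheme reported by the EE (generic statics helper,
// shared statics helper, ready-to-run helper, or a direct field address).
// Returns the field's address or a load of its value, depending on the
// CORINFO_ACCESS_ADDRESS flag.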
5853 GenTreePtr Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
5854                                                 CORINFO_ACCESS_FLAGS    access,
5855                                                 CORINFO_FIELD_INFO*     pFieldInfo,
5856                                                 var_types               lclTyp)
5857 {
5858     GenTreePtr op1;
5859
5860     switch (pFieldInfo->fieldAccessor)
5861     {
5862         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
5863         {
5864             assert(!compIsForInlining());
5865
5866             // We first call a special helper to get the statics base pointer
5867             op1 = impParentClassTokenToHandle(pResolvedToken);
5868
5869             // compIsForInlining() is false, so we should never get NULL here
5870             assert(op1 != nullptr);
5871
5872             var_types type = TYP_BYREF;
5873
5874             switch (pFieldInfo->helper)
5875             {
5876                 case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
5877                     type = TYP_I_IMPL;
5878                     break;
5879                 case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
5880                 case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
5881                 case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
5882                     break;
5883                 default:
5884                     assert(!"unknown generic statics helper");
5885                     break;
5886             }
5887
5888             op1 = gtNewHelperCallNode(pFieldInfo->helper, type, 0, gtNewArgList(op1));
5889
5890             FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5891             op1              = gtNewOperNode(GT_ADD, type, op1,
5892                                 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
5893         }
5894         break;
5895
5896         case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
5897         {
5898 #ifdef FEATURE_READYTORUN_COMPILER
5899             if (opts.IsReadyToRun())
5900             {
5901                 unsigned callFlags = 0;
5902
5903                 if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
5904                 {
5905                     callFlags |= GTF_CALL_HOISTABLE;
5906                 }
5907
5908                 op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF, callFlags);
5909
5910                 op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
5911             }
5912             else
5913 #endif
5914             {
5915                 op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper);
5916             }
5917
5918             {
5919                 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5920                 op1              = gtNewOperNode(GT_ADD, op1->TypeGet(), op1,
5921                                     new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, pFieldInfo->offset, fs));
5922             }
5923             break;
5924         }
5925 #if COR_JIT_EE_VERSION > 460
5926         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
5927         {
5928 #ifdef FEATURE_READYTORUN_COMPILER
5929             noway_assert(opts.IsReadyToRun());
5930             CORINFO_LOOKUP_KIND kind = info.compCompHnd->getLocationOfThisType(info.compMethodHnd);
5931             assert(kind.needsRuntimeLookup);
5932
5933             GenTreePtr      ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);
5934             GenTreeArgList* args    = gtNewArgList(ctxTree);
5935
5936             unsigned callFlags = 0;
5937
5938             if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
5939             {
5940                 callFlags |= GTF_CALL_HOISTABLE;
5941             }
5942             var_types type = TYP_BYREF;
5943             op1            = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, callFlags, args);
5944
5945             op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
5946             FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5947             op1              = gtNewOperNode(GT_ADD, type, op1,
5948                                 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
5949 #else
5950             unreached();
5951 #endif // FEATURE_READYTORUN_COMPILER
5952         }
5953         break;
5954 #endif // COR_JIT_EE_VERSION > 460
5955         default:
5956         {
5957             if (!(access & CORINFO_ACCESS_ADDRESS))
5958             {
5959                 // In the future, it may be better to just create the right tree here instead of folding it later.
5960                 op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField);
5961
5962                 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
5963                 {
5964                     op1->gtType = TYP_REF; // points at boxed object
5965                     FieldSeqNode* firstElemFldSeq =
5966                         GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
5967                     op1 =
5968                         gtNewOperNode(GT_ADD, TYP_BYREF, op1,
5969                                       new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), firstElemFldSeq));
5970
5971                     if (varTypeIsStruct(lclTyp))
5972                     {
5973                         // Constructor adds GTF_GLOB_REF.  Note that this is *not* GTF_EXCEPT.
5974                         op1 = gtNewObjNode(pFieldInfo->structType, op1);
5975                     }
5976                     else
5977                     {
5978                         op1 = gtNewOperNode(GT_IND, lclTyp, op1);
5979                         op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING;
5980                     }
5981                 }
5982
5983                 return op1;
5984             }
5985             else
5986             {
5987                 void** pFldAddr = nullptr;
5988                 void*  fldAddr  = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr);
5989
5990                 FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5991
5992                 /* Create the data member node */
5993                 if (pFldAddr == nullptr)
5994                 {
5995                     op1 = gtNewIconHandleNode((size_t)fldAddr, GTF_ICON_STATIC_HDL, fldSeq);
5996                 }
5997                 else
5998                 {
5999                     op1 = gtNewIconHandleNode((size_t)pFldAddr, GTF_ICON_STATIC_HDL, fldSeq);
6000
6001                     // There are two cases here, either the static is RVA based,
6002                     // in which case the type of the FIELD node is not a GC type
6003                     // and the handle to the RVA is a TYP_I_IMPL.  Or the FIELD node is
6004                     // a GC type and the handle to it is a TYP_BYREF in the GC heap
6005                     // because handles to statics now go into the large object heap
6006
6007                     var_types handleTyp = (var_types)(varTypeIsGC(lclTyp) ? TYP_BYREF : TYP_I_IMPL);
6008                     op1                 = gtNewOperNode(GT_IND, handleTyp, op1);
6009                     op1->gtFlags |= GTF_IND_INVARIANT | GTF_IND_NONFAULTING;
6010                 }
6011             }
6012             break;
6013         }
6014     }
6015
6016     if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
6017     {
6018         op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
6019
6020         FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
6021
6022         op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
6023                             new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), fldSeq));
6024     }
6025
6026     if (!(access & CORINFO_ACCESS_ADDRESS))
6027     {
6028         op1 = gtNewOperNode(GT_IND, lclTyp, op1);
6029         op1->gtFlags |= GTF_GLOB_REF;
6030     }
6031
6032     return op1;
6033 }
6034
6035 // In general, try to call this before most of the verification work.  Most people expect the access
6036 // exceptions before the verification exceptions.  If you do this after, that usually doesn't happen.  It
6037 // turns out that if you can't access something, we also think that you're unverifiable for other reasons.
6038 void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6039 {
6040     if (result != CORINFO_ACCESS_ALLOWED)
6041     {
6042         impHandleAccessAllowedInternal(result, helperCall);
6043     }
6044 }
6045
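// Handles the disallowed-access cases: when importing for verification only,
// an illegal access throws immediately; otherwise the helper call described by
// helperCall is inserted so the check (or throw) happens at run time.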
6046 void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6047 {
6048     switch (result)
6049     {
6050         case CORINFO_ACCESS_ALLOWED:
6051             break;
6052         case CORINFO_ACCESS_ILLEGAL:
6053             // if we're verifying, then we need to reject the illegal access to ensure that we don't think the
6054             // method is verifiable.  Otherwise, delay the exception to runtime.
6055             if (compIsForImportOnly())
6056             {
6057                 info.compCompHnd->ThrowExceptionForHelper(helperCall);
6058             }
6059             else
6060             {
6061                 impInsertHelperCall(helperCall);
6062             }
6063             break;
6064         case CORINFO_ACCESS_RUNTIME_CHECK:
6065             impInsertHelperCall(helperCall);
6066             break;
6067     }
6068 }
6069
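// Builds the argument list described by helperInfo, creates the corresponding
// helper call, and appends it to the current statement list.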
6070 void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo)
6071 {
6072     // Construct the argument list
6073     GenTreeArgList* args = nullptr;
6074     assert(helperInfo->helperNum != CORINFO_HELP_UNDEF);
6075     for (unsigned i = helperInfo->numArgs; i > 0; --i)
6076     {
6077         const CORINFO_HELPER_ARG& helperArg  = helperInfo->args[i - 1];
6078         GenTreePtr                currentArg = nullptr;
6079         switch (helperArg.argType)
6080         {
6081             case CORINFO_HELPER_ARG_TYPE_Field:
6082                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
6083                     info.compCompHnd->getFieldClass(helperArg.fieldHandle));
6084                 currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle);
6085                 break;
6086             case CORINFO_HELPER_ARG_TYPE_Method:
6087                 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle);
6088                 currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle);
6089                 break;
6090             case CORINFO_HELPER_ARG_TYPE_Class:
6091                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle);
6092                 currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle);
6093                 break;
6094             case CORINFO_HELPER_ARG_TYPE_Module:
6095                 currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle);
6096                 break;
6097             case CORINFO_HELPER_ARG_TYPE_Const:
6098                 currentArg = gtNewIconNode(helperArg.constant);
6099                 break;
6100             default:
6101                 NO_WAY("Illegal helper arg type");
6102         }
6103         args = (currentArg == nullptr) ? gtNewArgList(currentArg) : gtNewListNode(currentArg, args);
6104     }
6105
6106     /* TODO-Review:
6107      * Mark as CSE'able, and hoistable.  Consider marking hoistable unless you're in the inlinee.
6108      * Also, consider sticking this in the first basic block.
6109      */
6110     GenTreePtr callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, GTF_EXCEPT, args);
6111     impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6112 }
6113
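// On CoreCLR, if the EE reports that creating the delegate is not allowed,
// append a call to the delegate security check helper, which performs the
// check (and may throw) at run time.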
6114 void Compiler::impInsertCalloutForDelegate(CORINFO_METHOD_HANDLE callerMethodHnd,
6115                                            CORINFO_METHOD_HANDLE calleeMethodHnd,
6116                                            CORINFO_CLASS_HANDLE  delegateTypeHnd)
6117 {
6118 #ifdef FEATURE_CORECLR
6119     if (!info.compCompHnd->isDelegateCreationAllowed(delegateTypeHnd, calleeMethodHnd))
6120     {
6121         // Call the JIT_DelegateSecurityCheck helper before calling the actual function.
6122         // This helper throws an exception if the CLR host disallows the call.
6123
6124         GenTreePtr helper = gtNewHelperCallNode(CORINFO_HELP_DELEGATE_SECURITY_CHECK, TYP_VOID, GTF_EXCEPT,
6125                                                 gtNewArgList(gtNewIconEmbClsHndNode(delegateTypeHnd),
6126                                                              gtNewIconEmbMethHndNode(calleeMethodHnd)));
6127         // Append the callout statement
6128         impAppendTree(helper, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6129     }
6130 #endif // FEATURE_CORECLR
6131 }
6132
6133 // Checks whether the return types of the caller and callee are compatible
6134 // so that the callee can be tail called. Note that here we don't check
6135 // compatibility in the IL Verifier sense, but only that the return type
6136 // sizes are equal and the values are returned in the same return register.
6137 bool Compiler::impTailCallRetTypeCompatible(var_types            callerRetType,
6138                                             CORINFO_CLASS_HANDLE callerRetTypeClass,
6139                                             var_types            calleeRetType,
6140                                             CORINFO_CLASS_HANDLE calleeRetTypeClass)
6141 {
6142     // Note that we cannot relax this condition with genActualType() as the
6143     // calling convention dictates that the caller of a function with a small
6144     // typed return value is responsible for normalizing the return value.
6145     if (callerRetType == calleeRetType)
6146     {
6147         return true;
6148     }
6149
6150 #if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
6151     // Jit64 compat:
6152     if (callerRetType == TYP_VOID)
6153     {
6154         // This needs to be allowed to support the following IL pattern that Jit64 allows:
6155         //     tail.call
6156         //     pop
6157         //     ret
6158         //
6159         // Note that the above IL pattern is not valid as per IL verification rules.
6160         // Therefore, only full trust code can take advantage of this pattern.
6161         return true;
6162     }
6163
6164     // These checks return true if the return value type sizes are the same and
6165     // get returned in the same return register, i.e. the caller doesn't need to normalize the
6166     // return value. Some of the tail calls permitted by the checks below would have
6167     // been rejected by the IL Verifier before we reached here.  Therefore, only full
6168     // trust code can make those tail calls.
6169     unsigned callerRetTypeSize = 0;
6170     unsigned calleeRetTypeSize = 0;
6171     bool     isCallerRetTypMBEnreg =
6172         VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize, true);
6173     bool isCalleeRetTypMBEnreg =
6174         VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize, true);
6175
6176     if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg)
6177     {
6178         return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
6179     }
6180 #endif // _TARGET_AMD64_ || _TARGET_ARM64_
6181
6182     return false;
6183 }
6184
6185 // For prefixFlags
6186 enum
6187 {
6188     PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix
6189     PREFIX_TAILCALL_IMPLICIT =
6190         0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix
6191     PREFIX_TAILCALL    = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT),
6192     PREFIX_VOLATILE    = 0x00000100,
6193     PREFIX_UNALIGNED   = 0x00001000,
6194     PREFIX_CONSTRAINED = 0x00010000,
6195     PREFIX_READONLY    = 0x00100000
6196 };
6197
6198 /********************************************************************************
6199  *
6200  * Returns true if the current opcode and the opcodes following it correspond
6201  * to a supported tail call IL pattern.
6202  *
6203  */
6204 bool Compiler::impIsTailCallILPattern(bool        tailPrefixed,
6205                                       OPCODE      curOpcode,
6206                                       const BYTE* codeAddrOfNextOpcode,
6207                                       const BYTE* codeEnd,
6208                                       bool        isRecursive,
6209                                       bool*       isCallPopAndRet /* = nullptr */)
6210 {
6211     // Bail out if the current opcode is not a call.
6212     if (!impOpcodeIsCallOpcode(curOpcode))
6213     {
6214         return false;
6215     }
6216
6217 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6218     // If shared ret tail opt is not enabled, we will enable
6219     // it for recursive methods.
6220     if (isRecursive)
6221 #endif
6222     {
6223         // We can actually handle the case where the ret is in a fall-through block, as long as that is the only
6224         // part of the sequence. Make sure we don't go past the end of the IL, however.
6225         codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize);
6226     }
6227
6228     // Bail out if there is no next opcode after call
6229     if (codeAddrOfNextOpcode >= codeEnd)
6230     {
6231         return false;
6232     }
6233
6234     // Scan the opcodes to look for the following IL patterns if either
6235     //   i) the call is not tail prefixed (i.e. implicit tail call) or
6236     //  ii) if tail prefixed, IL verification is not needed for the method.
6237     //
6238     // Only in the above two cases we can allow the below tail call patterns
6239     // violating ECMA spec.
6240     //
6241     // Pattern1:
6242     //       call
6243     //       nop*
6244     //       ret
6245     //
6246     // Pattern2:
6247     //       call
6248     //       nop*
6249     //       pop
6250     //       nop*
6251     //       ret
6252     int    cntPop = 0;
6253     OPCODE nextOpcode;
6254
6255 #ifdef _TARGET_AMD64_
6256     do
6257     {
6258         nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6259         codeAddrOfNextOpcode += sizeof(__int8);
6260     } while ((codeAddrOfNextOpcode < codeEnd) &&         // Haven't reached end of method
6261              (!tailPrefixed || !tiVerificationNeeded) && // Not ".tail" prefixed or method requires no IL verification
6262              ((nextOpcode == CEE_NOP) || ((nextOpcode == CEE_POP) && (++cntPop == 1)))); // Next opcode = nop or exactly
6263                                                                                          // one pop seen so far.
6264 #else
6265     nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6266 #endif
6267
6268     if (isCallPopAndRet)
6269     {
6270         // Allow call+pop+ret to be tail call optimized if caller ret type is void
6271         *isCallPopAndRet = (nextOpcode == CEE_RET) && (cntPop == 1);
6272     }
6273
6274 #ifdef _TARGET_AMD64_
6275     // Jit64 Compat:
6276     // Tail call IL pattern could be either of the following
6277     // 1) call/callvirt/calli + ret
6278     // 2) call/callvirt/calli + pop + ret in a method returning void.
6279     return (nextOpcode == CEE_RET) && ((cntPop == 0) || ((cntPop == 1) && (info.compRetType == TYP_VOID)));
6280 #else //!_TARGET_AMD64_
6281     return (nextOpcode == CEE_RET) && (cntPop == 0);
6282 #endif
6283 }
6284
6285 /*****************************************************************************
6286  *
6287  * Determine whether the call could be converted to an implicit tail call
6288  *
6289  */
6290 bool Compiler::impIsImplicitTailCallCandidate(
6291     OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive)
6292 {
6293
6294 #if FEATURE_TAILCALL_OPT
6295     if (!opts.compTailCallOpt)
6296     {
6297         return false;
6298     }
6299
6300     if (opts.compDbgCode || opts.MinOpts())
6301     {
6302         return false;
6303     }
6304
6305     // must not be tail prefixed
6306     if (prefixFlags & PREFIX_TAILCALL_EXPLICIT)
6307     {
6308         return false;
6309     }
6310
6311 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6312     // The block containing the call must be marked as BBJ_RETURN.
6313     // We allow shared ret tail call optimization on recursive calls even under
6314     // !FEATURE_TAILCALL_OPT_SHARED_RETURN.
6315     if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN))
6316         return false;
6317 #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN
6318
6319     // must be call+ret or call+pop+ret
6320     if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive))
6321     {
6322         return false;
6323     }
6324
6325     return true;
6326 #else
6327     return false;
6328 #endif // FEATURE_TAILCALL_OPT
6329 }
6330
6331 //------------------------------------------------------------------------
6332 // impImportCall: import a call-inspiring opcode
6333 //
6334 // Arguments:
6335 //    opcode                    - opcode that inspires the call
6336 //    pResolvedToken            - resolved token for the call target
6337 //    pConstrainedResolvedToken - resolved constraint token (or nullptr)
6338 //    newobjThis                - tree for the this pointer or uninitialized newobj temp (or nullptr)
6339 //    prefixFlags               - IL prefix flags for the call
6340 //    callInfo                  - EE supplied info for the call
6341 //    rawILOffset               - IL offset of the opcode
6342 //
6343 // Returns:
6344 //    Type of the call's return value.
6345 //
6346 // Notes:
6347 //    opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ.
6348 //
6349 //    For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated
6350 //    uninitialized object.
6351
6352 #ifdef _PREFAST_
6353 #pragma warning(push)
6354 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
6355 #endif
6356
6357 var_types Compiler::impImportCall(OPCODE                  opcode,
6358                                   CORINFO_RESOLVED_TOKEN* pResolvedToken,
6359                                   CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
6360                                   GenTreePtr              newobjThis,
6361                                   int                     prefixFlags,
6362                                   CORINFO_CALL_INFO*      callInfo,
6363                                   IL_OFFSET               rawILOffset)
6364 {
6365     assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI);
6366
6367     IL_OFFSETX             ilOffset                       = impCurILOffset(rawILOffset, true);
6368     var_types              callRetTyp                     = TYP_COUNT;
6369     CORINFO_SIG_INFO*      sig                            = nullptr;
6370     CORINFO_METHOD_HANDLE  methHnd                        = nullptr;
6371     CORINFO_CLASS_HANDLE   clsHnd                         = nullptr;
6372     unsigned               clsFlags                       = 0;
6373     unsigned               mflags                         = 0;
6374     unsigned               argFlags                       = 0;
6375     GenTreePtr             call                           = nullptr;
6376     GenTreeArgList*        args                           = nullptr;
6377     CORINFO_THIS_TRANSFORM constraintCallThisTransform    = CORINFO_NO_THIS_TRANSFORM;
6378     CORINFO_CONTEXT_HANDLE exactContextHnd                = nullptr;
6379     BOOL                   exactContextNeedsRuntimeLookup = FALSE;
6380     bool                   canTailCall                    = true;
6381     const char*            szCanTailCallFailReason        = nullptr;
6382     int                    tailCall                       = prefixFlags & PREFIX_TAILCALL;
6383     bool                   readonlyCall                   = (prefixFlags & PREFIX_READONLY) != 0;
6384
6385     // Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could
6386     // do that before tailcalls, but that is probably not the intended
6387     // semantic. So just disallow tailcalls from synchronized methods.
6388     // Also, popping arguments in a varargs function is more work and NYI.
6389     // If we have a security object, we have to keep our frame around for callers
6390     // to see any imperative security.
6391     if (info.compFlags & CORINFO_FLG_SYNCH)
6392     {
6393         canTailCall             = false;
6394         szCanTailCallFailReason = "Caller is synchronized";
6395     }
6396 #if !FEATURE_FIXED_OUT_ARGS
6397     else if (info.compIsVarArgs)
6398     {
6399         canTailCall             = false;
6400         szCanTailCallFailReason = "Caller is varargs";
6401     }
6402 #endif // FEATURE_FIXED_OUT_ARGS
6403     else if (opts.compNeedSecurityCheck)
6404     {
6405         canTailCall             = false;
6406         szCanTailCallFailReason = "Caller requires a security check.";
6407     }
6408
6409     // We only need to cast the return value of pinvoke inlined calls that return small types
6410
6411     // TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop
6412     // widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there.
6413     // The existing x64 JIT doesn't bother widening all types to int, so we have to assume for
6414     // the time being that the callee might be compiled by the other JIT and thus the return
6415     // value will need to be widened by us (or not widened at all...)
6416
6417     // ReadyToRun code sticks with the default calling convention, which does not widen small return types.
6418
6419     bool checkForSmallType  = opts.IsJit64Compat() || opts.IsReadyToRun();
6420     bool bIntrinsicImported = false;
6421
6422     CORINFO_SIG_INFO calliSig;
6423     GenTreeArgList*  extraArg = nullptr;
6424
6425     /*-------------------------------------------------------------------------
6426      * First create the call node
6427      */
6428
6429     if (opcode == CEE_CALLI)
6430     {
6431         /* Get the call site sig */
6432         eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &calliSig);
6433
6434         callRetTyp = JITtype2varType(calliSig.retType);
6435
6436         call = impImportIndirectCall(&calliSig, ilOffset);
6437
6438         // We don't know the target method, so we have to infer the flags, or
6439         // assume the worst-case.
6440         mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
6441
6442 #ifdef DEBUG
6443         if (verbose)
6444         {
6445             unsigned structSize =
6446                 (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0;
6447             printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
6448                    opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
6449         }
6450 #endif
6451         // This should be checked in impImportBlockCode.
6452         assert(!compIsForInlining() || !(impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY));
6453
6454         sig = &calliSig;
6455
6456 #ifdef DEBUG
6457         // We cannot lazily obtain the signature of a CALLI call because it has no method
6458         // handle that we can use, so we need to save its full call signature here.
6459         assert(call->gtCall.callSig == nullptr);
6460         call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
6461         *call->gtCall.callSig = calliSig;
6462 #endif // DEBUG
6463     }
6464     else // (opcode != CEE_CALLI)
6465     {
6466         CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Count;
6467
6468         // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to
6469         // supply the instantiation parameters necessary to make direct calls to underlying
6470         // shared generic code, rather than calling through instantiating stubs.  If the
6471         // returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT
6472         // must indeed pass an instantiation parameter.
6473
6474         methHnd = callInfo->hMethod;
6475
6476         sig        = &(callInfo->sig);
6477         callRetTyp = JITtype2varType(sig->retType);
6478
6479         mflags = callInfo->methodFlags;
6480
6481 #ifdef DEBUG
6482         if (verbose)
6483         {
6484             unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0;
6485             printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
6486                    opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
6487         }
6488 #endif
6489         if (compIsForInlining())
6490         {
6491             /* Does this call site have security boundary restrictions? */
6492
6493             if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
6494             {
6495                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
6496                 return callRetTyp;
6497             }
6498
6499             /* Does the inlinee need a security check token on the frame */
6500
6501             if (mflags & CORINFO_FLG_SECURITYCHECK)
6502             {
6503                 compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
6504                 return callRetTyp;
6505             }
6506
6507             /* Does the inlinee use StackCrawlMark */
6508
6509             if (mflags & CORINFO_FLG_DONT_INLINE_CALLER)
6510             {
6511                 compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK);
6512                 return callRetTyp;
6513             }
6514
6515             /* For now ignore delegate invoke */
6516
6517             if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
6518             {
6519                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_DELEGATE_INVOKE);
6520                 return callRetTyp;
6521             }
6522
6523             /* For now ignore varargs */
6524             if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6525             {
6526                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS);
6527                 return callRetTyp;
6528             }
6529
6530             if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
6531             {
6532                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
6533                 return callRetTyp;
6534             }
6535
6536             if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT))
6537             {
6538                 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL);
6539                 return callRetTyp;
6540             }
6541         }
6542
6543         clsHnd = pResolvedToken->hClass;
6544
6545         clsFlags = callInfo->classFlags;
6546
6547 #ifdef DEBUG
6548         // If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute.
6549
6550         // This recognition should really be done by knowing the methHnd of the relevant Mark method(s).
6551         // These should be in mscorlib.h, and available through a JIT/EE interface call.
6552         const char* modName;
6553         const char* className;
6554         const char* methodName;
6555         if ((className = eeGetClassName(clsHnd)) != nullptr &&
6556             strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 &&
6557             (methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0)
6558         {
6559             return impImportJitTestLabelMark(sig->numArgs);
6560         }
6561 #endif // DEBUG
6562
6563         // <NICE> Factor this into getCallInfo </NICE>
6564         if ((mflags & CORINFO_FLG_INTRINSIC) && !pConstrainedResolvedToken)
6565         {
6566             call = impIntrinsic(newobjThis, clsHnd, methHnd, sig, pResolvedToken->token, readonlyCall,
6567                                 (canTailCall && (tailCall != 0)), &intrinsicID);
6568
6569             if (call != nullptr)
6570             {
6571                 assert(!(mflags & CORINFO_FLG_VIRTUAL) || (mflags & CORINFO_FLG_FINAL) ||
6572                        (clsFlags & CORINFO_FLG_FINAL));
6573
6574 #ifdef FEATURE_READYTORUN_COMPILER
6575                 if (call->OperGet() == GT_INTRINSIC)
6576                 {
6577                     if (opts.IsReadyToRun())
6578                     {
6579                         noway_assert(callInfo->kind == CORINFO_CALL);
6580                         call->gtIntrinsic.gtEntryPoint = callInfo->codePointerLookup.constLookup;
6581                     }
6582                     else
6583                     {
6584                         call->gtIntrinsic.gtEntryPoint.addr = nullptr;
6585                     }
6586                 }
6587 #endif
6588
6589                 bIntrinsicImported = true;
6590                 goto DONE_CALL;
6591             }
6592         }
6593
6594 #ifdef FEATURE_SIMD
6595         if (featureSIMD)
6596         {
6597             call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, pResolvedToken->token);
6598             if (call != nullptr)
6599             {
6600                 bIntrinsicImported = true;
6601                 goto DONE_CALL;
6602             }
6603         }
6604 #endif // FEATURE_SIMD
6605
6606         if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT))
6607         {
6608             NO_WAY("Virtual call to a function added via EnC is not supported");
6609             goto DONE_CALL;
6610         }
6611
6612         if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT &&
6613             (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6614             (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG)
6615         {
6616             BADCODE("Bad calling convention");
6617         }
6618
6619         //-------------------------------------------------------------------------
6620         //  Construct the call node
6621         //
6622         // Work out what sort of call we're making.
6623         // Dispense with virtual calls implemented via LDVIRTFTN immediately.
6624
6625         constraintCallThisTransform = callInfo->thisTransform;
6626
6627         exactContextHnd                = callInfo->contextHandle;
6628         exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup;
6629
6630         // A recursive call is treated as a loop back to the beginning of the method.
6631         if (methHnd == info.compMethodHnd)
6632         {
6633 #ifdef DEBUG
6634             if (verbose)
6635             {
6636                 JITDUMP("\nFound recursive call in the method. Mark BB%02u to BB%02u as having a backward branch.\n",
6637                         fgFirstBB->bbNum, compCurBB->bbNum);
6638             }
6639 #endif
6640             fgMarkBackwardJump(fgFirstBB, compCurBB);
6641         }
6642
6643         switch (callInfo->kind)
6644         {
6645
6646             case CORINFO_VIRTUALCALL_STUB:
6647             {
6648                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6649                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6650                 if (callInfo->stubLookup.lookupKind.needsRuntimeLookup)
6651                 {
6652
6653                     if (compIsForInlining())
6654                     {
6655                         // Don't import runtime lookups when inlining
6656                         // Inlining has to be aborted in such a case
6657                         /* XXX Fri 3/20/2009
6658                          * By the way, this would never succeed.  If the handle lookup is into the generic
6659                          * dictionary for a candidate, you'll generate different dictionary offsets and the
6660                          * inlined code will crash.
6661                          *
6662                          * To anyone reviewing this code: when could this ever succeed in the future?  It'll
6663                          * always have a handle lookup.  These lookups are safe intra-module, but we're just
6664                          * failing here.
6665                          */
6666                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE);
6667                         return callRetTyp;
6668                     }
6669
6670                     GenTreePtr stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd);
6671                     assert(!compDonotInline());
6672
6673                     // This is the rough code to set up an indirect stub call
6674                     assert(stubAddr != nullptr);
6675
6676                     // The stubAddr may be a
6677                     // complex expression. As it is evaluated after the args,
6678                     // it may cause registered args to be spilled. Simply spill it.
6679
6680                     unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup"));
6681                     impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_ALL);
6682                     stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6683
6684                     // Create the actual call node
6685
6686                     assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6687                            (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6688
6689                     call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr);
6690
6691                     call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT);
6692                     call->gtFlags |= GTF_CALL_VIRT_STUB;
6693
6694 #ifdef _TARGET_X86_
6695                     // No tailcalls allowed for these yet...
6696                     canTailCall             = false;
6697                     szCanTailCallFailReason = "VirtualCall with runtime lookup";
6698 #endif
6699                 }
6700                 else
6701                 {
6702                     // OK, the stub is available at compile time.
6703
6704                     call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6705                     call->gtCall.gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr;
6706                     call->gtFlags |= GTF_CALL_VIRT_STUB;
6707                     assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE);
6708                     if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE)
6709                     {
6710                         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
6711                     }
6712                 }
6713
6714 #ifdef FEATURE_READYTORUN_COMPILER
6715                 if (opts.IsReadyToRun())
6716                 {
6717                     // Null check is sometimes needed for ready to run to handle
6718                     // non-virtual <-> virtual changes between versions
6719                     if (callInfo->nullInstanceCheck)
6720                     {
6721                         call->gtFlags |= GTF_CALL_NULLCHECK;
6722                     }
6723                 }
6724 #endif
6725
6726                 break;
6727             }
6728
6729             case CORINFO_VIRTUALCALL_VTABLE:
6730             {
6731                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6732                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6733                 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6734                 call->gtFlags |= GTF_CALL_VIRT_VTABLE;
6735                 break;
6736             }
6737
6738             case CORINFO_VIRTUALCALL_LDVIRTFTN:
6739             {
6740                 if (compIsForInlining())
6741                 {
6742                     compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN);
6743                     return callRetTyp;
6744                 }
6745
6746                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6747                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6748                 // OK, We've been told to call via LDVIRTFTN, so just
6749                 // take the call now....
6750
6751                 args = impPopList(sig->numArgs, &argFlags, sig);
6752
6753                 GenTreePtr thisPtr = impPopStack().val;
6754                 thisPtr            = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
6755                 if (compDonotInline())
6756                 {
6757                     return callRetTyp;
6758                 }
6759
6760                 // Clone the (possibly transformed) "this" pointer
6761                 GenTreePtr thisPtrCopy;
6762                 thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
6763                                        nullptr DEBUGARG("LDVIRTFTN this pointer"));
6764
6765                 GenTreePtr fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
6766                 if (compDonotInline())
6767                 {
6768                     return callRetTyp;
6769                 }
6770
6771                 thisPtr = nullptr; // can't reuse it
6772
6773                 // Now make an indirect call through the function pointer
6774
6775                 unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer"));
6776                 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
6777                 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6778
6779                 // Create the actual call node
6780
6781                 call                    = gtNewIndCallNode(fptr, callRetTyp, args, ilOffset);
6782                 call->gtCall.gtCallObjp = thisPtrCopy;
6783                 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6784
6785 #ifdef FEATURE_READYTORUN_COMPILER
6786                 if (opts.IsReadyToRun())
6787                 {
6788                     // Null check is needed for ready to run to handle
6789                     // non-virtual <-> virtual changes between versions
6790                     call->gtFlags |= GTF_CALL_NULLCHECK;
6791                 }
6792 #endif
6793
6794                 // Sine we are jumping over some code, check that its OK to skip that code
6795                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6796                        (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6797                 goto DONE;
6798             }
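            // Editorial sketch (annotation only): the LDVIRTFTN path above clones the
            // (possibly transformed) 'this' so one copy feeds the function-pointer lookup
            // and the other becomes the call's object argument, roughly:
            //
            //   thisClone = clone(this)                    // impCloneExpr
            //   fptr      = ldvirtftn(this, token)         // impImportLdvirtftn
            //   tmp       = fptr                           // spilled to a local
            //   call      = calli tmp(args), gtCallObjp = thisClone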
6799
6800             case CORINFO_CALL:
6801             {
6802                 // This is for a non-virtual, non-interface etc. call
6803                 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6804
6805                 // We remove the nullcheck for the GetType call intrinsic.
6806                 // TODO-CQ: JIT64 does not introduce the null check for many more helper calls
6807                 // and intrinsics.
6808                 if (callInfo->nullInstanceCheck &&
6809                     !((mflags & CORINFO_FLG_INTRINSIC) != 0 && (intrinsicID == CORINFO_INTRINSIC_Object_GetType)))
6810                 {
6811                     call->gtFlags |= GTF_CALL_NULLCHECK;
6812                 }
6813
6814 #ifdef FEATURE_READYTORUN_COMPILER
6815                 if (opts.IsReadyToRun())
6816                 {
6817                     call->gtCall.setEntryPoint(callInfo->codePointerLookup.constLookup);
6818                 }
6819 #endif
6820                 break;
6821             }
6822
6823             case CORINFO_CALL_CODE_POINTER:
6824             {
6825                 // The EE has asked us to call by computing a code pointer and then doing an
6826                 // indirect call.  This is because a runtime lookup is required to get the code entry point.
6827
6828                 // These calls always follow a uniform calling convention, i.e. no extra hidden params
6829                 assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0);
6830
6831                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG);
6832                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6833
6834                 GenTreePtr fptr =
6835                     impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod);
6836
6837                 if (compDonotInline())
6838                 {
6839                     return callRetTyp;
6840                 }
6841
6842                 // Now make an indirect call through the function pointer
6843
6844                 unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer"));
6845                 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
6846                 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6847
6848                 call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
6849                 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6850                 if (callInfo->nullInstanceCheck)
6851                 {
6852                     call->gtFlags |= GTF_CALL_NULLCHECK;
6853                 }
6854
6855                 break;
6856             }
6857
6858             default:
6859                 assert(!"unknown call kind");
6860                 break;
6861         }
6862
6863         //-------------------------------------------------------------------------
6864         // Set more flags
6865
6866         PREFIX_ASSUME(call != nullptr);
6867
6868         if (mflags & CORINFO_FLG_NOGCCHECK)
6869         {
6870             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK;
6871         }
6872
6873         // Mark call if it's one of the ones we will maybe treat as an intrinsic
6874         if (intrinsicID == CORINFO_INTRINSIC_Object_GetType || intrinsicID == CORINFO_INTRINSIC_TypeEQ ||
6875             intrinsicID == CORINFO_INTRINSIC_TypeNEQ || intrinsicID == CORINFO_INTRINSIC_GetCurrentManagedThread ||
6876             intrinsicID == CORINFO_INTRINSIC_GetManagedThreadId)
6877         {
6878             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC;
6879         }
6880     }
6881     assert(sig);
6882     assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set.
6883
6884     /* Some sanity checks */
6885
6886     // CALL_VIRT and NEWOBJ must have a THIS pointer
6887     assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS));
6888     // static bit and hasThis are negations of one another
6889     assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0));
6890     assert(call != nullptr);
6891
6892     /*-------------------------------------------------------------------------
6893      * Check special-cases etc
6894      */
6895
6896     /* Special case - Check if it is a call to Delegate.Invoke(). */
6897
6898     if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
6899     {
6900         assert(!compIsForInlining());
6901         assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6902         assert(mflags & CORINFO_FLG_FINAL);
6903
6904         /* Set the delegate flag */
6905         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV;
6906
6907         if (callInfo->secureDelegateInvoke)
6908         {
6909             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SECURE_DELEGATE_INV;
6910         }
6911
6912         if (opcode == CEE_CALLVIRT)
6913         {
6914             assert(mflags & CORINFO_FLG_FINAL);
6915
6916             /* It should have the GTF_CALL_NULLCHECK flag set. Reset it */
6917             assert(call->gtFlags & GTF_CALL_NULLCHECK);
6918             call->gtFlags &= ~GTF_CALL_NULLCHECK;
6919         }
6920     }
6921
6922     CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass;
6923     actualMethodRetTypeSigClass = sig->retTypeSigClass;
6924     if (varTypeIsStruct(callRetTyp))
6925     {
6926         callRetTyp   = impNormStructType(actualMethodRetTypeSigClass);
6927         call->gtType = callRetTyp;
6928     }
6929
6930 #if !FEATURE_VARARG
6931     /* Check for varargs */
6932     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
6933         (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6934     {
6935         BADCODE("Varargs not supported.");
6936     }
6937 #endif // !FEATURE_VARARG
6938
6939     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
6940         (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6941     {
6942         assert(!compIsForInlining());
6943
6944         /* Set the right flags */
6945
6946         call->gtFlags |= GTF_CALL_POP_ARGS;
6947         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VARARGS;
6948
6949         /* Can't allow tailcall for varargs as it is caller-pop. The caller
6950            will be expecting to pop a certain number of arguments, but if we
6951            tailcall to a function with a different number of arguments, we
6952            are hosed. There are ways around this (caller remembers esp value,
6953            varargs is not caller-pop, etc), but not worth it. */
6954         CLANG_FORMAT_COMMENT_ANCHOR;
6955
6956 #ifdef _TARGET_X86_
6957         if (canTailCall)
6958         {
6959             canTailCall             = false;
6960             szCanTailCallFailReason = "Callee is varargs";
6961         }
6962 #endif
6963
6964         /* Get the total number of arguments - this is already correct
6965          * for CALLI - for methods we have to get it from the call site */
6966
6967         if (opcode != CEE_CALLI)
6968         {
6969 #ifdef DEBUG
6970             unsigned numArgsDef = sig->numArgs;
6971 #endif
6972             eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, sig);
6973
6974 #ifdef DEBUG
6975             // We cannot lazily obtain the signature of a vararg call because using its method
6976             // handle will give us only the declared argument list, not the full argument list.
6977             assert(call->gtCall.callSig == nullptr);
6978             call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
6979             *call->gtCall.callSig = *sig;
6980 #endif
6981
6982             // For vararg calls we must be sure to load the return type of the
6983             // method actually being called, as well as the return type
6984             // specified in the vararg signature. With type equivalency, these types
6985             // may not be the same.
6986             if (sig->retTypeSigClass != actualMethodRetTypeSigClass)
6987             {
6988                 if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
6989                     sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR &&
6990                     sig->retType != CORINFO_TYPE_VAR)
6991                 {
6992                     // Make sure that all valuetypes (including enums) that we push are loaded.
6993                     // This is to guarantee that if a GC is triggered from the prestub of this method,
6994                     // all valuetypes in the method signature are already loaded.
6995                     // We need to be able to find the size of the valuetypes, but we cannot
6996                     // do a class-load from within GC.
6997                     info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass);
6998                 }
6999             }
7000
7001             assert(numArgsDef <= sig->numArgs);
7002         }
7003
7004         /* We will have "cookie" as the last argument but we cannot push
7005          * it on the operand stack because we may overflow, so we append it
7006          * to the arg list right after we pop the other arguments */
7007     }
7008
7009     if (mflags & CORINFO_FLG_SECURITYCHECK)
7010     {
7011         assert(!compIsForInlining());
7012
7013         // Need security prolog/epilog callouts when there is
7014         // imperative security in the method. This is to give security a
7015         // chance to do any setup in the prolog and cleanup in the epilog if needed.
7016
7017         if (compIsForInlining())
7018         {
7019             // Cannot handle this if the method being imported is itself an inlinee,
7020             // because an inlinee method does not have its own frame.
7021
7022             compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
7023             return callRetTyp;
7024         }
7025         else
7026         {
7027             tiSecurityCalloutNeeded = true;
7028
7029             // If the current method calls a method which needs a security check,
7030             // (i.e. the method being compiled has imperative security)
7031             // we need to reserve a slot for the security object in
7032             // the current method's stack frame
7033             opts.compNeedSecurityCheck = true;
7034         }
7035     }
7036
7037     //--------------------------- Inline NDirect ------------------------------
7038
7039     // For inline cases we technically should look at both the current
7040     // block and the call site block (or just the latter if we've
7041     // fused the EH trees). However the block-related checks pertain to
7042     // EH and we currently won't inline a method with EH. So for
7043     // inlinees, just checking the call site block is sufficient.
7044     {
7045         // New lexical block here to avoid compilation errors because of GOTOs.
7046         BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
7047         impCheckForPInvokeCall(call, methHnd, sig, mflags, block);
7048     }
7049
7050     if (call->gtFlags & GTF_CALL_UNMANAGED)
7051     {
7052         // We set up the unmanaged call by linking the frame, disabling GC, etc
7053         // This needs to be cleaned up on return
7054         if (canTailCall)
7055         {
7056             canTailCall             = false;
7057             szCanTailCallFailReason = "Callee is native";
7058         }
7059
7060         checkForSmallType = true;
7061
7062         impPopArgsForUnmanagedCall(call, sig);
7063
7064         goto DONE;
7065     }
7066     else if ((opcode == CEE_CALLI) && (((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_STDCALL) ||
7067                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_C) ||
7068                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_THISCALL) ||
7069                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_FASTCALL)))
7070     {
7071         if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig))
7072         {
7073             // Normally this only happens with inlining.
7074             // However, a generic method (or type) being NGENd into another module
7075             // can run into this issue as well.  There's no easy fall-back for NGEN,
7076             // so instead we fall back to JIT.
7077             if (compIsForInlining())
7078             {
7079                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE);
7080             }
7081             else
7082             {
7083                 IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)");
7084             }
7085
7086             return callRetTyp;
7087         }
7088
7089         GenTreePtr cookie = eeGetPInvokeCookie(sig);
7090
7091         // This cookie is required to be either a simple GT_CNS_INT or
7092         // an indirection of a GT_CNS_INT
7093         //
7094         GenTreePtr cookieConst = cookie;
7095         if (cookie->gtOper == GT_IND)
7096         {
7097             cookieConst = cookie->gtOp.gtOp1;
7098         }
7099         assert(cookieConst->gtOper == GT_CNS_INT);
7100
7101         // Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that
7102         // we won't allow this tree to participate in any CSE logic
7103         //
7104         cookie->gtFlags |= GTF_DONT_CSE;
7105         cookieConst->gtFlags |= GTF_DONT_CSE;
7106
7107         call->gtCall.gtCallCookie = cookie;
7108
7109         if (canTailCall)
7110         {
7111             canTailCall             = false;
7112             szCanTailCallFailReason = "PInvoke calli";
7113         }
7114     }
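    // Editorial sketch (annotation only): after the block above, an unmanaged CALLI carries
    // a PInvoke cookie in one of two shapes, neither of which may participate in CSE:
    //
    //   gtCallCookie = CNS_INT               (GTF_DONT_CSE)
    //   gtCallCookie = IND(CNS_INT)          (GTF_DONT_CSE on both nodes)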
7115
7116     /*-------------------------------------------------------------------------
7117      * Create the argument list
7118      */
7119
7120     //-------------------------------------------------------------------------
7121     // Special case - for varargs we have an implicit last argument
7122
7123     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
7124     {
7125         assert(!compIsForInlining());
7126
7127         void *varCookie, *pVarCookie;
7128         if (!info.compCompHnd->canGetVarArgsHandle(sig))
7129         {
7130             compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE);
7131             return callRetTyp;
7132         }
7133
7134         varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie);
7135         assert((!varCookie) != (!pVarCookie));
7136         GenTreePtr cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL);
7137
7138         assert(extraArg == nullptr);
7139         extraArg = gtNewArgList(cookie);
7140     }
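    // Editorial sketch (annotation only): for a CORINFO_CALLCONV_VARARG call the hidden
    // cookie built above becomes the extra, last argument of the call, i.e. conceptually
    //
    //   call(arg0, ..., argN, varargCookie)
    //
    // 'extraArg' is handed to impPopList below, so it is appended after the popped args.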
7141
7142     //-------------------------------------------------------------------------
7143     // Extra arg for shared generic code and array methods
7144     //
7145     // Extra argument containing instantiation information is passed in the
7146     // following circumstances:
7147     // (a) To the "Address" method on array classes; the extra parameter is
7148     //     the array's type handle (a TypeDesc)
7149     // (b) To shared-code instance methods in generic structs; the extra parameter
7150     //     is the struct's type handle (a vtable ptr)
7151     // (c) To shared-code per-instantiation non-generic static methods in generic
7152     //     classes and structs; the extra parameter is the type handle
7153     // (d) To shared-code generic methods; the extra parameter is an
7154     //     exact-instantiation MethodDesc
7155     //
7156     // We also set the exact type context associated with the call so we can
7157     // inline the call correctly later on.
7158
7159     if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE)
7160     {
7161         assert(call->gtCall.gtCallType == CT_USER_FUNC);
7162         if (clsHnd == nullptr)
7163         {
7164             NO_WAY("CALLI on parameterized type");
7165         }
7166
7167         assert(opcode != CEE_CALLI);
7168
7169         GenTreePtr instParam;
7170         BOOL       runtimeLookup;
7171
7172         // Instantiated generic method
7173         if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD)
7174         {
7175             CORINFO_METHOD_HANDLE exactMethodHandle =
7176                 (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7177
7178             if (!exactContextNeedsRuntimeLookup)
7179             {
7180 #ifdef FEATURE_READYTORUN_COMPILER
7181                 if (opts.IsReadyToRun())
7182                 {
7183                     instParam =
7184                         impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle);
7185                     if (instParam == nullptr)
7186                     {
7187                         return callRetTyp;
7188                     }
7189                 }
7190                 else
7191 #endif
7192                 {
7193                     instParam = gtNewIconEmbMethHndNode(exactMethodHandle);
7194                     info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle);
7195                 }
7196             }
7197             else
7198             {
7199                 instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7200                 if (instParam == nullptr)
7201                 {
7202                     return callRetTyp;
7203                 }
7204             }
7205         }
7206
7207         // otherwise must be an instance method in a generic struct,
7208         // a static method in a generic type, or a runtime-generated array method
7209         else
7210         {
7211             assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS);
7212             CORINFO_CLASS_HANDLE exactClassHandle =
7213                 (CORINFO_CLASS_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7214
7215             if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0)
7216             {
7217                 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD);
7218                 return callRetTyp;
7219             }
7220
7221             if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall)
7222             {
7223                 // We indicate "readonly" to the Address operation by using a null
7224                 // instParam.
7225                 instParam = gtNewIconNode(0, TYP_REF);
7226             }
7227
7228             if (!exactContextNeedsRuntimeLookup)
7229             {
7230 #ifdef FEATURE_READYTORUN_COMPILER
7231                 if (opts.IsReadyToRun())
7232                 {
7233                     instParam =
7234                         impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle);
7235                     if (instParam == nullptr)
7236                     {
7237                         return callRetTyp;
7238                     }
7239                 }
7240                 else
7241 #endif
7242                 {
7243                     instParam = gtNewIconEmbClsHndNode(exactClassHandle);
7244                     info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle);
7245                 }
7246             }
7247             else
7248             {
7249                 instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7250                 if (instParam == nullptr)
7251                 {
7252                     return callRetTyp;
7253                 }
7254             }
7255         }
7256
7257         assert(extraArg == nullptr);
7258         extraArg = gtNewArgList(instParam);
7259     }
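    // Editorial sketch (annotation only): with CORINFO_CALLCONV_PARAMTYPE the call gains a
    // hidden instantiation argument, so the imported call is conceptually
    //
    //   call(args..., instParam)
    //
    // where instParam is an exact MethodDesc for case (d), a class handle / vtable pointer
    // for cases (b) and (c), the array's type handle for case (a), or a runtime-lookup tree
    // when the exact context is not known until run time.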
7260
7261     // Inlining may need the exact type context (exactContextHnd) if we're inlining shared generic code, in particular
7262     // to inline 'polytypic' operations such as static field accesses, type tests and method calls which
7263     // rely on the exact context. The exactContextHnd is passed back to the JitInterface at appropriate points.
7264     // exactContextHnd is not currently required when inlining shared generic code into shared
7265     // generic code, since the inliner aborts whenever shared code polytypic operations are encountered
7266     // (e.g. anything marked needsRuntimeLookup)
7267     if (exactContextNeedsRuntimeLookup)
7268     {
7269         exactContextHnd = nullptr;
7270     }
7271
7272     //-------------------------------------------------------------------------
7273     // The main group of arguments
7274
7275     args = call->gtCall.gtCallArgs = impPopList(sig->numArgs, &argFlags, sig, extraArg);
7276
7277     if (args)
7278     {
7279         call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
7280     }
7281
7282     //-------------------------------------------------------------------------
7283     // The "this" pointer
7284
7285     if (!(mflags & CORINFO_FLG_STATIC) && !((opcode == CEE_NEWOBJ) && (newobjThis == nullptr)))
7286     {
7287         GenTreePtr obj;
7288
7289         if (opcode == CEE_NEWOBJ)
7290         {
7291             obj = newobjThis;
7292         }
7293         else
7294         {
7295             obj = impPopStack().val;
7296             obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform);
7297             if (compDonotInline())
7298             {
7299                 return callRetTyp;
7300             }
7301         }
7302
7303         /* Is this a virtual or interface call? */
7304
7305         if ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT)
7306         {
7307             /* only true object pointers can be virtual */
7308
7309             assert(obj->gtType == TYP_REF);
7310         }
7311         else
7312         {
7313             if (impIsThis(obj))
7314             {
7315                 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS;
7316             }
7317         }
7318
7319         /* Store the "this" value in the call */
7320
7321         call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT;
7322         call->gtCall.gtCallObjp = obj;
7323     }
7324
7325     //-------------------------------------------------------------------------
7326     // The "this" pointer for "newobj"
7327
7328     if (opcode == CEE_NEWOBJ)
7329     {
7330         if (clsFlags & CORINFO_FLG_VAROBJSIZE)
7331         {
7332             assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately
7333             // This is a 'new' of a variable sized object, where
7334             // the constructor is to return the object.  In this case
7335             // the constructor claims to return VOID but we know it
7336             // actually returns the new object
7337             assert(callRetTyp == TYP_VOID);
7338             callRetTyp   = TYP_REF;
7339             call->gtType = TYP_REF;
7340             impSpillSpecialSideEff();
7341
7342             impPushOnStack(call, typeInfo(TI_REF, clsHnd));
7343         }
7344         else
7345         {
7346             if (clsFlags & CORINFO_FLG_DELEGATE)
7347             {
7348                 // The new inliner morphs it in impImportCall.
7349                 // This will allow us to inline the call to the delegate constructor.
7350                 call = fgOptimizeDelegateConstructor(call, &exactContextHnd);
7351             }
7352
7353             if (!bIntrinsicImported)
7354             {
7355
7356 #if defined(DEBUG) || defined(INLINE_DATA)
7357
7358                 // Keep track of the raw IL offset of the call
7359                 call->gtCall.gtRawILOffset = rawILOffset;
7360
7361 #endif // defined(DEBUG) || defined(INLINE_DATA)
7362
7363                 // Is it an inline candidate?
7364                 impMarkInlineCandidate(call, exactContextHnd, callInfo);
7365             }
7366
7367             // append the call node.
7368             impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7369
7370             // Now push the value of the 'new' onto the stack
7371
7372             // This is a 'new' of a non-variable sized object.
7373             // Append the new node (op1) to the statement list,
7374             // and then push the local holding the value of this
7375             // new instruction on the stack.
7376
7377             if (clsFlags & CORINFO_FLG_VALUECLASS)
7378             {
7379                 assert(newobjThis->gtOper == GT_ADDR && newobjThis->gtOp.gtOp1->gtOper == GT_LCL_VAR);
7380
7381                 unsigned tmp = newobjThis->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
7382                 impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack());
7383             }
7384             else
7385             {
7386                 if (newobjThis->gtOper == GT_COMMA)
7387                 {
7388                     // In coreclr the callout can be inserted even if verification is disabled
7389                     // so we cannot rely on tiVerificationNeeded alone
7390
7391                     // We must have inserted the callout. Get the real newobj.
7392                     newobjThis = newobjThis->gtOp.gtOp2;
7393                 }
7394
7395                 assert(newobjThis->gtOper == GT_LCL_VAR);
7396                 impPushOnStack(gtNewLclvNode(newobjThis->gtLclVarCommon.gtLclNum, TYP_REF), typeInfo(TI_REF, clsHnd));
7397             }
7398         }
7399         return callRetTyp;
7400     }
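    // Editorial sketch (annotation only): except for variable-sized objects (where the
    // constructor call itself is retyped to TYP_REF and pushed), the newobj handling above
    // appends the constructor call as its own statement and re-pushes the result from the
    // temp, roughly:
    //
    //   ctor(newobjThis, args...)       // appended via impAppendTree
    //   push lclVar(tmp)                // TYP_REF object, or the value-class temp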
7401
7402 DONE:
7403
7404     if (tailCall)
7405     {
7406         // This check cannot be performed for implicit tail calls for the reason
7407         // that impIsImplicitTailCallCandidate() is not checking whether return
7408         // types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT.
7409         // As a result it is possible that in the following case, we find that
7410         // the type stack is non-empty if Callee() is considered for implicit
7411         // tail calling.
7412         //      int Caller(..) { .... void Callee(); ret val; ... }
7413         //
7414         // Note that we cannot check return type compatibility before impImportCall(),
7415         // as we don't have the required info, or we would need to duplicate some of
7416         // the logic of impImportCall().
7417         //
7418         // For implicit tail calls, we perform this check after return types are
7419         // known to be compatible.
7420         if ((tailCall & PREFIX_TAILCALL_EXPLICIT) && (verCurrentState.esStackDepth != 0))
7421         {
7422             BADCODE("Stack should be empty after tailcall");
7423         }
7424
7425         // Note that we can not relax this condition with genActualType() as
7426         // the calling convention dictates that the caller of a function with
7427         // a small-typed return value is responsible for normalizing the return val
7428
7429         if (canTailCall &&
7430             !impTailCallRetTypeCompatible(info.compRetType, info.compMethodInfo->args.retTypeClass, callRetTyp,
7431                                           callInfo->sig.retTypeClass))
7432         {
7433             canTailCall             = false;
7434             szCanTailCallFailReason = "Return types are not tail call compatible";
7435         }
7436
7437         // Stack empty check for implicit tail calls.
7438         if (canTailCall && (tailCall & PREFIX_TAILCALL_IMPLICIT) && (verCurrentState.esStackDepth != 0))
7439         {
7440 #ifdef _TARGET_AMD64_
7441             // JIT64 Compatibility:  Opportunistic tail call stack mismatch throws a VerificationException
7442             // in JIT64, not an InvalidProgramException.
7443             Verify(false, "Stack should be empty after tailcall");
7444 #else  // _TARGET_64BIT_
7445             BADCODE("Stack should be empty after tailcall");
7446 #endif //!_TARGET_64BIT_
7447         }
7448
7449         // assert(compCurBB is not a catch, finally or filter block);
7450         // assert(compCurBB is not a try block protected by a finally block);
7451
7452         // Check for permission to tailcall
7453         bool explicitTailCall = (tailCall & PREFIX_TAILCALL_EXPLICIT) != 0;
7454
7455         assert(!explicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN);
7456
7457         if (canTailCall)
7458         {
7459             // True virtual or indirect calls shouldn't pass in a callee handle.
7460             CORINFO_METHOD_HANDLE exactCalleeHnd = ((call->gtCall.gtCallType != CT_USER_FUNC) ||
7461                                                     ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT))
7462                                                        ? nullptr
7463                                                        : methHnd;
7464             GenTreePtr thisArg = call->gtCall.gtCallObjp;
7465
7466             if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, explicitTailCall))
7467             {
7468                 canTailCall = true;
7469                 if (explicitTailCall)
7470                 {
7471                     // In case of explicit tail calls, mark it so that it is not considered
7472                     // for in-lining.
7473                     call->gtCall.gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL;
7474 #ifdef DEBUG
7475                     if (verbose)
7476                     {
7477                         printf("\nGTF_CALL_M_EXPLICIT_TAILCALL bit set for call ");
7478                         printTreeID(call);
7479                         printf("\n");
7480                     }
7481 #endif
7482                 }
7483                 else
7484                 {
7485 #if FEATURE_TAILCALL_OPT
7486                     // Must be an implicit tail call.
7487                     assert((tailCall & PREFIX_TAILCALL_IMPLICIT) != 0);
7488
7489                     // It is possible that a call node is both an inline candidate and marked
7490                     // for opportunistic tail calling.  In-lining happens before morphing of
7491                     // trees.  If in-lining of an in-line candidate gets aborted for whatever
7492                     // reason, it will survive to the morphing stage at which point it will be
7493                     // transformed into a tail call after performing additional checks.
7494
7495                     call->gtCall.gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL;
7496 #ifdef DEBUG
7497                     if (verbose)
7498                     {
7499                         printf("\nGTF_CALL_M_IMPLICIT_TAILCALL bit set for call ");
7500                         printTreeID(call);
7501                         printf("\n");
7502                     }
7503 #endif
7504
7505 #else //! FEATURE_TAILCALL_OPT
7506                     NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls");
7507
7508 #endif // FEATURE_TAILCALL_OPT
7509                 }
7510
7511                 // we can't report success just yet...
7512             }
7513             else
7514             {
7515                 canTailCall = false;
7516 // canTailCall reported its reasons already
7517 #ifdef DEBUG
7518                 if (verbose)
7519                 {
7520                     printf("\ninfo.compCompHnd->canTailCall returned false for call ");
7521                     printTreeID(call);
7522                     printf("\n");
7523                 }
7524 #endif
7525             }
7526         }
7527         else
7528         {
7529             // If this assert fires it means that canTailCall was set to false without setting a reason!
7530             assert(szCanTailCallFailReason != nullptr);
7531
7532 #ifdef DEBUG
7533             if (verbose)
7534             {
7535                 printf("\nRejecting %splicit tail call for call ", explicitTailCall ? "ex" : "im");
7536                 printTreeID(call);
7537                 printf(": %s\n", szCanTailCallFailReason);
7538             }
7539 #endif
7540             info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, explicitTailCall, TAILCALL_FAIL,
7541                                                      szCanTailCallFailReason);
7542         }
7543     }
7544
7545 // Note: we assume that small return types are already normalized by the managed callee
7546 // or by the pinvoke stub for calls to unmanaged code.
7547
7548 DONE_CALL:
7549
7550     if (!bIntrinsicImported)
7551     {
7552         //
7553         // Things needed to be checked when bIntrinsicImported is false.
7554         //
7555
7556         assert(call->gtOper == GT_CALL);
7557         assert(sig != nullptr);
7558
7559         // Tail calls require us to save the call site's sig info so we can obtain an argument
7560         // copying thunk from the EE later on.
7561         if (call->gtCall.callSig == nullptr)
7562         {
7563             call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7564             *call->gtCall.callSig = *sig;
7565         }
7566
7567         if (compIsForInlining() && opcode == CEE_CALLVIRT)
7568         {
7569             GenTreePtr callObj = call->gtCall.gtCallObjp;
7570             assert(callObj != nullptr);
7571
7572             unsigned callKind = call->gtFlags & GTF_CALL_VIRT_KIND_MASK;
7573
7574             if (((callKind != GTF_CALL_NONVIRT) || (call->gtFlags & GTF_CALL_NULLCHECK)) &&
7575                 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(call->gtCall.gtCallArgs, callObj,
7576                                                                    impInlineInfo->inlArgInfo))
7577             {
7578                 impInlineInfo->thisDereferencedFirst = true;
7579             }
7580         }
7581
7582 #if defined(DEBUG) || defined(INLINE_DATA)
7583
7584         // Keep track of the raw IL offset of the call
7585         call->gtCall.gtRawILOffset = rawILOffset;
7586
7587 #endif // defined(DEBUG) || defined(INLINE_DATA)
7588
7589         // Is it an inline candidate?
7590         impMarkInlineCandidate(call, exactContextHnd, callInfo);
7591     }
7592
7593     // Push or append the result of the call
7594     if (callRetTyp == TYP_VOID)
7595     {
7596         if (opcode == CEE_NEWOBJ)
7597         {
7598             // we actually did push something, so don't spill the thing we just pushed.
7599             assert(verCurrentState.esStackDepth > 0);
7600             impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtOffs);
7601         }
7602         else
7603         {
7604             impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7605         }
7606     }
7607     else
7608     {
7609         impSpillSpecialSideEff();
7610
7611         if (clsFlags & CORINFO_FLG_ARRAY)
7612         {
7613             eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
7614         }
7615
7616         // Find the return type used for verification by interpreting the method signature.
7617         // NB: we are clobbering the already established sig.
7618         if (tiVerificationNeeded)
7619         {
7620             // Actually, we never get the sig for the original method.
7621             sig = &(callInfo->verSig);
7622         }
7623
7624         typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass);
7625         tiRetVal.NormaliseForStack();
7626
7627         // The CEE_READONLY prefix modifies the verification semantics of an Address
7628         // operation on an array type.
7629         if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall && tiRetVal.IsByRef())
7630         {
7631             tiRetVal.SetIsReadonlyByRef();
7632         }
7633
7634         if (tiVerificationNeeded)
7635         {
7636             // We assume all calls return permanent home byrefs. If they
7637             // didn't they wouldn't be verifiable. This is also covering
7638             // the Address() helper for multidimensional arrays.
7639             if (tiRetVal.IsByRef())
7640             {
7641                 tiRetVal.SetIsPermanentHomeByRef();
7642             }
7643         }
7644
7645         if (call->gtOper == GT_CALL)
7646         {
7647             // Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call)
7648             if (varTypeIsStruct(callRetTyp))
7649             {
7650                 call = impFixupCallStructReturn(call, sig->retTypeClass);
7651             }
7652
7653             if ((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0)
7654             {
7655                 assert(opts.OptEnabled(CLFLG_INLINING));
7656
7657                 // Make the call its own tree (spill the stack if needed).
7658                 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7659
7660                 // TODO: Still using the widened type.
7661                 call = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp));
7662             }
7663             else
7664             {
7665                 // For non-candidates we must also spill, since we
7666                 // might have locals live on the eval stack that this
7667                 // call can modify.
7668                 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call"));
7669             }
7670         }
7671
7672         if (!bIntrinsicImported)
7673         {
7674             //-------------------------------------------------------------------------
7675             //
7676             /* If the call is of a small type and the callee is managed, the callee will normalize the result
7677                 before returning.
7678                 However, we need to normalize small type values returned by unmanaged
7679                 functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here
7680                 if we use the shorter inlined pinvoke stub. */
7681
7682             if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT))
7683             {
7684                 call = gtNewCastNode(genActualType(callRetTyp), call, callRetTyp);
7685             }
7686         }
7687
7688         impPushOnStack(call, tiRetVal);
7689     }
7690
7691     // VSD functions get a new call target each time we getCallInfo, so clear the cache.
7692     // Also, the call info cache for CALLI instructions is largely incomplete, so clear it out.
7693     // if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB))
7694     //  callInfoCache.uncacheCallInfo();
7695
7696     return callRetTyp;
7697 }
7698 #ifdef _PREFAST_
7699 #pragma warning(pop)
7700 #endif
7701
7702 bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo)
7703 {
7704     CorInfoType corType = methInfo->args.retType;
7705
7706     if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY))
7707     {
7708         // We have some kind of STRUCT being returned
7709
7710         structPassingKind howToReturnStruct = SPK_Unknown;
7711
7712         var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, &howToReturnStruct);
7713
7714         if (howToReturnStruct == SPK_ByReference)
7715         {
7716             return true;
7717         }
7718     }
7719
7720     return false;
7721 }
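// Editorial sketch: a minimal, hypothetical use of the helper above. The surrounding
// context is illustrative only and not part of the JIT:
//
//   if (impMethodInfo_hasRetBuffArg(methInfo))
//   {
//       // The callee returns its struct through a hidden return buffer, so an import
//       // site must arrange an address-taken temp to pass as that buffer.
//   }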
7722
7723 #ifdef DEBUG
7724 //
7725 var_types Compiler::impImportJitTestLabelMark(int numArgs)
7726 {
7727     TestLabelAndNum tlAndN;
7728     if (numArgs == 2)
7729     {
7730         tlAndN.m_num  = 0;
7731         StackEntry se = impPopStack();
7732         assert(se.seTypeInfo.GetType() == TI_INT);
7733         GenTreePtr val = se.val;
7734         assert(val->IsCnsIntOrI());
7735         tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
7736     }
7737     else if (numArgs == 3)
7738     {
7739         StackEntry se = impPopStack();
7740         assert(se.seTypeInfo.GetType() == TI_INT);
7741         GenTreePtr val = se.val;
7742         assert(val->IsCnsIntOrI());
7743         tlAndN.m_num = val->AsIntConCommon()->IconValue();
7744         se           = impPopStack();
7745         assert(se.seTypeInfo.GetType() == TI_INT);
7746         val = se.val;
7747         assert(val->IsCnsIntOrI());
7748         tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
7749     }
7750     else
7751     {
7752         assert(false);
7753     }
7754
7755     StackEntry expSe = impPopStack();
7756     GenTreePtr node  = expSe.val;
7757
7758     // There are a small number of special cases, where we actually put the annotation on a subnode.
7759     if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100)
7760     {
7761         // A loop hoist annotation with value >= 100 means that the expression should be a static field access,
7762         // a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some
7763         // offset within the static field block whose address is returned by the helper call.
7764         // The annotation is saying that this address calculation, but not the entire access, should be hoisted.
7765         GenTreePtr helperCall = nullptr;
7766         assert(node->OperGet() == GT_IND);
7767         tlAndN.m_num -= 100;
7768         GetNodeTestData()->Set(node->gtOp.gtOp1, tlAndN);
7769         GetNodeTestData()->Remove(node);
7770     }
7771     else
7772     {
7773         GetNodeTestData()->Set(node, tlAndN);
7774     }
7775
7776     impPushOnStack(node, expSe.seTypeInfo);
7777     return node->TypeGet();
7778 }
7779 #endif // DEBUG
7780
7781 //-----------------------------------------------------------------------------------
7782 //  impFixupCallStructReturn: For a call node that returns a struct type either
7783 //  adjust the return type to an enregisterable type, or set the flag to indicate
7784 //  struct return via retbuf arg.
7785 //
7786 //  Arguments:
7787 //    call       -  GT_CALL GenTree node
7788 //    retClsHnd  -  Class handle of return type of the call
7789 //
7790 //  Return Value:
7791 //    Returns new GenTree node after fixing struct return of call node
7792 //
7793 GenTreePtr Compiler::impFixupCallStructReturn(GenTreePtr call, CORINFO_CLASS_HANDLE retClsHnd)
7794 {
7795     assert(call->gtOper == GT_CALL);
7796
7797     if (!varTypeIsStruct(call))
7798     {
7799         return call;
7800     }
7801
7802     call->gtCall.gtRetClsHnd = retClsHnd;
7803
7804     GenTreeCall* callNode = call->AsCall();
7805
7806 #if FEATURE_MULTIREG_RET
7807     // Initialize Return type descriptor of call node
7808     ReturnTypeDesc* retTypeDesc = callNode->GetReturnTypeDesc();
7809     retTypeDesc->InitializeStructReturnType(this, retClsHnd);
7810 #endif // FEATURE_MULTIREG_RET
7811
7812 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
7813
7814     // Not allowed for FEATURE_CORECLR, which is the only SKU available for System V OSs.
7815     assert(!callNode->IsVarargs() && "varargs not allowed for System V OSs.");
7816
7817     // The return type will remain as the incoming struct type unless normalized to a
7818     // single eightbyte return type below.
7819     callNode->gtReturnType = call->gtType;
7820
7821     unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7822     if (retRegCount != 0)
7823     {
7824         if (retRegCount == 1)
7825         {
7826             // struct returned in a single register
7827             callNode->gtReturnType = retTypeDesc->GetReturnRegType(0);
7828         }
7829         else
7830         {
7831             // must be a struct returned in two registers
7832             assert(retRegCount == 2);
7833
7834             if ((!callNode->CanTailCall()) && (!callNode->IsInlineCandidate()))
7835             {
7836                 // Force a call returning multi-reg struct to be always of the IR form
7837                 //   tmp = call
7838                 //
7839                 // No need to assign a multi-reg struct to a local var if:
7840                 //  - It is a tail call or
7841                 //  - The call is marked for in-lining later
7842                 return impAssignMultiRegTypeToVar(call, retClsHnd);
7843             }
7844         }
7845     }
7846     else
7847     {
7848         // struct not returned in registers, i.e. returned via hidden retbuf arg.
7849         callNode->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
7850     }
7851
7852 #else // not FEATURE_UNIX_AMD64_STRUCT_PASSING
7853
7854 #if FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
7855     // There is no fixup necessary if the return type is a HFA struct.
7856     // HFA structs are returned in registers for ARM32 and ARM64
7857     //
7858     if (!call->gtCall.IsVarargs() && IsHfa(retClsHnd))
7859     {
7860         if (call->gtCall.CanTailCall())
7861         {
7862             if (info.compIsVarArgs)
7863             {
7864                 // We cannot tail call because control needs to return to fixup the calling
7865                 // convention for result return.
7866                 call->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
7867             }
7868             else
7869             {
7870                 // If we can tail call returning HFA, then don't assign it to
7871                 // a variable back and forth.
7872                 return call;
7873             }
7874         }
7875
7876         if (call->gtFlags & GTF_CALL_INLINE_CANDIDATE)
7877         {
7878             return call;
7879         }
7880
7881         unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7882         if (retRegCount >= 2)
7883         {
7884             return impAssignMultiRegTypeToVar(call, retClsHnd);
7885         }
7886     }
7887 #endif // _TARGET_ARM_
7888
7889     // Check for TYP_STRUCT type that wraps a primitive type
7890     // Such structs are returned using a single register
7891     // and we change the return type on those calls here.
7892     //
7893     structPassingKind howToReturnStruct;
7894     var_types         returnType = getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
7895
7896     if (howToReturnStruct == SPK_ByReference)
7897     {
7898         assert(returnType == TYP_UNKNOWN);
7899         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
7900     }
7901     else
7902     {
7903         assert(returnType != TYP_UNKNOWN);
7904         call->gtCall.gtReturnType = returnType;
7905
7906         // ToDo: Refactor this common code sequence into its own method as it is used 4+ times
7907         if ((returnType == TYP_LONG) && (compLongUsed == false))
7908         {
7909             compLongUsed = true;
7910         }
7911         else if (((returnType == TYP_FLOAT) || (returnType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
7912         {
7913             compFloatingPointUsed = true;
7914         }
7915
7916 #if FEATURE_MULTIREG_RET
7917         unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7918         assert(retRegCount != 0);
7919
7920         if (retRegCount >= 2)
7921         {
7922             if ((!callNode->CanTailCall()) && (!callNode->IsInlineCandidate()))
7923             {
7924                 // Force a call returning multi-reg struct to be always of the IR form
7925                 //   tmp = call
7926                 //
7927                 // No need to assign a multi-reg struct to a local var if:
7928                 //  - It is a tail call or
7929                 //  - The call is marked for in-lining later
7930                 return impAssignMultiRegTypeToVar(call, retClsHnd);
7931             }
7932         }
7933 #endif // FEATURE_MULTIREG_RET
7934     }
7935
7936 #endif // not FEATURE_UNIX_AMD64_STRUCT_PASSING
7937
7938     return call;
7939 }
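// Editorial sketch (summary annotation, not new JIT code): impFixupCallStructReturn leaves
// a struct-returning call in one of three shapes:
//
//   call->gtReturnType = <single register type>        // struct fits in one register
//   tmp = call                                         // multi-reg return forced into a temp
//                                                      // (unless tail call / inline candidate)
//   gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG           // returned via hidden retbuf argument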
7940
7941 /*****************************************************************************
7942    For struct return values, re-type the operand in the case where the ABI
7943    does not use a struct return buffer
7944    Note that this method is only called for !_TARGET_X86_
7945  */
7946
7947 GenTreePtr Compiler::impFixupStructReturnType(GenTreePtr op, CORINFO_CLASS_HANDLE retClsHnd)
7948 {
7949     assert(varTypeIsStruct(info.compRetType));
7950     assert(info.compRetBuffArg == BAD_VAR_NUM);
7951
7952 #if defined(_TARGET_XARCH_)
7953
7954 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
7955     // No VarArgs for CoreCLR on x64 Unix
7956     assert(!info.compIsVarArgs);
7957
7958     // Is method returning a multi-reg struct?
7959     if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd))
7960     {
7961         // In case of multi-reg struct return, we force IR to be one of the following:
7962         // GT_RETURN(lclvar) or GT_RETURN(call).  If op is anything other than a
7963         // lclvar or call, it is assigned to a temp to create: temp = op and GT_RETURN(tmp).
7964
7965         if (op->gtOper == GT_LCL_VAR)
7966         {
7967             // Make sure that this struct stays in memory and doesn't get promoted.
7968             unsigned lclNum                  = op->gtLclVarCommon.gtLclNum;
7969             lvaTable[lclNum].lvIsMultiRegRet = true;
7970
7971             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
7972             op->gtFlags |= GTF_DONT_CSE;
7973
7974             return op;
7975         }
7976
7977         if (op->gtOper == GT_CALL)
7978         {
7979             return op;
7980         }
7981
7982         return impAssignMultiRegTypeToVar(op, retClsHnd);
7983     }
7984 #else  // !FEATURE_UNIX_AMD64_STRUCT_PASSING
7985     assert(info.compRetNativeType != TYP_STRUCT);
7986 #endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
7987
7988 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
7989
7990     if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd))
7991     {
7992         if (op->gtOper == GT_LCL_VAR)
7993         {
7994             // This LCL_VAR is an HFA return value, it stays as a TYP_STRUCT
7995             unsigned lclNum = op->gtLclVarCommon.gtLclNum;
7996             // Make sure this struct type stays as struct so that we can return it as an HFA
7997             lvaTable[lclNum].lvIsMultiRegRet = true;
7998
7999             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8000             op->gtFlags |= GTF_DONT_CSE;
8001
8002             return op;
8003         }
8004
8005         if (op->gtOper == GT_CALL)
8006         {
8007             if (op->gtCall.IsVarargs())
8008             {
8009                 // We cannot tail call because control needs to return to fixup the calling
8010                 // convention for result return.
8011                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
8012                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8013             }
8014             else
8015             {
8016                 return op;
8017             }
8018         }
8019         return impAssignMultiRegTypeToVar(op, retClsHnd);
8020     }
8021
8022 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM64_)
8023
8024     // Is method returning a multi-reg struct?
8025     if (IsMultiRegReturnedType(retClsHnd))
8026     {
8027         if (op->gtOper == GT_LCL_VAR)
8028         {
8029             // This LCL_VAR stays as a TYP_STRUCT
8030             unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8031
8032             // Make sure this struct type is not struct promoted
8033             lvaTable[lclNum].lvIsMultiRegRet = true;
8034
8035             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8036             op->gtFlags |= GTF_DONT_CSE;
8037
8038             return op;
8039         }
8040
8041         if (op->gtOper == GT_CALL)
8042         {
8043             if (op->gtCall.IsVarargs())
8044             {
8045                 // We cannot tail call because control needs to return to fixup the calling
8046                 // convention for result return.
8047                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
8048                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8049             }
8050             else
8051             {
8052                 return op;
8053             }
8054         }
8055         return impAssignMultiRegTypeToVar(op, retClsHnd);
8056     }
8057
8058 #endif //  FEATURE_MULTIREG_RET && FEATURE_HFA
8059
8060 REDO_RETURN_NODE:
8061     // adjust the type away from struct to integral
8062     // and no normalizing
8063     if (op->gtOper == GT_LCL_VAR)
8064     {
8065         op->ChangeOper(GT_LCL_FLD);
8066     }
8067     else if (op->gtOper == GT_OBJ)
8068     {
8069         GenTreePtr op1 = op->AsObj()->Addr();
8070
8071         // We will fold away OBJ/ADDR
8072         // except for OBJ/ADDR/INDEX
8073         //     as the array type influences the array element's offset
8074         //     Later in this method we change op->gtType to info.compRetNativeType
8075         //     This is not correct when op is a GT_INDEX as the starting offset
8076         //     for the array elements 'elemOffs' is different for an array of
8077         //     TYP_REF than an array of TYP_STRUCT (which simply wraps a TYP_REF)
8078         //     Also refer to the GTF_INX_REFARR_LAYOUT flag
8079         //
8080         if ((op1->gtOper == GT_ADDR) && (op1->gtOp.gtOp1->gtOper != GT_INDEX))
8081         {
8082             // Change '*(&X)' to 'X' and see if we can do better
8083             op = op1->gtOp.gtOp1;
8084             goto REDO_RETURN_NODE;
8085         }
8086         op->gtObj.gtClass = NO_CLASS_HANDLE;
8087         op->ChangeOperUnchecked(GT_IND);
8088         op->gtFlags |= GTF_IND_TGTANYWHERE;
8089     }
8090     else if (op->gtOper == GT_CALL)
8091     {
8092         if (op->AsCall()->TreatAsHasRetBufArg(this))
8093         {
8094             // This must be one of those 'special' helpers that don't
8095             // really have a return buffer, but instead use it as a way
8096             // to keep the trees cleaner with fewer address-taken temps.
8097             //
8098             // Well now we have to materialize the return buffer as
8099             // an address-taken temp. Then we can return the temp.
8100             //
8101             // NOTE: this code assumes that since the call directly
8102             // feeds the return, then the call must be returning the
8103             // same structure/class/type.
8104             //
8105             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer"));
8106
8107             // No need to spill anything as we're about to return.
8108             impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE);
8109
8110             // Don't create both a GT_ADDR & GT_OBJ just to undo all of that; instead,
8111             // jump directly to a GT_LCL_FLD.
8112             op = gtNewLclvNode(tmpNum, info.compRetNativeType);
8113             op->ChangeOper(GT_LCL_FLD);
8114         }
8115         else
8116         {
8117             assert(info.compRetNativeType == op->gtCall.gtReturnType);
8118
8119             // Don't change the gtType of the node just yet, it will get changed later.
8120             return op;
8121         }
8122     }
8123     else if (op->gtOper == GT_COMMA)
8124     {
8125         op->gtOp.gtOp2 = impFixupStructReturnType(op->gtOp.gtOp2, retClsHnd);
8126     }
8127
8128     op->gtType = info.compRetNativeType;
8129
8130     return op;
8131 }
8132
8133 /*****************************************************************************
8134    CEE_LEAVE may be jumping out of a protected block, viz., a catch or a
8135    finally-protected try. We find the finally blocks protecting the current
8136    offset (in order) by walking over the complete exception table and
8137    finding enclosing clauses. This assumes that the table is sorted.
8138    This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS.
8139
8140    If we are leaving a catch handler, we need to attach the
8141    CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks.
8142
8143    After this function, the BBJ_LEAVE block has been converted to a different type.
8144  */
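/*
   An illustrative sketch (not tied to any particular method): for IL of the form

      try {
          try {
              ...
              leave LABEL_OUT     // leaves two finally-protected 'try' regions
          } finally { F1 }
      } finally { F2 }
      LABEL_OUT:

   the CEE_LEAVE is rewritten into a chain of the form

      BBJ_CALLFINALLY (calls F1) -> BBJ_CALLFINALLY (calls F2) -> BBJ_ALWAYS -> LABEL_OUT

   and, if the leave also exits a catch handler, the required CORINFO_HELP_ENDCATCH
   calls are attached to the appropriate blocks along the way.
 */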
8145
8146 #if !FEATURE_EH_FUNCLETS
8147
8148 void Compiler::impImportLeave(BasicBlock* block)
8149 {
8150 #ifdef DEBUG
8151     if (verbose)
8152     {
8153         printf("\nBefore import CEE_LEAVE:\n");
8154         fgDispBasicBlocks();
8155         fgDispHandlerTab();
8156     }
8157 #endif // DEBUG
8158
8159     bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8160     unsigned    blkAddr         = block->bbCodeOffs;
8161     BasicBlock* leaveTarget     = block->bbJumpDest;
8162     unsigned    jmpAddr         = leaveTarget->bbCodeOffs;
8163
8164     // LEAVE clears the stack: spill any side effects and set the stack depth to 0
8165
8166     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8167     verCurrentState.esStackDepth = 0;
8168
8169     assert(block->bbJumpKind == BBJ_LEAVE);
8170     assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary
8171
8172     BasicBlock* step         = DUMMY_INIT(NULL);
8173     unsigned    encFinallies = 0; // Number of enclosing finallies.
8174     GenTreePtr  endCatches   = NULL;
8175     GenTreePtr  endLFin      = NULL; // The statement tree to indicate the end of locally-invoked finally.
8176
8177     unsigned  XTnum;
8178     EHblkDsc* HBtab;
8179
8180     for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8181     {
8182         // Grab the handler offsets
8183
8184         IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8185         IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8186         IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8187         IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8188
8189         /* Is this a catch-handler we are CEE_LEAVEing out of?
8190          * If so, we need to call CORINFO_HELP_ENDCATCH.
8191          */
8192
8193         if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8194         {
8195             // Can't CEE_LEAVE out of a finally/fault handler
8196             if (HBtab->HasFinallyOrFaultHandler())
8197                 BADCODE("leave out of fault/finally block");
8198
8199             // Create the call to CORINFO_HELP_ENDCATCH
8200             GenTreePtr endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID);
8201
8202             // Make a list of all the currently pending endCatches
8203             if (endCatches)
8204                 endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch);
8205             else
8206                 endCatches = endCatch;
8207
8208 #ifdef DEBUG
8209             if (verbose)
8210             {
8211                 printf("impImportLeave - BB%02u jumping out of catch handler EH#%u, adding call to "
8212                        "CORINFO_HELP_ENDCATCH\n",
8213                        block->bbNum, XTnum);
8214             }
8215 #endif
8216         }
8217         else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8218                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8219         {
8220             /* This is a finally-protected try we are jumping out of */
8221
8222             /* If there are any pending endCatches, and we have already
8223                jumped out of a finally-protected try, then the endCatches
8224                have to be put in a block in an outer try for async
8225                exceptions to work correctly.
8226                Else, just append to the original block */
8227
8228             BasicBlock* callBlock;
8229
8230             assert(!encFinallies == !endLFin); // if we have finallies, we better have an endLFin tree, and vice-versa
8231
8232             if (encFinallies == 0)
8233             {
8234                 assert(step == DUMMY_INIT(NULL));
8235                 callBlock             = block;
8236                 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8237
8238                 if (endCatches)
8239                     impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8240
8241 #ifdef DEBUG
8242                 if (verbose)
8243                 {
8244                     printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY "
8245                            "block BB%02u [%08p]\n",
8246                            callBlock->bbNum, dspPtr(callBlock));
8247                 }
8248 #endif
8249             }
8250             else
8251             {
8252                 assert(step != DUMMY_INIT(NULL));
8253
8254                 /* Calling the finally block */
8255                 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step);
8256                 assert(step->bbJumpKind == BBJ_ALWAYS);
8257                 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8258                                               // finally in the chain)
8259                 step->bbJumpDest->bbRefs++;
8260
8261                 /* The new block will inherit this block's weight */
8262                 callBlock->setBBWeight(block->bbWeight);
8263                 callBlock->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8264
8265 #ifdef DEBUG
8266                 if (verbose)
8267                 {
8268                     printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block BB%02u "
8269                            "[%08p]\n",
8270                            callBlock->bbNum, dspPtr(callBlock));
8271                 }
8272 #endif
8273
8274                 GenTreePtr lastStmt;
8275
8276                 if (endCatches)
8277                 {
8278                     lastStmt         = gtNewStmt(endCatches);
8279                     endLFin->gtNext  = lastStmt;
8280                     lastStmt->gtPrev = endLFin;
8281                 }
8282                 else
8283                 {
8284                     lastStmt = endLFin;
8285                 }
8286
8287                 // note that this sets BBF_IMPORTED on the block
8288                 impEndTreeList(callBlock, endLFin, lastStmt);
8289             }
8290
8291             step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8292             /* The new block will inherit this block's weight */
8293             step->setBBWeight(block->bbWeight);
8294             step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8295
8296 #ifdef DEBUG
8297             if (verbose)
8298             {
8299                 printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block "
8300                        "BB%02u [%08p]\n",
8301                        step->bbNum, dspPtr(step));
8302             }
8303 #endif
8304
8305             unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel;
8306             assert(finallyNesting <= compHndBBtabCount);
8307
8308             callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8309             endLFin               = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting);
8310             endLFin               = gtNewStmt(endLFin);
8311             endCatches            = NULL;
8312
8313             encFinallies++;
8314
8315             invalidatePreds = true;
8316         }
8317     }
8318
8319     /* Append any remaining endCatches, if any */
8320
8321     assert(!encFinallies == !endLFin);
8322
8323     if (encFinallies == 0)
8324     {
8325         assert(step == DUMMY_INIT(NULL));
8326         block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8327
8328         if (endCatches)
8329             impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8330
8331 #ifdef DEBUG
8332         if (verbose)
8333         {
8334             printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS "
8335                    "block BB%02u [%08p]\n",
8336                    block->bbNum, dspPtr(block));
8337         }
8338 #endif
8339     }
8340     else
8341     {
8342         // If leaveTarget is the start of another try block, we want to make sure that
8343         // we do not insert finalStep into that try block. Hence, we find the enclosing
8344         // try block.
8345         unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget);
8346
8347         // Insert a new BB either in the try region indicated by tryIndex or
8348         // the handler region indicated by leaveTarget->bbHndIndex,
8349         // depending on which is the inner region.
8350         BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step);
8351         finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS;
8352         step->bbJumpDest = finalStep;
8353
8354         /* The new block will inherit this block's weight */
8355         finalStep->setBBWeight(block->bbWeight);
8356         finalStep->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8357
8358 #ifdef DEBUG
8359         if (verbose)
8360         {
8361             printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block BB%02u [%08p]\n",
8362                    encFinallies, finalStep->bbNum, dspPtr(finalStep));
8363         }
8364 #endif
8365
8366         GenTreePtr lastStmt;
8367
8368         if (endCatches)
8369         {
8370             lastStmt         = gtNewStmt(endCatches);
8371             endLFin->gtNext  = lastStmt;
8372             lastStmt->gtPrev = endLFin;
8373         }
8374         else
8375         {
8376             lastStmt = endLFin;
8377         }
8378
8379         impEndTreeList(finalStep, endLFin, lastStmt);
8380
8381         finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
8382
8383         // Queue up the jump target for importing
8384
8385         impImportBlockPending(leaveTarget);
8386
8387         invalidatePreds = true;
8388     }
8389
8390     if (invalidatePreds && fgComputePredsDone)
8391     {
8392         JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
8393         fgRemovePreds();
8394     }
8395
8396 #ifdef DEBUG
8397     fgVerifyHandlerTab();
8398
8399     if (verbose)
8400     {
8401         printf("\nAfter import CEE_LEAVE:\n");
8402         fgDispBasicBlocks();
8403         fgDispHandlerTab();
8404     }
8405 #endif // DEBUG
8406 }
8407
8408 #else // FEATURE_EH_FUNCLETS
8409
8410 void Compiler::impImportLeave(BasicBlock* block)
8411 {
8412 #ifdef DEBUG
8413     if (verbose)
8414     {
8415         printf("\nBefore import CEE_LEAVE in BB%02u (targeting BB%02u):\n", block->bbNum, block->bbJumpDest->bbNum);
8416         fgDispBasicBlocks();
8417         fgDispHandlerTab();
8418     }
8419 #endif // DEBUG
8420
8421     bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8422     unsigned    blkAddr         = block->bbCodeOffs;
8423     BasicBlock* leaveTarget     = block->bbJumpDest;
8424     unsigned    jmpAddr         = leaveTarget->bbCodeOffs;
8425
8426     // LEAVE clears the stack: spill any side effects and set the stack depth to 0
8427
8428     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8429     verCurrentState.esStackDepth = 0;
8430
8431     assert(block->bbJumpKind == BBJ_LEAVE);
8432     assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary
8433
8434     BasicBlock* step = nullptr;
8435
8436     enum StepType
8437     {
8438         // No step type; step == NULL.
8439         ST_None,
8440
8441         // Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair?
8442         // That is, is step->bbJumpDest where a finally will return to?
8443         ST_FinallyReturn,
8444
8445         // The step block is a catch return.
8446         ST_Catch,
8447
8448         // The step block is in a "try", created as the target for a finally return or the target for a catch return.
8449         ST_Try
8450     };
8451     StepType stepType = ST_None;
8452
8453     unsigned  XTnum;
8454     EHblkDsc* HBtab;
8455
8456     for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8457     {
8458         // Grab the handler offsets
8459
8460         IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8461         IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8462         IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8463         IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8464
8465         /* Is this a catch-handler we are CEE_LEAVEing out of?
8466          */
8467
8468         if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8469         {
8470             // Can't CEE_LEAVE out of a finally/fault handler
8471             if (HBtab->HasFinallyOrFaultHandler())
8472             {
8473                 BADCODE("leave out of fault/finally block");
8474             }
8475
8476             /* We are jumping out of a catch */
8477
8478             if (step == nullptr)
8479             {
8480                 step             = block;
8481                 step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET
8482                 stepType         = ST_Catch;
8483
8484 #ifdef DEBUG
8485                 if (verbose)
8486                 {
8487                     printf("impImportLeave - jumping out of a catch (EH#%u), convert block BB%02u to BBJ_EHCATCHRET "
8488                            "block\n",
8489                            XTnum, step->bbNum);
8490                 }
8491 #endif
8492             }
8493             else
8494             {
8495                 BasicBlock* exitBlock;
8496
8497                 /* Create a new catch exit block in the catch region for the existing step block to jump to in this
8498                  * scope */
8499                 exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step);
8500
8501                 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
8502                 step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch
8503                                               // exit) returns to this block
8504                 step->bbJumpDest->bbRefs++;
8505
8506 #if defined(_TARGET_ARM_)
8507                 if (stepType == ST_FinallyReturn)
8508                 {
8509                     assert(step->bbJumpKind == BBJ_ALWAYS);
8510                     // Mark the target of a finally return
8511                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8512                 }
8513 #endif // defined(_TARGET_ARM_)
8514
8515                 /* The new block will inherit this block's weight */
8516                 exitBlock->setBBWeight(block->bbWeight);
8517                 exitBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8518
8519                 /* This exit block is the new step */
8520                 step     = exitBlock;
8521                 stepType = ST_Catch;
8522
8523                 invalidatePreds = true;
8524
8525 #ifdef DEBUG
8526                 if (verbose)
8527                 {
8528                     printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block BB%02u\n", XTnum,
8529                            exitBlock->bbNum);
8530                 }
8531 #endif
8532             }
8533         }
8534         else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8535                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8536         {
8537             /* We are jumping out of a finally-protected try */
8538
8539             BasicBlock* callBlock;
8540
8541             if (step == nullptr)
8542             {
8543 #if FEATURE_EH_CALLFINALLY_THUNKS
8544
8545                 // Put the call to the finally in the enclosing region.
8546                 unsigned callFinallyTryIndex =
8547                     (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
8548                 unsigned callFinallyHndIndex =
8549                     (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
8550                 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block);
8551
8552                 // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because
8553                 // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE,
8554                 // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the
8555                 // next block, and flow optimizations will remove it.
8556                 block->bbJumpKind = BBJ_ALWAYS;
8557                 block->bbJumpDest = callBlock;
8558                 block->bbJumpDest->bbRefs++;
8559
8560                 /* The new block will inherit this block's weight */
8561                 callBlock->setBBWeight(block->bbWeight);
8562                 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8563
8564 #ifdef DEBUG
8565                 if (verbose)
8566                 {
8567                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
8568                            "BBJ_ALWAYS, add BBJ_CALLFINALLY block BB%02u\n",
8569                            XTnum, block->bbNum, callBlock->bbNum);
8570                 }
8571 #endif
8572
8573 #else // !FEATURE_EH_CALLFINALLY_THUNKS
8574
8575                 callBlock             = block;
8576                 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8577
8578 #ifdef DEBUG
8579                 if (verbose)
8580                 {
8581                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
8582                            "BBJ_CALLFINALLY block\n",
8583                            XTnum, callBlock->bbNum);
8584                 }
8585 #endif
8586
8587 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
8588             }
8589             else
8590             {
8591                 // Calling the finally block. We already have a step block that is either the call-to-finally from a
8592                 // more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by
8593                 // a 'finally'), or the step block is the return from a catch.
8594                 //
8595                 // Due to ThreadAbortException, we can't have the catch return target the call-to-finally block
8596                 // directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will
8597                 // automatically re-raise the exception, using the return address of the catch (that is, the target
8598                 // block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will
8599                 // refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64,
8600                 // we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a
8601                 // finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a
8602                 // BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly
8603                 // within the 'try' region protected by the finally, since we generate code in such a way that execution
8604                 // never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on
8605                 // stack walks.)
8606
8607                 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
8608
8609 #if FEATURE_EH_CALLFINALLY_THUNKS
8610                 if (step->bbJumpKind == BBJ_EHCATCHRET)
8611                 {
8612                     // Need to create another step block in the 'try' region that will actually branch to the
8613                     // call-to-finally thunk.
8614                     BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
8615                     step->bbJumpDest  = step2;
8616                     step->bbJumpDest->bbRefs++;
8617                     step2->setBBWeight(block->bbWeight);
8618                     step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8619
8620 #ifdef DEBUG
8621                     if (verbose)
8622                     {
8623                         printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is "
8624                                "BBJ_EHCATCHRET (BB%02u), new BBJ_ALWAYS step-step block BB%02u\n",
8625                                XTnum, step->bbNum, step2->bbNum);
8626                     }
8627 #endif
8628
8629                     step = step2;
8630                     assert(stepType == ST_Catch); // Leave it as catch type for now.
8631                 }
8632 #endif // FEATURE_EH_CALLFINALLY_THUNKS
8633
8634 #if FEATURE_EH_CALLFINALLY_THUNKS
8635                 unsigned callFinallyTryIndex =
8636                     (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
8637                 unsigned callFinallyHndIndex =
8638                     (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
8639 #else  // !FEATURE_EH_CALLFINALLY_THUNKS
8640                 unsigned callFinallyTryIndex = XTnum + 1;
8641                 unsigned callFinallyHndIndex = 0; // don't care
8642 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
8643
8644                 callBlock        = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step);
8645                 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8646                                               // finally in the chain)
8647                 step->bbJumpDest->bbRefs++;
8648
8649 #if defined(_TARGET_ARM_)
8650                 if (stepType == ST_FinallyReturn)
8651                 {
8652                     assert(step->bbJumpKind == BBJ_ALWAYS);
8653                     // Mark the target of a finally return
8654                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8655                 }
8656 #endif // defined(_TARGET_ARM_)
8657
8658                 /* The new block will inherit this block's weight */
8659                 callBlock->setBBWeight(block->bbWeight);
8660                 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8661
8662 #ifdef DEBUG
8663                 if (verbose)
8664                 {
8665                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY block "
8666                            "BB%02u\n",
8667                            XTnum, callBlock->bbNum);
8668                 }
8669 #endif
8670             }
8671
8672             step     = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8673             stepType = ST_FinallyReturn;
8674
8675             /* The new block will inherit this block's weight */
8676             step->setBBWeight(block->bbWeight);
8677             step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8678
8679 #ifdef DEBUG
8680             if (verbose)
8681             {
8682                 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) "
8683                        "block BB%02u\n",
8684                        XTnum, step->bbNum);
8685             }
8686 #endif
8687
8688             callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8689
8690             invalidatePreds = true;
8691         }
8692         else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8693                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8694         {
8695             // We are jumping out of a catch-protected try.
8696             //
8697             // If we are returning from a call to a finally, then we must have a step block within a try
8698             // that is protected by a catch. This is so when unwinding from that finally (e.g., if code within the
8699             // finally raises an exception), the VM will find this step block, notice that it is in a protected region,
8700             // and invoke the appropriate catch.
8701             //
8702             // We also need to handle a special case with the handling of ThreadAbortException. If a try/catch
8703             // catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception),
8704             // and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM,
8705             // the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target
8706             // address of the catch return as the new exception address. That is, the re-raised exception appears to
8707             // occur at the catch return address. If this exception return address skips an enclosing try/catch that
8708             // catches ThreadAbortException, then the enclosing try/catch will not catch the exception, as it should.
8709             // For example:
8710             //
8711             // try {
8712             //    try {
8713             //       // something here raises ThreadAbortException
8714             //       LEAVE LABEL_1; // no need to stop at LABEL_2
8715             //    } catch (Exception) {
8716             //       // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so
8717             //       // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode.
8718             //       // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised
8719             //       // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only
8720             //       // need to do this transformation if the current EH block is a try/catch that catches
8721             //       // ThreadAbortException (or one of its parents), however we might not be able to find that
8722             //       // information, so currently we do it for all catch types.
8723             //       LEAVE LABEL_1; // Convert this to LEAVE LABEL2;
8724             //    }
8725             //    LABEL_2: LEAVE LABEL_1; // inserted by this step creation code
8726             // } catch (ThreadAbortException) {
8727             // }
8728             // LABEL_1:
8729             //
8730             // Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C#
8731             // compiler.
8732
8733             if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch))
8734             {
8735                 BasicBlock* catchStep;
8736
8737                 assert(step);
8738
8739                 if (stepType == ST_FinallyReturn)
8740                 {
8741                     assert(step->bbJumpKind == BBJ_ALWAYS);
8742                 }
8743                 else
8744                 {
8745                     assert(stepType == ST_Catch);
8746                     assert(step->bbJumpKind == BBJ_EHCATCHRET);
8747                 }
8748
8749                 /* Create a new exit block in the try region for the existing step block to jump to in this scope */
8750                 catchStep        = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
8751                 step->bbJumpDest = catchStep;
8752                 step->bbJumpDest->bbRefs++;
8753
8754 #if defined(_TARGET_ARM_)
8755                 if (stepType == ST_FinallyReturn)
8756                 {
8757                     // Mark the target of a finally return
8758                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8759                 }
8760 #endif // defined(_TARGET_ARM_)
8761
8762                 /* The new block will inherit this block's weight */
8763                 catchStep->setBBWeight(block->bbWeight);
8764                 catchStep->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8765
8766 #ifdef DEBUG
8767                 if (verbose)
8768                 {
8769                     if (stepType == ST_FinallyReturn)
8770                     {
8771                         printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new "
8772                                "BBJ_ALWAYS block BB%02u\n",
8773                                XTnum, catchStep->bbNum);
8774                     }
8775                     else
8776                     {
8777                         assert(stepType == ST_Catch);
8778                         printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new "
8779                                "BBJ_ALWAYS block BB%02u\n",
8780                                XTnum, catchStep->bbNum);
8781                     }
8782                 }
8783 #endif // DEBUG
8784
8785                 /* This block is the new step */
8786                 step     = catchStep;
8787                 stepType = ST_Try;
8788
8789                 invalidatePreds = true;
8790             }
8791         }
8792     }
8793
8794     if (step == nullptr)
8795     {
8796         block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8797
8798 #ifdef DEBUG
8799         if (verbose)
8800         {
8801             printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE "
8802                    "block BB%02u to BBJ_ALWAYS\n",
8803                    block->bbNum);
8804         }
8805 #endif
8806     }
8807     else
8808     {
8809         step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
8810
8811 #if defined(_TARGET_ARM_)
8812         if (stepType == ST_FinallyReturn)
8813         {
8814             assert(step->bbJumpKind == BBJ_ALWAYS);
8815             // Mark the target of a finally return
8816             step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8817         }
8818 #endif // defined(_TARGET_ARM_)
8819
8820 #ifdef DEBUG
8821         if (verbose)
8822         {
8823             printf("impImportLeave - final destination of step blocks set to BB%02u\n", leaveTarget->bbNum);
8824         }
8825 #endif
8826
8827         // Queue up the jump target for importing
8828
8829         impImportBlockPending(leaveTarget);
8830     }
8831
8832     if (invalidatePreds && fgComputePredsDone)
8833     {
8834         JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
8835         fgRemovePreds();
8836     }
8837
8838 #ifdef DEBUG
8839     fgVerifyHandlerTab();
8840
8841     if (verbose)
8842     {
8843         printf("\nAfter import CEE_LEAVE:\n");
8844         fgDispBasicBlocks();
8845         fgDispHandlerTab();
8846     }
8847 #endif // DEBUG
8848 }
8849
8850 #endif // FEATURE_EH_FUNCLETS
8851
8852 /*****************************************************************************/
8853 // This is called when reimporting a leave block. It resets the JumpKind,
8854 // JumpDest, and bbNext to the original values
8855
8856 void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr)
8857 {
8858 #if FEATURE_EH_FUNCLETS
8859     // With EH funclets, while importing the leave opcode we create another block ending with BBJ_ALWAYS (call it B1),
8860     // and the block containing the leave (say B0) is marked as BBJ_CALLFINALLY.   If for some reason we reimport B0,
8861     // it is reset (in this routine) by marking it as ending with BBJ_LEAVE, and further down, when B0 is reimported, we
8862     // create another BBJ_ALWAYS (call it B2). In this process B1 gets orphaned, and any blocks to which B1 is the
8863     // only predecessor are also considered orphans and are candidates for deletion.
8864     //
8865     //  try  {
8866     //     ....
8867     //     try
8868     //     {
8869     //         ....
8870     //         leave OUTSIDE;  // B0 is the block containing this leave, following this would be B1
8871     //     } finally { }
8872     //  } finally { }
8873     //  OUTSIDE:
8874     //
8875     // In the above nested try-finally example, we create a step block (call it Bstep) which branches to a block
8876     // where a finally would branch to (and such a block is marked as a finally target).  Block B1 branches to the
8877     // step block. Because of the re-import of B0, Bstep is also orphaned. Since Bstep is a finally target it cannot
8878     // be removed.  To work around this we duplicate B0 (call it B0Dup) before resetting it. B0Dup is marked as
8879     // BBJ_CALLFINALLY and only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now, during orphan block
8880     // deletion, B0Dup and B1 will be treated as a pair and handled correctly.
8881     if (block->bbJumpKind == BBJ_CALLFINALLY)
8882     {
8883         BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind);
8884         dupBlock->bbFlags    = block->bbFlags;
8885         dupBlock->bbJumpDest = block->bbJumpDest;
8886         dupBlock->copyEHRegion(block);
8887         dupBlock->bbCatchTyp = block->bbCatchTyp;
8888
8889         // Mark this block as
8890         //  a) not referenced by any other block to make sure that it gets deleted
8891         //  b) weight zero
8892         //  c) prevent from being imported
8893         //  d) as internal
8894         //  e) as rarely run
8895         dupBlock->bbRefs   = 0;
8896         dupBlock->bbWeight = 0;
8897         dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY;
8898
8899         // Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS
8900         // will be next to each other.
8901         fgInsertBBafter(block, dupBlock);
8902
8903 #ifdef DEBUG
8904         if (verbose)
8905         {
8906             printf("New Basic Block BB%02u duplicate of BB%02u created.\n", dupBlock->bbNum, block->bbNum);
8907         }
8908 #endif
8909     }
8910 #endif // FEATURE_EH_FUNCLETS
8911
8912     block->bbJumpKind = BBJ_LEAVE;
8913     fgInitBBLookup();
8914     block->bbJumpDest = fgLookupBB(jmpAddr);
8915
8916     // We will leave the BBJ_ALWAYS block we introduced. When it's reimported
8917     // the BBJ_ALWAYS block will be unreachable, and will be removed afterwards.
8918     // The reason we don't want to remove the block at this point is that if we
8919     // call fgInitBBLookup() again we will get it wrong, as the BBJ_ALWAYS block
8920     // won't be added and the linked list length will differ from fgBBcount.
8921 }
8922
8923 /*****************************************************************************/
8924 // Get the first non-prefix opcode. Used for verification of valid combinations
8925 // of prefixes and actual opcodes.
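//
// For example (a sketch): for the IL sequence "volatile. unaligned. 1 ldind.i4"
// this walks past the two prefixes (and the alignment operand) and returns
// CEE_LDIND_I4; if the stream ends before a non-prefix opcode is found, it returns
// CEE_ILLEGAL.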
8926
8927 static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp)
8928 {
8929     while (codeAddr < codeEndp)
8930     {
8931         OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
8932         codeAddr += sizeof(__int8);
8933
8934         if (opcode == CEE_PREFIX1)
8935         {
8936             if (codeAddr >= codeEndp)
8937             {
8938                 break;
8939             }
8940             opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
8941             codeAddr += sizeof(__int8);
8942         }
8943
8944         switch (opcode)
8945         {
8946             case CEE_UNALIGNED:
8947             case CEE_VOLATILE:
8948             case CEE_TAILCALL:
8949             case CEE_CONSTRAINED:
8950             case CEE_READONLY:
8951                 break;
8952             default:
8953                 return opcode;
8954         }
8955
8956         codeAddr += opcodeSizes[opcode];
8957     }
8958
8959     return CEE_ILLEGAL;
8960 }
8961
8962 /*****************************************************************************/
8963 // Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes
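//
// For example (a sketch): "volatile. ldsfld" is accepted, whereas an unaligned.
// prefix on ldsfld, or either prefix in front of an opcode that performs no memory
// access (say "volatile. add"), is rejected with BADCODE.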
8964
8965 static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix)
8966 {
8967     OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
8968
8969     if (!(
8970             // Opcodes of all ldind and stind happen to be contiguous, except stind.i.
8971             ((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) ||
8972             (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) ||
8973             (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) ||
8974             // The volatile. prefix is also allowed with ldsfld and stsfld
8975             (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD)))))
8976     {
8977         BADCODE("Invalid opcode for unaligned. or volatile. prefix");
8978     }
8979 }
8980
8981 /*****************************************************************************/
8982
8983 #ifdef DEBUG
8984
8985 #undef RETURN // undef contracts RETURN macro
8986
8987 enum controlFlow_t
8988 {
8989     NEXT,
8990     CALL,
8991     RETURN,
8992     THROW,
8993     BRANCH,
8994     COND_BRANCH,
8995     BREAK,
8996     PHI,
8997     META,
8998 };
8999
9000 const static controlFlow_t controlFlow[] = {
9001 #define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow,
9002 #include "opcode.def"
9003 #undef OPDEF
9004 };
9005
9006 #endif // DEBUG
9007
9008 /*****************************************************************************
9009  *  Determine the result type of an arithmetic operation.
9010  *  On 64-bit targets, inserts upcasts when native int is mixed with int32.
9011  */
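//
// A sketch of the byref cases handled below:
//
//     byref - byref        => native int (TYP_I_IMPL)
//     [native] int - byref => native int
//     byref - [native] int => byref
//     byref + [native] int => byref  (byref + byref is not allowed)
//
// On 64-bit targets, whenever an int32 operand is mixed with a native int (or with
// a byref), the int32 operand first gets an explicit upcast to TYP_I_IMPL.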
9012 var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTreePtr* pOp1, GenTreePtr* pOp2)
9013 {
9014     var_types  type = TYP_UNDEF;
9015     GenTreePtr op1 = *pOp1, op2 = *pOp2;
9016
9017     // Arithmetic operations are generally only allowed with
9018     // primitive types, but certain operations are allowed
9019     // with byrefs
9020
9021     if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9022     {
9023         if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9024         {
9025             // byref1-byref2 => gives a native int
9026             type = TYP_I_IMPL;
9027         }
9028         else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9029         {
9030             // [native] int - byref => gives a native int
9031
9032             //
9033             // The reason is that it is possible, in managed C++,
9034             // to have a tree like this:
9035             //
9036             //              -
9037             //             / \
9038             //            /   \
9039             //           /     \
9040             //          /       \
9041             // const(h) int     addr byref
9042             //
9043             // <BUGNUM> VSW 318822 </BUGNUM>
9044             //
9045             // So here we decide to make the resulting type to be a native int.
9046             CLANG_FORMAT_COMMENT_ANCHOR;
9047
9048 #ifdef _TARGET_64BIT_
9049             if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9050             {
9051                 // insert an explicit upcast
9052                 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9053             }
9054 #endif // _TARGET_64BIT_
9055
9056             type = TYP_I_IMPL;
9057         }
9058         else
9059         {
9060             // byref - [native] int => gives a byref
9061             assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet()));
9062
9063 #ifdef _TARGET_64BIT_
9064             if ((genActualType(op2->TypeGet()) != TYP_I_IMPL))
9065             {
9066                 // insert an explicit upcast
9067                 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9068             }
9069 #endif // _TARGET_64BIT_
9070
9071             type = TYP_BYREF;
9072         }
9073     }
9074     else if ((oper == GT_ADD) &&
9075              (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9076     {
9077         // byref + [native] int => gives a byref
9078         // (or)
9079         // [native] int + byref => gives a byref
9080
9081         // only one can be a byref : byref op byref not allowed
9082         assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF);
9083         assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet()));
9084
9085 #ifdef _TARGET_64BIT_
9086         if (genActualType(op2->TypeGet()) == TYP_BYREF)
9087         {
9088             if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9089             {
9090                 // insert an explicit upcast
9091                 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9092             }
9093         }
9094         else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9095         {
9096             // insert an explicit upcast
9097             op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9098         }
9099 #endif // _TARGET_64BIT_
9100
9101         type = TYP_BYREF;
9102     }
9103 #ifdef _TARGET_64BIT_
9104     else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL)
9105     {
9106         assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9107
9108         // int + long => gives long
9109         // long + int => gives long
9110         // we get this because in the IL the long isn't Int64, it's just IntPtr
9111
9112         if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9113         {
9114             // insert an explicit upcast
9115             op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9116         }
9117         else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9118         {
9119             // insert an explicit upcast
9120             op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9121         }
9122
9123         type = TYP_I_IMPL;
9124     }
9125 #else  // 32-bit TARGET
9126     else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG)
9127     {
9128         assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9129
9130         // int + long => gives long
9131         // long + int => gives long
9132
9133         type = TYP_LONG;
9134     }
9135 #endif // _TARGET_64BIT_
9136     else
9137     {
9138         // int + int => gives an int
9139         assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF);
9140
9141         assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
9142                varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
9143
9144         type = genActualType(op1->gtType);
9145
9146 #if FEATURE_X87_DOUBLES
9147
9148         // For x87, since we only have 1 size of registers, prefer double
9149         // For everybody else, be more precise
9150         if (type == TYP_FLOAT)
9151             type = TYP_DOUBLE;
9152
9153 #else // !FEATURE_X87_DOUBLES
9154
9155         // If both operands are TYP_FLOAT, then leave it as TYP_FLOAT.
9156         // Otherwise, turn floats into doubles
9157         if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT))
9158         {
9159             assert(genActualType(op2->gtType) == TYP_DOUBLE);
9160             type = TYP_DOUBLE;
9161         }
9162
9163 #endif // FEATURE_X87_DOUBLES
9164     }
9165
9166 #if FEATURE_X87_DOUBLES
9167     assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_LONG || type == TYP_INT);
9168 #else  // FEATURE_X87_DOUBLES
9169     assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT);
9170 #endif // FEATURE_X87_DOUBLES
9171
9172     return type;
9173 }
9174
9175 /*****************************************************************************
9176  * Casting Helper Function to service both CEE_CASTCLASS and CEE_ISINST
9177  *
9178  * typeRef contains the token, op1 contains the value being cast,
9179  * and op2 contains code that creates the type handle corresponding to typeRef
9180  * isCastClass = true means CEE_CASTCLASS, false means CEE_ISINST
9181  */
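//
// A sketch (not verbatim from the trees built below) of the inline expansion used
// when expandInline is true; for isinst this is only legal because the target class
// is final, so a method table mismatch means the cast cannot succeed:
//
//     tmp = op1;
//     result = (tmp == null) ? tmp
//            : (*tmp == op2) ? tmp                       // method table matches
//                            : slowPath;                 // castclass: CHKCASTCLASS_SPECIAL helper call
//                                                        // isinst:    null constant
//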
9182 GenTreePtr Compiler::impCastClassOrIsInstToTree(GenTreePtr              op1,
9183                                                 GenTreePtr              op2,
9184                                                 CORINFO_RESOLVED_TOKEN* pResolvedToken,
9185                                                 bool                    isCastClass)
9186 {
9187     bool expandInline;
9188
9189     assert(op1->TypeGet() == TYP_REF);
9190
9191     CorInfoHelpFunc helper = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass);
9192
9193     if (isCastClass)
9194     {
9195         // We only want to expand inline the normal CHKCASTCLASS helper;
9196         expandInline = (helper == CORINFO_HELP_CHKCASTCLASS);
9197     }
9198     else
9199     {
9200         if (helper == CORINFO_HELP_ISINSTANCEOFCLASS)
9201         {
9202             // Get the class handle and class attributes for the type we are casting to
9203             //
9204             DWORD flags = info.compCompHnd->getClassAttribs(pResolvedToken->hClass);
9205
9206             //
9207             // If the class handle is marked as final we can also expand the IsInst check inline
9208             //
9209             expandInline = ((flags & CORINFO_FLG_FINAL) != 0);
9210
9211             //
9212             // But don't expand inline these two cases
9213             //
9214             if (flags & CORINFO_FLG_MARSHAL_BYREF)
9215             {
9216                 expandInline = false;
9217             }
9218             else if (flags & CORINFO_FLG_CONTEXTFUL)
9219             {
9220                 expandInline = false;
9221             }
9222         }
9223         else
9224         {
9225             //
9226             // We can't expand inline any other helpers
9227             //
9228             expandInline = false;
9229         }
9230     }
9231
9232     if (expandInline)
9233     {
9234         if (compCurBB->isRunRarely())
9235         {
9236             expandInline = false; // not worth the code expansion in a rarely run block
9237         }
9238
9239         if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals())
9240         {
9241             expandInline = false; // not worth creating an untracked local variable
9242         }
9243     }
9244
9245     if (!expandInline)
9246     {
9247         // If we CSE this class handle we prevent assertionProp from making SubType assertions
9248         // so instead we force the CSE logic to not consider CSE-ing this class handle.
9249         //
9250         op2->gtFlags |= GTF_DONT_CSE;
9251
9252         return gtNewHelperCallNode(helper, TYP_REF, 0, gtNewArgList(op2, op1));
9253     }
9254
9255     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2"));
9256
9257     GenTreePtr temp;
9258     GenTreePtr condMT;
9259     //
9260     // expand the methodtable match:
9261     //
9262     //  condMT ==>   GT_NE
9263     //               /    \
9264     //           GT_IND   op2 (typically CNS_INT)
9265     //              |
9266     //           op1Copy
9267     //
9268
9269     // This can replace op1 with a GT_COMMA that evaluates op1 into a local
9270     //
9271     op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1"));
9272     //
9273     // op1 is now known to be a non-complex tree
9274     // thus we can use gtClone(op1) from now on
9275     //
9276
9277     GenTreePtr op2Var = op2;
9278     if (isCastClass)
9279     {
9280         op2Var                                                  = fgInsertCommaFormTemp(&op2);
9281         lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true;
9282     }
9283     temp = gtNewOperNode(GT_IND, TYP_I_IMPL, temp);
9284     temp->gtFlags |= GTF_EXCEPT;
9285     condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2);
9286
9287     GenTreePtr condNull;
9288     //
9289     // expand the null check:
9290     //
9291     //  condNull ==>   GT_EQ
9292     //                 /    \
9293     //             op1Copy CNS_INT
9294     //                      null
9295     //
9296     condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF));
9297
9298     //
9299     // expand the true and false trees for the condMT
9300     //
9301     GenTreePtr condFalse = gtClone(op1);
9302     GenTreePtr condTrue;
9303     if (isCastClass)
9304     {
9305         //
9306         // use the special helper that skips the cases checked by our inlined cast
9307         //
9308         helper = CORINFO_HELP_CHKCASTCLASS_SPECIAL;
9309
9310         condTrue = gtNewHelperCallNode(helper, TYP_REF, 0, gtNewArgList(op2Var, gtClone(op1)));
9311     }
9312     else
9313     {
9314         condTrue = gtNewIconNode(0, TYP_REF);
9315     }
9316
9317 #define USE_QMARK_TREES
9318
9319 #ifdef USE_QMARK_TREES
9320     GenTreePtr qmarkMT;
9321     //
9322     // Generate first QMARK - COLON tree
9323     //
9324     //  qmarkMT ==>   GT_QMARK
9325     //                 /     \
9326     //            condMT   GT_COLON
9327     //                      /     \
9328     //                condFalse  condTrue
9329     //
9330     temp    = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse);
9331     qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp);
9332     condMT->gtFlags |= GTF_RELOP_QMARK;
9333
9334     GenTreePtr qmarkNull;
9335     //
9336     // Generate second QMARK - COLON tree
9337     //
9338     //  qmarkNull ==>  GT_QMARK
9339     //                 /     \
9340     //           condNull  GT_COLON
9341     //                      /     \
9342     //                qmarkMT   op1Copy
9343     //
9344     temp      = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT);
9345     qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp);
9346     qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF;
9347     condNull->gtFlags |= GTF_RELOP_QMARK;
9348
9349     // Make QMark node a top level node by spilling it.
9350     unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2"));
9351     impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE);
9352     return gtNewLclvNode(tmp, TYP_REF);
9353 #endif
9354 }
9355
9356 #ifndef DEBUG
9357 #define assertImp(cond) ((void)0)
9358 #else
9359 #define assertImp(cond)                                                                                                \
9360     do                                                                                                                 \
9361     {                                                                                                                  \
9362         if (!(cond))                                                                                                   \
9363         {                                                                                                              \
9364             const int cchAssertImpBuf = 600;                                                                           \
9365             char*     assertImpBuf    = (char*)alloca(cchAssertImpBuf);                                                \
9366             _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1,                                            \
9367                         "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond,         \
9368                         impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL",                      \
9369                         op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth);                     \
9370             assertAbort(assertImpBuf, __FILE__, __LINE__);                                                             \
9371         }                                                                                                              \
9372     } while (0)
9373 #endif // DEBUG
9374
9375 #ifdef _PREFAST_
9376 #pragma warning(push)
9377 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
9378 #endif
9379 /*****************************************************************************
9380  *  Import the instr for the given basic block
9381  */
9382 void Compiler::impImportBlockCode(BasicBlock* block)
9383 {
9384 #define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind)
9385
9386 #ifdef DEBUG
9387
9388     if (verbose)
9389     {
9390         printf("\nImporting BB%02u (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName);
9391     }
9392 #endif
9393
9394     unsigned  nxtStmtIndex = impInitBlockLineInfo();
9395     IL_OFFSET nxtStmtOffs;
9396
9397     GenTreePtr                   arrayNodeFrom, arrayNodeTo, arrayNodeToIndex;
9398     bool                         expandInline;
9399     CorInfoHelpFunc              helper;
9400     CorInfoIsAccessAllowedResult accessAllowedResult;
9401     CORINFO_HELPER_DESC          calloutHelper;
9402     const BYTE*                  lastLoadToken = nullptr;
9403
9404     // reject cyclic constraints
9405     if (tiVerificationNeeded)
9406     {
9407         Verify(!info.hasCircularClassConstraints, "Method parent has circular class type parameter constraints.");
9408         Verify(!info.hasCircularMethodConstraints, "Method has circular method type parameter constraints.");
9409     }
9410
9411     /* Get the tree list started */
9412
9413     impBeginTreeList();
9414
9415     /* Walk the opcodes that comprise the basic block */
9416
9417     const BYTE* codeAddr = info.compCode + block->bbCodeOffs;
9418     const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd;
9419
9420     IL_OFFSET opcodeOffs    = block->bbCodeOffs;
9421     IL_OFFSET lastSpillOffs = opcodeOffs;
9422
9423     signed jmpDist;
9424
9425     /* remember the start of the delegate creation sequence (used for verification) */
9426     const BYTE* delegateCreateStart = nullptr;
9427
9428     int  prefixFlags = 0;
9429     bool explicitTailCall, constraintCall, readonlyCall;
9430
9431     bool     insertLdloc = false; // set by CEE_DUP and cleared by following store
9432     typeInfo tiRetVal;
9433
9434     unsigned numArgs = info.compArgsCount;
9435
9436     /* Now process all the opcodes in the block */
9437
9438     var_types callTyp    = TYP_COUNT;
9439     OPCODE    prevOpcode = CEE_ILLEGAL;
9440
9441     if (block->bbCatchTyp)
9442     {
9443         if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
9444         {
9445             impCurStmtOffsSet(block->bbCodeOffs);
9446         }
9447
9448         // We will spill the GT_CATCH_ARG and the input of the BB_QMARK block
9449         // to a temp. This is a trade-off for code simplicity.
9450         impSpillSpecialSideEff();
9451     }
9452
9453     while (codeAddr < codeEndp)
9454     {
9455         bool                   usingReadyToRunHelper = false;
9456         CORINFO_RESOLVED_TOKEN resolvedToken;
9457         CORINFO_RESOLVED_TOKEN constrainedResolvedToken;
9458         CORINFO_CALL_INFO      callInfo;
9459         CORINFO_FIELD_INFO     fieldInfo;
9460
9461         tiRetVal = typeInfo(); // Default type info
9462
9463         //---------------------------------------------------------------------
9464
9465         /* We need to restrict the max tree depth as many of the Compiler
9466            functions are recursive. We do this by spilling the stack */
9467
9468         if (verCurrentState.esStackDepth)
9469         {
9470             /* Has it been a while since we last saw an empty stack (an empty stack
9471                guarantees that the tree depth isn't accumulating)? If so, spill. */
9472
9473             if ((opcodeOffs - lastSpillOffs) > 200)
9474             {
9475                 impSpillStackEnsure();
9476                 lastSpillOffs = opcodeOffs;
9477             }
9478         }
9479         else
9480         {
9481             lastSpillOffs   = opcodeOffs;
9482             impBoxTempInUse = false; // nothing on the stack, box temp OK to use again
9483         }
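        // For illustration (an assumption about the intent of the 200-byte threshold): IL that
        // keeps the evaluation stack non-empty for a long stretch, e.g. a long chain of adds
        // building "a + b + c + ...", grows an ever-deeper tree in a stack slot; spilling it
        // to a temp here bounds the recursion depth of the Compiler's later tree walks.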
9484
9485         /* Compute the current instr offset */
9486
9487         opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
9488
9489 #ifndef DEBUG
9490         if (opts.compDbgInfo)
9491 #endif
9492         {
9493             if (!compIsForInlining())
9494             {
9495                 nxtStmtOffs =
9496                     (nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET;
9497
9498                 /* Have we reached the next stmt boundary ? */
9499
9500                 if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs)
9501                 {
9502                     assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]);
9503
9504                     if (verCurrentState.esStackDepth != 0 && opts.compDbgCode)
9505                     {
9506                         /* We need to provide accurate IP-mapping at this point.
9507                            So spill anything on the stack so that it will form
9508                            gtStmts with the correct stmt offset noted */
9509
9510                         impSpillStackEnsure(true);
9511                     }
9512
9513                     // Has impCurStmtOffs been reported in any tree?
9514
9515                     if (impCurStmtOffs != BAD_IL_OFFSET && opts.compDbgCode)
9516                     {
9517                         GenTreePtr placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
9518                         impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9519
9520                         assert(impCurStmtOffs == BAD_IL_OFFSET);
9521                     }
9522
9523                     if (impCurStmtOffs == BAD_IL_OFFSET)
9524                     {
9525                         /* Make sure that nxtStmtIndex is in sync with opcodeOffs.
9526                            If opcodeOffs has gone past nxtStmtIndex, catch up */
9527
9528                         while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount &&
9529                                info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs)
9530                         {
9531                             nxtStmtIndex++;
9532                         }
9533
9534                         /* Go to the new stmt */
9535
9536                         impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]);
9537
9538                         /* Update the stmt boundary index */
9539
9540                         nxtStmtIndex++;
9541                         assert(nxtStmtIndex <= info.compStmtOffsetsCount);
9542
9543                         /* Are there any more line# entries after this one? */
9544
9545                         if (nxtStmtIndex < info.compStmtOffsetsCount)
9546                         {
9547                             /* Remember where the next line# starts */
9548
9549                             nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex];
9550                         }
9551                         else
9552                         {
9553                             /* No more line# entries */
9554
9555                             nxtStmtOffs = BAD_IL_OFFSET;
9556                         }
9557                     }
9558                 }
9559                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) &&
9560                          (verCurrentState.esStackDepth == 0))
9561                 {
9562                     /* At stack-empty locations, we have already added the tree to
9563                        the stmt list with the last offset. We just need to update
9564                        impCurStmtOffs
9565                      */
9566
9567                     impCurStmtOffsSet(opcodeOffs);
9568                 }
9569                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) &&
9570                          impOpcodeIsCallSiteBoundary(prevOpcode))
9571                 {
9572                     /* Make sure we have a type cached */
9573                     assert(callTyp != TYP_COUNT);
9574
9575                     if (callTyp == TYP_VOID)
9576                     {
9577                         impCurStmtOffsSet(opcodeOffs);
9578                     }
9579                     else if (opts.compDbgCode)
9580                     {
9581                         impSpillStackEnsure(true);
9582                         impCurStmtOffsSet(opcodeOffs);
9583                     }
9584                 }
9585                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP))
9586                 {
9587                     if (opts.compDbgCode)
9588                     {
9589                         impSpillStackEnsure(true);
9590                     }
9591
9592                     impCurStmtOffsSet(opcodeOffs);
9593                 }
9594
9595                 assert(impCurStmtOffs == BAD_IL_OFFSET || nxtStmtOffs == BAD_IL_OFFSET ||
9596                        jitGetILoffs(impCurStmtOffs) <= nxtStmtOffs);
9597             }
9598         }
9599
9600         CORINFO_CLASS_HANDLE clsHnd       = DUMMY_INIT(NULL);
9601         CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL);
9602         CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL);
9603
9604         var_types       lclTyp, ovflType = TYP_UNKNOWN;
9605         GenTreePtr      op1           = DUMMY_INIT(NULL);
9606         GenTreePtr      op2           = DUMMY_INIT(NULL);
9607         GenTreeArgList* args          = nullptr; // What good do these "DUMMY_INIT"s do?
9608         GenTreePtr      newObjThisPtr = DUMMY_INIT(NULL);
9609         bool            uns           = DUMMY_INIT(false);
9610
9611         /* Get the next opcode and the size of its parameters */
9612
9613         OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
9614         codeAddr += sizeof(__int8);
9615
9616 #ifdef DEBUG
9617         impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
9618         JITDUMP("\n    [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs);
9619 #endif
9620
9621     DECODE_OPCODE:
9622
9623         // Return if any previous code has caused inline to fail.
9624         if (compDonotInline())
9625         {
9626             return;
9627         }
9628
9629         /* Get the size of additional parameters */
9630
9631         signed int sz = opcodeSizes[opcode];
9632
9633 #ifdef DEBUG
9634         clsHnd  = NO_CLASS_HANDLE;
9635         lclTyp  = TYP_COUNT;
9636         callTyp = TYP_COUNT;
9637
9638         impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
9639         impCurOpcName = opcodeNames[opcode];
9640
9641         if (verbose && (opcode != CEE_PREFIX1))
9642         {
9643             printf("%s", impCurOpcName);
9644         }
9645
9646         /* Use assertImp() to display the opcode */
9647
9648         op1 = op2 = nullptr;
9649 #endif
9650
9651         /* See what kind of an opcode we have, then */
9652
9653         unsigned mflags   = 0;
9654         unsigned clsFlags = 0;
9655
9656         switch (opcode)
9657         {
9658             unsigned  lclNum;
9659             var_types type;
9660
9661             GenTreePtr op3;
9662             genTreeOps oper;
9663             unsigned   size;
9664
9665             int val;
9666
9667             CORINFO_SIG_INFO     sig;
9668             unsigned             flags;
9669             IL_OFFSET            jmpAddr;
9670             bool                 ovfl, unordered, callNode;
9671             bool                 ldstruct;
9672             CORINFO_CLASS_HANDLE tokenType;
9673
9674             union {
9675                 int     intVal;
9676                 float   fltVal;
9677                 __int64 lngVal;
9678                 double  dblVal;
9679             } cval;
9680
9681             case CEE_PREFIX1:
9682                 opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
9683                 codeAddr += sizeof(__int8);
9684                 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
9685                 goto DECODE_OPCODE;
9686
9687             SPILL_APPEND:
9688
9689                 // We need to call impSpillLclRefs() for a struct type lclVar.
9690                 // This is done for non-block assignments in the handling of stloc.
9691                 if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtOp.gtOp1) &&
9692                     (op1->gtOp.gtOp1->gtOper == GT_LCL_VAR))
9693                 {
9694                     impSpillLclRefs(op1->gtOp.gtOp1->AsLclVarCommon()->gtLclNum);
9695                 }
9696
9697                 /* Append 'op1' to the list of statements */
9698                 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
9699                 goto DONE_APPEND;
9700
9701             APPEND:
9702
9703                 /* Append 'op1' to the list of statements */
9704
9705                 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9706                 goto DONE_APPEND;
9707
9708             DONE_APPEND:
9709
9710 #ifdef DEBUG
9711                 // Remember at which BC offset the tree was finished
9712                 impNoteLastILoffs();
9713 #endif
9714                 break;
9715
9716             case CEE_LDNULL:
9717                 impPushNullObjRefOnStack();
9718                 break;
9719
9720             case CEE_LDC_I4_M1:
9721             case CEE_LDC_I4_0:
9722             case CEE_LDC_I4_1:
9723             case CEE_LDC_I4_2:
9724             case CEE_LDC_I4_3:
9725             case CEE_LDC_I4_4:
9726             case CEE_LDC_I4_5:
9727             case CEE_LDC_I4_6:
9728             case CEE_LDC_I4_7:
9729             case CEE_LDC_I4_8:
9730                 cval.intVal = (opcode - CEE_LDC_I4_0);
9731                 assert(-1 <= cval.intVal && cval.intVal <= 8);
9732                 goto PUSH_I4CON;
9733
9734             case CEE_LDC_I4_S:
9735                 cval.intVal = getI1LittleEndian(codeAddr);
9736                 goto PUSH_I4CON;
9737             case CEE_LDC_I4:
9738                 cval.intVal = getI4LittleEndian(codeAddr);
9739                 goto PUSH_I4CON;
9740             PUSH_I4CON:
9741                 JITDUMP(" %d", cval.intVal);
9742                 impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT));
9743                 break;
9744
9745             case CEE_LDC_I8:
9746                 cval.lngVal = getI8LittleEndian(codeAddr);
9747                 JITDUMP(" 0x%016llx", cval.lngVal);
9748                 impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG));
9749                 break;
9750
9751             case CEE_LDC_R8:
9752                 cval.dblVal = getR8LittleEndian(codeAddr);
9753                 JITDUMP(" %#.17g", cval.dblVal);
9754                 impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE));
9755                 break;
9756
9757             case CEE_LDC_R4:
9758                 cval.dblVal = getR4LittleEndian(codeAddr);
9759                 JITDUMP(" %#.17g", cval.dblVal);
9760                 {
9761                     GenTreePtr cnsOp = gtNewDconNode(cval.dblVal);
9762 #if !FEATURE_X87_DOUBLES
9763                     // The x87 FP stack doesn't differentiate between float and double,
9764                     // so R4 is treated as R8 there; every other target keeps the R4 as TYP_FLOAT.
9765                     cnsOp->gtType = TYP_FLOAT;
9766 #endif // FEATURE_X87_DOUBLES
9767                     impPushOnStack(cnsOp, typeInfo(TI_DOUBLE));
9768                 }
9769                 break;
9770
9771             case CEE_LDSTR:
9772
9773                 if (compIsForInlining())
9774                 {
9775                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_NO_CALLEE_LDSTR)
9776                     {
9777                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_LDSTR_RESTRICTION);
9778                         return;
9779                     }
9780                 }
9781
9782                 val = getU4LittleEndian(codeAddr);
9783                 JITDUMP(" %08X", val);
9784                 if (tiVerificationNeeded)
9785                 {
9786                     Verify(info.compCompHnd->isValidStringRef(info.compScopeHnd, val), "bad string");
9787                     tiRetVal = typeInfo(TI_REF, impGetStringClass());
9788                 }
9789                 impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal);
9790
9791                 break;
9792
9793             case CEE_LDARG:
9794                 lclNum = getU2LittleEndian(codeAddr);
9795                 JITDUMP(" %u", lclNum);
9796                 impLoadArg(lclNum, opcodeOffs + sz + 1);
9797                 break;
9798
9799             case CEE_LDARG_S:
9800                 lclNum = getU1LittleEndian(codeAddr);
9801                 JITDUMP(" %u", lclNum);
9802                 impLoadArg(lclNum, opcodeOffs + sz + 1);
9803                 break;
9804
9805             case CEE_LDARG_0:
9806             case CEE_LDARG_1:
9807             case CEE_LDARG_2:
9808             case CEE_LDARG_3:
9809                 lclNum = (opcode - CEE_LDARG_0);
9810                 assert(lclNum >= 0 && lclNum < 4);
9811                 impLoadArg(lclNum, opcodeOffs + sz + 1);
9812                 break;
9813
9814             case CEE_LDLOC:
9815                 lclNum = getU2LittleEndian(codeAddr);
9816                 JITDUMP(" %u", lclNum);
9817                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9818                 break;
9819
9820             case CEE_LDLOC_S:
9821                 lclNum = getU1LittleEndian(codeAddr);
9822                 JITDUMP(" %u", lclNum);
9823                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9824                 break;
9825
9826             case CEE_LDLOC_0:
9827             case CEE_LDLOC_1:
9828             case CEE_LDLOC_2:
9829             case CEE_LDLOC_3:
9830                 lclNum = (opcode - CEE_LDLOC_0);
9831                 assert(lclNum >= 0 && lclNum < 4);
9832                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9833                 break;
9834
9835             case CEE_STARG:
9836                 lclNum = getU2LittleEndian(codeAddr);
9837                 goto STARG;
9838
9839             case CEE_STARG_S:
9840                 lclNum = getU1LittleEndian(codeAddr);
9841             STARG:
9842                 JITDUMP(" %u", lclNum);
9843
9844                 if (tiVerificationNeeded)
9845                 {
9846                     Verify(lclNum < info.compILargsCount, "bad arg num");
9847                 }
9848
9849                 if (compIsForInlining())
9850                 {
9851                     op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
9852                     noway_assert(op1->gtOper == GT_LCL_VAR);
9853                     lclNum = op1->AsLclVar()->gtLclNum;
9854
9855                     goto VAR_ST_VALID;
9856                 }
9857
9858                 lclNum = compMapILargNum(lclNum); // account for possible hidden param
9859                 assertImp(lclNum < numArgs);
9860
9861                 if (lclNum == info.compThisArg)
9862                 {
9863                     lclNum = lvaArg0Var;
9864                 }
9865                 lvaTable[lclNum].lvArgWrite = 1;
9866
9867                 if (tiVerificationNeeded)
9868                 {
9869                     typeInfo& tiLclVar = lvaTable[lclNum].lvVerTypeInfo;
9870                     Verify(tiCompatibleWith(impStackTop().seTypeInfo, NormaliseForStack(tiLclVar), true),
9871                            "type mismatch");
9872
9873                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
9874                     {
9875                         Verify(!tiLclVar.IsThisPtr(), "storing to uninit this ptr");
9876                     }
9877                 }
9878
9879                 goto VAR_ST;
9880
9881             case CEE_STLOC:
9882                 lclNum = getU2LittleEndian(codeAddr);
9883                 JITDUMP(" %u", lclNum);
9884                 goto LOC_ST;
9885
9886             case CEE_STLOC_S:
9887                 lclNum = getU1LittleEndian(codeAddr);
9888                 JITDUMP(" %u", lclNum);
9889                 goto LOC_ST;
9890
9891             case CEE_STLOC_0:
9892             case CEE_STLOC_1:
9893             case CEE_STLOC_2:
9894             case CEE_STLOC_3:
9895                 lclNum = (opcode - CEE_STLOC_0);
9896                 assert(lclNum >= 0 && lclNum < 4);
9897
9898             LOC_ST:
9899                 if (tiVerificationNeeded)
9900                 {
9901                     Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
9902                     Verify(tiCompatibleWith(impStackTop().seTypeInfo,
9903                                             NormaliseForStack(lvaTable[lclNum + numArgs].lvVerTypeInfo), true),
9904                            "type mismatch");
9905                 }
9906
9907                 if (compIsForInlining())
9908                 {
9909                     lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
9910
9911                     /* Have we allocated a temp for this local? */
9912
9913                     lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp"));
9914
9915                     goto _PopValue;
9916                 }
9917
9918                 lclNum += numArgs;
9919
9920             VAR_ST:
9921
9922                 if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var)
9923                 {
9924                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
9925                     BADCODE("Bad IL");
9926                 }
9927
9928             VAR_ST_VALID:
9929
9930                 /* if it is a struct assignment, make certain we don't overflow the buffer */
9931                 assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd));
9932
9933                 if (lvaTable[lclNum].lvNormalizeOnLoad())
9934                 {
9935                     lclTyp = lvaGetRealType(lclNum);
9936                 }
9937                 else
9938                 {
9939                     lclTyp = lvaGetActualType(lclNum);
9940                 }
9941
9942             _PopValue:
9943                 /* Pop the value being assigned */
9944
9945                 {
9946                     StackEntry se = impPopStack(clsHnd);
9947                     op1           = se.val;
9948                     tiRetVal      = se.seTypeInfo;
9949                 }
9950
9951 #ifdef FEATURE_SIMD
9952                 if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet()))
9953                 {
9954                     assert(op1->TypeGet() == TYP_STRUCT);
9955                     op1->gtType = lclTyp;
9956                 }
9957 #endif // FEATURE_SIMD
9958
9959                 op1 = impImplicitIorI4Cast(op1, lclTyp);
9960
9961 #ifdef _TARGET_64BIT_
9962                 // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
9963                 if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT))
9964                 {
9965                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
9966                     op1 = gtNewCastNode(TYP_INT, op1, TYP_INT);
9967                 }
9968 #endif // _TARGET_64BIT_
9969
9970                 // We had better assign it a value of the correct type
9971                 assertImp(
9972                     genActualType(lclTyp) == genActualType(op1->gtType) ||
9973                     genActualType(lclTyp) == TYP_I_IMPL && op1->IsVarAddr() ||
9974                     (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) ||
9975                     (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) ||
9976                     (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) ||
9977                     ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF));
9978
9979                 /* If op1 is "&var" then its type is the transient "*" and it can
9980                    be used either as TYP_BYREF or TYP_I_IMPL */
9981
9982                 if (op1->IsVarAddr())
9983                 {
9984                     assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF);
9985
9986                     /* When "&var" is created, we assume it is a byref. If it is
9987                        being assigned to a TYP_I_IMPL var, change the type to
9988                        prevent unnecessary GC info */
9989
9990                     if (genActualType(lclTyp) == TYP_I_IMPL)
9991                     {
9992                         op1->gtType = TYP_I_IMPL;
9993                     }
9994                 }
9995
9996                 /* Filter out simple assignments to itself */
9997
9998                 if (op1->gtOper == GT_LCL_VAR && lclNum == op1->gtLclVarCommon.gtLclNum)
9999                 {
10000                     if (insertLdloc)
10001                     {
10002                         // This is a sequence of (ldloc, dup, stloc).  It can be simplified
10003                         // to (ldloc, stloc); call impLoadVar to reconstruct the ldloc node.
10004                         CLANG_FORMAT_COMMENT_ANCHOR;
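                        // For illustration (hypothetical IL): a compiler may emit
                        //     ldloc.0
                        //     dup
                        //     stloc.0
                        // for a self-assignment such as "x = x;"; only the ldloc needs to survive.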
10005
10006 #ifdef DEBUG
10007                         if (tiVerificationNeeded)
10008                         {
10009                             assert(
10010                                 typeInfo::AreEquivalent(tiRetVal, NormaliseForStack(lvaTable[lclNum].lvVerTypeInfo)));
10011                         }
10012 #endif
10013
10014                         op1         = nullptr;
10015                         insertLdloc = false;
10016
10017                         impLoadVar(lclNum, opcodeOffs + sz + 1);
10018                         break;
10019                     }
10020                     else if (opts.compDbgCode)
10021                     {
10022                         op1 = gtNewNothingNode();
10023                         goto SPILL_APPEND;
10024                     }
10025                     else
10026                     {
10027                         break;
10028                     }
10029                 }
10030
10031                 /* Create the assignment node */
10032
10033                 op2 = gtNewLclvNode(lclNum, lclTyp, opcodeOffs + sz + 1);
10034
10035                 /* If the local is aliased, we need to spill calls and
10036                    indirections from the stack. */
10037
10038                 if ((lvaTable[lclNum].lvAddrExposed || lvaTable[lclNum].lvHasLdAddrOp) &&
10039                     verCurrentState.esStackDepth > 0)
10040                 {
10041                     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased"));
10042                 }
10043
10044                 /* Spill any refs to the local from the stack */
10045
10046                 impSpillLclRefs(lclNum);
10047
10048 #if !FEATURE_X87_DOUBLES
10049                 // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
10050                 // We insert a cast to the dest 'op2' type
10051                 //
10052                 if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
10053                     varTypeIsFloating(op2->gtType))
10054                 {
10055                     op1 = gtNewCastNode(op2->TypeGet(), op1, op2->TypeGet());
10056                 }
10057 #endif // !FEATURE_X87_DOUBLES
10058
10059                 if (varTypeIsStruct(lclTyp))
10060                 {
10061                     op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL);
10062                 }
10063                 else
10064                 {
10065                     // The code generator generates GC tracking information
10066                     // based on the RHS of the assignment.  Later the LHS (which is
10067                     // is a BYREF) gets used and the emitter checks that that variable
10068                     // is being tracked.  It is not (since the RHS was an int and did
10069                     // not need tracking).  To keep this assert happy, we change the RHS
10070                     if (lclTyp == TYP_BYREF && !varTypeIsGC(op1->gtType))
10071                     {
10072                         op1->gtType = TYP_BYREF;
10073                     }
10074                     op1 = gtNewAssignNode(op2, op1);
10075                 }
10076
10077                 /* If insertLdloc is true, then we need to insert a ldloc following the
10078                    stloc.  This is done when converting a (dup, stloc) sequence into
10079                    a (stloc, ldloc) sequence. */
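                // For illustration (hypothetical IL): a double store such as "x = y = v" is
                // typically emitted as
                //     <load v>
                //     dup
                //     stloc.1    // y
                //     stloc.0    // x
                // and the dup is imported here by storing once and re-loading the same local.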
10080
10081                 if (insertLdloc)
10082                 {
10083                     // From SPILL_APPEND
10084                     impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
10085
10086 #ifdef DEBUG
10087                     // From DONE_APPEND
10088                     impNoteLastILoffs();
10089 #endif
10090                     op1         = nullptr;
10091                     insertLdloc = false;
10092
10093                     impLoadVar(lclNum, opcodeOffs + sz + 1, tiRetVal);
10094                     break;
10095                 }
10096
10097                 goto SPILL_APPEND;
10098
10099             case CEE_LDLOCA:
10100                 lclNum = getU2LittleEndian(codeAddr);
10101                 goto LDLOCA;
10102
10103             case CEE_LDLOCA_S:
10104                 lclNum = getU1LittleEndian(codeAddr);
10105             LDLOCA:
10106                 JITDUMP(" %u", lclNum);
10107                 if (tiVerificationNeeded)
10108                 {
10109                     Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
10110                     Verify(info.compInitMem, "initLocals not set");
10111                 }
10112
10113                 if (compIsForInlining())
10114                 {
10115                     // Get the local type
10116                     lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
10117
10118                     /* Have we allocated a temp for this local? */
10119
10120                     lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp"));
10121
10122                     op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum));
10123
10124                     goto _PUSH_ADRVAR;
10125                 }
10126
10127                 lclNum += numArgs;
10128                 assertImp(lclNum < info.compLocalsCount);
10129                 goto ADRVAR;
10130
10131             case CEE_LDARGA:
10132                 lclNum = getU2LittleEndian(codeAddr);
10133                 goto LDARGA;
10134
10135             case CEE_LDARGA_S:
10136                 lclNum = getU1LittleEndian(codeAddr);
10137             LDARGA:
10138                 JITDUMP(" %u", lclNum);
10139                 Verify(lclNum < info.compILargsCount, "bad arg num");
10140
10141                 if (compIsForInlining())
10142                 {
10143                     // In IL, LDARGA(_S) is used to load the managed byref pointer of a struct argument,
10144                     // followed by a ldfld to load the field.
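                    // For illustration (hypothetical IL and field name):
                    //     ldarga.s 0
                    //     ldfld    int32 SomeStruct::someField
                    // reads a field of a by-value struct argument without copying the whole struct.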
10145
10146                     op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
10147                     if (op1->gtOper != GT_LCL_VAR)
10148                     {
10149                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR);
10150                         return;
10151                     }
10152
10153                     assert(op1->gtOper == GT_LCL_VAR);
10154
10155                     goto _PUSH_ADRVAR;
10156                 }
10157
10158                 lclNum = compMapILargNum(lclNum); // account for possible hidden param
10159                 assertImp(lclNum < numArgs);
10160
10161                 if (lclNum == info.compThisArg)
10162                 {
10163                     lclNum = lvaArg0Var;
10164                 }
10165
10166                 goto ADRVAR;
10167
10168             ADRVAR:
10169
10170                 op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum), opcodeOffs + sz + 1);
10171
10172             _PUSH_ADRVAR:
10173                 assert(op1->gtOper == GT_LCL_VAR);
10174
10175                 /* Note that this is supposed to create the transient type "*"
10176                    which may be used as a TYP_I_IMPL. However we catch places
10177                    where it is used as a TYP_I_IMPL and change the node if needed.
10178                    Thus we are pessimistic and may report byrefs in the GC info
10179                    where it was not absolutely needed, but it is safer this way.
10180                  */
10181                 op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10182
10183                 // &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does
10184                 assert((op1->gtFlags & GTF_GLOB_REF) == 0);
10185
10186                 tiRetVal = lvaTable[lclNum].lvVerTypeInfo;
10187                 if (tiVerificationNeeded)
10188                 {
10189                     // Don't allow taking address of uninit this ptr.
10190                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
10191                     {
10192                         Verify(!tiRetVal.IsThisPtr(), "address of uninit this ptr");
10193                     }
10194
10195                     if (!tiRetVal.IsByRef())
10196                     {
10197                         tiRetVal.MakeByRef();
10198                     }
10199                     else
10200                     {
10201                         Verify(false, "byref to byref");
10202                     }
10203                 }
10204
10205                 impPushOnStack(op1, tiRetVal);
10206                 break;
10207
10208             case CEE_ARGLIST:
10209
10210                 if (!info.compIsVarArgs)
10211                 {
10212                     BADCODE("arglist in non-vararg method");
10213                 }
10214
10215                 if (tiVerificationNeeded)
10216                 {
10217                     tiRetVal = typeInfo(TI_STRUCT, impGetRuntimeArgumentHandle());
10218                 }
10219                 assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG);
10220
10221                 /* The ARGLIST cookie is a hidden 'last' parameter; we have already
10222                    adjusted the arg count because this is like fetching the last param */
10223                 assertImp(0 < numArgs);
10224                 assert(lvaTable[lvaVarargsHandleArg].lvAddrExposed);
10225                 lclNum = lvaVarargsHandleArg;
10226                 op1    = gtNewLclvNode(lclNum, TYP_I_IMPL, opcodeOffs + sz + 1);
10227                 op1    = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10228                 impPushOnStack(op1, tiRetVal);
10229                 break;
10230
10231             case CEE_ENDFINALLY:
10232
10233                 if (compIsForInlining())
10234                 {
10235                     assert(!"Shouldn't have exception handlers in the inliner!");
10236                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY);
10237                     return;
10238                 }
10239
10240                 if (verCurrentState.esStackDepth > 0)
10241                 {
10242                     impEvalSideEffects();
10243                 }
10244
10245                 if (info.compXcptnsCount == 0)
10246                 {
10247                     BADCODE("endfinally outside finally");
10248                 }
10249
10250                 assert(verCurrentState.esStackDepth == 0);
10251
10252                 op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr);
10253                 goto APPEND;
10254
10255             case CEE_ENDFILTER:
10256
10257                 if (compIsForInlining())
10258                 {
10259                     assert(!"Shouldn't have exception handlers in the inliner!");
10260                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER);
10261                     return;
10262                 }
10263
10264                 block->bbSetRunRarely(); // filters are rare
10265
10266                 if (info.compXcptnsCount == 0)
10267                 {
10268                     BADCODE("endfilter outside filter");
10269                 }
10270
10271                 if (tiVerificationNeeded)
10272                 {
10273                     Verify(impStackTop().seTypeInfo.IsType(TI_INT), "bad endfilt arg");
10274                 }
10275
10276                 op1 = impPopStack().val;
10277                 assertImp(op1->gtType == TYP_INT);
10278                 if (!bbInFilterILRange(block))
10279                 {
10280                     BADCODE("EndFilter outside a filter handler");
10281                 }
10282
10283                 /* Mark current bb as end of filter */
10284
10285                 assert(compCurBB->bbFlags & BBF_DONT_REMOVE);
10286                 assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET);
10287
10288                 /* Mark catch handler as successor */
10289
10290                 op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1);
10291                 if (verCurrentState.esStackDepth != 0)
10292                 {
10293                     verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__)
10294                                                 DEBUGARG(__LINE__));
10295                 }
10296                 goto APPEND;
10297
10298             case CEE_RET:
10299                 prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it
10300             RET:
10301                 if (!impReturnInstruction(block, prefixFlags, opcode))
10302                 {
10303                     return; // abort
10304                 }
10305                 else
10306                 {
10307                     break;
10308                 }
10309
10310             case CEE_JMP:
10311
10312                 assert(!compIsForInlining());
10313
10314                 if (tiVerificationNeeded)
10315                 {
10316                     Verify(false, "Invalid opcode: CEE_JMP");
10317                 }
10318
10319                 if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex())
10320                 {
10321                     /* CEE_JMP does not make sense in some "protected" regions. */
10322
10323                     BADCODE("Jmp not allowed in protected region");
10324                 }
10325
10326                 if (verCurrentState.esStackDepth != 0)
10327                 {
10328                     BADCODE("Stack must be empty after CEE_JMPs");
10329                 }
10330
10331                 _impResolveToken(CORINFO_TOKENKIND_Method);
10332
10333                 JITDUMP(" %08X", resolvedToken.token);
10334
10335                 /* The signature of the target has to be identical to ours.
10336                    At least check that argCnt and returnType match */
10337
10338                 eeGetMethodSig(resolvedToken.hMethod, &sig);
10339                 if (sig.numArgs != info.compMethodInfo->args.numArgs ||
10340                     sig.retType != info.compMethodInfo->args.retType ||
10341                     sig.callConv != info.compMethodInfo->args.callConv)
10342                 {
10343                     BADCODE("Incompatible target for CEE_JMPs");
10344                 }
10345
10346 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARMARCH_)
10347
10348                 op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod);
10349
10350                 /* Mark the basic block as being a JUMP instead of RETURN */
10351
10352                 block->bbFlags |= BBF_HAS_JMP;
10353
10354                 /* Set this flag to make sure register arguments have a location assigned
10355                  * even if we don't use them inside the method */
10356
10357                 compJmpOpUsed = true;
10358
10359                 fgNoStructPromotion = true;
10360
10361                 goto APPEND;
10362
10363 #else // !_TARGET_XARCH_ && !_TARGET_ARMARCH_
10364
10365                 // Import this just like a series of LDARGs + tail. + call + ret
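                // For illustration (hypothetical two-argument target), the expansion below
                // behaves like the IL sequence:
                //     ldarg.0
                //     ldarg.1
                //     tail.
                //     call <target method>
                //     ret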
10366
10367                 if (info.compIsVarArgs)
10368                 {
10369                     // For now we don't implement true tail calls, so this breaks varargs.
10370                     // So warn the user instead of generating bad code.
10371                     // This is a semi-temporary workaround for DevDiv 173860, until we can properly
10372                     // implement true tail calls.
10373                     IMPL_LIMITATION("varargs + CEE_JMP doesn't work yet");
10374                 }
10375
10376                 // First load up the arguments (0 - N)
10377                 for (unsigned argNum = 0; argNum < info.compILargsCount; argNum++)
10378                 {
10379                     impLoadArg(argNum, opcodeOffs + sz + 1);
10380                 }
10381
10382                 // Now generate the tail call
10383                 noway_assert(prefixFlags == 0);
10384                 prefixFlags = PREFIX_TAILCALL_EXPLICIT;
10385                 opcode      = CEE_CALL;
10386
10387                 eeGetCallInfo(&resolvedToken, NULL,
10388                               combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS), &callInfo);
10389
10390                 // All calls and delegates need a security callout.
10391                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
10392
10393                 callTyp = impImportCall(CEE_CALL, &resolvedToken, NULL, NULL, PREFIX_TAILCALL_EXPLICIT, &callInfo,
10394                                         opcodeOffs);
10395
10396                 // And finish with the ret
10397                 goto RET;
10398
10399 #endif // _TARGET_XARCH_ || _TARGET_ARMARCH_
10400
10401             case CEE_LDELEMA:
10402                 assertImp(sz == sizeof(unsigned));
10403
10404                 _impResolveToken(CORINFO_TOKENKIND_Class);
10405
10406                 JITDUMP(" %08X", resolvedToken.token);
10407
10408                 ldelemClsHnd = resolvedToken.hClass;
10409
10410                 if (tiVerificationNeeded)
10411                 {
10412                     typeInfo tiArray = impStackTop(1).seTypeInfo;
10413                     typeInfo tiIndex = impStackTop().seTypeInfo;
10414
10415                     // As per ECMA, the 'index' operand can be either int32 or native int.
10416                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10417
10418                     typeInfo arrayElemType = verMakeTypeInfo(ldelemClsHnd);
10419                     Verify(tiArray.IsNullObjRef() ||
10420                                typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElemType),
10421                            "bad array");
10422
10423                     tiRetVal = arrayElemType;
10424                     tiRetVal.MakeByRef();
10425                     if (prefixFlags & PREFIX_READONLY)
10426                     {
10427                         tiRetVal.SetIsReadonlyByRef();
10428                     }
10429
10430                     // an array interior pointer is always in the heap
10431                     tiRetVal.SetIsPermanentHomeByRef();
10432                 }
10433
10434                 // If it's a value class array we just do a simple address-of
10435                 if (eeIsValueClass(ldelemClsHnd))
10436                 {
10437                     CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd);
10438                     if (cit == CORINFO_TYPE_UNDEF)
10439                     {
10440                         lclTyp = TYP_STRUCT;
10441                     }
10442                     else
10443                     {
10444                         lclTyp = JITtype2varType(cit);
10445                     }
10446                     goto ARR_LD_POST_VERIFY;
10447                 }
10448
10449                 // Similarly, if it's a readonly access, we can do a simple address-of
10450                 // without doing a runtime type-check
10451                 if (prefixFlags & PREFIX_READONLY)
10452                 {
10453                     lclTyp = TYP_REF;
10454                     goto ARR_LD_POST_VERIFY;
10455                 }
10456
10457                 // Otherwise we need the full helper function with run-time type check
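                // For illustration (an assumption about why the check is needed): arrays of
                // reference types are covariant, e.g.
                //     object[] arr = new string[1];
                // so handing out a writeable byref to an element must verify the exact element
                // type at run time (a mismatch throws ArrayTypeMismatchException); the readonly.
                // case above promises no store, which is why it can skip the check.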
10458                 op1 = impTokenToHandle(&resolvedToken);
10459                 if (op1 == nullptr)
10460                 { // compDonotInline()
10461                     return;
10462                 }
10463
10464                 args = gtNewArgList(op1);                      // Type
10465                 args = gtNewListNode(impPopStack().val, args); // index
10466                 args = gtNewListNode(impPopStack().val, args); // array
10467                 op1  = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, GTF_EXCEPT, args);
10468
10469                 impPushOnStack(op1, tiRetVal);
10470                 break;
10471
10472             // ldelem for reference and value types
10473             case CEE_LDELEM:
10474                 assertImp(sz == sizeof(unsigned));
10475
10476                 _impResolveToken(CORINFO_TOKENKIND_Class);
10477
10478                 JITDUMP(" %08X", resolvedToken.token);
10479
10480                 ldelemClsHnd = resolvedToken.hClass;
10481
10482                 if (tiVerificationNeeded)
10483                 {
10484                     typeInfo tiArray = impStackTop(1).seTypeInfo;
10485                     typeInfo tiIndex = impStackTop().seTypeInfo;
10486
10487                     // As per ECMA, the 'index' operand can be either int32 or native int.
10488                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10489                     tiRetVal = verMakeTypeInfo(ldelemClsHnd);
10490
10491                     Verify(tiArray.IsNullObjRef() || tiCompatibleWith(verGetArrayElemType(tiArray), tiRetVal, false),
10492                            "type of array incompatible with type operand");
10493                     tiRetVal.NormaliseForStack();
10494                 }
10495
10496                 // If it's a reference type or generic variable type
10497                 // then just generate code as though it's a ldelem.ref instruction
10498                 if (!eeIsValueClass(ldelemClsHnd))
10499                 {
10500                     lclTyp = TYP_REF;
10501                     opcode = CEE_LDELEM_REF;
10502                 }
10503                 else
10504                 {
10505                     CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd);
10506                     lclTyp             = JITtype2varType(jitTyp);
10507                     tiRetVal           = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct
10508                     tiRetVal.NormaliseForStack();
10509                 }
10510                 goto ARR_LD_POST_VERIFY;
10511
10512             case CEE_LDELEM_I1:
10513                 lclTyp = TYP_BYTE;
10514                 goto ARR_LD;
10515             case CEE_LDELEM_I2:
10516                 lclTyp = TYP_SHORT;
10517                 goto ARR_LD;
10518             case CEE_LDELEM_I:
10519                 lclTyp = TYP_I_IMPL;
10520                 goto ARR_LD;
10521
10522             // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter
10523             // and treating it as TYP_INT avoids other asserts.
10524             case CEE_LDELEM_U4:
10525                 lclTyp = TYP_INT;
10526                 goto ARR_LD;
10527
10528             case CEE_LDELEM_I4:
10529                 lclTyp = TYP_INT;
10530                 goto ARR_LD;
10531             case CEE_LDELEM_I8:
10532                 lclTyp = TYP_LONG;
10533                 goto ARR_LD;
10534             case CEE_LDELEM_REF:
10535                 lclTyp = TYP_REF;
10536                 goto ARR_LD;
10537             case CEE_LDELEM_R4:
10538                 lclTyp = TYP_FLOAT;
10539                 goto ARR_LD;
10540             case CEE_LDELEM_R8:
10541                 lclTyp = TYP_DOUBLE;
10542                 goto ARR_LD;
10543             case CEE_LDELEM_U1:
10544                 lclTyp = TYP_UBYTE;
10545                 goto ARR_LD;
10546             case CEE_LDELEM_U2:
10547                 lclTyp = TYP_CHAR;
10548                 goto ARR_LD;
10549
10550             ARR_LD:
10551
10552                 if (tiVerificationNeeded)
10553                 {
10554                     typeInfo tiArray = impStackTop(1).seTypeInfo;
10555                     typeInfo tiIndex = impStackTop().seTypeInfo;
10556
10557                     // As per ECMA, the 'index' operand can be either int32 or native int.
10558                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10559                     if (tiArray.IsNullObjRef())
10560                     {
10561                         if (lclTyp == TYP_REF)
10562                         { // we will say a deref of a null array yields a null ref
10563                             tiRetVal = typeInfo(TI_NULL);
10564                         }
10565                         else
10566                         {
10567                             tiRetVal = typeInfo(lclTyp);
10568                         }
10569                     }
10570                     else
10571                     {
10572                         tiRetVal             = verGetArrayElemType(tiArray);
10573                         typeInfo arrayElemTi = typeInfo(lclTyp);
10574 #ifdef _TARGET_64BIT_
10575                         if (opcode == CEE_LDELEM_I)
10576                         {
10577                             arrayElemTi = typeInfo::nativeInt();
10578                         }
10579
10580                         if (lclTyp != TYP_REF && lclTyp != TYP_STRUCT)
10581                         {
10582                             Verify(typeInfo::AreEquivalent(tiRetVal, arrayElemTi), "bad array");
10583                         }
10584                         else
10585 #endif // _TARGET_64BIT_
10586                         {
10587                             Verify(tiRetVal.IsType(arrayElemTi.GetType()), "bad array");
10588                         }
10589                     }
10590                     tiRetVal.NormaliseForStack();
10591                 }
10592             ARR_LD_POST_VERIFY:
10593
10594                 /* Pull the index value and array address */
10595                 op2 = impPopStack().val;
10596                 op1 = impPopStack().val;
10597                 assertImp(op1->gtType == TYP_REF);
10598
10599                 /* Check for null pointer - in the inliner case we simply abort */
10600
10601                 if (compIsForInlining())
10602                 {
10603                     if (op1->gtOper == GT_CNS_INT)
10604                     {
10605                         compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM);
10606                         return;
10607                     }
10608                 }
10609
10610                 op1 = impCheckForNullPointer(op1);
10611
10612                 /* Mark the block as containing an index expression */
10613
10614                 if (op1->gtOper == GT_LCL_VAR)
10615                 {
10616                     if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD)
10617                     {
10618                         block->bbFlags |= BBF_HAS_IDX_LEN;
10619                         optMethodFlags |= OMF_HAS_ARRAYREF;
10620                     }
10621                 }
10622
10623                 /* Create the index node and push it on the stack */
10624
10625                 op1 = gtNewIndexRef(lclTyp, op1, op2);
10626
10627                 ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT);
10628
10629                 if ((opcode == CEE_LDELEMA) || ldstruct ||
10630                     (ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd)))
10631                 {
10632                     assert(ldelemClsHnd != DUMMY_INIT(NULL));
10633
10634                     // remember the element size
10635                     if (lclTyp == TYP_REF)
10636                     {
10637                         op1->gtIndex.gtIndElemSize = sizeof(void*);
10638                     }
10639                     else
10640                     {
10641                         // If ldElemClass is precisely a primitive type, use that; otherwise, preserve the struct type.
10642                         if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF)
10643                         {
10644                             op1->gtIndex.gtStructElemClass = ldelemClsHnd;
10645                         }
10646                         assert(lclTyp != TYP_STRUCT || op1->gtIndex.gtStructElemClass != nullptr);
10647                         if (lclTyp == TYP_STRUCT)
10648                         {
10649                             size                       = info.compCompHnd->getClassSize(ldelemClsHnd);
10650                             op1->gtIndex.gtIndElemSize = size;
10651                             op1->gtType                = lclTyp;
10652                         }
10653                     }
10654
10655                     if ((opcode == CEE_LDELEMA) || ldstruct)
10656                     {
10657                         // wrap it in a &
10658                         lclTyp = TYP_BYREF;
10659
10660                         op1 = gtNewOperNode(GT_ADDR, lclTyp, op1);
10661                     }
10662                     else
10663                     {
10664                         assert(lclTyp != TYP_STRUCT);
10665                     }
10666                 }
10667
10668                 if (ldstruct)
10669                 {
10670                     // Create an OBJ for the result
10671                     op1 = gtNewObjNode(ldelemClsHnd, op1);
10672                     op1->gtFlags |= GTF_EXCEPT;
10673                 }
10674                 impPushOnStack(op1, tiRetVal);
10675                 break;
10676
10677             // stelem for reference and value types
10678             case CEE_STELEM:
10679
10680                 assertImp(sz == sizeof(unsigned));
10681
10682                 _impResolveToken(CORINFO_TOKENKIND_Class);
10683
10684                 JITDUMP(" %08X", resolvedToken.token);
10685
10686                 stelemClsHnd = resolvedToken.hClass;
10687
10688                 if (tiVerificationNeeded)
10689                 {
10690                     typeInfo tiArray = impStackTop(2).seTypeInfo;
10691                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
10692                     typeInfo tiValue = impStackTop().seTypeInfo;
10693
10694                     // As per ECMA, the 'index' operand can be either int32 or native int.
10695                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10696                     typeInfo arrayElem = verMakeTypeInfo(stelemClsHnd);
10697
10698                     Verify(tiArray.IsNullObjRef() || tiCompatibleWith(arrayElem, verGetArrayElemType(tiArray), false),
10699                            "type operand incompatible with array element type");
10700                     arrayElem.NormaliseForStack();
10701                     Verify(tiCompatibleWith(tiValue, arrayElem, true), "value incompatible with type operand");
10702                 }
10703
10704                 // If it's a reference type just behave as though it's a stelem.ref instruction
10705                 if (!eeIsValueClass(stelemClsHnd))
10706                 {
10707                     goto STELEM_REF_POST_VERIFY;
10708                 }
10709
10710                 // Otherwise extract the type
10711                 {
10712                     CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd);
10713                     lclTyp             = JITtype2varType(jitTyp);
10714                     goto ARR_ST_POST_VERIFY;
10715                 }
10716
10717             case CEE_STELEM_REF:
10718
10719                 if (tiVerificationNeeded)
10720                 {
10721                     typeInfo tiArray = impStackTop(2).seTypeInfo;
10722                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
10723                     typeInfo tiValue = impStackTop().seTypeInfo;
10724
10725                     // As per ECMA, the 'index' operand can be either int32 or native int.
10726                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10727                     Verify(tiValue.IsObjRef(), "bad value");
10728
10729                     // We only check that it is an object reference; the helper does additional checks.
10730                     Verify(tiArray.IsNullObjRef() || verGetArrayElemType(tiArray).IsType(TI_REF), "bad array");
10731                 }
10732
10733                 arrayNodeTo      = impStackTop(2).val;
10734                 arrayNodeToIndex = impStackTop(1).val;
10735                 arrayNodeFrom    = impStackTop().val;
10736
10737                 //
10738                 // Note that it is not legal to optimize away CORINFO_HELP_ARRADDR_ST in a
10739                 // lot of cases because of covariance, i.e. foo[] can be cast to object[].
10740                 //
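                      // Illustrative example of the covariance hazard the helper guards against
                      // (a C# sketch, not taken from this file):
                      //
                      //     object[] arr = new string[1];
                      //     arr[0] = new object();   // must throw ArrayTypeMismatchException
                      //
                      // CORINFO_HELP_ARRADDR_ST performs the element-type check that makes such
                      // a store throw when the value is not assignable to the actual element type.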
10741
10742                 // Check for assignment to same array, i.e. arrLcl[i] = arrLcl[j]
10743                 // This does not need CORINFO_HELP_ARRADDR_ST
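                      // (The value being stored was just loaded from the same, non-address-exposed
                      // array local, so it necessarily matches the destination element type.)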
10744
10745                 if (arrayNodeFrom->OperGet() == GT_INDEX && arrayNodeFrom->gtOp.gtOp1->gtOper == GT_LCL_VAR &&
10746                     arrayNodeTo->gtOper == GT_LCL_VAR &&
10747                     arrayNodeTo->gtLclVarCommon.gtLclNum == arrayNodeFrom->gtOp.gtOp1->gtLclVarCommon.gtLclNum &&
10748                     !lvaTable[arrayNodeTo->gtLclVarCommon.gtLclNum].lvAddrExposed)
10749                 {
10750                     lclTyp = TYP_REF;
10751                     goto ARR_ST_POST_VERIFY;
10752                 }
10753
10754                 // Check for assignment of NULL. This does not need CORINFO_HELP_ARRADDR_ST
10755
10756                 if (arrayNodeFrom->OperGet() == GT_CNS_INT)
10757                 {
10758                     assert(arrayNodeFrom->gtType == TYP_REF && arrayNodeFrom->gtIntCon.gtIconVal == 0);
10759
10760                     lclTyp = TYP_REF;
10761                     goto ARR_ST_POST_VERIFY;
10762                 }
10763
10764             STELEM_REF_POST_VERIFY:
10765
10766                 /* Call a helper function to do the assignment */
10767                 op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, 0, impPopList(3, &flags, nullptr));
10768
10769                 goto SPILL_APPEND;
10770
10771             case CEE_STELEM_I1:
10772                 lclTyp = TYP_BYTE;
10773                 goto ARR_ST;
10774             case CEE_STELEM_I2:
10775                 lclTyp = TYP_SHORT;
10776                 goto ARR_ST;
10777             case CEE_STELEM_I:
10778                 lclTyp = TYP_I_IMPL;
10779                 goto ARR_ST;
10780             case CEE_STELEM_I4:
10781                 lclTyp = TYP_INT;
10782                 goto ARR_ST;
10783             case CEE_STELEM_I8:
10784                 lclTyp = TYP_LONG;
10785                 goto ARR_ST;
10786             case CEE_STELEM_R4:
10787                 lclTyp = TYP_FLOAT;
10788                 goto ARR_ST;
10789             case CEE_STELEM_R8:
10790                 lclTyp = TYP_DOUBLE;
10791                 goto ARR_ST;
10792
10793             ARR_ST:
10794
10795                 if (tiVerificationNeeded)
10796                 {
10797                     typeInfo tiArray = impStackTop(2).seTypeInfo;
10798                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
10799                     typeInfo tiValue = impStackTop().seTypeInfo;
10800
10801                     // As per ECMA, the 'index' specified can be either int32 or native int.
10802                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10803                     typeInfo arrayElem = typeInfo(lclTyp);
10804 #ifdef _TARGET_64BIT_
10805                     if (opcode == CEE_STELEM_I)
10806                     {
10807                         arrayElem = typeInfo::nativeInt();
10808                     }
10809 #endif // _TARGET_64BIT_
10810                     Verify(tiArray.IsNullObjRef() || typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElem),
10811                            "bad array");
10812
10813                     Verify(tiCompatibleWith(NormaliseForStack(tiValue), arrayElem.NormaliseForStack(), true),
10814                            "bad value");
10815                 }
10816
10817             ARR_ST_POST_VERIFY:
10818                 /* The strict order of evaluation is LHS-operands, RHS-operands,
10819                    range-check, and then assignment. However, codegen currently
10820                    does the range-check before evaluating the RHS-operands. So to
10821                    maintain strict ordering, we spill the stack. */
10822
10823                 if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT)
10824                 {
10825                     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
10826                                                    "Strict ordering of exceptions for Array store"));
10827                 }
10828
10829                 /* Pull the new value from the stack */
10830                 op2 = impPopStack().val;
10831
10832                 /* Pull the index value */
10833                 op1 = impPopStack().val;
10834
10835                 /* Pull the array address */
10836                 op3 = impPopStack().val;
10837
10838                 assertImp(op3->gtType == TYP_REF);
10839                 if (op2->IsVarAddr())
10840                 {
10841                     op2->gtType = TYP_I_IMPL;
10842                 }
10843
10844                 op3 = impCheckForNullPointer(op3);
10845
10846                 // Mark the block as containing an index expression
10847
10848                 if (op3->gtOper == GT_LCL_VAR)
10849                 {
10850                     if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD)
10851                     {
10852                         block->bbFlags |= BBF_HAS_IDX_LEN;
10853                         optMethodFlags |= OMF_HAS_ARRAYREF;
10854                     }
10855                 }
10856
10857                 /* Create the index node */
10858
10859                 op1 = gtNewIndexRef(lclTyp, op3, op1);
10860
10861                 /* Create the assignment node and append it */
10862
10863                 if (lclTyp == TYP_STRUCT)
10864                 {
10865                     assert(stelemClsHnd != DUMMY_INIT(NULL));
10866
10867                     op1->gtIndex.gtStructElemClass = stelemClsHnd;
10868                     op1->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(stelemClsHnd);
10869                 }
10870                 if (varTypeIsStruct(op1))
10871                 {
10872                     op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL);
10873                 }
10874                 else
10875                 {
10876                     op2 = impImplicitR4orR8Cast(op2, op1->TypeGet());
10877                     op1 = gtNewAssignNode(op1, op2);
10878                 }
10879
10880                 /* Mark the expression as containing an assignment */
10881
10882                 op1->gtFlags |= GTF_ASG;
10883
10884                 goto SPILL_APPEND;
10885
10886             case CEE_ADD:
10887                 oper = GT_ADD;
10888                 goto MATH_OP2;
10889
10890             case CEE_ADD_OVF:
10891                 uns = false;
10892                 goto ADD_OVF;
10893             case CEE_ADD_OVF_UN:
10894                 uns = true;
10895                 goto ADD_OVF;
10896
10897             ADD_OVF:
10898                 ovfl     = true;
10899                 callNode = false;
10900                 oper     = GT_ADD;
10901                 goto MATH_OP2_FLAGS;
10902
10903             case CEE_SUB:
10904                 oper = GT_SUB;
10905                 goto MATH_OP2;
10906
10907             case CEE_SUB_OVF:
10908                 uns = false;
10909                 goto SUB_OVF;
10910             case CEE_SUB_OVF_UN:
10911                 uns = true;
10912                 goto SUB_OVF;
10913
10914             SUB_OVF:
10915                 ovfl     = true;
10916                 callNode = false;
10917                 oper     = GT_SUB;
10918                 goto MATH_OP2_FLAGS;
10919
10920             case CEE_MUL:
10921                 oper = GT_MUL;
10922                 goto MATH_MAYBE_CALL_NO_OVF;
10923
10924             case CEE_MUL_OVF:
10925                 uns = false;
10926                 goto MUL_OVF;
10927             case CEE_MUL_OVF_UN:
10928                 uns = true;
10929                 goto MUL_OVF;
10930
10931             MUL_OVF:
10932                 ovfl = true;
10933                 oper = GT_MUL;
10934                 goto MATH_MAYBE_CALL_OVF;
10935
10936             // Other binary math operations
10937
10938             case CEE_DIV:
10939                 oper = GT_DIV;
10940                 goto MATH_MAYBE_CALL_NO_OVF;
10941
10942             case CEE_DIV_UN:
10943                 oper = GT_UDIV;
10944                 goto MATH_MAYBE_CALL_NO_OVF;
10945
10946             case CEE_REM:
10947                 oper = GT_MOD;
10948                 goto MATH_MAYBE_CALL_NO_OVF;
10949
10950             case CEE_REM_UN:
10951                 oper = GT_UMOD;
10952                 goto MATH_MAYBE_CALL_NO_OVF;
10953
10954             MATH_MAYBE_CALL_NO_OVF:
10955                 ovfl = false;
10956             MATH_MAYBE_CALL_OVF:
10957                 // Morpher has some complex logic about when to turn different
10958                 // typed nodes on different platforms into helper calls. We
10959                 // need to either duplicate that logic here, or just
10960                 // pessimistically make all the nodes large enough to become
10961                 // call nodes.  Since call nodes aren't that much larger and
10962                 // these opcodes are infrequent enough, I chose the latter.
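                      // For example, on 32-bit targets a TYP_LONG GT_DIV or GT_MOD is later morphed
                      // into a call to a helper such as CORINFO_HELP_LDIV or CORINFO_HELP_LMOD, so
                      // the node must be allocated call-sized up front.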
10963                 callNode = true;
10964                 goto MATH_OP2_FLAGS;
10965
10966             case CEE_AND:
10967                 oper = GT_AND;
10968                 goto MATH_OP2;
10969             case CEE_OR:
10970                 oper = GT_OR;
10971                 goto MATH_OP2;
10972             case CEE_XOR:
10973                 oper = GT_XOR;
10974                 goto MATH_OP2;
10975
10976             MATH_OP2: // For default values of 'ovfl' and 'callNode'
10977
10978                 ovfl     = false;
10979                 callNode = false;
10980
10981             MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set
10982
10983                 /* Pull two values and push back the result */
10984
10985                 if (tiVerificationNeeded)
10986                 {
10987                     const typeInfo& tiOp1 = impStackTop(1).seTypeInfo;
10988                     const typeInfo& tiOp2 = impStackTop().seTypeInfo;
10989
10990                     Verify(tiCompatibleWith(tiOp1, tiOp2, true), "different arg type");
10991                     if (oper == GT_ADD || oper == GT_DIV || oper == GT_SUB || oper == GT_MUL || oper == GT_MOD)
10992                     {
10993                         Verify(tiOp1.IsNumberType(), "not number");
10994                     }
10995                     else
10996                     {
10997                         Verify(tiOp1.IsIntegerType(), "not integer");
10998                     }
10999
11000                     Verify(!ovfl || tiOp1.IsIntegerType(), "not integer");
11001
11002                     tiRetVal = tiOp1;
11003
11004 #ifdef _TARGET_64BIT_
11005                     if (tiOp2.IsNativeIntType())
11006                     {
11007                         tiRetVal = tiOp2;
11008                     }
11009 #endif // _TARGET_64BIT_
11010                 }
11011
11012                 op2 = impPopStack().val;
11013                 op1 = impPopStack().val;
11014
11015 #if !CPU_HAS_FP_SUPPORT
11016                 if (varTypeIsFloating(op1->gtType))
11017                 {
11018                     callNode = true;
11019                 }
11020 #endif
11021                 /* Can't do arithmetic with references */
11022                 assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF);
11023
11024                 // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change it if it's a true byref,
11025                 // only if it is on the stack)
11026                 impBashVarAddrsToI(op1, op2);
11027
11028                 type = impGetByRefResultType(oper, uns, &op1, &op2);
11029
11030                 assert(!ovfl || !varTypeIsFloating(op1->gtType));
11031
11032                 /* Special case: "int+0", "int-0", "int*1", "int/1" */
11033
11034                 if (op2->gtOper == GT_CNS_INT)
11035                 {
11036                     if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) ||
11037                         (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV)))
11038
11039                     {
11040                         impPushOnStack(op1, tiRetVal);
11041                         break;
11042                     }
11043                 }
11044
11045 #if !FEATURE_X87_DOUBLES
11046                 // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand
11047                 //
11048                 if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))
11049                 {
11050                     if (op1->TypeGet() != type)
11051                     {
11052                         // We insert a cast of op1 to 'type'
11053                         op1 = gtNewCastNode(type, op1, type);
11054                     }
11055                     if (op2->TypeGet() != type)
11056                     {
11057                         // We insert a cast of op2 to 'type'
11058                         op2 = gtNewCastNode(type, op2, type);
11059                     }
11060                 }
11061 #endif // !FEATURE_X87_DOUBLES
11062
11063 #if SMALL_TREE_NODES
11064                 if (callNode)
11065                 {
11066                     /* These operators can later be transformed into 'GT_CALL' */
11067
11068                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]);
11069 #ifndef _TARGET_ARM_
11070                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]);
11071                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]);
11072                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]);
11073                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]);
11074 #endif
11075                     // It's tempting to use LargeOpOpcode() here, but this logic is *not* saying
11076                     // that we'll need to transform into a general large node, but rather specifically
11077                     // to a call: by doing it this way, things keep working if there are multiple sizes,
11078                     // and a CALL is no longer the largest.
11079                     // That said, as of now it *is* a large node, so we'll do this with an assert rather
11080                     // than an "if".
11081                     assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE);
11082                     op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true));
11083                 }
11084                 else
11085 #endif // SMALL_TREE_NODES
11086                 {
11087                     op1 = gtNewOperNode(oper, type, op1, op2);
11088                 }
11089
11090                 /* Special case: integer/long division may throw an exception */
11091
11092                 if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow())
11093                 {
11094                     op1->gtFlags |= GTF_EXCEPT;
11095                 }
11096
11097                 if (ovfl)
11098                 {
11099                     assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL);
11100                     if (ovflType != TYP_UNKNOWN)
11101                     {
11102                         op1->gtType = ovflType;
11103                     }
11104                     op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW);
11105                     if (uns)
11106                     {
11107                         op1->gtFlags |= GTF_UNSIGNED;
11108                     }
11109                 }
11110
11111                 impPushOnStack(op1, tiRetVal);
11112                 break;
11113
11114             case CEE_SHL:
11115                 oper = GT_LSH;
11116                 goto CEE_SH_OP2;
11117
11118             case CEE_SHR:
11119                 oper = GT_RSH;
11120                 goto CEE_SH_OP2;
11121             case CEE_SHR_UN:
11122                 oper = GT_RSZ;
11123                 goto CEE_SH_OP2;
11124
11125             CEE_SH_OP2:
11126                 if (tiVerificationNeeded)
11127                 {
11128                     const typeInfo& tiVal   = impStackTop(1).seTypeInfo;
11129                     const typeInfo& tiShift = impStackTop(0).seTypeInfo;
11130                     Verify(tiVal.IsIntegerType() && tiShift.IsType(TI_INT), "Bad shift args");
11131                     tiRetVal = tiVal;
11132                 }
11133                 op2 = impPopStack().val;
11134                 op1 = impPopStack().val; // operand to be shifted
11135                 impBashVarAddrsToI(op1, op2);
11136
11137                 type = genActualType(op1->TypeGet());
11138                 op1  = gtNewOperNode(oper, type, op1, op2);
11139
11140                 impPushOnStack(op1, tiRetVal);
11141                 break;
11142
11143             case CEE_NOT:
11144                 if (tiVerificationNeeded)
11145                 {
11146                     tiRetVal = impStackTop().seTypeInfo;
11147                     Verify(tiRetVal.IsIntegerType(), "bad int value");
11148                 }
11149
11150                 op1 = impPopStack().val;
11151                 impBashVarAddrsToI(op1, nullptr);
11152                 type = genActualType(op1->TypeGet());
11153                 impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal);
11154                 break;
11155
11156             case CEE_CKFINITE:
11157                 if (tiVerificationNeeded)
11158                 {
11159                     tiRetVal = impStackTop().seTypeInfo;
11160                     Verify(tiRetVal.IsType(TI_DOUBLE), "bad R value");
11161                 }
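                      // 'ckfinite' throws an ArithmeticException at run time if the value is a NaN
                      // or +/- infinity; that is why the GT_CKFINITE node gets GTF_EXCEPT below.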
11162                 op1  = impPopStack().val;
11163                 type = op1->TypeGet();
11164                 op1  = gtNewOperNode(GT_CKFINITE, type, op1);
11165                 op1->gtFlags |= GTF_EXCEPT;
11166
11167                 impPushOnStack(op1, tiRetVal);
11168                 break;
11169
11170             case CEE_LEAVE:
11171
11172                 val     = getI4LittleEndian(codeAddr); // jump distance
11173                 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val);
11174                 goto LEAVE;
11175
11176             case CEE_LEAVE_S:
11177                 val     = getI1LittleEndian(codeAddr); // jump distance
11178                 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val);
11179
11180             LEAVE:
11181
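                      // 'leave' exits one or more protected (try) regions: the evaluation stack is
                      // emptied and any finally handlers between this point and the target must run
                      // before control reaches the target.  impImportLeave expands the BBJ_LEAVE
                      // block into the flow that calls those finallys.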
11182                 if (compIsForInlining())
11183                 {
11184                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE);
11185                     return;
11186                 }
11187
11188                 JITDUMP(" %04X", jmpAddr);
11189                 if (block->bbJumpKind != BBJ_LEAVE)
11190                 {
11191                     impResetLeaveBlock(block, jmpAddr);
11192                 }
11193
11194                 assert(jmpAddr == block->bbJumpDest->bbCodeOffs);
11195                 impImportLeave(block);
11196                 impNoteBranchOffs();
11197
11198                 break;
11199
11200             case CEE_BR:
11201             case CEE_BR_S:
11202                 jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
11203
11204                 if (compIsForInlining() && jmpDist == 0)
11205                 {
11206                     break; /* NOP */
11207                 }
11208
11209                 impNoteBranchOffs();
11210                 break;
11211
11212             case CEE_BRTRUE:
11213             case CEE_BRTRUE_S:
11214             case CEE_BRFALSE:
11215             case CEE_BRFALSE_S:
11216
11217                 /* Pop the comparand (now there's a neat term) from the stack */
11218                 if (tiVerificationNeeded)
11219                 {
11220                     typeInfo& tiVal = impStackTop().seTypeInfo;
11221                     Verify(tiVal.IsObjRef() || tiVal.IsByRef() || tiVal.IsIntegerType() || tiVal.IsMethod(),
11222                            "bad value");
11223                 }
11224
11225                 op1  = impPopStack().val;
11226                 type = op1->TypeGet();
11227
11228                 // brfalse and brtrue are only allowed on I4, refs, and byrefs.
11229                 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11230                 {
11231                     block->bbJumpKind = BBJ_NONE;
11232
11233                     if (op1->gtFlags & GTF_GLOB_EFFECT)
11234                     {
11235                         op1 = gtUnusedValNode(op1);
11236                         goto SPILL_APPEND;
11237                     }
11238                     else
11239                     {
11240                         break;
11241                     }
11242                 }
11243
11244                 if (op1->OperIsCompare())
11245                 {
11246                     if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S)
11247                     {
11248                         // Flip the sense of the compare
11249
11250                         op1 = gtReverseCond(op1);
11251                     }
11252                 }
11253                 else
11254                 {
11255                     /* We'll compare against an equally-sized integer 0 */
11256                     /* For small types, we always compare against int   */
11257                     op2 = gtNewZeroConNode(genActualType(op1->gtType));
11258
11259                     /* Create the comparison operator and try to fold it */
11260
11261                     oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? GT_NE : GT_EQ;
11262                     op1  = gtNewOperNode(oper, TYP_INT, op1, op2);
11263                 }
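                      // e.g. 'brtrue' on an int value x yields GT_NE(x, 0) and 'brfalse' yields
                      // GT_EQ(x, 0); the GT_JTRUE wrapper is added below at COND_JUMP.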
11264
11265             // fall through
11266
11267             COND_JUMP:
11268
11269                 /* Fold comparison if we can */
11270
11271                 op1 = gtFoldExpr(op1);
11272
11273                 /* Try to fold the really simple cases like 'iconst *, ifne/ifeq'*/
11274                 /* Don't make any blocks unreachable in import only mode */
11275
11276                 if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly())
11277                 {
11278                     /* gtFoldExpr() should prevent this as we don't want to make any blocks
11279                        unreachable under compDbgCode */
11280                     assert(!opts.compDbgCode);
11281
11282                     BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->gtIntCon.gtIconVal ? BBJ_ALWAYS : BBJ_NONE);
11283                     assertImp((block->bbJumpKind == BBJ_COND)            // normal case
11284                               || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the
11285                                                                          // block for the second time
11286
11287                     block->bbJumpKind = foldedJumpKind;
11288 #ifdef DEBUG
11289                     if (verbose)
11290                     {
11291                         if (op1->gtIntCon.gtIconVal)
11292                         {
11293                             printf("\nThe conditional jump becomes an unconditional jump to BB%02u\n",
11294                                    block->bbJumpDest->bbNum);
11295                         }
11296                         else
11297                         {
11298                             printf("\nThe block falls through into the next BB%02u\n", block->bbNext->bbNum);
11299                         }
11300                     }
11301 #endif
11302                     break;
11303                 }
11304
11305                 op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1);
11306
11307                 /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt'
11308                    in impImportBlock(block). For correct line numbers, spill stack. */
11309
11310                 if (opts.compDbgCode && impCurStmtOffs != BAD_IL_OFFSET)
11311                 {
11312                     impSpillStackEnsure(true);
11313                 }
11314
11315                 goto SPILL_APPEND;
11316
11317             case CEE_CEQ:
11318                 oper = GT_EQ;
11319                 uns  = false;
11320                 goto CMP_2_OPs;
11321             case CEE_CGT_UN:
11322                 oper = GT_GT;
11323                 uns  = true;
11324                 goto CMP_2_OPs;
11325             case CEE_CGT:
11326                 oper = GT_GT;
11327                 uns  = false;
11328                 goto CMP_2_OPs;
11329             case CEE_CLT_UN:
11330                 oper = GT_LT;
11331                 uns  = true;
11332                 goto CMP_2_OPs;
11333             case CEE_CLT:
11334                 oper = GT_LT;
11335                 uns  = false;
11336                 goto CMP_2_OPs;
11337
11338             CMP_2_OPs:
11339                 if (tiVerificationNeeded)
11340                 {
11341                     verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
11342                     tiRetVal = typeInfo(TI_INT);
11343                 }
11344
11345                 op2 = impPopStack().val;
11346                 op1 = impPopStack().val;
11347
11348 #ifdef _TARGET_64BIT_
11349                 if (varTypeIsI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_INT))
11350                 {
11351                     op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11352                 }
11353                 else if (varTypeIsI(op2->TypeGet()) && (genActualType(op1->TypeGet()) == TYP_INT))
11354                 {
11355                     op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11356                 }
11357 #endif // _TARGET_64BIT_
11358
11359                 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
11360                           varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
11361                           varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
11362
11363                 /* Create the comparison node */
11364
11365                 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
11366
11367                 /* TODO: setting both flags when only one is appropriate */
11368                 if (opcode == CEE_CGT_UN || opcode == CEE_CLT_UN)
11369                 {
11370                     op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED;
11371                 }
11372
11373                 impPushOnStack(op1, tiRetVal);
11374                 break;
11375
11376             case CEE_BEQ_S:
11377             case CEE_BEQ:
11378                 oper = GT_EQ;
11379                 goto CMP_2_OPs_AND_BR;
11380
11381             case CEE_BGE_S:
11382             case CEE_BGE:
11383                 oper = GT_GE;
11384                 goto CMP_2_OPs_AND_BR;
11385
11386             case CEE_BGE_UN_S:
11387             case CEE_BGE_UN:
11388                 oper = GT_GE;
11389                 goto CMP_2_OPs_AND_BR_UN;
11390
11391             case CEE_BGT_S:
11392             case CEE_BGT:
11393                 oper = GT_GT;
11394                 goto CMP_2_OPs_AND_BR;
11395
11396             case CEE_BGT_UN_S:
11397             case CEE_BGT_UN:
11398                 oper = GT_GT;
11399                 goto CMP_2_OPs_AND_BR_UN;
11400
11401             case CEE_BLE_S:
11402             case CEE_BLE:
11403                 oper = GT_LE;
11404                 goto CMP_2_OPs_AND_BR;
11405
11406             case CEE_BLE_UN_S:
11407             case CEE_BLE_UN:
11408                 oper = GT_LE;
11409                 goto CMP_2_OPs_AND_BR_UN;
11410
11411             case CEE_BLT_S:
11412             case CEE_BLT:
11413                 oper = GT_LT;
11414                 goto CMP_2_OPs_AND_BR;
11415
11416             case CEE_BLT_UN_S:
11417             case CEE_BLT_UN:
11418                 oper = GT_LT;
11419                 goto CMP_2_OPs_AND_BR_UN;
11420
11421             case CEE_BNE_UN_S:
11422             case CEE_BNE_UN:
11423                 oper = GT_NE;
11424                 goto CMP_2_OPs_AND_BR_UN;
11425
11426             CMP_2_OPs_AND_BR_UN:
11427                 uns       = true;
11428                 unordered = true;
11429                 goto CMP_2_OPs_AND_BR_ALL;
11430             CMP_2_OPs_AND_BR:
11431                 uns       = false;
11432                 unordered = false;
11433                 goto CMP_2_OPs_AND_BR_ALL;
11434             CMP_2_OPs_AND_BR_ALL:
11435
11436                 if (tiVerificationNeeded)
11437                 {
11438                     verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
11439                 }
11440
11441                 /* Pull two values */
11442                 op2 = impPopStack().val;
11443                 op1 = impPopStack().val;
11444
11445 #ifdef _TARGET_64BIT_
11446                 if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT))
11447                 {
11448                     op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11449                 }
11450                 else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT))
11451                 {
11452                     op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11453                 }
11454 #endif // _TARGET_64BIT_
11455
11456                 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
11457                           varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
11458                           varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
11459
11460                 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11461                 {
11462                     block->bbJumpKind = BBJ_NONE;
11463
11464                     if (op1->gtFlags & GTF_GLOB_EFFECT)
11465                     {
11466                         impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11467                                                        "Branch to next Optimization, op1 side effect"));
11468                         impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
11469                     }
11470                     if (op2->gtFlags & GTF_GLOB_EFFECT)
11471                     {
11472                         impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11473                                                        "Branch to next Optimization, op2 side effect"));
11474                         impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
11475                     }
11476
11477 #ifdef DEBUG
11478                     if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT)
11479                     {
11480                         impNoteLastILoffs();
11481                     }
11482 #endif
11483                     break;
11484                 }
11485 #if !FEATURE_X87_DOUBLES
11486                 // We can generate a compare of different-sized floating point op1 and op2
11487                 // We insert a cast
11488                 //
11489                 if (varTypeIsFloating(op1->TypeGet()))
11490                 {
11491                     if (op1->TypeGet() != op2->TypeGet())
11492                     {
11493                         assert(varTypeIsFloating(op2->TypeGet()));
11494
11495                         // say op1=double, op2=float. To avoid loss of precision
11496                         // while comparing, op2 is converted to double and double
11497                         // comparison is done.
11498                         if (op1->TypeGet() == TYP_DOUBLE)
11499                         {
11500                             // We insert a cast of op2 to TYP_DOUBLE
11501                             op2 = gtNewCastNode(TYP_DOUBLE, op2, TYP_DOUBLE);
11502                         }
11503                         else if (op2->TypeGet() == TYP_DOUBLE)
11504                         {
11505                             // We insert a cast of op1 to TYP_DOUBLE
11506                             op1 = gtNewCastNode(TYP_DOUBLE, op1, TYP_DOUBLE);
11507                         }
11508                     }
11509                 }
11510 #endif // !FEATURE_X87_DOUBLES
11511
11512                 /* Create and append the operator */
11513
11514                 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
11515
11516                 if (uns)
11517                 {
11518                     op1->gtFlags |= GTF_UNSIGNED;
11519                 }
11520
11521                 if (unordered)
11522                 {
11523                     op1->gtFlags |= GTF_RELOP_NAN_UN;
11524                 }
11525
11526                 goto COND_JUMP;
11527
11528             case CEE_SWITCH:
11529                 assert(!compIsForInlining());
11530
11531                 if (tiVerificationNeeded)
11532                 {
11533                     Verify(impStackTop().seTypeInfo.IsType(TI_INT), "Bad switch val");
11534                 }
11535                 /* Pop the switch value off the stack */
11536                 op1 = impPopStack().val;
11537                 assertImp(genActualTypeIsIntOrI(op1->TypeGet()));
11538
11539 #ifdef _TARGET_64BIT_
11540                 // Widen 'op1' on 64-bit targets
11541                 if (op1->TypeGet() != TYP_I_IMPL)
11542                 {
11543                     if (op1->OperGet() == GT_CNS_INT)
11544                     {
11545                         op1->gtType = TYP_I_IMPL;
11546                     }
11547                     else
11548                     {
11549                         op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
11550                     }
11551                 }
11552 #endif // _TARGET_64BIT_
11553                 assert(genActualType(op1->TypeGet()) == TYP_I_IMPL);
11554
11555                 /* We can create a switch node */
11556
11557                 op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1);
11558
11559                 val = (int)getU4LittleEndian(codeAddr);
11560                 codeAddr += 4 + val * 4; // skip over the switch-table
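                      // In the IL stream the switch operand is a 4-byte case count followed by that
                      // many 4-byte relative jump targets; we only step over them here, since the
                      // branch targets were already recorded when the basic blocks were created.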
11561
11562                 goto SPILL_APPEND;
11563
11564             /************************** Casting OPCODES ***************************/
11565
11566             case CEE_CONV_OVF_I1:
11567                 lclTyp = TYP_BYTE;
11568                 goto CONV_OVF;
11569             case CEE_CONV_OVF_I2:
11570                 lclTyp = TYP_SHORT;
11571                 goto CONV_OVF;
11572             case CEE_CONV_OVF_I:
11573                 lclTyp = TYP_I_IMPL;
11574                 goto CONV_OVF;
11575             case CEE_CONV_OVF_I4:
11576                 lclTyp = TYP_INT;
11577                 goto CONV_OVF;
11578             case CEE_CONV_OVF_I8:
11579                 lclTyp = TYP_LONG;
11580                 goto CONV_OVF;
11581
11582             case CEE_CONV_OVF_U1:
11583                 lclTyp = TYP_UBYTE;
11584                 goto CONV_OVF;
11585             case CEE_CONV_OVF_U2:
11586                 lclTyp = TYP_CHAR;
11587                 goto CONV_OVF;
11588             case CEE_CONV_OVF_U:
11589                 lclTyp = TYP_U_IMPL;
11590                 goto CONV_OVF;
11591             case CEE_CONV_OVF_U4:
11592                 lclTyp = TYP_UINT;
11593                 goto CONV_OVF;
11594             case CEE_CONV_OVF_U8:
11595                 lclTyp = TYP_ULONG;
11596                 goto CONV_OVF;
11597
11598             case CEE_CONV_OVF_I1_UN:
11599                 lclTyp = TYP_BYTE;
11600                 goto CONV_OVF_UN;
11601             case CEE_CONV_OVF_I2_UN:
11602                 lclTyp = TYP_SHORT;
11603                 goto CONV_OVF_UN;
11604             case CEE_CONV_OVF_I_UN:
11605                 lclTyp = TYP_I_IMPL;
11606                 goto CONV_OVF_UN;
11607             case CEE_CONV_OVF_I4_UN:
11608                 lclTyp = TYP_INT;
11609                 goto CONV_OVF_UN;
11610             case CEE_CONV_OVF_I8_UN:
11611                 lclTyp = TYP_LONG;
11612                 goto CONV_OVF_UN;
11613
11614             case CEE_CONV_OVF_U1_UN:
11615                 lclTyp = TYP_UBYTE;
11616                 goto CONV_OVF_UN;
11617             case CEE_CONV_OVF_U2_UN:
11618                 lclTyp = TYP_CHAR;
11619                 goto CONV_OVF_UN;
11620             case CEE_CONV_OVF_U_UN:
11621                 lclTyp = TYP_U_IMPL;
11622                 goto CONV_OVF_UN;
11623             case CEE_CONV_OVF_U4_UN:
11624                 lclTyp = TYP_UINT;
11625                 goto CONV_OVF_UN;
11626             case CEE_CONV_OVF_U8_UN:
11627                 lclTyp = TYP_ULONG;
11628                 goto CONV_OVF_UN;
11629
11630             CONV_OVF_UN:
11631                 uns = true;
11632                 goto CONV_OVF_COMMON;
11633             CONV_OVF:
11634                 uns = false;
11635                 goto CONV_OVF_COMMON;
11636
11637             CONV_OVF_COMMON:
11638                 ovfl = true;
11639                 goto _CONV;
11640
11641             case CEE_CONV_I1:
11642                 lclTyp = TYP_BYTE;
11643                 goto CONV;
11644             case CEE_CONV_I2:
11645                 lclTyp = TYP_SHORT;
11646                 goto CONV;
11647             case CEE_CONV_I:
11648                 lclTyp = TYP_I_IMPL;
11649                 goto CONV;
11650             case CEE_CONV_I4:
11651                 lclTyp = TYP_INT;
11652                 goto CONV;
11653             case CEE_CONV_I8:
11654                 lclTyp = TYP_LONG;
11655                 goto CONV;
11656
11657             case CEE_CONV_U1:
11658                 lclTyp = TYP_UBYTE;
11659                 goto CONV;
11660             case CEE_CONV_U2:
11661                 lclTyp = TYP_CHAR;
11662                 goto CONV;
11663 #if (REGSIZE_BYTES == 8)
11664             case CEE_CONV_U:
11665                 lclTyp = TYP_U_IMPL;
11666                 goto CONV_UN;
11667 #else
11668             case CEE_CONV_U:
11669                 lclTyp = TYP_U_IMPL;
11670                 goto CONV;
11671 #endif
11672             case CEE_CONV_U4:
11673                 lclTyp = TYP_UINT;
11674                 goto CONV;
11675             case CEE_CONV_U8:
11676                 lclTyp = TYP_ULONG;
11677                 goto CONV_UN;
11678
11679             case CEE_CONV_R4:
11680                 lclTyp = TYP_FLOAT;
11681                 goto CONV;
11682             case CEE_CONV_R8:
11683                 lclTyp = TYP_DOUBLE;
11684                 goto CONV;
11685
11686             case CEE_CONV_R_UN:
11687                 lclTyp = TYP_DOUBLE;
11688                 goto CONV_UN;
11689
11690             CONV_UN:
11691                 uns  = true;
11692                 ovfl = false;
11693                 goto _CONV;
11694
11695             CONV:
11696                 uns  = false;
11697                 ovfl = false;
11698                 goto _CONV;
11699
11700             _CONV:
11701                 // just check that we have a number on the stack
11702                 if (tiVerificationNeeded)
11703                 {
11704                     const typeInfo& tiVal = impStackTop().seTypeInfo;
11705                     Verify(tiVal.IsNumberType(), "bad arg");
11706
11707 #ifdef _TARGET_64BIT_
11708                     bool isNative = false;
11709
11710                     switch (opcode)
11711                     {
11712                         case CEE_CONV_OVF_I:
11713                         case CEE_CONV_OVF_I_UN:
11714                         case CEE_CONV_I:
11715                         case CEE_CONV_OVF_U:
11716                         case CEE_CONV_OVF_U_UN:
11717                         case CEE_CONV_U:
11718                             isNative = true;
11719                         default:
11720                             // leave 'isNative' = false;
11721                             break;
11722                     }
11723                     if (isNative)
11724                     {
11725                         tiRetVal = typeInfo::nativeInt();
11726                     }
11727                     else
11728 #endif // _TARGET_64BIT_
11729                     {
11730                         tiRetVal = typeInfo(lclTyp).NormaliseForStack();
11731                     }
11732                 }
11733
11734                 // Only conversions from FLOAT or DOUBLE to an integer type, and conversions
11735                 // from ULONG (or LONG on ARM) to DOUBLE, are morphed into helper calls.
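                      // (e.g. double->long typically becomes CORINFO_HELP_DBL2LNG and ulong->double
                      // becomes CORINFO_HELP_ULNG2DBL once morph processes the cast)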
11736
11737                 if (varTypeIsFloating(lclTyp))
11738                 {
11739                     callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl
11740 #ifdef _TARGET_64BIT_
11741                                // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK?
11742                                // TYP_BYREF could be used as TYP_I_IMPL which is long.
11743                                // TODO-CQ: remove this when we lower casts long/ulong --> float/double
11744                                // and generate SSE2 code instead of going through helper calls.
11745                                || (impStackTop().val->TypeGet() == TYP_BYREF)
11746 #endif
11747                         ;
11748                 }
11749                 else
11750                 {
11751                     callNode = varTypeIsFloating(impStackTop().val->TypeGet());
11752                 }
11753
11754                 // At this point uns, ovfl, and callNode are all set
11755
11756                 op1 = impPopStack().val;
11757                 impBashVarAddrsToI(op1);
11758
11759                 if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND)
11760                 {
11761                     op2 = op1->gtOp.gtOp2;
11762
11763                     if (op2->gtOper == GT_CNS_INT)
11764                     {
11765                         ssize_t ival = op2->gtIntCon.gtIconVal;
11766                         ssize_t mask, umask;
11767
11768                         switch (lclTyp)
11769                         {
11770                             case TYP_BYTE:
11771                             case TYP_UBYTE:
11772                                 mask  = 0x00FF;
11773                                 umask = 0x007F;
11774                                 break;
11775                             case TYP_CHAR:
11776                             case TYP_SHORT:
11777                                 mask  = 0xFFFF;
11778                                 umask = 0x7FFF;
11779                                 break;
11780
11781                             default:
11782                                 assert(!"unexpected type");
11783                                 return;
11784                         }
11785
11786                         if (((ival & umask) == ival) || ((ival & mask) == ival && uns))
11787                         {
11788                             /* Toss the cast, it's a waste of time */
11789
11790                             impPushOnStack(op1, tiRetVal);
11791                             break;
11792                         }
11793                         else if (ival == mask)
11794                         {
11795                             /* Toss the masking, it's a waste of time, since
11796                                we sign-extend from the small value anyway */
11797
11798                             op1 = op1->gtOp.gtOp1;
11799                         }
11800                     }
11801                 }
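                      // Worked example: for '(x & 0x7F); conv.i1', 0x7F satisfies (ival & umask) == ival,
                      // so the AND already guarantees the value fits in a signed byte and the cast is
                      // dropped.  For '(x & 0xFF); conv.i1', ival == mask, so the AND itself is redundant
                      // (the narrowing cast truncates to 8 bits anyway) and only the AND is removed.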
11802
11803                 /*  The 'op2' sub-operand of a cast is the 'real' type number,
11804                     since the result of a cast to one of the 'small' integer
11805                     types is an integer.
11806                  */
11807
11808                 type = genActualType(lclTyp);
11809
11810 #if SMALL_TREE_NODES
11811                 if (callNode)
11812                 {
11813                     op1 = gtNewCastNodeL(type, op1, lclTyp);
11814                 }
11815                 else
11816 #endif // SMALL_TREE_NODES
11817                 {
11818                     op1 = gtNewCastNode(type, op1, lclTyp);
11819                 }
11820
11821                 if (ovfl)
11822                 {
11823                     op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT);
11824                 }
11825                 if (uns)
11826                 {
11827                     op1->gtFlags |= GTF_UNSIGNED;
11828                 }
11829                 impPushOnStack(op1, tiRetVal);
11830                 break;
11831
11832             case CEE_NEG:
11833                 if (tiVerificationNeeded)
11834                 {
11835                     tiRetVal = impStackTop().seTypeInfo;
11836                     Verify(tiRetVal.IsNumberType(), "Bad arg");
11837                 }
11838
11839                 op1 = impPopStack().val;
11840                 impBashVarAddrsToI(op1, nullptr);
11841                 impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal);
11842                 break;
11843
11844             case CEE_POP:
11845                 if (tiVerificationNeeded)
11846                 {
11847                     impStackTop(0);
11848                 }
11849
11850                 /* Pull the top value from the stack */
11851
11852                 op1 = impPopStack(clsHnd).val;
11853
11854                 /* Get hold of the type of the value being duplicated */
11855
11856                 lclTyp = genActualType(op1->gtType);
11857
11858                 /* Does the value have any side effects? */
11859
11860                 if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode)
11861                 {
11862                     // Since we are throwing away the value, just normalize
11863                     // it to its address.  This is more efficient.
11864
11865                     if (varTypeIsStruct(op1))
11866                     {
11867 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
11868                         // Non-calls, such as obj or ret_expr, have to go through this.
11869                         // Calls with large struct return value have to go through this.
11870                         // Helper calls with small struct return value also have to go
11871                         // through this since they do not follow Unix calling convention.
11872                         if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd) ||
11873                             op1->AsCall()->gtCallType == CT_HELPER)
11874 #endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
11875                         {
11876                             op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false);
11877                         }
11878                     }
11879
11880                     // If op1 is non-overflow cast, throw it away since it is useless.
11881                     // Another reason for throwing away the useless cast is in the context of
11882                     // implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)).
11883                     // The cast gets added as part of importing GT_CALL, which gets in the way
11884                     // of fgMorphCall() on the forms of tail call nodes that we assert.
11885                     if ((op1->gtOper == GT_CAST) && !op1->gtOverflow())
11886                     {
11887                         op1 = op1->gtOp.gtOp1;
11888                     }
11889
11890                     // If 'op1' is an expression, create an assignment node.
11891                     // Helps analyses (like CSE) to work fine.
11892
11893                     if (op1->gtOper != GT_CALL)
11894                     {
11895                         op1 = gtUnusedValNode(op1);
11896                     }
11897
11898                     /* Append the value to the tree list */
11899                     goto SPILL_APPEND;
11900                 }
11901
11902                 /* No side effects - just throw the <BEEP> thing away */
11903                 break;
11904
11905             case CEE_DUP:
11906
11907                 if (tiVerificationNeeded)
11908                 {
11909                     // Dup could start the beginning of a delegate creation sequence; remember that
11910                     delegateCreateStart = codeAddr - 1;
11911                     impStackTop(0);
11912                 }
11913
11914                 // Convert a (dup, stloc) sequence into a (stloc, ldloc) sequence in the following cases:
11915                 // - If this is non-debug code - so that CSE will recognize the two as equal.
11916                 //   This helps eliminate a redundant bounds check in cases such as:
11917                 //       ariba[i+3] += some_value;
11918                 // - If the top of the stack is a non-leaf that may be expensive to clone.
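                      // Illustrative IL (not from this file): a sequence such as
                      //     dup
                      //     stloc.1
                      // is imported as if it were
                      //     stloc.1
                      //     ldloc.1
                      // so both consumers read the same local, which CSE can then recognize as equal.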
11919
11920                 if (codeAddr < codeEndp)
11921                 {
11922                     OPCODE nextOpcode = (OPCODE)getU1LittleEndian(codeAddr);
11923                     if (impIsAnySTLOC(nextOpcode))
11924                     {
11925                         if (!opts.compDbgCode)
11926                         {
11927                             insertLdloc = true;
11928                             break;
11929                         }
11930                         GenTree* stackTop = impStackTop().val;
11931                         if (!stackTop->IsIntegralConst(0) && !stackTop->IsFPZero() && !stackTop->IsLocal())
11932                         {
11933                             insertLdloc = true;
11934                             break;
11935                         }
11936                     }
11937                 }
11938
11939                 /* Pull the top value from the stack */
11940                 op1 = impPopStack(tiRetVal);
11941
11942                 /* Clone the value */
11943                 op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL,
11944                                    nullptr DEBUGARG("DUP instruction"));
11945
11946                 /* Either the tree started with no global effects, or impCloneExpr
11947                    evaluated the tree to a temp and returned two copies of that
11948                    temp. Either way, neither op1 nor op2 should have side effects.
11949                 */
11950                 assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT));
11951
11952                 /* Push the tree/temp back on the stack */
11953                 impPushOnStack(op1, tiRetVal);
11954
11955                 /* Push the copy on the stack */
11956                 impPushOnStack(op2, tiRetVal);
11957
11958                 break;
11959
11960             case CEE_STIND_I1:
11961                 lclTyp = TYP_BYTE;
11962                 goto STIND;
11963             case CEE_STIND_I2:
11964                 lclTyp = TYP_SHORT;
11965                 goto STIND;
11966             case CEE_STIND_I4:
11967                 lclTyp = TYP_INT;
11968                 goto STIND;
11969             case CEE_STIND_I8:
11970                 lclTyp = TYP_LONG;
11971                 goto STIND;
11972             case CEE_STIND_I:
11973                 lclTyp = TYP_I_IMPL;
11974                 goto STIND;
11975             case CEE_STIND_REF:
11976                 lclTyp = TYP_REF;
11977                 goto STIND;
11978             case CEE_STIND_R4:
11979                 lclTyp = TYP_FLOAT;
11980                 goto STIND;
11981             case CEE_STIND_R8:
11982                 lclTyp = TYP_DOUBLE;
11983                 goto STIND;
11984             STIND:
11985
11986                 if (tiVerificationNeeded)
11987                 {
11988                     typeInfo instrType(lclTyp);
11989 #ifdef _TARGET_64BIT_
11990                     if (opcode == CEE_STIND_I)
11991                     {
11992                         instrType = typeInfo::nativeInt();
11993                     }
11994 #endif // _TARGET_64BIT_
11995                     verVerifySTIND(impStackTop(1).seTypeInfo, impStackTop(0).seTypeInfo, instrType);
11996                 }
11997                 else
11998                 {
11999                     compUnsafeCastUsed = true; // Have to go conservative
12000                 }
12001
12002             STIND_POST_VERIFY:
12003
12004                 op2 = impPopStack().val; // value to store
12005                 op1 = impPopStack().val; // address to store to
12006
12007                 // you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF
12008                 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12009
12010                 impBashVarAddrsToI(op1, op2);
12011
12012                 op2 = impImplicitR4orR8Cast(op2, lclTyp);
12013
12014 #ifdef _TARGET_64BIT_
12015                 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
12016                 if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
12017                 {
12018                     op2->gtType = TYP_I_IMPL;
12019                 }
12020                 else
12021                 {
12022                     // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
12023                     //
12024                     if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
12025                     {
12026                         assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12027                         op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
12028                     }
12029                     // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12030                     //
12031                     if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
12032                     {
12033                         assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12034                         op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
12035                     }
12036                 }
12037 #endif // _TARGET_64BIT_
12038
12039                 if (opcode == CEE_STIND_REF)
12040                 {
12041                     // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF
12042                     assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType));
12043                     lclTyp = genActualType(op2->TypeGet());
12044                 }
12045
12046 // Check target type.
12047 #ifdef DEBUG
12048                 if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF)
12049                 {
12050                     if (op2->gtType == TYP_BYREF)
12051                     {
12052                         assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL);
12053                     }
12054                     else if (lclTyp == TYP_BYREF)
12055                     {
12056                         assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType));
12057                     }
12058                 }
12059                 else
12060                 {
12061                     assertImp(genActualType(op2->gtType) == genActualType(lclTyp) ||
12062                               ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) ||
12063                               (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp)));
12064                 }
12065 #endif
12066
12067                 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12068
12069                 // stind could point anywhere, for example a boxed class static int
12070                 op1->gtFlags |= GTF_IND_TGTANYWHERE;
12071
12072                 if (prefixFlags & PREFIX_VOLATILE)
12073                 {
12074                     assert(op1->OperGet() == GT_IND);
12075                     op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
12076                     op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12077                     op1->gtFlags |= GTF_IND_VOLATILE;
12078                 }
12079
12080                 if (prefixFlags & PREFIX_UNALIGNED)
12081                 {
12082                     assert(op1->OperGet() == GT_IND);
12083                     op1->gtFlags |= GTF_IND_UNALIGNED;
12084                 }
12085
12086                 op1 = gtNewAssignNode(op1, op2);
12087                 op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
12088
12089                 // Spill side-effects AND global-data-accesses
12090                 if (verCurrentState.esStackDepth > 0)
12091                 {
12092                     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND"));
12093                 }
12094
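                      // For example, for "ldc.i4 42; stind.i4" through an int pointer, the statement appended
                      // below is roughly ASG(IND(int, addr), 42).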
12095                 goto APPEND;
12096
12097             case CEE_LDIND_I1:
12098                 lclTyp = TYP_BYTE;
12099                 goto LDIND;
12100             case CEE_LDIND_I2:
12101                 lclTyp = TYP_SHORT;
12102                 goto LDIND;
12103             case CEE_LDIND_U4:
12104             case CEE_LDIND_I4:
12105                 lclTyp = TYP_INT;
12106                 goto LDIND;
12107             case CEE_LDIND_I8:
12108                 lclTyp = TYP_LONG;
12109                 goto LDIND;
12110             case CEE_LDIND_REF:
12111                 lclTyp = TYP_REF;
12112                 goto LDIND;
12113             case CEE_LDIND_I:
12114                 lclTyp = TYP_I_IMPL;
12115                 goto LDIND;
12116             case CEE_LDIND_R4:
12117                 lclTyp = TYP_FLOAT;
12118                 goto LDIND;
12119             case CEE_LDIND_R8:
12120                 lclTyp = TYP_DOUBLE;
12121                 goto LDIND;
12122             case CEE_LDIND_U1:
12123                 lclTyp = TYP_UBYTE;
12124                 goto LDIND;
12125             case CEE_LDIND_U2:
12126                 lclTyp = TYP_CHAR;
12127                 goto LDIND;
12128             LDIND:
12129
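                      // All of the ldind.* opcodes above funnel here, with lclTyp set to the type being
                      // loaded (e.g. ldind.u2 -> TYP_CHAR, ldind.i -> TYP_I_IMPL).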
12130                 if (tiVerificationNeeded)
12131                 {
12132                     typeInfo lclTiType(lclTyp);
12133 #ifdef _TARGET_64BIT_
12134                     if (opcode == CEE_LDIND_I)
12135                     {
12136                         lclTiType = typeInfo::nativeInt();
12137                     }
12138 #endif // _TARGET_64BIT_
12139                     tiRetVal = verVerifyLDIND(impStackTop().seTypeInfo, lclTiType);
12140                     tiRetVal.NormaliseForStack();
12141                 }
12142                 else
12143                 {
12144                     compUnsafeCastUsed = true; // Have to go conservative
12145                 }
12146
12147             LDIND_POST_VERIFY:
12148
12149                 op1 = impPopStack().val; // address to load from
12150                 impBashVarAddrsToI(op1);
12151
12152 #ifdef _TARGET_64BIT_
12153                 // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12154                 //
12155                 if (genActualType(op1->gtType) == TYP_INT)
12156                 {
12157                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12158                     op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
12159                 }
12160 #endif
12161
12162                 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12163
12164                 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12165
12166                 // ldind could point anywhere, for example a boxed class static int
12167                 op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
12168
12169                 if (prefixFlags & PREFIX_VOLATILE)
12170                 {
12171                     assert(op1->OperGet() == GT_IND);
12172                     op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
12173                     op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12174                     op1->gtFlags |= GTF_IND_VOLATILE;
12175                 }
12176
12177                 if (prefixFlags & PREFIX_UNALIGNED)
12178                 {
12179                     assert(op1->OperGet() == GT_IND);
12180                     op1->gtFlags |= GTF_IND_UNALIGNED;
12181                 }
12182
12183                 impPushOnStack(op1, tiRetVal);
12184
12185                 break;
12186
12187             case CEE_UNALIGNED:
12188
12189                 assert(sz == 1);
12190                 val = getU1LittleEndian(codeAddr);
12191                 ++codeAddr;
12192                 JITDUMP(" %u", val);
12193                 if ((val != 1) && (val != 2) && (val != 4))
12194                 {
12195                     BADCODE("Alignment unaligned. must be 1, 2, or 4");
12196                 }
12197
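                      // A typical use is "unaligned. 1" (or 2 or 4) immediately before a ldind/stind/ldfld/stfld/
                      // ldobj/stobj/initblk/cpblk, indicating that the address may have only that alignment.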
12198                 Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. prefixes");
12199                 prefixFlags |= PREFIX_UNALIGNED;
12200
12201                 impValidateMemoryAccessOpcode(codeAddr, codeEndp, false);
12202
12203             PREFIX:
12204                 opcode = (OPCODE)getU1LittleEndian(codeAddr);
12205                 codeAddr += sizeof(__int8);
12206                 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
12207                 goto DECODE_OPCODE;
12208
12209             case CEE_VOLATILE:
12210
12211                 Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes");
12212                 prefixFlags |= PREFIX_VOLATILE;
12213
12214                 impValidateMemoryAccessOpcode(codeAddr, codeEndp, true);
12215
12216                 assert(sz == 0);
12217                 goto PREFIX;
12218
12219             case CEE_LDFTN:
12220             {
12221                 // Need to do a lookup here so that we perform an access check
12222                 // and do a NOWAY if protections are violated
12223                 _impResolveToken(CORINFO_TOKENKIND_Method);
12224
12225                 JITDUMP(" %08X", resolvedToken.token);
12226
12227                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12228                               addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN)),
12229                               &callInfo);
12230
12231                 // This check really only applies to intrinsic Array.Address methods
12232                 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12233                 {
12234                     NO_WAY("Currently do not support LDFTN of Parameterized functions");
12235                 }
12236
12237                 // Do this before DO_LDFTN since CEE_LDVIRTFN does it on its own.
12238                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12239
12240                 if (tiVerificationNeeded)
12241                 {
12242                     // LDFTN could be the start of a delegate creation sequence; remember where it begins
12243                     delegateCreateStart = codeAddr - 2;
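                          // A verifiable delegate creation sequence looks like, e.g.:
                          //     ldftn <method>                 (or: dup; ldvirtftn <method>)
                          //     newobj instance void SomeDelegate::.ctor(object, native int)
                          // codeAddr - 2 points back at the two-byte ldftn opcode (0xFE 0x06).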
12244
12245                     // check any constraints on the callee's class and type parameters
12246                     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12247                                    "method has unsatisfied class constraints");
12248                     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12249                                                                                 resolvedToken.hMethod),
12250                                    "method has unsatisfied method constraints");
12251
12252                     mflags = callInfo.verMethodFlags;
12253                     Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDFTN on a constructor");
12254                 }
12255
12256             DO_LDFTN:
12257                 op1 = impMethodPointer(&resolvedToken, &callInfo);
12258                 if (compDonotInline())
12259                 {
12260                     return;
12261                 }
12262
12263                 impPushOnStack(op1, typeInfo(resolvedToken.hMethod));
12264
12265                 break;
12266             }
12267
12268             case CEE_LDVIRTFTN:
12269             {
12270                 /* Get the method token */
12271
12272                 _impResolveToken(CORINFO_TOKENKIND_Method);
12273
12274                 JITDUMP(" %08X", resolvedToken.token);
12275
12276                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */,
12277                               addVerifyFlag(combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN),
12278                                                     CORINFO_CALLINFO_CALLVIRT)),
12279                               &callInfo);
12280
12281                 // This check really only applies to intrinsic Array.Address methods
12282                 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12283                 {
12284                     NO_WAY("Currently do not support LDFTN of Parameterized functions");
12285                 }
12286
12287                 mflags = callInfo.methodFlags;
12288
12289                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12290
12291                 if (compIsForInlining())
12292                 {
12293                     if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12294                     {
12295                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL);
12296                         return;
12297                     }
12298                 }
12299
12300                 CORINFO_SIG_INFO& ftnSig = callInfo.sig;
12301
12302                 if (tiVerificationNeeded)
12303                 {
12304
12305                     Verify(ftnSig.hasThis(), "ldvirtftn on a static method");
12306                     Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDVIRTFTN on a constructor");
12307
12308                     // JIT32 verifier rejects verifiable ldvirtftn pattern
12309                     typeInfo declType =
12310                         verMakeTypeInfo(resolvedToken.hClass, true); // Change TI_STRUCT to TI_REF when necessary
12311
12312                     typeInfo arg = impStackTop().seTypeInfo;
12313                     Verify((arg.IsType(TI_REF) || arg.IsType(TI_NULL)) && tiCompatibleWith(arg, declType, true),
12314                            "bad ldvirtftn");
12315
12316                     CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
12317                     if (!(arg.IsType(TI_NULL) || (mflags & CORINFO_FLG_STATIC)))
12318                     {
12319                         instanceClassHnd = arg.GetClassHandleForObjRef();
12320                     }
12321
12322                     // check any constraints on the method's class and type parameters
12323                     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12324                                    "method has unsatisfied class constraints");
12325                     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12326                                                                                 resolvedToken.hMethod),
12327                                    "method has unsatisfied method constraints");
12328
12329                     if (mflags & CORINFO_FLG_PROTECTED)
12330                     {
12331                         Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
12332                                "Accessing protected method through wrong type.");
12333                     }
12334                 }
12335
12336                 /* Get the object-ref */
12337                 op1 = impPopStack().val;
12338                 assertImp(op1->gtType == TYP_REF);
12339
12340                 if (opts.IsReadyToRun())
12341                 {
12342                     if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN)
12343                     {
12344                         if (op1->gtFlags & GTF_SIDE_EFFECT)
12345                         {
12346                             op1 = gtUnusedValNode(op1);
12347                             impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12348                         }
12349                         goto DO_LDFTN;
12350                     }
12351                 }
12352                 else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12353                 {
12354                     if (op1->gtFlags & GTF_SIDE_EFFECT)
12355                     {
12356                         op1 = gtUnusedValNode(op1);
12357                         impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12358                     }
12359                     goto DO_LDFTN;
12360                 }
12361
12362                 GenTreePtr fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo);
12363                 if (compDonotInline())
12364                 {
12365                     return;
12366                 }
12367
12368                 impPushOnStack(fptr, typeInfo(resolvedToken.hMethod));
12369
12370                 break;
12371             }
12372
12373             case CEE_CONSTRAINED:
12374
12375                 assertImp(sz == sizeof(unsigned));
12376                 impResolveToken(codeAddr, &constrainedResolvedToken, CORINFO_TOKENKIND_Constrained);
12377                 codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually
12378                 JITDUMP(" (%08X) ", constrainedResolvedToken.token);
12379
12380                 Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes");
12381                 prefixFlags |= PREFIX_CONSTRAINED;
12382
12383                 {
12384                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12385                     if (actualOpcode != CEE_CALLVIRT)
12386                     {
12387                         BADCODE("constrained. has to be followed by callvirt");
12388                     }
12389                 }
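                      // For example, "constrained. !T  callvirt instance string [mscorlib]System.Object::ToString()"
                      // lets the caller invoke ToString through a managed pointer to !T without explicitly boxing;
                      // boxing is only introduced when !T does not implement the method itself.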
12390
12391                 goto PREFIX;
12392
12393             case CEE_READONLY:
12394                 JITDUMP(" readonly.");
12395
12396                 Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes");
12397                 prefixFlags |= PREFIX_READONLY;
12398
12399                 {
12400                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12401                     if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode))
12402                     {
12403                         BADCODE("readonly. has to be followed by ldelema or call");
12404                     }
12405                 }
12406
12407                 assert(sz == 0);
12408                 goto PREFIX;
12409
12410             case CEE_TAILCALL:
12411                 JITDUMP(" tail.");
12412
12413                 Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes");
12414                 prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
12415
12416                 {
12417                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12418                     if (!impOpcodeIsCallOpcode(actualOpcode))
12419                     {
12420                         BADCODE("tailcall. has to be followed by call, callvirt or calli");
12421                     }
12422                 }
12423                 assert(sz == 0);
12424                 goto PREFIX;
12425
12426             case CEE_NEWOBJ:
12427
12428                 /* Since we will implicitly insert newObjThisPtr at the start of the
12429                    argument list, spill any GTF_ORDER_SIDEEFF */
12430                 impSpillSpecialSideEff();
12431
12432                 /* NEWOBJ does not respond to TAIL */
12433                 prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT;
12434
12435                 /* NEWOBJ does not respond to CONSTRAINED */
12436                 prefixFlags &= ~PREFIX_CONSTRAINED;
12437
12438 #if COR_JIT_EE_VERSION > 460
12439                 _impResolveToken(CORINFO_TOKENKIND_NewObj);
12440 #else
12441                 _impResolveToken(CORINFO_TOKENKIND_Method);
12442 #endif
12443
12444                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12445                               addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM)),
12446                               &callInfo);
12447
12448                 if (compIsForInlining())
12449                 {
12450                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
12451                     {
12452                         // Check to see if this call violates the boundary.
12453                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
12454                         return;
12455                     }
12456                 }
12457
12458                 mflags = callInfo.methodFlags;
12459
12460                 if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0)
12461                 {
12462                     BADCODE("newobj on static or abstract method");
12463                 }
12464
12465                 // Insert the security callout before any actual code is generated
12466                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12467
12468                 // There are three different cases for new:
12469                 //      1) Object is an array (arrays are treated specially by the EE)
12470                 //      2) Object is some other variable-sized object (e.g. String)
12471                 //      3) Class size can be determined beforehand (the normal case)
12472                 // For cases 1) and 2) the object size is variable (it depends on the arguments).
12473                 // In the first case we need to call a NEWOBJ helper (multinewarray),
12474                 // in the second case we call the constructor with a null 'this' pointer,
12475                 // and in the third case we allocate the memory and then call the constructor.
12476
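                      // For example: "newobj instance void int32[,]::.ctor(int32, int32)" is case 1,
                      // "newobj instance void System.String::.ctor(char[])" is case 2, and
                      // "newobj instance void SomeClass::.ctor()" is case 3.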
12477                 clsFlags = callInfo.classFlags;
12478                 if (clsFlags & CORINFO_FLG_ARRAY)
12479                 {
12480                     if (tiVerificationNeeded)
12481                     {
12482                         CORINFO_CLASS_HANDLE elemTypeHnd;
12483                         INDEBUG(CorInfoType corType =)
12484                         info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
12485                         assert(!(elemTypeHnd == nullptr && corType == CORINFO_TYPE_VALUECLASS));
12486                         Verify(elemTypeHnd == nullptr ||
12487                                    !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
12488                                "newarr of byref-like objects");
12489                         verVerifyCall(opcode, &resolvedToken, nullptr, ((prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0),
12490                                       ((prefixFlags & PREFIX_READONLY) != 0), delegateCreateStart, codeAddr - 1,
12491                                       &callInfo DEBUGARG(info.compFullName));
12492                     }
12493                     // Arrays need to call the NEWOBJ helper.
12494                     assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE);
12495
12496                     impImportNewObjArray(&resolvedToken, &callInfo);
12497                     if (compDonotInline())
12498                     {
12499                         return;
12500                     }
12501
12502                     callTyp = TYP_REF;
12503                     break;
12504                 }
12505                 // At present this can only be String
12506                 else if (clsFlags & CORINFO_FLG_VAROBJSIZE)
12507                 {
12508                     if (IsTargetAbi(CORINFO_CORERT_ABI))
12509                     {
12510                         // The dummy argument does not exist in CoreRT
12511                         newObjThisPtr = nullptr;
12512                     }
12513                     else
12514                     {
12515                         // This is the case for variable-sized objects that are not
12516                         // arrays.  In this case, call the constructor with a null 'this'
12517                         // pointer
12518                         newObjThisPtr = gtNewIconNode(0, TYP_REF);
12519                     }
12520
12521                     /* Remember that this basic block contains 'new' of an object */
12522                     block->bbFlags |= BBF_HAS_NEWOBJ;
12523                     optMethodFlags |= OMF_HAS_NEWOBJ;
12524                 }
12525                 else
12526                 {
12527                     // This is the normal case where the size of the object is
12528                     // fixed.  Allocate the memory and call the constructor.
12529
12530                     // Note: We cannot add a peep to avoid use of temp here
12531                     // because we don't have enough interference info to detect when
12532                     // sources and destination interfere, e.g.: s = new S(ref);
12533
12534                     // TODO: Find the correct place to introduce a general
12535                     // reverse copy prop for struct return values from newobj or
12536                     // any function returning structs.
12537
12538                     /* get a temporary for the new object */
12539                     lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp"));
12540
12541                     // In the value class case we only need clsHnd for size calcs.
12542                     //
12543                     // The lookup of the code pointer will be handled by CALL in this case
12544                     if (clsFlags & CORINFO_FLG_VALUECLASS)
12545                     {
12546                         if (compIsForInlining())
12547                         {
12548                             // If value class has GC fields, inform the inliner. It may choose to
12549                             // bail out on the inline.
12550                             DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
12551                             if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
12552                             {
12553                                 compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
12554                                 if (compInlineResult->IsFailure())
12555                                 {
12556                                     return;
12557                                 }
12558
12559                                 // Do further notification in the case where the call site is rare;
12560                                 // some policies do not track the relative hotness of call sites for
12561                                 // "always" inline cases.
12562                                 if (impInlineInfo->iciBlock->isRunRarely())
12563                                 {
12564                                     compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
12565                                     if (compInlineResult->IsFailure())
12566                                     {
12567                                         return;
12568                                     }
12569                                 }
12570                             }
12571                         }
12572
12573                         CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
12574                         unsigned    size   = info.compCompHnd->getClassSize(resolvedToken.hClass);
12575
12576                         if (impIsPrimitive(jitTyp))
12577                         {
12578                             lvaTable[lclNum].lvType = JITtype2varType(jitTyp);
12579                         }
12580                         else
12581                         {
12582                             // The local variable itself is the allocated space.
12583                             // Here we need the unsafe value-class check, since the address of the struct is taken
12584                             // for further use and is potentially exploitable.
12585                             lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */);
12586                         }
12587
12588                         // Append a tree to zero-out the temp
12589                         newObjThisPtr = gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet());
12590
12591                         newObjThisPtr = gtNewBlkOpNode(newObjThisPtr,    // Dest
12592                                                        gtNewIconNode(0), // Value
12593                                                        size,             // Size
12594                                                        false,            // isVolatile
12595                                                        false);           // not copyBlock
12596                         impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
12597
12598                         // Obtain the address of the temp
12599                         newObjThisPtr =
12600                             gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet()));
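                              // Roughly, for "newobj SomeStruct::.ctor(...)" this produces a block-init that zeroes
                              // the temp, followed by newObjThisPtr = &tmpN, which becomes the 'this' argument for
                              // the constructor call made at the CALL label below.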
12601                     }
12602                     else
12603                     {
12604 #ifdef FEATURE_READYTORUN_COMPILER
12605                         if (opts.IsReadyToRun())
12606                         {
12607                             op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
12608                             usingReadyToRunHelper = (op1 != nullptr);
12609                         }
12610
12611                         if (!usingReadyToRunHelper)
12612 #endif
12613                         {
12614                             op1 = impParentClassTokenToHandle(&resolvedToken, nullptr, TRUE);
12615                             if (op1 == nullptr)
12616                             { // compDonotInline()
12617                                 return;
12618                             }
12619
12620                             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
12621                             // and the newfast call with a single call to a dynamic R2R cell that will:
12622                             //      1) Load the context
12623                             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate
12624                             //      stub
12625                             //      3) Allocate and return the new object
12626                             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
12627
12628                             op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd),
12629                                                     resolvedToken.hClass, TYP_REF, op1);
12630                         }
12631
12632                         // Remember that this basic block contains 'new' of an object
12633                         block->bbFlags |= BBF_HAS_NEWOBJ;
12634                         optMethodFlags |= OMF_HAS_NEWOBJ;
12635
12636                         // Append the assignment to the temp/local. We don't need to spill
12637                         // at all, as we are just calling an EE/JIT helper which can only
12638                         // cause an (async) OutOfMemoryException.
12639
12640                         // We assign the newly allocated object (by a GT_ALLOCOBJ node)
12641                         // to a temp. Note that the pattern "temp = allocObj" is required
12642                         // by ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes
12643                         // without exhaustive walk over all expressions.
12644
12645                         impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE);
12646
12647                         newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF);
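                              // So for an ordinary reference type the importer produces roughly
                              // "tmpN = ALLOCOBJ(newHelper, clsHnd)", and tmpN then becomes the 'this'
                              // argument for the constructor call made at the CALL label below.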
12648                     }
12649                 }
12650                 goto CALL;
12651
12652             case CEE_CALLI:
12653
12654                 /* CALLI does not respond to CONSTRAINED */
12655                 prefixFlags &= ~PREFIX_CONSTRAINED;
12656
12657                 if (compIsForInlining())
12658                 {
12659                     // CALLI doesn't have a method handle, so assume the worst.
12660                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
12661                     {
12662                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_CALLI);
12663                         return;
12664                     }
12665                 }
12666
12667             // fall through
12668
12669             case CEE_CALLVIRT:
12670             case CEE_CALL:
12671
12672                 // We can't call getCallInfo on the token from a CALLI, but we need it in
12673                 // many other places.  We unfortunately embed that knowledge here.
12674                 if (opcode != CEE_CALLI)
12675                 {
12676                     _impResolveToken(CORINFO_TOKENKIND_Method);
12677
12678                     eeGetCallInfo(&resolvedToken,
12679                                   (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr,
12680                                   // this is how impImportCall invokes getCallInfo
12681                                   addVerifyFlag(
12682                                       combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS),
12683                                               (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT
12684                                                                        : CORINFO_CALLINFO_NONE)),
12685                                   &callInfo);
12686                 }
12687                 else
12688                 {
12689                     // Suppress uninitialized use warning.
12690                     memset(&resolvedToken, 0, sizeof(resolvedToken));
12691                     memset(&callInfo, 0, sizeof(callInfo));
12692
12693                     resolvedToken.token = getU4LittleEndian(codeAddr);
12694                 }
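                      // (For CALLI the inline token is a StandAloneSig describing the call-site signature,
                      // not a method token, so only the raw token value is recorded here.)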
12695
12696             CALL: // memberRef should be set.
12697                 // newObjThisPtr should be set for CEE_NEWOBJ
12698
12699                 JITDUMP(" %08X", resolvedToken.token);
12700                 constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0;
12701
12702                 bool newBBcreatedForTailcallStress;
12703
12704                 newBBcreatedForTailcallStress = false;
12705
12706                 if (compIsForInlining())
12707                 {
12708                     if (compDonotInline())
12709                     {
12710                         return;
12711                     }
12712                     // We rule out inlinees with explicit tail calls in fgMakeBasicBlocks.
12713                     assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0);
12714                 }
12715                 else
12716                 {
12717                     if (compTailCallStress())
12718                     {
12719                         // Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()?
12720                         // Tail call stress only recognizes call+ret patterns and forces them to be
12721                         // explicit tail prefixed calls. Also, under tail call stress, fgMakeBasicBlocks()
12722                         // doesn't import the 'ret' opcode following the call into the basic block containing
12723                         // the call; instead it imports it into a new basic block. Note that fgMakeBasicBlocks()
12724                         // is already checking that there is an opcode following the call, and hence it is
12725                         // safe here to read the next opcode without a bounds check.
12726                         newBBcreatedForTailcallStress =
12727                             impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL, (not a CEE_NEWOBJ). So, don't
12728                                                              // make it jump to RET.
12729                             (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET
12730
12731                         if (newBBcreatedForTailcallStress &&
12732                             !(prefixFlags & PREFIX_TAILCALL_EXPLICIT) && // User hasn't set "tail." prefix yet.
12733                             verCheckTailCallConstraint(opcode, &resolvedToken,
12734                                                        constraintCall ? &constrainedResolvedToken : nullptr,
12735                                                        true) // Is it legal to do a tail call?
12736                             )
12737                         {
12738                             // Stress the tailcall.
12739                             JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)");
12740                             prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
12741                         }
12742                     }
12743
12744                     // Note that when running under tail call stress, a call will be marked as explicit tail prefixed
12745                     // hence will not be considered for implicit tail calling.
12746                     bool isRecursive = (callInfo.hMethod == info.compMethodHnd);
12747                     if (impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive))
12748                     {
12749                         JITDUMP(" (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
12750                         prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
12751                     }
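                          // e.g. "call SomeMethod(...); ret" with no explicit tail. prefix may be marked here as an
                          // implicit tail call candidate and turned into a fast tail call later, if the target and
                          // calling convention allow it.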
12752                 }
12753
12754                 // Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call).
12755                 explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
12756                 readonlyCall     = (prefixFlags & PREFIX_READONLY) != 0;
12757
12758                 if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ)
12759                 {
12760                     // All calls and delegates need a security callout.
12761                     // For delegates, this is the call to the delegate constructor, not the access check on the
12762                     // LD(virt)FTN.
12763                     impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12764
12765 #if 0 // DevDiv 410397 - This breaks too many obfuscated apps to do this in an in-place release
12766      
12767                 // DevDiv 291703 - we need to check for accessibility between the caller of InitializeArray
12768                 // and the field it is reading, thus it is now unverifiable to not immediately precede with
12769                 // ldtoken <field token>, and we now check accessibility
12770                 if ((callInfo.methodFlags & CORINFO_FLG_INTRINSIC) &&
12771                     (info.compCompHnd->getIntrinsicID(callInfo.hMethod) == CORINFO_INTRINSIC_InitializeArray))
12772                 {
12773                     if (prevOpcode != CEE_LDTOKEN)
12774                     {
12775                         Verify(prevOpcode == CEE_LDTOKEN, "Need ldtoken for InitializeArray");
12776                     }
12777                     else
12778                     {
12779                         assert(lastLoadToken != NULL);
12780                         // Now that we know we have a token, verify that it is accessible for loading
12781                         CORINFO_RESOLVED_TOKEN resolvedLoadField;
12782                         impResolveToken(lastLoadToken, &resolvedLoadField, CORINFO_TOKENKIND_Field);
12783                         eeGetFieldInfo(&resolvedLoadField, CORINFO_ACCESS_INIT_ARRAY, &fieldInfo);
12784                         impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
12785                     }
12786                 }
12787
12788 #endif // DevDiv 410397
12789                 }
12790
12791                 if (tiVerificationNeeded)
12792                 {
12793                     verVerifyCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
12794                                   explicitTailCall, readonlyCall, delegateCreateStart, codeAddr - 1,
12795                                   &callInfo DEBUGARG(info.compFullName));
12796                 }
12797
12798                 // Insert delegate callout here.
12799                 if (opcode == CEE_NEWOBJ && (mflags & CORINFO_FLG_CONSTRUCTOR) && (clsFlags & CORINFO_FLG_DELEGATE))
12800                 {
12801 #ifdef DEBUG
12802                     // We should do this only if verification is enabled
12803                     // If verification is disabled, delegateCreateStart will not be initialized correctly
12804                     if (tiVerificationNeeded)
12805                     {
12806                         mdMemberRef delegateMethodRef = mdMemberRefNil;
12807                         // We should get here only for well formed delegate creation.
12808                         assert(verCheckDelegateCreation(delegateCreateStart, codeAddr - 1, delegateMethodRef));
12809                     }
12810 #endif
12811
12812 #ifdef FEATURE_CORECLR
12813                     // In coreclr the delegate transparency rule needs to be enforced even if verification is disabled
12814                     typeInfo              tiActualFtn          = impStackTop(0).seTypeInfo;
12815                     CORINFO_METHOD_HANDLE delegateMethodHandle = tiActualFtn.GetMethod2();
12816
12817                     impInsertCalloutForDelegate(info.compMethodHnd, delegateMethodHandle, resolvedToken.hClass);
12818 #endif // FEATURE_CORECLR
12819                 }
12820
12821                 callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
12822                                         newObjThisPtr, prefixFlags, &callInfo, opcodeOffs);
12823                 if (compDonotInline())
12824                 {
12825                     return;
12826                 }
12827
12828                 if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we
12829                                                                        // have created a new BB after the "call"
12830                 // instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless.
12831                 {
12832                     assert(!compIsForInlining());
12833                     goto RET;
12834                 }
12835
12836                 break;
12837
12838             case CEE_LDFLD:
12839             case CEE_LDSFLD:
12840             case CEE_LDFLDA:
12841             case CEE_LDSFLDA:
12842             {
12843
12844                 BOOL isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA);
12845                 BOOL isLoadStatic  = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA);
12846
12847                 /* Get the CP_Fieldref index */
12848                 assertImp(sz == sizeof(unsigned));
12849
12850                 _impResolveToken(CORINFO_TOKENKIND_Field);
12851
12852                 JITDUMP(" %08X", resolvedToken.token);
12853
12854                 int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET;
12855
12856                 GenTreePtr           obj     = nullptr;
12857                 typeInfo*            tiObj   = nullptr;
12858                 CORINFO_CLASS_HANDLE objType = nullptr; // used for fields
12859
12860                 if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA)
12861                 {
12862                     tiObj = &impStackTop().seTypeInfo;
12863                     obj   = impPopStack(objType).val;
12864
12865                     if (impIsThis(obj))
12866                     {
12867                         aflags |= CORINFO_ACCESS_THIS;
12868
12869                         // An optimization for Contextful classes:
12870                         // we unwrap the proxy when we have a 'this reference'
12871
12872                         if (info.compUnwrapContextful)
12873                         {
12874                             aflags |= CORINFO_ACCESS_UNWRAP;
12875                         }
12876                     }
12877                 }
12878
12879                 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
12880
12881                 // Figure out the type of the member.  We always call canAccessField, so you always need this
12882                 // handle
12883                 CorInfoType ciType = fieldInfo.fieldType;
12884                 clsHnd             = fieldInfo.structType;
12885
12886                 lclTyp = JITtype2varType(ciType);
12887
12888 #ifdef _TARGET_AMD64_
12889                 noway_assert(varTypeIsIntegralOrI(lclTyp) || varTypeIsFloating(lclTyp) || lclTyp == TYP_STRUCT);
12890 #endif // _TARGET_AMD64_
12891
12892                 if (compIsForInlining())
12893                 {
12894                     switch (fieldInfo.fieldAccessor)
12895                     {
12896                         case CORINFO_FIELD_INSTANCE_HELPER:
12897                         case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
12898                         case CORINFO_FIELD_STATIC_ADDR_HELPER:
12899                         case CORINFO_FIELD_STATIC_TLS:
12900
12901                             compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER);
12902                             return;
12903
12904                         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
12905 #if COR_JIT_EE_VERSION > 460
12906                         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
12907 #endif
12908                             /* We may be able to inline the field accessors in specific instantiations of generic
12909                              * methods */
12910                             compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER);
12911                             return;
12912
12913                         default:
12914                             break;
12915                     }
12916
12917                     if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT &&
12918                         clsHnd)
12919                     {
12920                         if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) &&
12921                             !(info.compFlags & CORINFO_FLG_FORCEINLINE))
12922                         {
12923                             // Loading a static valuetype field usually will cause a JitHelper to be called
12924                             // for the static base. This will bloat the code.
12925                             compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS);
12926
12927                             if (compInlineResult->IsFailure())
12928                             {
12929                                 return;
12930                             }
12931                         }
12932                     }
12933                 }
12934
12935                 tiRetVal = verMakeTypeInfo(ciType, clsHnd);
12936                 if (isLoadAddress)
12937                 {
12938                     tiRetVal.MakeByRef();
12939                 }
12940                 else
12941                 {
12942                     tiRetVal.NormaliseForStack();
12943                 }
12944
12945                 // Perform this check always to ensure that we get field access exceptions even with
12946                 // SkipVerification.
12947                 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
12948
12949                 if (tiVerificationNeeded)
12950                 {
12951                     // You can also pass the unboxed struct to LDFLD
12952                     BOOL bAllowPlainValueTypeAsThis = FALSE;
12953                     if (opcode == CEE_LDFLD && impIsValueType(tiObj))
12954                     {
12955                         bAllowPlainValueTypeAsThis = TRUE;
12956                     }
12957
12958                     verVerifyField(&resolvedToken, fieldInfo, tiObj, isLoadAddress, bAllowPlainValueTypeAsThis);
12959
12960                     // If we're doing this on a heap object or from a 'safe' byref
12961                     // then the result is a safe byref too
12962                     if (isLoadAddress) // load address
12963                     {
12964                         if (fieldInfo.fieldFlags &
12965                             CORINFO_FLG_FIELD_STATIC) // statics marked as safe will have permanent home
12966                         {
12967                             if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_SAFESTATIC_BYREF_RETURN)
12968                             {
12969                                 tiRetVal.SetIsPermanentHomeByRef();
12970                             }
12971                         }
12972                         else if (tiObj->IsObjRef() || tiObj->IsPermanentHomeByRef())
12973                         {
12974                             // ldflda of a byref is safe if done on a GC object or on a
12975                             // safe byref
12976                             tiRetVal.SetIsPermanentHomeByRef();
12977                         }
12978                     }
12979                 }
12980                 else
12981                 {
12982                     // tiVerificationNeeded is false.
12983                     // Raise InvalidProgramException if static load accesses non-static field
12984                     if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
12985                     {
12986                         BADCODE("static access on an instance field");
12987                     }
12988                 }
12989
12990                 // We are using ldfld/a on a static field. We allow it, but we still need to evaluate obj for its side effects.
12991                 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
12992                 {
12993                     if (obj->gtFlags & GTF_SIDE_EFFECT)
12994                     {
12995                         obj = gtUnusedValNode(obj);
12996                         impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12997                     }
12998                     obj = nullptr;
12999                 }
13000
13001                 /* Preserve 'small' int types */
13002                 if (lclTyp > TYP_INT)
13003                 {
13004                     lclTyp = genActualType(lclTyp);
13005                 }
13006
13007                 bool usesHelper = false;
13008
13009                 switch (fieldInfo.fieldAccessor)
13010                 {
13011                     case CORINFO_FIELD_INSTANCE:
13012 #ifdef FEATURE_READYTORUN_COMPILER
13013                     case CORINFO_FIELD_INSTANCE_WITH_BASE:
13014 #endif
13015                     {
13016                         bool nullcheckNeeded = false;
13017
13018                         obj = impCheckForNullPointer(obj);
13019
13020                         if (isLoadAddress && (obj->gtType == TYP_BYREF) && fgAddrCouldBeNull(obj))
13021                         {
13022                             nullcheckNeeded = true;
13023                         }
13024
13025                         // If the object is a struct, what we really want is
13026                         // for the field to operate on the address of the struct.
13027                         if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj))
13028                         {
13029                             assert(opcode == CEE_LDFLD && objType != nullptr);
13030
13031                             obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true);
13032                         }
13033
13034                         /* Create the data member node */
13035                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset, nullcheckNeeded);
13036
13037 #ifdef FEATURE_READYTORUN_COMPILER
13038                         if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
13039                         {
13040                             op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
13041                         }
13042 #endif
13043
13044                         op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
13045
13046                         if (fgAddrCouldBeNull(obj))
13047                         {
13048                             op1->gtFlags |= GTF_EXCEPT;
13049                         }
13050
13051                         // If gtFldObj is a BYREF then our target is a value class and
13052                         // it could point anywhere, for example a boxed class static int
13053                         if (obj->gtType == TYP_BYREF)
13054                         {
13055                             op1->gtFlags |= GTF_IND_TGTANYWHERE;
13056                         }
13057
13058                         DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13059                         if (StructHasOverlappingFields(typeFlags))
13060                         {
13061                             op1->gtField.gtFldMayOverlap = true;
13062                         }
13063
13064                         // wrap it in an address-of operator if necessary
13065                         if (isLoadAddress)
13066                         {
13067                             op1 = gtNewOperNode(GT_ADDR,
13068                                                 (var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1);
13069                         }
13070                         else
13071                         {
13072                             if (compIsForInlining() &&
13073                                 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, obj,
13074                                                                                    impInlineInfo->inlArgInfo))
13075                             {
13076                                 impInlineInfo->thisDereferencedFirst = true;
13077                             }
13078                         }
13079                     }
13080                     break;
13081
13082                     case CORINFO_FIELD_STATIC_TLS:
13083 #ifdef _TARGET_X86_
13084                         // Legacy TLS access is implemented as intrinsic on x86 only
13085
13086                         /* Create the data member node */
13087                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
13088                         op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13089
13090                         if (isLoadAddress)
13091                         {
13092                             op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1);
13093                         }
13094                         break;
13095 #else
13096                         fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13097
13098                         __fallthrough;
13099 #endif
13100
13101                     case CORINFO_FIELD_STATIC_ADDR_HELPER:
13102                     case CORINFO_FIELD_INSTANCE_HELPER:
13103                     case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13104                         op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13105                                                clsHnd, nullptr);
13106                         usesHelper = true;
13107                         break;
13108
13109                     case CORINFO_FIELD_STATIC_ADDRESS:
13110                         // Replace static read-only fields with constant if possible
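                              // (e.g. a "static readonly int" whose declaring class has already been initialized
                              // can be imported as its constant value below.)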
13111                         if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) &&
13112                             !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) &&
13113                             (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp)))
13114                         {
13115                             CorInfoInitClassResult initClassResult =
13116                                 info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd,
13117                                                             impTokenLookupContextHandle);
13118
13119                             if (initClassResult & CORINFO_INITCLASS_INITIALIZED)
13120                             {
13121                                 void** pFldAddr = nullptr;
13122                                 void*  fldAddr =
13123                                     info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr);
13124
13125                                 // We should always be able to access this static's address directly
13126                                 assert(pFldAddr == nullptr);
13127
13128                                 op1 = impImportStaticReadOnlyField(fldAddr, lclTyp);
13129                                 goto FIELD_DONE;
13130                             }
13131                         }
13132
13133                         __fallthrough;
13134
13135                     case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13136                     case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13137                     case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13138 #if COR_JIT_EE_VERSION > 460
13139                     case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13140 #endif
13141                         op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13142                                                          lclTyp);
13143                         break;
13144
13145                     case CORINFO_FIELD_INTRINSIC_ZERO:
13146                     {
13147                         assert(aflags & CORINFO_ACCESS_GET);
13148                         op1 = gtNewIconNode(0, lclTyp);
13149                         goto FIELD_DONE;
13150                     }
13151                     break;
13152
13153                     case CORINFO_FIELD_INTRINSIC_EMPTY_STRING:
13154                     {
13155                         assert(aflags & CORINFO_ACCESS_GET);
13156
13157                         LPVOID         pValue;
13158                         InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue);
13159                         op1                = gtNewStringLiteralNode(iat, pValue);
13160                         goto FIELD_DONE;
13161                     }
13162                     break;
13163
13164                     default:
13165                         assert(!"Unexpected fieldAccessor");
13166                 }
13167
13168                 if (!isLoadAddress)
13169                 {
13170
13171                     if (prefixFlags & PREFIX_VOLATILE)
13172                     {
13173                         op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
13174                         op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13175
13176                         if (!usesHelper)
13177                         {
13178                             assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13179                                    (op1->OperGet() == GT_OBJ));
13180                             op1->gtFlags |= GTF_IND_VOLATILE;
13181                         }
13182                     }
13183
13184                     if (prefixFlags & PREFIX_UNALIGNED)
13185                     {
13186                         if (!usesHelper)
13187                         {
13188                             assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13189                                    (op1->OperGet() == GT_OBJ));
13190                             op1->gtFlags |= GTF_IND_UNALIGNED;
13191                         }
13192                     }
13193                 }
13194
13195                 /* Check if the class needs explicit initialization */
13196
13197                 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13198                 {
13199                     GenTreePtr helperNode = impInitClass(&resolvedToken);
13200                     if (compDonotInline())
13201                     {
13202                         return;
13203                     }
13204                     if (helperNode != nullptr)
13205                     {
13206                         op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13207                     }
13208                 }
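                      // If impInitClass returned a helper node above, the tree pushed at FIELD_DONE has the
                      // shape COMMA(<static cctor helper>, <field access>), so the class-initialization check
                      // is evaluated before the field value is read.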
13209
13210             FIELD_DONE:
13211                 impPushOnStack(op1, tiRetVal);
13212             }
13213             break;
13214
13215             case CEE_STFLD:
13216             case CEE_STSFLD:
13217             {
13218
13219                 BOOL isStoreStatic = (opcode == CEE_STSFLD);
13220
13221                 CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type)
13222
13223                 /* Get the CP_Fieldref index */
13224
13225                 assertImp(sz == sizeof(unsigned));
13226
13227                 _impResolveToken(CORINFO_TOKENKIND_Field);
13228
13229                 JITDUMP(" %08X", resolvedToken.token);
13230
13231                 int        aflags = CORINFO_ACCESS_SET;
13232                 GenTreePtr obj    = nullptr;
13233                 typeInfo*  tiObj  = nullptr;
13234                 typeInfo   tiVal;
13235
13236                 /* Pull the value from the stack */
13237                 op2    = impPopStack(tiVal);
13238                 clsHnd = tiVal.GetClassHandle();
13239
13240                 if (opcode == CEE_STFLD)
13241                 {
13242                     tiObj = &impStackTop().seTypeInfo;
13243                     obj   = impPopStack().val;
13244
13245                     if (impIsThis(obj))
13246                     {
13247                         aflags |= CORINFO_ACCESS_THIS;
13248
13249                         // An optimization for Contextful classes:
13250                         // we unwrap the proxy when we have a 'this reference'
13251
13252                         if (info.compUnwrapContextful)
13253                         {
13254                             aflags |= CORINFO_ACCESS_UNWRAP;
13255                         }
13256                     }
13257                 }
13258
13259                 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
13260
13261                 // Figure out the type of the member.  We always call canAccessField, so you always need this
13262                 // handle
13263                 CorInfoType ciType = fieldInfo.fieldType;
13264                 fieldClsHnd        = fieldInfo.structType;
13265
13266                 lclTyp = JITtype2varType(ciType);
13267
13268                 if (compIsForInlining())
13269                 {
13270                     /* Is this a 'special' (COM) field, a TLS ref static field, a field stored in the GC heap, or
13271                      * a per-inst static? */
13272
13273                     switch (fieldInfo.fieldAccessor)
13274                     {
13275                         case CORINFO_FIELD_INSTANCE_HELPER:
13276                         case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13277                         case CORINFO_FIELD_STATIC_ADDR_HELPER:
13278                         case CORINFO_FIELD_STATIC_TLS:
13279
13280                             compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER);
13281                             return;
13282
13283                         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13284 #if COR_JIT_EE_VERSION > 460
13285                         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13286 #endif
13287
13288                             /* We may be able to inline the field accessors in specific instantiations of generic
13289                              * methods */
13290                             compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER);
13291                             return;
13292
13293                         default:
13294                             break;
13295                     }
13296                 }
13297
13298                 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13299
13300                 if (tiVerificationNeeded)
13301                 {
13302                     verVerifyField(&resolvedToken, fieldInfo, tiObj, TRUE);
13303                     typeInfo fieldType = verMakeTypeInfo(ciType, fieldClsHnd);
13304                     Verify(tiCompatibleWith(tiVal, fieldType.NormaliseForStack(), true), "type mismatch");
13305                 }
13306                 else
13307                 {
13308                     // tiVerificationNeeded is false.
13309                     // Raise InvalidProgramException if static store accesses non-static field
13310                     if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
13311                     {
13312                         BADCODE("static access on an instance field");
13313                     }
13314                 }
13315
13316                 // We are using stfld on a static field.
13317                 // We allow it, but need to eval any side-effects for obj
13318                 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
13319                 {
13320                     if (obj->gtFlags & GTF_SIDE_EFFECT)
13321                     {
13322                         obj = gtUnusedValNode(obj);
13323                         impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13324                     }
13325                     obj = nullptr;
13326                 }
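                      // Hypothetical IL example for this case: 'ldarg.0; ldc.i4.1; stfld int32 C::s' where
                      // C::s is declared static. The object reference is discarded, but any side effects it
                      // carries were appended as a separate statement above.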
13327
13328                 /* Preserve 'small' int types */
13329                 if (lclTyp > TYP_INT)
13330                 {
13331                     lclTyp = genActualType(lclTyp);
13332                 }
13333
13334                 switch (fieldInfo.fieldAccessor)
13335                 {
13336                     case CORINFO_FIELD_INSTANCE:
13337 #ifdef FEATURE_READYTORUN_COMPILER
13338                     case CORINFO_FIELD_INSTANCE_WITH_BASE:
13339 #endif
13340                     {
13341                         obj = impCheckForNullPointer(obj);
13342
13343                         /* Create the data member node */
13344                         op1             = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
13345                         DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13346                         if (StructHasOverlappingFields(typeFlags))
13347                         {
13348                             op1->gtField.gtFldMayOverlap = true;
13349                         }
13350
13351 #ifdef FEATURE_READYTORUN_COMPILER
13352                         if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
13353                         {
13354                             op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
13355                         }
13356 #endif
13357
13358                         op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
13359
13360                         if (fgAddrCouldBeNull(obj))
13361                         {
13362                             op1->gtFlags |= GTF_EXCEPT;
13363                         }
13364
13365                         // If gtFldObj is a BYREF then our target is a value class and
13366                         // it could point anywhere, for example at a boxed class static int
13367                         if (obj->gtType == TYP_BYREF)
13368                         {
13369                             op1->gtFlags |= GTF_IND_TGTANYWHERE;
13370                         }
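                              // Note (assumed behavior): GTF_IND_TGTANYWHERE makes later phases treat the
                              // store destination conservatively; for object-reference stores this typically
                              // means using a checked write barrier.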
13371
13372                         if (compIsForInlining() &&
13373                             impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, obj, impInlineInfo->inlArgInfo))
13374                         {
13375                             impInlineInfo->thisDereferencedFirst = true;
13376                         }
13377                     }
13378                     break;
13379
13380                     case CORINFO_FIELD_STATIC_TLS:
13381 #ifdef _TARGET_X86_
13382                         // Legacy TLS access is implemented as intrinsic on x86 only
13383
13384                         /* Create the data member node */
13385                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
13386                         op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13387
13388                         break;
13389 #else
13390                         fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13391
13392                         __fallthrough;
13393 #endif
13394
13395                     case CORINFO_FIELD_STATIC_ADDR_HELPER:
13396                     case CORINFO_FIELD_INSTANCE_HELPER:
13397                     case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13398                         op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13399                                                clsHnd, op2);
13400                         goto SPILL_APPEND;
13401
13402                     case CORINFO_FIELD_STATIC_ADDRESS:
13403                     case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13404                     case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13405                     case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13406 #if COR_JIT_EE_VERSION > 460
13407                     case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13408 #endif
13409                         op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13410                                                          lclTyp);
13411                         break;
13412
13413                     default:
13414                         assert(!"Unexpected fieldAccessor");
13415                 }
13416
13417                 // Create the member assignment, unless we have a struct.
13418                 // TODO-1stClassStructs: This could be limited to TYP_STRUCT, to avoid extra copies.
13419                 bool deferStructAssign = varTypeIsStruct(lclTyp);
13420
13421                 if (!deferStructAssign)
13422                 {
13423                     if (prefixFlags & PREFIX_VOLATILE)
13424                     {
13425                         assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
13426                         op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
13427                         op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13428                         op1->gtFlags |= GTF_IND_VOLATILE;
13429                     }
13430                     if (prefixFlags & PREFIX_UNALIGNED)
13431                     {
13432                         assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
13433                         op1->gtFlags |= GTF_IND_UNALIGNED;
13434                     }
13435
13436                     /* V4.0 allows assignment of i4 constant values to i8 type vars when the IL verifier is
13437                        bypassed (full trust apps). The reason this works is that the JIT stores an i4 constant
13438                        in the GenTree union during importation and reads from the union as if it were a long
13439                        during code generation. Though this can potentially read garbage, one can get lucky and
13440                        have it work correctly.
13441
13442                        This code pattern is generated by the Dev10 MC++ compiler when storing to fields, when
13443                        compiled with the /O2 switch (the default when compiling retail configs in Dev10), and a
13444                        customer app has taken a dependency on it. To be backward compatible, we explicitly add
13445                        an upward cast here so that it always works correctly.
13446
13447                        Note that this is limited to x86 alone as there is no back compat to be addressed for the
13448                        Arm JIT for V4.0.
13449                     */
13454                     CLANG_FORMAT_COMMENT_ANCHOR;
13455
13456 #ifdef _TARGET_X86_
13457                     if (op1->TypeGet() != op2->TypeGet() && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) &&
13458                         varTypeIsLong(op1->TypeGet()))
13459                     {
13460                         op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
13461                     }
13462 #endif
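                      // Illustrative (hypothetical) IL fragment covered by the x86 quirk above:
                      //     ldarg.0
                      //     ldc.i4  0
                      //     stfld   int64 C::m_val
                      // The inserted cast widens the i4 constant to the i8 field type so codegen never reads
                      // stale bits from the constant's union storage.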
13463
13464 #ifdef _TARGET_64BIT_
13465                     // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
13466                     if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
13467                     {
13468                         op2->gtType = TYP_I_IMPL;
13469                     }
13470                     else
13471                     {
13472                         // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
13473                         //
13474                         if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
13475                         {
13476                             op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
13477                         }
13478                         // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
13479                         //
13480                         if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
13481                         {
13482                             op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
13483                         }
13484                     }
13485 #endif
13486
13487 #if !FEATURE_X87_DOUBLES
13488                     // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
13489                     // We insert a cast to the dest 'op1' type
13490                     //
13491                     if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
13492                         varTypeIsFloating(op2->gtType))
13493                     {
13494                         op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
13495                     }
13496 #endif // !FEATURE_X87_DOUBLES
13497
13498                     op1 = gtNewAssignNode(op1, op2);
13499
13500                     /* Mark the expression as containing an assignment */
13501
13502                     op1->gtFlags |= GTF_ASG;
13503                 }
13504
13505                 /* Check if the class needs explicit initialization */
13506
13507                 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13508                 {
13509                     GenTreePtr helperNode = impInitClass(&resolvedToken);
13510                     if (compDonotInline())
13511                     {
13512                         return;
13513                     }
13514                     if (helperNode != nullptr)
13515                     {
13516                         op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13517                     }
13518                 }
13519
13520                 /* stfld can interfere with value classes (consider the sequence
13521                    ldloc, ldloca, ..., stfld, stloc).  We will be conservative and
13522                    spill all value class references from the stack. */
13523
13524                 if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL)))
13525                 {
13526                     assert(tiObj);
13527
13528                     if (impIsValueType(tiObj))
13529                     {
13530                         impSpillEvalStack();
13531                     }
13532                     else
13533                     {
13534                         impSpillValueClasses();
13535                     }
13536                 }
13537
13538                 /* Spill any refs to the same member from the stack */
13539
13540                 impSpillLclRefs((ssize_t)resolvedToken.hField);
13541
13542                 /* stsfld also interferes with indirect accesses (for aliased
13543                    statics) and calls. But don't need to spill other statics
13544                    as we have explicitly spilled this particular static field. */
13545
13546                 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD"));
13547
13548                 if (deferStructAssign)
13549                 {
13550                     op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL);
13551                 }
13552             }
13553                 goto APPEND;
13554
13555             case CEE_NEWARR:
13556             {
13557
13558                 /* Get the class type index operand */
13559
13560                 _impResolveToken(CORINFO_TOKENKIND_Newarr);
13561
13562                 JITDUMP(" %08X", resolvedToken.token);
13563
13564                 if (!opts.IsReadyToRun())
13565                 {
13566                     // Need to restore array classes before creating array objects on the heap
13567                     op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
13568                     if (op1 == nullptr)
13569                     { // compDonotInline()
13570                         return;
13571                     }
13572                 }
13573
13574                 if (tiVerificationNeeded)
13575                 {
13576                     // As per ECMA, the 'numElems' operand can be either int32 or native int.
13577                     Verify(impStackTop().seTypeInfo.IsIntOrNativeIntType(), "bad bound");
13578
13579                     CORINFO_CLASS_HANDLE elemTypeHnd;
13580                     info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
13581                     Verify(elemTypeHnd == nullptr ||
13582                                !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
13583                            "array of byref-like type");
13584                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13585                 }
13586
13587                 accessAllowedResult =
13588                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13589                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13590
13591                 /* Form the arglist: array class handle, size */
13592                 op2 = impPopStack().val;
13593                 assertImp(genActualTypeIsIntOrI(op2->gtType));
13594
13595 #ifdef FEATURE_READYTORUN_COMPILER
13596                 if (opts.IsReadyToRun())
13597                 {
13598                     op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF,
13599                                                     gtNewArgList(op2));
13600                     usingReadyToRunHelper = (op1 != nullptr);
13601
13602                     if (!usingReadyToRunHelper)
13603                     {
13604                         // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13605                         // and the newarr call with a single call to a dynamic R2R cell that will:
13606                         //      1) Load the context
13607                         //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
13608                         //      3) Allocate the new array
13609                         // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13610
13611                         // Need to restore array classes before creating array objects on the heap
13612                         op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
13613                         if (op1 == nullptr)
13614                         { // compDonotInline()
13615                             return;
13616                         }
13617                     }
13618                 }
13619
13620                 if (!usingReadyToRunHelper)
13621 #endif
13622                 {
13623                     args = gtNewArgList(op1, op2);
13624
13625                     /* Create a call to 'new' */
13626
13627                     // Note that this only works for shared generic code because the same helper is used for all
13628                     // reference array types
13629                     op1 =
13630                         gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, 0, args);
13631                 }
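                      // The result is a helper call of the form (sketch):
                      //     CALL <getNewArrHelper(hClass)>(arrayClsHnd, numElems) : TYP_REF
                      // e.g. 'newarr System.String' goes through the reference-array helper that is shared by
                      // every reference element type.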
13632
13633                 op1->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass;
13634
13635                 /* Remember that this basic block contains 'new' of an sd array */
13636
13637                 block->bbFlags |= BBF_HAS_NEWARRAY;
13638                 optMethodFlags |= OMF_HAS_NEWARRAY;
13639
13640                 /* Push the result of the call on the stack */
13641
13642                 impPushOnStack(op1, tiRetVal);
13643
13644                 callTyp = TYP_REF;
13645             }
13646             break;
13647
13648             case CEE_LOCALLOC:
13649                 assert(!compIsForInlining());
13650
13651                 if (tiVerificationNeeded)
13652                 {
13653                     Verify(false, "bad opcode");
13654                 }
13655
13656                 // We don't allow locallocs inside handlers
13657                 if (block->hasHndIndex())
13658                 {
13659                     BADCODE("Localloc can't be inside handler");
13660                 }
13661
13662                 /* The FP register may not be back to the original value at the end
13663                    of the method, even if the frame size is 0, as localloc may
13664                    have modified it. So we will HAVE to reset it */
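                      // For example, C#'s 'stackalloc' compiles to this localloc instruction. Its presence
                      // forces frame-pointer-based addressing and a GS security cookie (set just below), and
                      // the checks that follow reject localloc inside a handler or on a non-empty stack.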
13665
13666                 compLocallocUsed = true;
13667                 setNeedsGSSecurityCookie();
13668
13669                 // Get the size to allocate
13670
13671                 op2 = impPopStack().val;
13672                 assertImp(genActualTypeIsIntOrI(op2->gtType));
13673
13674                 if (verCurrentState.esStackDepth != 0)
13675                 {
13676                     BADCODE("Localloc can only be used when the stack is empty");
13677                 }
13678
13679                 op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2);
13680
13681                 // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd.
13682
13683                 op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE);
13684
13685                 impPushOnStack(op1, tiRetVal);
13686                 break;
13687
13688             case CEE_ISINST:
13689
13690                 /* Get the type token */
13691                 assertImp(sz == sizeof(unsigned));
13692
13693                 _impResolveToken(CORINFO_TOKENKIND_Casting);
13694
13695                 JITDUMP(" %08X", resolvedToken.token);
13696
13697                 if (!opts.IsReadyToRun())
13698                 {
13699                     op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
13700                     if (op2 == nullptr)
13701                     { // compDonotInline()
13702                         return;
13703                     }
13704                 }
13705
13706                 if (tiVerificationNeeded)
13707                 {
13708                     Verify(impStackTop().seTypeInfo.IsObjRef(), "obj reference needed");
13709                     // Even if this is a value class, we know it is boxed.
13710                     tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
13711                 }
13712                 accessAllowedResult =
13713                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13714                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13715
13716                 op1 = impPopStack().val;
13717
13718 #ifdef FEATURE_READYTORUN_COMPILER
13719                 if (opts.IsReadyToRun())
13720                 {
13721                     GenTreePtr opLookup =
13722                         impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF,
13723                                                   gtNewArgList(op1));
13724                     usingReadyToRunHelper = (opLookup != nullptr);
13725                     op1                   = (usingReadyToRunHelper ? opLookup : op1);
13726
13727                     if (!usingReadyToRunHelper)
13728                     {
13729                         // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13730                         // and the isinstanceof_any call with a single call to a dynamic R2R cell that will:
13731                         //      1) Load the context
13732                         //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
13733                         //      3) Perform the 'is instance' check on the input object
13734                         // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13735
13736                         op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
13737                         if (op2 == nullptr)
13738                         { // compDonotInline()
13739                             return;
13740                         }
13741                     }
13742                 }
13743
13744                 if (!usingReadyToRunHelper)
13745 #endif
13746                 {
13747                     op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false);
13748                 }
13749                 if (compDonotInline())
13750                 {
13751                     return;
13752                 }
13753
13754                 impPushOnStack(op1, tiRetVal);
13755
13756                 break;
13757
13758             case CEE_REFANYVAL:
13759
13760                 // get the class handle and make a ICON node out of it
13761
13762                 _impResolveToken(CORINFO_TOKENKIND_Class);
13763
13764                 JITDUMP(" %08X", resolvedToken.token);
13765
13766                 op2 = impTokenToHandle(&resolvedToken);
13767                 if (op2 == nullptr)
13768                 { // compDonotInline()
13769                     return;
13770                 }
13771
13772                 if (tiVerificationNeeded)
13773                 {
13774                     Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
13775                            "need refany");
13776                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass).MakeByRef();
13777                 }
13778
13779                 op1 = impPopStack().val;
13780                 // make certain it is normalized;
13781                 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
13782
13783                 // Call helper GETREFANY(classHandle, op1);
13784                 args = gtNewArgList(op2, op1);
13785                 op1  = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, 0, args);
13786
13787                 impPushOnStack(op1, tiRetVal);
13788                 break;
13789
13790             case CEE_REFANYTYPE:
13791
13792                 if (tiVerificationNeeded)
13793                 {
13794                     Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
13795                            "need refany");
13796                 }
13797
13798                 op1 = impPopStack().val;
13799
13800                 // make certain it is normalized;
13801                 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
13802
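                      // The popped value is a TypedReference (CORINFO_RefAny), assumed here to be a data
                      // pointer followed by a type handle. The type handle is fetched either by loading the
                      // 'type' slot from the struct's address (GT_OBJ case) or directly from the second
                      // operand of the GT_MKREFANY node.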
13803                 if (op1->gtOper == GT_OBJ)
13804                 {
13805                     // Get the address of the refany
13806                     op1 = op1->gtOp.gtOp1;
13807
13808                     // Fetch the type from the correct slot
13809                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
13810                                         gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL));
13811                     op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1);
13812                 }
13813                 else
13814                 {
13815                     assertImp(op1->gtOper == GT_MKREFANY);
13816
13817                     // The pointer may have side-effects
13818                     if (op1->gtOp.gtOp1->gtFlags & GTF_SIDE_EFFECT)
13819                     {
13820                         impAppendTree(op1->gtOp.gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13821 #ifdef DEBUG
13822                         impNoteLastILoffs();
13823 #endif
13824                     }
13825
13826                     // We already have the class handle
13827                     op1 = op1->gtOp.gtOp2;
13828                 }
13829
13830                 // convert native TypeHandle to RuntimeTypeHandle
13831                 {
13832                     GenTreeArgList* helperArgs = gtNewArgList(op1);
13833
13834                     op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL, TYP_STRUCT, GTF_EXCEPT,
13835                                               helperArgs);
13836
13837                     // The handle struct is returned in register
13838                     op1->gtCall.gtReturnType = TYP_REF;
13839
13840                     tiRetVal = typeInfo(TI_STRUCT, impGetTypeHandleClass());
13841                 }
13842
13843                 impPushOnStack(op1, tiRetVal);
13844                 break;
13845
13846             case CEE_LDTOKEN:
13847             {
13848                 /* Get the Class index */
13849                 assertImp(sz == sizeof(unsigned));
13850                 lastLoadToken = codeAddr;
13851                 _impResolveToken(CORINFO_TOKENKIND_Ldtoken);
13852
13853                 tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken);
13854
13855                 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
13856                 if (op1 == nullptr)
13857                 { // compDonotInline()
13858                     return;
13859                 }
13860
13861                 helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE;
13862                 assert(resolvedToken.hClass != nullptr);
13863
13864                 if (resolvedToken.hMethod != nullptr)
13865                 {
13866                     helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD;
13867                 }
13868                 else if (resolvedToken.hField != nullptr)
13869                 {
13870                     helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD;
13871                 }
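                      // Helper selection summary: ldtoken on a type uses CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE,
                      // on a method CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD, and on a field
                      // CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD; each wraps the raw handle created above.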
13872
13873                 GenTreeArgList* helperArgs = gtNewArgList(op1);
13874
13875                 op1 = gtNewHelperCallNode(helper, TYP_STRUCT, GTF_EXCEPT, helperArgs);
13876
13877                 // The handle struct is returned in register
13878                 op1->gtCall.gtReturnType = TYP_REF;
13879
13880                 tiRetVal = verMakeTypeInfo(tokenType);
13881                 impPushOnStack(op1, tiRetVal);
13882             }
13883             break;
13884
13885             case CEE_UNBOX:
13886             case CEE_UNBOX_ANY:
13887             {
13888                 /* Get the Class index */
13889                 assertImp(sz == sizeof(unsigned));
13890
13891                 _impResolveToken(CORINFO_TOKENKIND_Class);
13892
13893                 JITDUMP(" %08X", resolvedToken.token);
13894
13895                 BOOL runtimeLookup;
13896                 op2 = impTokenToHandle(&resolvedToken, &runtimeLookup);
13897                 if (op2 == nullptr)
13898                 { // compDonotInline()
13899                     return;
13900                 }
13901
13902                 // Run this always so we can get access exceptions even with SkipVerification.
13903                 accessAllowedResult =
13904                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13905                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13906
13907                 if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass))
13908                 {
13909                     if (tiVerificationNeeded)
13910                     {
13911                         typeInfo tiUnbox = impStackTop().seTypeInfo;
13912                         Verify(tiUnbox.IsObjRef(), "bad unbox.any arg");
13913                         tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13914                         tiRetVal.NormaliseForStack();
13915                     }
13916                     op1 = impPopStack().val;
13917                     goto CASTCLASS;
13918                 }
13919
13920                 /* Pop the object and create the unbox helper call */
13921                 /* You might think that for UNBOX_ANY we need to push a different */
13922                 /* (non-byref) type, but here we're making the tiRetVal that is used */
13923                 /* for the intermediate pointer which we then transfer onto the OBJ */
13924                 /* instruction.  OBJ then creates the appropriate tiRetVal. */
13925                 if (tiVerificationNeeded)
13926                 {
13927                     typeInfo tiUnbox = impStackTop().seTypeInfo;
13928                     Verify(tiUnbox.IsObjRef(), "Bad unbox arg");
13929
13930                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13931                     Verify(tiRetVal.IsValueClass(), "not value class");
13932                     tiRetVal.MakeByRef();
13933
13934                     // We always come from an objref, so this is safe byref
13935                     tiRetVal.SetIsPermanentHomeByRef();
13936                     tiRetVal.SetIsReadonlyByRef();
13937                 }
13938
13939                 op1 = impPopStack().val;
13940                 assertImp(op1->gtType == TYP_REF);
13941
13942                 helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass);
13943                 assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE);
13944
13945                 // We only want to expand inline the normal UNBOX helper;
13946                 expandInline = (helper == CORINFO_HELP_UNBOX);
13947
13948                 if (expandInline)
13949                 {
13950                     if (compCurBB->isRunRarely())
13951                     {
13952                         expandInline = false; // not worth the code expansion
13953                     }
13954                 }
13955
13956                 if (expandInline)
13957                 {
13958                     // we are doing normal unboxing
13959                     // inline the common case of the unbox helper
13960                     // UNBOX(exp) morphs into
13961                     // clone = pop(exp);
13962                     // ((*clone == typeToken) ? nop : helper(clone, typeToken));
13963                     // push(clone + sizeof(void*))
13964                     //
13965                     GenTreePtr cloneOperand;
13966                     op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
13967                                        nullptr DEBUGARG("inline UNBOX clone1"));
13968                     op1 = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
13969
13970                     GenTreePtr condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2);
13971
13972                     op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
13973                                        nullptr DEBUGARG("inline UNBOX clone2"));
13974                     op2 = impTokenToHandle(&resolvedToken);
13975                     if (op2 == nullptr)
13976                     { // compDonotInline()
13977                         return;
13978                     }
13979                     args = gtNewArgList(op2, op1);
13980                     op1  = gtNewHelperCallNode(helper, TYP_VOID, 0, args);
13981
13982                     op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1);
13983                     op1 = gtNewQmarkNode(TYP_VOID, condBox, op1);
13984                     condBox->gtFlags |= GTF_RELOP_QMARK;
13985
13986                     // QMARK nodes cannot reside on the evaluation stack. Because there
13987                     // may be other trees on the evaluation stack that side-effect the
13988                     // sources of the UNBOX operation we must spill the stack.
13989
13990                     impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13991
13992                     // Create the address-expression to reference past the object header
13993                     // to the beginning of the value-type. Today this means adjusting
13994                     // past the base of the object's vtable field, which is pointer sized.
13995
13996                     op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
13997                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2);
13998                 }
13999                 else
14000                 {
14001                     unsigned callFlags = (helper == CORINFO_HELP_UNBOX) ? 0 : GTF_EXCEPT;
14002
14003                     // Don't optimize, just call the helper and be done with it
14004                     args = gtNewArgList(op2, op1);
14005                     op1  = gtNewHelperCallNode(helper,
14006                                               (var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT),
14007                                               callFlags, args);
14008                 }
14009
14010                 assert((helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF) || // Unbox helper returns a byref.
14011                        (helper == CORINFO_HELP_UNBOX_NULLABLE &&
14012                         varTypeIsStruct(op1)) // UnboxNullable helper returns a struct.
14013                        );
14014
14015                 /*
14016                   ----------------------------------------------------------------------
14017                   | \ helper  |                         |                              |
14018                   |   \       |                         |                              |
14019                   |     \     | CORINFO_HELP_UNBOX      | CORINFO_HELP_UNBOX_NULLABLE  |
14020                   |       \   | (which returns a BYREF) | (which returns a STRUCT)     |
14021                   | opcode  \ |                         |                              |
14022                   |---------------------------------------------------------------------
14023                   | UNBOX     | push the BYREF          | spill the STRUCT to a local, |
14024                   |           |                         | push the BYREF to this local |
14025                   |---------------------------------------------------------------------
14026                   | UNBOX_ANY | push a GT_OBJ of        | push the STRUCT              |
14027                   |           | the BYREF               | For Linux, when the          |
14028                   |           |                         |  struct is returned in two   |
14029                   |           |                         |  registers, create a temp    |
14030                   |           |                         |  whose address is passed to  |
14031                   |           |                         |  the unbox_nullable helper.  |
14032                   |---------------------------------------------------------------------
14033                 */
14034
14035                 if (opcode == CEE_UNBOX)
14036                 {
14037                     if (helper == CORINFO_HELP_UNBOX_NULLABLE)
14038                     {
14039                         // Unbox nullable helper returns a struct type.
14040                         // We need to spill it to a temp so that we can take the address of it.
14041                         // Here we need the unsafe value cls check, since the address of the struct is taken to be
14042                         // used further along and could potentially be exploitable.
14043
14044                         unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable"));
14045                         lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14046
14047                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14048                         op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14049                         assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14050
14051                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14052                         op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14053                         op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14054                     }
14055
14056                     assert(op1->gtType == TYP_BYREF);
14057                     assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14058                 }
14059                 else
14060                 {
14061                     assert(opcode == CEE_UNBOX_ANY);
14062
14063                     if (helper == CORINFO_HELP_UNBOX)
14064                     {
14065                         // Normal unbox helper returns a TYP_BYREF.
14066                         impPushOnStack(op1, tiRetVal);
14067                         oper = GT_OBJ;
14068                         goto OBJ;
14069                     }
14070
14071                     assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!");
14072
14073 #if FEATURE_MULTIREG_RET
14074
14075                     if (varTypeIsStruct(op1) && IsMultiRegReturnedType(resolvedToken.hClass))
14076                     {
14077                         // Unbox nullable helper returns a TYP_STRUCT.
14078                         // For the multi-reg case we need to spill it to a temp so that
14079                         // we can pass the address to the unbox_nullable jit helper.
14080
14081                         unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable"));
14082                         lvaTable[tmp].lvIsMultiRegArg = true;
14083                         lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14084
14085                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14086                         op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14087                         assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14088
14089                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14090                         op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14091                         op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14092
14093                         // In this case the return value of the unbox helper is TYP_BYREF.
14094                         // Make sure the right type is placed on the operand type stack.
14095                         impPushOnStack(op1, tiRetVal);
14096
14097                         // Load the struct.
14098                         oper = GT_OBJ;
14099
14100                         assert(op1->gtType == TYP_BYREF);
14101                         assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14102
14103                         goto OBJ;
14104                     }
14105                     else
14106
14107 #endif // !FEATURE_MULTIREG_RET
14108
14109                     {
14110                         // If the struct is not returned in registers, it has been materialized in the RetBuf.
14111                         assert(op1->gtType == TYP_STRUCT);
14112                         tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14113                         assert(tiRetVal.IsValueClass());
14114                     }
14115                 }
14116
14117                 impPushOnStack(op1, tiRetVal);
14118             }
14119             break;
14120
14121             case CEE_BOX:
14122             {
14123                 /* Get the Class index */
14124                 assertImp(sz == sizeof(unsigned));
14125
14126                 _impResolveToken(CORINFO_TOKENKIND_Box);
14127
14128                 JITDUMP(" %08X", resolvedToken.token);
14129
14130                 if (tiVerificationNeeded)
14131                 {
14132                     typeInfo tiActual = impStackTop().seTypeInfo;
14133                     typeInfo tiBox    = verMakeTypeInfo(resolvedToken.hClass);
14134
14135                     Verify(verIsBoxable(tiBox), "boxable type expected");
14136
14137                     // check the class constraints of the boxed type in case we are boxing an uninitialized value
14138                     Verify(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
14139                            "boxed type has unsatisfied class constraints");
14140
14141                     Verify(tiCompatibleWith(tiActual, tiBox.NormaliseForStack(), true), "type mismatch");
14142
14143                     // Observation: the following code introduces a boxed value class on the stack, but,
14144                     // according to the ECMA spec, one would simply expect: tiRetVal =
14145                     // typeInfo(TI_REF,impGetObjectClass());
14146
14147                     // Push the result back on the stack,
14148                     // even if clsHnd is a value class we want the TI_REF
14149                     // we call back to the EE to find out what type we should push (for Nullable<T> we push T)
14150                     tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(resolvedToken.hClass));
14151                 }
14152
14153                 accessAllowedResult =
14154                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14155                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14156
14157                 // Note BOX can be used on things that are not value classes, in which
14158                 // case we get a NOP.  However the verifier's view of the type on the
14159                 // stack changes (in generic code a 'T' becomes a 'boxed T')
14160                 if (!eeIsValueClass(resolvedToken.hClass))
14161                 {
14162                     verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal;
14163                     break;
14164                 }
14165
14166                 // Look ahead for unbox.any
14167                 if (codeAddr + (sz + 1 + sizeof(mdToken)) <= codeEndp && codeAddr[sz] == CEE_UNBOX_ANY)
14168                 {
14169                     DWORD classAttribs = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
14170                     if (!(classAttribs & CORINFO_FLG_SHAREDINST))
14171                     {
14172                         CORINFO_RESOLVED_TOKEN unboxResolvedToken;
14173
14174                         impResolveToken(codeAddr + (sz + 1), &unboxResolvedToken, CORINFO_TOKENKIND_Class);
14175
14176                         if (unboxResolvedToken.hClass == resolvedToken.hClass)
14177                         {
14178                             // Skip the next unbox.any instruction
14179                             sz += sizeof(mdToken) + 1;
14180                             break;
14181                         }
14182                     }
14183                 }
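                      // Illustrative (hypothetical) generic IL sequence elided by the look-ahead above:
                      //     box       !!T
                      //     unbox.any !!T
                      // When both tokens resolve to the same non-shared class the pair is a no-op, so the box
                      // is not imported and 'sz' is bumped so the main loop also skips the unbox.any.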
14184
14185                 impImportAndPushBox(&resolvedToken);
14186                 if (compDonotInline())
14187                 {
14188                     return;
14189                 }
14190             }
14191             break;
14192
14193             case CEE_SIZEOF:
14194
14195                 /* Get the Class index */
14196                 assertImp(sz == sizeof(unsigned));
14197
14198                 _impResolveToken(CORINFO_TOKENKIND_Class);
14199
14200                 JITDUMP(" %08X", resolvedToken.token);
14201
14202                 if (tiVerificationNeeded)
14203                 {
14204                     tiRetVal = typeInfo(TI_INT);
14205                 }
14206
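                      // The class size is a JIT-time constant obtained from the EE, so 'sizeof' imports as an
                      // integer constant node; e.g. 'sizeof int32' simply becomes GT_CNS_INT(4).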
14207                 op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass));
14208                 impPushOnStack(op1, tiRetVal);
14209                 break;
14210
14211             case CEE_CASTCLASS:
14212
14213                 /* Get the Class index */
14214
14215                 assertImp(sz == sizeof(unsigned));
14216
14217                 _impResolveToken(CORINFO_TOKENKIND_Casting);
14218
14219                 JITDUMP(" %08X", resolvedToken.token);
14220
14221                 if (!opts.IsReadyToRun())
14222                 {
14223                     op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14224                     if (op2 == nullptr)
14225                     { // compDonotInline()
14226                         return;
14227                     }
14228                 }
14229
14230                 if (tiVerificationNeeded)
14231                 {
14232                     Verify(impStackTop().seTypeInfo.IsObjRef(), "object ref expected");
14233                     // box it
14234                     tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
14235                 }
14236
14237                 accessAllowedResult =
14238                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14239                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14240
14241                 op1 = impPopStack().val;
14242
14243             /* Pop the address and create the 'checked cast' helper call */
14244
14245             // At this point we expect typeRef to contain the token, op1 to contain the value being cast,
14246             // and op2 to contain code that creates the type handle corresponding to typeRef
14247             CASTCLASS:
14248
14249 #ifdef FEATURE_READYTORUN_COMPILER
14250                 if (opts.IsReadyToRun())
14251                 {
14252                     GenTreePtr opLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST,
14253                                                                     TYP_REF, gtNewArgList(op1));
14254                     usingReadyToRunHelper = (opLookup != nullptr);
14255                     op1                   = (usingReadyToRunHelper ? opLookup : op1);
14256
14257                     if (!usingReadyToRunHelper)
14258                     {
14259                         // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14260                         // and the chkcastany call with a single call to a dynamic R2R cell that will:
14261                         //      1) Load the context
14262                         //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
14263                         //      3) Check the object on the stack for the type-cast
14264                         // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14265
14266                         op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14267                         if (op2 == nullptr)
14268                         { // compDonotInline()
14269                             return;
14270                         }
14271                     }
14272                 }
14273
14274                 if (!usingReadyToRunHelper)
14275 #endif
14276                 {
14277                     op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true);
14278                 }
14279                 if (compDonotInline())
14280                 {
14281                     return;
14282                 }
14283
14284                 /* Push the result back on the stack */
14285                 impPushOnStack(op1, tiRetVal);
14286                 break;
14287
14288             case CEE_THROW:
14289
14290                 if (compIsForInlining())
14291                 {
14292                     // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
14293                     // TODO: Will this be too strict, given that we will inline many basic blocks?
14294                     // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
14295
14296                     /* Do we have just the exception on the stack ?*/
14297
14298                     if (verCurrentState.esStackDepth != 1)
14299                     {
14300                         /* if not, just don't inline the method */
14301
14302                         compInlineResult->NoteFatal(InlineObservation::CALLEE_THROW_WITH_INVALID_STACK);
14303                         return;
14304                     }
14305                 }
14306
14307                 if (tiVerificationNeeded)
14308                 {
14309                     tiRetVal = impStackTop().seTypeInfo;
14310                     Verify(tiRetVal.IsObjRef(), "object ref expected");
14311                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
14312                     {
14313                         Verify(!tiRetVal.IsThisPtr(), "throw uninitialized this");
14314                     }
14315                 }
14316
14317                 block->bbSetRunRarely(); // any block with a throw is rare
14318                 /* Pop the exception object and create the 'throw' helper call */
14319
14320                 op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, GTF_EXCEPT, gtNewArgList(impPopStack().val));
14321
14322             EVAL_APPEND:
14323                 if (verCurrentState.esStackDepth > 0)
14324                 {
14325                     impEvalSideEffects();
14326                 }
14327
14328                 assert(verCurrentState.esStackDepth == 0);
14329
14330                 goto APPEND;
14331
14332             case CEE_RETHROW:
14333
14334                 assert(!compIsForInlining());
14335
14336                 if (info.compXcptnsCount == 0)
14337                 {
14338                     BADCODE("rethrow outside catch");
14339                 }
14340
14341                 if (tiVerificationNeeded)
14342                 {
14343                     Verify(block->hasHndIndex(), "rethrow outside catch");
14344                     if (block->hasHndIndex())
14345                     {
14346                         EHblkDsc* HBtab = ehGetDsc(block->getHndIndex());
14347                         Verify(!HBtab->HasFinallyOrFaultHandler(), "rethrow in finally or fault");
14348                         if (HBtab->HasFilter())
14349                         {
14350                             // we better be in the handler clause part, not the filter part
14351                             Verify(jitIsBetween(compCurBB->bbCodeOffs, HBtab->ebdHndBegOffs(), HBtab->ebdHndEndOffs()),
14352                                    "rethrow in filter");
14353                         }
14354                     }
14355                 }
14356
14357                 /* Create the 'rethrow' helper call */
14358
14359                 op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID, GTF_EXCEPT);
14360
14361                 goto EVAL_APPEND;
14362
14363             case CEE_INITOBJ:
14364
14365                 assertImp(sz == sizeof(unsigned));
14366
14367                 _impResolveToken(CORINFO_TOKENKIND_Class);
14368
14369                 JITDUMP(" %08X", resolvedToken.token);
14370
14371                 if (tiVerificationNeeded)
14372                 {
14373                     typeInfo tiTo    = impStackTop().seTypeInfo;
14374                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14375
14376                     Verify(tiTo.IsByRef(), "byref expected");
14377                     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
14378
14379                     Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
14380                            "type operand incompatible with type of address");
14381                 }
14382
14383                 size = info.compCompHnd->getClassSize(resolvedToken.hClass); // Size
14384                 op2  = gtNewIconNode(0);                                     // Value
14385                 op1  = impPopStack().val;                                    // Dest
14386                 op1  = gtNewBlockVal(op1, size);
14387                 op1  = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
14388                 goto SPILL_APPEND;
14389
14390             case CEE_INITBLK:
14391
14392                 if (tiVerificationNeeded)
14393                 {
14394                     Verify(false, "bad opcode");
14395                 }
14396
14397                 op3 = impPopStack().val; // Size
14398                 op2 = impPopStack().val; // Value
14399                 op1 = impPopStack().val; // Dest
14400
14401                 if (op3->IsCnsIntOrI())
14402                 {
14403                     size = (unsigned)op3->AsIntConCommon()->IconValue();
14404                     op1  = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
14405                 }
14406                 else
14407                 {
14408                     op1  = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
14409                     size = 0;
14410                 }
14411                 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
14412
14413                 goto SPILL_APPEND;
14414
14415             case CEE_CPBLK:
14416
14417                 if (tiVerificationNeeded)
14418                 {
14419                     Verify(false, "bad opcode");
14420                 }
14421                 op3 = impPopStack().val; // Size
14422                 op2 = impPopStack().val; // Src
14423                 op1 = impPopStack().val; // Dest
14424
14425                 if (op3->IsCnsIntOrI())
14426                 {
14427                     size = (unsigned)op3->AsIntConCommon()->IconValue();
14428                     op1  = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
14429                 }
14430                 else
14431                 {
14432                     op1  = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
14433                     size = 0;
14434                 }
14435                 if (op2->OperGet() == GT_ADDR)
14436                 {
14437                     op2 = op2->gtOp.gtOp1;
14438                 }
14439                 else
14440                 {
14441                     op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2);
14442                 }
14443
14444                 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, true);
14445                 goto SPILL_APPEND;
14446
14447             case CEE_CPOBJ:
14448
14449                 assertImp(sz == sizeof(unsigned));
14450
14451                 _impResolveToken(CORINFO_TOKENKIND_Class);
14452
14453                 JITDUMP(" %08X", resolvedToken.token);
14454
14455                 if (tiVerificationNeeded)
14456                 {
14457                     typeInfo tiFrom  = impStackTop().seTypeInfo;
14458                     typeInfo tiTo    = impStackTop(1).seTypeInfo;
14459                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14460
14461                     Verify(tiFrom.IsByRef(), "expected byref source");
14462                     Verify(tiTo.IsByRef(), "expected byref destination");
14463
14464                     Verify(tiCompatibleWith(tiFrom.DereferenceByRef(), tiInstr, false),
14465                            "type of source address incompatible with type operand");
14466                     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
14467                     Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
14468                            "type operand incompatible with type of destination address");
14469                 }
14470
14471                 if (!eeIsValueClass(resolvedToken.hClass))
14472                 {
14473                     op1 = impPopStack().val; // address to load from
14474
14475                     impBashVarAddrsToI(op1);
14476
14477                     assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
14478
14479                     op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
14480                     op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
14481
14482                     impPushOnStackNoType(op1);
14483                     opcode = CEE_STIND_REF;
14484                     lclTyp = TYP_REF;
14485                     goto STIND_POST_VERIFY;
14486                 }
14487
14488                 op2 = impPopStack().val; // Src
14489                 op1 = impPopStack().val; // Dest
14490                 op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0));
14491                 goto SPILL_APPEND;
14492
14493             case CEE_STOBJ:
14494             {
14495                 assertImp(sz == sizeof(unsigned));
14496
14497                 _impResolveToken(CORINFO_TOKENKIND_Class);
14498
14499                 JITDUMP(" %08X", resolvedToken.token);
14500
14501                 if (eeIsValueClass(resolvedToken.hClass))
14502                 {
14503                     lclTyp = TYP_STRUCT;
14504                 }
14505                 else
14506                 {
14507                     lclTyp = TYP_REF;
14508                 }
14509
14510                 if (tiVerificationNeeded)
14511                 {
14512
14513                     typeInfo tiPtr = impStackTop(1).seTypeInfo;
14514
14515                     // Make sure we have a good looking byref
14516                     Verify(tiPtr.IsByRef(), "pointer not byref");
14517                     Verify(!tiPtr.IsReadonlyByRef(), "write to readonly byref");
14518                     if (!tiPtr.IsByRef() || tiPtr.IsReadonlyByRef())
14519                     {
14520                         compUnsafeCastUsed = true;
14521                     }
14522
14523                     typeInfo ptrVal = DereferenceByRef(tiPtr);
14524                     typeInfo argVal = verMakeTypeInfo(resolvedToken.hClass);
14525
14526                     if (!tiCompatibleWith(impStackTop(0).seTypeInfo, NormaliseForStack(argVal), true))
14527                     {
14528                         Verify(false, "type of value incompatible with type operand");
14529                         compUnsafeCastUsed = true;
14530                     }
14531
14532                     if (!tiCompatibleWith(argVal, ptrVal, false))
14533                     {
14534                         Verify(false, "type operand incompatible with type of address");
14535                         compUnsafeCastUsed = true;
14536                     }
14537                 }
14538                 else
14539                 {
14540                     compUnsafeCastUsed = true;
14541                 }
14542
14543                 if (lclTyp == TYP_REF)
14544                 {
14545                     opcode = CEE_STIND_REF;
14546                     goto STIND_POST_VERIFY;
14547                 }
14548
14549                 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
14550                 if (impIsPrimitive(jitTyp))
14551                 {
14552                     lclTyp = JITtype2varType(jitTyp);
14553                     goto STIND_POST_VERIFY;
14554                 }
14555
14556                 op2 = impPopStack().val; // Value
14557                 op1 = impPopStack().val; // Ptr
14558
14559                 assertImp(varTypeIsStruct(op2));
14560
14561                 op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14562                 goto SPILL_APPEND;
14563             }
14564
14565             case CEE_MKREFANY:
14566
14567                 assert(!compIsForInlining());
14568
14569                 // Being lazy here. Refanys are tricky in terms of gc tracking.
14570                 // Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany.
14571
14572                 JITDUMP("disabling struct promotion because of mkrefany\n");
14573                 fgNoStructPromotion = true;
14574
14575                 oper = GT_MKREFANY;
14576                 assertImp(sz == sizeof(unsigned));
14577
14578                 _impResolveToken(CORINFO_TOKENKIND_Class);
14579
14580                 JITDUMP(" %08X", resolvedToken.token);
14581
14582                 op2 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
14583                 if (op2 == nullptr)
14584                 { // compDonotInline()
14585                     return;
14586                 }
14587
14588                 if (tiVerificationNeeded)
14589                 {
14590                     typeInfo tiPtr   = impStackTop().seTypeInfo;
14591                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14592
14593                     Verify(!verIsByRefLike(tiInstr), "mkrefany of byref-like class");
14594                     Verify(!tiPtr.IsReadonlyByRef(), "readonly byref used with mkrefany");
14595                     Verify(typeInfo::AreEquivalent(tiPtr.DereferenceByRef(), tiInstr), "type mismatch");
14596                 }
14597
14598                 accessAllowedResult =
14599                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14600                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14601
14602                 op1 = impPopStack().val;
14603
14604                 // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec.
14605                 // But JIT32 allowed it, so we continue to allow it.
14606                 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT);
14607
14608                 // MKREFANY returns a struct.  op2 is the class token.
14609                 op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2);
14610
14611                 impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass()));
14612                 break;
14613
14614             case CEE_LDOBJ:
14615             {
14616                 oper = GT_OBJ;
14617                 assertImp(sz == sizeof(unsigned));
14618
14619                 _impResolveToken(CORINFO_TOKENKIND_Class);
14620
14621                 JITDUMP(" %08X", resolvedToken.token);
14622
14623             OBJ:
14624
14625                 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14626
14627                 if (tiVerificationNeeded)
14628                 {
14629                     typeInfo tiPtr = impStackTop().seTypeInfo;
14630
14631                     // Make sure we have a byref
14632                     if (!tiPtr.IsByRef())
14633                     {
14634                         Verify(false, "pointer not byref");
14635                         compUnsafeCastUsed = true;
14636                     }
14637                     typeInfo tiPtrVal = DereferenceByRef(tiPtr);
14638
14639                     if (!tiCompatibleWith(tiPtrVal, tiRetVal, false))
14640                     {
14641                         Verify(false, "type of address incompatible with type operand");
14642                         compUnsafeCastUsed = true;
14643                     }
14644                     tiRetVal.NormaliseForStack();
14645                 }
14646                 else
14647                 {
14648                     compUnsafeCastUsed = true;
14649                 }
14650
14651                 if (eeIsValueClass(resolvedToken.hClass))
14652                 {
14653                     lclTyp = TYP_STRUCT;
14654                 }
14655                 else
14656                 {
14657                     lclTyp = TYP_REF;
14658                     opcode = CEE_LDIND_REF;
14659                     goto LDIND_POST_VERIFY;
14660                 }
14661
14662                 op1 = impPopStack().val;
14663
14664                 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL);
14665
14666                 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
14667                 if (impIsPrimitive(jitTyp))
14668                 {
14669                     op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1);
14670
14671                     // Could point anywhere, for example a boxed class static int
14672                     op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF;
14673                     assertImp(varTypeIsArithmetic(op1->gtType));
14674                 }
14675                 else
14676                 {
14677                     // OBJ returns a struct; the opcode takes an inline argument
14678                     // which is the class token of the loaded object
14679                     op1 = gtNewObjNode(resolvedToken.hClass, op1);
14680                 }
14681                 op1->gtFlags |= GTF_EXCEPT;
14682
14683                 impPushOnStack(op1, tiRetVal);
14684                 break;
14685             }
14686
14687             case CEE_LDLEN:
14688                 if (tiVerificationNeeded)
14689                 {
14690                     typeInfo tiArray = impStackTop().seTypeInfo;
14691                     Verify(verIsSDArray(tiArray), "bad array");
14692                     tiRetVal = typeInfo(TI_INT);
14693                 }
14694
14695                 op1 = impPopStack().val;
14696                 if (!opts.MinOpts() && !opts.compDbgCode)
14697                 {
14698                     /* Use the GT_ARR_LENGTH operator so range check optimizations see this */
14699                     GenTreeArrLen* arrLen =
14700                         new (this, GT_ARR_LENGTH) GenTreeArrLen(TYP_INT, op1, offsetof(CORINFO_Array, length));
14701
14702                     /* Mark the block as containing a length expression */
14703
14704                     if (op1->gtOper == GT_LCL_VAR)
14705                     {
14706                         block->bbFlags |= BBF_HAS_IDX_LEN;
14707                     }
14708
14709                     op1 = arrLen;
14710                 }
14711                 else
14712                 {
14713                     /* Create the expression "*(array_addr + ArrLenOffs)" */
14714                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
14715                                         gtNewIconNode(offsetof(CORINFO_Array, length), TYP_I_IMPL));
14716                     op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
14717                     op1->gtFlags |= GTF_IND_ARR_LEN;
14718                 }
14719
14720                 /* An indirection will cause a GPF if the address is null */
14721                 op1->gtFlags |= GTF_EXCEPT;
14722
14723                 /* Push the result back on the stack */
14724                 impPushOnStack(op1, tiRetVal);
14725                 break;
14726
14727             case CEE_BREAK:
14728                 op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID);
14729                 goto SPILL_APPEND;
14730
14731             case CEE_NOP:
14732                 if (opts.compDbgCode)
14733                 {
14734                     op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
14735                     goto SPILL_APPEND;
14736                 }
14737                 break;
14738
14739             /******************************** NYI *******************************/
14740
14741             case 0xCC:
14742                 OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n");
14743
14744             case CEE_ILLEGAL:
14745             case CEE_MACRO_END:
14746
14747             default:
14748                 BADCODE3("unknown opcode", ": %02X", (int)opcode);
14749         }
14750
14751         codeAddr += sz;
14752         prevOpcode = opcode;
14753
14754         prefixFlags = 0;
14755         assert(!insertLdloc || opcode == CEE_DUP);
14756     }
14757
14758     assert(!insertLdloc);
14759
14760     return;
14761 #undef _impResolveToken
14762 }
14763 #ifdef _PREFAST_
14764 #pragma warning(pop)
14765 #endif
14766
14767 // Push a local/argument tree on the operand stack
14768 void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal)
14769 {
14770     tiRetVal.NormaliseForStack();
14771
14772     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr())
14773     {
14774         tiRetVal.SetUninitialisedObjRef();
14775     }
14776
14777     impPushOnStack(op, tiRetVal);
14778 }
14779
14780 // Load a local/argument on the operand stack
14781 // lclNum is an index into lvaTable *NOT* the arg/lcl index in the IL
14782 void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, typeInfo tiRetVal)
14783 {
14784     var_types lclTyp;
14785
14786     if (lvaTable[lclNum].lvNormalizeOnLoad())
14787     {
14788         lclTyp = lvaGetRealType(lclNum);
14789     }
14790     else
14791     {
14792         lclTyp = lvaGetActualType(lclNum);
14793     }
14794
14795     impPushVar(gtNewLclvNode(lclNum, lclTyp, offset), tiRetVal);
14796 }
14797
14798 // Load an argument on the operand stack
14799 // Shared by the various CEE_LDARG opcodes
14800 // ilArgNum is the argument index as specified in IL.
14801 // It will be mapped to the correct lvaTable index
14802 void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset)
14803 {
14804     Verify(ilArgNum < info.compILargsCount, "bad arg num");
14805
14806     if (compIsForInlining())
14807     {
14808         if (ilArgNum >= info.compArgsCount)
14809         {
14810             compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER);
14811             return;
14812         }
14813
14814         impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo),
14815                    impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo);
14816     }
14817     else
14818     {
14819         if (ilArgNum >= info.compArgsCount)
14820         {
14821             BADCODE("Bad IL");
14822         }
14823
14824         unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param
14825
14826         if (lclNum == info.compThisArg)
14827         {
14828             lclNum = lvaArg0Var;
14829         }
14830
14831         impLoadVar(lclNum, offset);
14832     }
14833 }
14834
14835 // Load a local on the operand stack
14836 // Shared by the various CEE_LDLOC opcodes
14837 // ilLclNum is the local index as specified in IL.
14838 // It will be mapped to the correct lvaTable index
14839 void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset)
14840 {
14841     if (tiVerificationNeeded)
14842     {
14843         Verify(ilLclNum < info.compMethodInfo->locals.numArgs, "bad loc num");
14844         Verify(info.compInitMem, "initLocals not set");
14845     }
14846
14847     if (compIsForInlining())
14848     {
14849         if (ilLclNum >= info.compMethodInfo->locals.numArgs)
14850         {
14851             compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER);
14852             return;
14853         }
14854
14855         // Get the local type
14856         var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo;
14857
14858         typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo;
14859
14860         /* Have we allocated a temp for this local? */
14861
14862         unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp"));
14863
14864         // All vars of inlined methods should be !lvNormalizeOnLoad()
14865
14866         assert(!lvaTable[lclNum].lvNormalizeOnLoad());
14867         lclTyp = genActualType(lclTyp);
14868
14869         impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal);
14870     }
14871     else
14872     {
14873         if (ilLclNum >= info.compMethodInfo->locals.numArgs)
14874         {
14875             BADCODE("Bad IL");
14876         }
14877
14878         unsigned lclNum = info.compArgsCount + ilLclNum;
14879
14880         impLoadVar(lclNum, offset);
14881     }
14882 }
14883
14884 #ifdef _TARGET_ARM_
14885 /**************************************************************************************
14886  *
14887  *  When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the
14888  *  dst struct, because struct promotion will turn it into a float/double variable while
14889  *  the rhs will be an int/long variable. We don't code generate assignment of int into
14890  *  a float, but there is nothing that might prevent us from doing so. The tree, however,
14891  *  would look like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int))
14892  *
14893  *  tmpNum - the lcl dst variable num that is a struct.
14894  *  src    - the src tree assigned to the dest that is a struct/int (when varargs call.)
14895  *  hClass - the type handle for the struct variable.
14896  *
14897  *  TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play,
14898  *        however, we could do a codegen of transferring from int to float registers
14899  *        (transfer, not a cast.)
14900  *
14901  */
14902 void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTreePtr src, CORINFO_CLASS_HANDLE hClass)
14903 {
14904     if (src->gtOper == GT_CALL && src->gtCall.IsVarargs() && IsHfa(hClass))
14905     {
14906         int       hfaSlots = GetHfaCount(hClass);
14907         var_types hfaType  = GetHfaType(hClass);
14908
14909         // If we have varargs, we morph the method's return type to be "int" at the importer, irrespective of
14910         // its original type (struct/float), because the ABI calls out that the return is in integer registers.
14911         // We don't want struct promotion to replace an expression like this:
14912         //   lclFld_int = callvar_int() into lclFld_float = callvar_int();
14913         // This means an int is getting assigned to a float without a cast. Prevent the promotion.
14914         if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) ||
14915             (hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES))
14916         {
14917             // Make sure this struct type stays as struct so we can receive the call in a struct.
14918             lvaTable[tmpNum].lvIsMultiRegRet = true;
14919         }
14920     }
14921 }
14922 #endif // _TARGET_ARM_
14923
14924 #if FEATURE_MULTIREG_RET
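      // Assign a multi-reg returned value to a freshly grabbed temp and return a use of that temp.
      // The temp is marked lvIsMultiRegRet (and the use GTF_DONT_CSE) so that the value stays
      // together and is not broken apart by struct promotion or CSE.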
14925 GenTreePtr Compiler::impAssignMultiRegTypeToVar(GenTreePtr op, CORINFO_CLASS_HANDLE hClass)
14926 {
14927     unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return."));
14928     impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_NONE);
14929     GenTreePtr ret = gtNewLclvNode(tmpNum, op->gtType);
14930
14931     // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
14932     ret->gtFlags |= GTF_DONT_CSE;
14933
14934     assert(IsMultiRegReturnedType(hClass));
14935
14936     // Mark the var so that fields are not promoted and stay together.
14937     lvaTable[tmpNum].lvIsMultiRegRet = true;
14938
14939     return ret;
14940 }
14941 #endif // FEATURE_MULTIREG_RET
14942
14943 // Do the import for a return instruction.
14944 // Returns false if inlining was aborted.
14945 // 'opcode' can be ret, or a call opcode in the case of a tail.call.
14946 bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE& opcode)
14947 {
14948     if (tiVerificationNeeded)
14949     {
14950         verVerifyThisPtrInitialised();
14951
14952         unsigned expectedStack = 0;
14953         if (info.compRetType != TYP_VOID)
14954         {
14955             typeInfo tiVal = impStackTop().seTypeInfo;
14956             typeInfo tiDeclared =
14957                 verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
14958
14959             Verify(!verIsByRefLike(tiDeclared) || verIsSafeToReturnByRef(tiVal), "byref return");
14960
14961             Verify(tiCompatibleWith(tiVal, tiDeclared.NormaliseForStack(), true), "type mismatch");
14962             expectedStack = 1;
14963         }
14964         Verify(verCurrentState.esStackDepth == expectedStack, "stack non-empty on return");
14965     }
14966
14967     GenTree*             op2       = nullptr;
14968     GenTree*             op1       = nullptr;
14969     CORINFO_CLASS_HANDLE retClsHnd = nullptr;
14970
14971     if (info.compRetType != TYP_VOID)
14972     {
14973         StackEntry se = impPopStack(retClsHnd);
14974         op2           = se.val;
14975
14976         if (!compIsForInlining())
14977         {
14978             impBashVarAddrsToI(op2);
14979             op2 = impImplicitIorI4Cast(op2, info.compRetType);
14980             op2 = impImplicitR4orR8Cast(op2, info.compRetType);
14981             assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) ||
14982                       ((op2->TypeGet() == TYP_I_IMPL) && (info.compRetType == TYP_BYREF)) ||
14983                       ((op2->TypeGet() == TYP_BYREF) && (info.compRetType == TYP_I_IMPL)) ||
14984                       (varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) ||
14985                       (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType)));
14986
14987 #ifdef DEBUG
14988             if (opts.compGcChecks && info.compRetType == TYP_REF)
14989             {
14990                 // DDB 3483  : JIT Stress: early termination of GC ref's life time in exception code path
14991                 // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with
14992                 // one-return BB.
14993
14994                 assert(op2->gtType == TYP_REF);
14995
14996                 // confirm that the argument is a GC pointer (for debugging (GC stress))
14997                 GenTreeArgList* args = gtNewArgList(op2);
14998                 op2                  = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, 0, args);
14999
15000                 if (verbose)
15001                 {
15002                     printf("\ncompGcChecks tree:\n");
15003                     gtDispTree(op2);
15004                 }
15005             }
15006 #endif
15007         }
15008         else
15009         {
15010             // inlinee's stack should be empty now.
15011             assert(verCurrentState.esStackDepth == 0);
15012
15013 #ifdef DEBUG
15014             if (verbose)
15015             {
15016                 printf("\n\n    Inlinee Return expression (before normalization)  =>\n");
15017                 gtDispTree(op2);
15018             }
15019 #endif
15020
15021             // Make sure the type matches the original call.
15022
15023             var_types returnType       = genActualType(op2->gtType);
15024             var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType;
15025             if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT))
15026             {
15027                 originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass);
15028             }
15029
15030             if (returnType != originalCallType)
15031             {
15032                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH);
15033                 return false;
15034             }
15035
15036             // Below, we are going to set impInlineInfo->retExpr to the tree with the return
15037             // expression. At this point, retExpr could already be set if there are multiple
15038             // return blocks (meaning lvaInlineeReturnSpillTemp != BAD_VAR_NUM) and one of
15039             // the other blocks already set it. If there is only a single return block,
15040             // retExpr shouldn't be set. However, this is not true if we reimport a block
15041             // with a return. In that case, retExpr will be set, then the block will be
15042             // reimported, but retExpr won't get cleared as part of setting the block to
15043             // be reimported. The reimported retExpr value should be the same, so even if
15044             // we don't unconditionally overwrite it, it shouldn't matter.
15045             if (info.compRetNativeType != TYP_STRUCT)
15046             {
15047                 // compRetNativeType is not TYP_STRUCT.
15048                 // This implies it could be either a scalar type or SIMD vector type or
15049                 // a struct type that can be normalized to a scalar type.
15050
15051                 if (varTypeIsStruct(info.compRetType))
15052                 {
15053                     noway_assert(info.compRetBuffArg == BAD_VAR_NUM);
15054                     // adjust the type away from struct to integral
15055                     // and no normalizing
15056                     op2 = impFixupStructReturnType(op2, retClsHnd);
15057                 }
15058                 else
15059                 {
15060                     // Do we have to normalize?
15061                     var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType);
15062                     if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) &&
15063                         fgCastNeeded(op2, fncRealRetType))
15064                     {
15065                         // Small-typed return values are normalized by the callee
15066                         op2 = gtNewCastNode(TYP_INT, op2, fncRealRetType);
15067                     }
15068                 }
15069
15070                 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15071                 {
15072                     assert(info.compRetNativeType != TYP_VOID &&
15073                            (fgMoreThanOneReturnBlock() || impInlineInfo->hasPinnedLocals));
15074
15075                     // This is a bit of a workaround...
15076                     // If we are inlining a call that returns a struct, where the actual "native" return type is
15077                     // not a struct (for example, the struct is composed of exactly one int, and the native
15078                     // return type is thus an int), and the inlinee has multiple return blocks (thus,
15079                     // lvaInlineeReturnSpillTemp is != BAD_VAR_NUM, and is the index of a local var that is set
15080                     // to the *native* return type), and at least one of the return blocks is the result of
15081                     // a call, then we have a problem. The situation is like this (from a failed test case):
15082                     //
15083                     // inliner:
15084                     //      // Note: valuetype plinq_devtests.LazyTests/LIX is a struct with only a single int
15085                     //      call !!0 [mscorlib]System.Threading.LazyInitializer::EnsureInitialized<valuetype
15086                     //      plinq_devtests.LazyTests/LIX>(!!0&, bool&, object&, class [mscorlib]System.Func`1<!!0>)
15087                     //
15088                     // inlinee:
15089                     //      ...
15090                     //      ldobj      !!T                 // this gets bashed to a GT_LCL_FLD, type TYP_INT
15091                     //      ret
15092                     //      ...
15093                     //      call       !!0 System.Threading.LazyInitializer::EnsureInitializedCore<!!0>(!!0&, bool&,
15094                     //      object&, class System.Func`1<!!0>)
15095                     //      ret
15096                     //
15097                     // In the code above, when we call impFixupStructReturnType(), we will change the op2 return type
15098                     // of the inlinee return node, but we don't do that for GT_CALL nodes, which we delay until
15099                     // morphing when we call fgFixupStructReturn(). We do this, apparently, to handle nested
15100                     // inlining properly by leaving the correct type on the GT_CALL node through importing.
15101                     //
15102                     // To fix this, for this case, we temporarily change the GT_CALL node type to the
15103                     // native return type, which is what it will be set to eventually. We generate the
15104                     // assignment to the return temp, using the correct type, and then restore the GT_CALL
15105                     // node type. During morphing, the GT_CALL will get the correct, final, native return type.
15106
15107                     bool restoreType = false;
15108                     if ((op2->OperGet() == GT_CALL) && (info.compRetType == TYP_STRUCT))
15109                     {
15110                         noway_assert(op2->TypeGet() == TYP_STRUCT);
15111                         op2->gtType = info.compRetNativeType;
15112                         restoreType = true;
15113                     }
15114
15115                     impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15116                                      (unsigned)CHECK_SPILL_ALL);
15117
15118                     GenTreePtr tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, op2->TypeGet());
15119
15120                     if (restoreType)
15121                     {
15122                         op2->gtType = TYP_STRUCT; // restore it to what it was
15123                     }
15124
15125                     op2 = tmpOp2;
15126
15127 #ifdef DEBUG
15128                     if (impInlineInfo->retExpr)
15129                     {
15130                         // Some other block(s) have seen the CEE_RET first.
15131                         // Better they spilled to the same temp.
15132                         assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR);
15133                         assert(impInlineInfo->retExpr->gtLclVarCommon.gtLclNum == op2->gtLclVarCommon.gtLclNum);
15134                     }
15135 #endif
15136                 }
15137
15138 #ifdef DEBUG
15139                 if (verbose)
15140                 {
15141                     printf("\n\n    Inlinee Return expression (after normalization) =>\n");
15142                     gtDispTree(op2);
15143                 }
15144 #endif
15145
15146                 // Report the return expression
15147                 impInlineInfo->retExpr = op2;
15148             }
15149             else
15150             {
15151                 // compRetNativeType is TYP_STRUCT.
15152                 // This implies a struct return via a RetBuf arg or a multi-reg struct return.
15153
15154                 GenTreePtr iciCall = impInlineInfo->iciCall;
15155                 assert(iciCall->gtOper == GT_CALL);
15156
15157                 // Assign the inlinee return into a spill temp.
15158                 // spill temp only exists if there are multiple return points
15159                 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15160                 {
15161                     // in this case we have to insert multiple struct copies to the temp
15162                     // and the retexpr is just the temp.
15163                     assert(info.compRetNativeType != TYP_VOID);
15164                     assert(fgMoreThanOneReturnBlock() || impInlineInfo->hasPinnedLocals);
15165
15166                     impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15167                                      (unsigned)CHECK_SPILL_ALL);
15168                 }
15169
15170 #if defined(_TARGET_ARM_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15171 #if defined(_TARGET_ARM_)
15172                 // TODO-ARM64-NYI: HFA
15173                 // TODO-AMD64-Unix and TODO-ARM: once the ARM64 functionality is implemented, the
15174                 // next ifdefs could be refactored into a single method with the ifdef inside.
15175                 if (IsHfa(retClsHnd))
15176                 {
15177 // Same as !IsHfa but just don't bother with impAssignStructPtr.
15178 #else  // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15179                 ReturnTypeDesc retTypeDesc;
15180                 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15181                 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
15182
15183                 if (retRegCount != 0)
15184                 {
15185                     // If single eightbyte, the return type would have been normalized and there won't be a temp var.
15186                     // This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes -
15187                     // max allowed.)
15188                     assert(retRegCount == MAX_RET_REG_COUNT);
15189                     // Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr.
15190                     CLANG_FORMAT_COMMENT_ANCHOR;
15191 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15192
15193                     if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15194                     {
15195                         if (!impInlineInfo->retExpr)
15196                         {
15197 #if defined(_TARGET_ARM_)
15198                             impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType);
15199 #else  // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15200                             // The inlinee compiler has figured out the type of the temp already. Use it here.
15201                             impInlineInfo->retExpr =
15202                                 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
15203 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15204                         }
15205                     }
15206                     else
15207                     {
15208                         impInlineInfo->retExpr = op2;
15209                     }
15210                 }
15211                 else
15212 #elif defined(_TARGET_ARM64_)
15213                 ReturnTypeDesc retTypeDesc;
15214                 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15215                 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
15216
15217                 if (retRegCount != 0)
15218                 {
15219                     assert(!iciCall->AsCall()->HasRetBufArg());
15220                     assert(retRegCount >= 2);
15221                     if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15222                     {
15223                         if (!impInlineInfo->retExpr)
15224                         {
15225                             // The inlinee compiler has figured out the type of the temp already. Use it here.
15226                             impInlineInfo->retExpr =
15227                                 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
15228                         }
15229                     }
15230                     else
15231                     {
15232                         impInlineInfo->retExpr = op2;
15233                     }
15234                 }
15235                 else
15236 #endif // defined(_TARGET_ARM64_)
15237                 {
15238                     assert(iciCall->AsCall()->HasRetBufArg());
15239                     GenTreePtr dest = gtCloneExpr(iciCall->gtCall.gtCallArgs->gtOp.gtOp1);
15240                     // spill temp only exists if there are multiple return points
15241                     if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15242                     {
15243                         // if this is the first return we have seen set the retExpr
15244                         if (!impInlineInfo->retExpr)
15245                         {
15246                             impInlineInfo->retExpr =
15247                                 impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType),
15248                                                    retClsHnd, (unsigned)CHECK_SPILL_ALL);
15249                         }
15250                     }
15251                     else
15252                     {
15253                         impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
15254                     }
15255                 }
15256             }
15257         }
15258     }
15259
15260     if (compIsForInlining())
15261     {
15262         return true;
15263     }
15264
15265     if (info.compRetType == TYP_VOID)
15266     {
15267         // return void
15268         op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
15269     }
15270     else if (info.compRetBuffArg != BAD_VAR_NUM)
15271     {
15272         // Assign value to return buff (first param)
15273         GenTreePtr retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF, impCurStmtOffs);
15274
15275         op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
15276         impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
15277
15278         // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX).
15279         CLANG_FORMAT_COMMENT_ANCHOR;
15280
15281 #if defined(_TARGET_AMD64_)
15282
15283         // The x64 (System V and Win64) calling conventions require the implicit
15284         // return buffer to be returned explicitly (in RAX).
15285         // Change the return type to be BYREF.
15286         op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
15287 #else  // !defined(_TARGET_AMD64_)
15288         // On non-AMD64 targets the profiler hook requires the implicit RetBuf to be returned explicitly.
15289         // In that case the return value of the function is changed to BYREF.
15290         // If the profiler hook is not needed, the return type of the function is TYP_VOID.
15291         if (compIsProfilerHookNeeded())
15292         {
15293             op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
15294         }
15295         else
15296         {
15297             // return void
15298             op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
15299         }
15300 #endif // !defined(_TARGET_AMD64_)
15301     }
15302     else if (varTypeIsStruct(info.compRetType))
15303     {
15304 #if !FEATURE_MULTIREG_RET
15305         // For both ARM architectures the HFA native types are maintained as structs.
15306         // Also on System V AMD64 the multireg struct returns are left as structs.
15307         noway_assert(info.compRetNativeType != TYP_STRUCT);
15308 #endif
15309         op2 = impFixupStructReturnType(op2, retClsHnd);
15310         // return op2
15311         op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetNativeType), op2);
15312     }
15313     else
15314     {
15315         // return op2
15316         op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2);
15317     }
15318
15319     // We must have imported a tailcall and jumped to RET
15320     if (prefixFlags & PREFIX_TAILCALL)
15321     {
15322 #ifndef _TARGET_AMD64_
15323         // Jit64 compat:
15324         // This cannot be asserted on Amd64 since we permit the following IL pattern:
15325         //      tail.call
15326         //      pop
15327         //      ret
15328         assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode));
15329 #endif
15330
15331         opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES
15332
15333         // impImportCall() would have already appended TYP_VOID calls
15334         if (info.compRetType == TYP_VOID)
15335         {
15336             return true;
15337         }
15338     }
15339
15340     impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
15341 #ifdef DEBUG
15342     // Remember at which BC offset the tree was finished
15343     impNoteLastILoffs();
15344 #endif
15345     return true;
15346 }
15347
15348 /*****************************************************************************
15349  *  Mark the block as unimported.
15350  *  Note that the caller is responsible for calling impImportBlockPending(),
15351  *  with the appropriate stack-state
15352  */
15353
15354 inline void Compiler::impReimportMarkBlock(BasicBlock* block)
15355 {
15356 #ifdef DEBUG
15357     if (verbose && (block->bbFlags & BBF_IMPORTED))
15358     {
15359         printf("\nBB%02u will be reimported\n", block->bbNum);
15360     }
15361 #endif
15362
15363     block->bbFlags &= ~BBF_IMPORTED;
15364 }
15365
15366 /*****************************************************************************
15367  *  Mark the successors of the given block as unimported.
15368  *  Note that the caller is responsible for calling impImportBlockPending()
15369  *  for all the successors, with the appropriate stack-state.
15370  */
15371
15372 void Compiler::impReimportMarkSuccessors(BasicBlock* block)
15373 {
15374     for (unsigned i = 0; i < block->NumSucc(); i++)
15375     {
15376         impReimportMarkBlock(block->GetSucc(i));
15377     }
15378 }
15379
15380 /*****************************************************************************
15381  *
15382  *  Exception filter wrapper: handle only the verification exception
15383  *  (SEH_VERIFICATION_EXCEPTION) and let all other exceptions continue the search.
15384  */
15385
15386 LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
15387 {
15388     if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION)
15389     {
15390         return EXCEPTION_EXECUTE_HANDLER;
15391     }
15392
15393     return EXCEPTION_CONTINUE_SEARCH;
15394 }
15395
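      // Walk the EH regions that enclose 'block' and queue their handler (and filter) blocks for
      // importing with the proper entry stack state: empty, or just the exception object for
      // handlers that receive one. For a try-entry block this also checks that the evaluation
      // stack is empty and that 'this' is initialized where the verifier requires it.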
15396 void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
15397 {
15398     assert(block->hasTryIndex());
15399     assert(!compIsForInlining());
15400
15401     unsigned  tryIndex = block->getTryIndex();
15402     EHblkDsc* HBtab    = ehGetDsc(tryIndex);
15403
15404     if (isTryStart)
15405     {
15406         assert(block->bbFlags & BBF_TRY_BEG);
15407
15408         // The Stack must be empty
15409         //
15410         if (block->bbStkDepth != 0)
15411         {
15412             BADCODE("Evaluation stack must be empty on entry into a try block");
15413         }
15414     }
15415
15416     // Save the stack contents, we'll need to restore it later
15417     //
15418     SavedStack blockState;
15419     impSaveStackState(&blockState, false);
15420
15421     while (HBtab != nullptr)
15422     {
15423         if (isTryStart)
15424         {
15425             // Are we verifying that an instance constructor properly initializes its 'this' pointer once?
15426             //  We do not allow the 'this' pointer to be uninitialized when entering most kinds of try regions
15427             //
15428             if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
15429             {
15430                 // We trigger an invalid program exception here unless we have a try/fault region.
15431                 //
15432                 if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter())
15433                 {
15434                     BADCODE(
15435                         "The 'this' pointer of an instance constructor is not initialized upon entry to a try region");
15436                 }
15437                 else
15438                 {
15439                     // Allow a try/fault region to proceed.
15440                     assert(HBtab->HasFaultHandler());
15441                 }
15442             }
15443
15444             /* Recursively process the handler block */
15445             BasicBlock* hndBegBB = HBtab->ebdHndBeg;
15446
15447             //  Construct the proper verification stack state
15448             //   either empty or one that contains just
15449             //   the Exception Object that we are dealing with
15450             //
15451             verCurrentState.esStackDepth = 0;
15452
15453             if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp))
15454             {
15455                 CORINFO_CLASS_HANDLE clsHnd;
15456
15457                 if (HBtab->HasFilter())
15458                 {
15459                     clsHnd = impGetObjectClass();
15460                 }
15461                 else
15462                 {
15463                     CORINFO_RESOLVED_TOKEN resolvedToken;
15464
15465                     resolvedToken.tokenContext = impTokenLookupContextHandle;
15466                     resolvedToken.tokenScope   = info.compScopeHnd;
15467                     resolvedToken.token        = HBtab->ebdTyp;
15468                     resolvedToken.tokenType    = CORINFO_TOKENKIND_Class;
15469                     info.compCompHnd->resolveToken(&resolvedToken);
15470
15471                     clsHnd = resolvedToken.hClass;
15472                 }
15473
15474                 // push the catch arg on the stack, spill to a temp if necessary
15475                 // Note: can update HBtab->ebdHndBeg!
15476                 hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd);
15477             }
15478
15479             // Queue up the handler for importing
15480             //
15481             impImportBlockPending(hndBegBB);
15482
15483             if (HBtab->HasFilter())
15484             {
15485                 /* @VERIFICATION : Ideally the end of filter state should get
15486                    propagated to the catch handler; this is an incompleteness,
15487                    but is not a security/compliance issue, since the only
15488                    interesting state is the 'thisInit' state.
15489                    */
15490
15491                 verCurrentState.esStackDepth = 0;
15492
15493                 BasicBlock* filterBB = HBtab->ebdFilter;
15494
15495                 // push the catch arg on the stack, spill to a temp if necessary
15496                 // Note: can update HBtab->ebdFilter!
15497                 filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass());
15498
15499                 impImportBlockPending(filterBB);
15500             }
15501         }
15502         else if (verTrackObjCtorInitState && HBtab->HasFaultHandler())
15503         {
15504             /* Recursively process the handler block */
15505
15506             verCurrentState.esStackDepth = 0;
15507
15508             // Queue up the fault handler for importing
15509             //
15510             impImportBlockPending(HBtab->ebdHndBeg);
15511         }
15512
15513         // Now process our enclosing try index (if any)
15514         //
15515         tryIndex = HBtab->ebdEnclosingTryIndex;
15516         if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
15517         {
15518             HBtab = nullptr;
15519         }
15520         else
15521         {
15522             HBtab = ehGetDsc(tryIndex);
15523         }
15524     }
15525
15526     // Restore the stack contents
15527     impRestoreStackState(&blockState);
15528 }
15529
15530 //***************************************************************
15531 // Import the instructions for the given basic block.  Perform
15532 // verification, throwing an exception on failure.  Push any successor blocks that are enabled for the first
15533 // time, or whose verification pre-state is changed.
15534
15535 #ifdef _PREFAST_
15536 #pragma warning(push)
15537 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
15538 #endif
15539 void Compiler::impImportBlock(BasicBlock* block)
15540 {
15541     // BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to
15542     // handle them specially. In particular, there is no IL to import for them, but we do need
15543     // to mark them as imported and put their successors on the pending import list.
15544     if (block->bbFlags & BBF_INTERNAL)
15545     {
15546         JITDUMP("Marking BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", block->bbNum);
15547         block->bbFlags |= BBF_IMPORTED;
15548
15549         for (unsigned i = 0; i < block->NumSucc(); i++)
15550         {
15551             impImportBlockPending(block->GetSucc(i));
15552         }
15553
15554         return;
15555     }
15556
15557     bool markImport;
15558
15559     assert(block);
15560
15561     /* Make the block globally available */
15562
15563     compCurBB = block;
15564
15565 #ifdef DEBUG
15566     /* Initialize the debug variables */
15567     impCurOpcName = "unknown";
15568     impCurOpcOffs = block->bbCodeOffs;
15569 #endif
15570
15571     /* Set the current stack state to the merged result */
15572     verResetCurrentState(block, &verCurrentState);
15573
15574     /* Now walk the code and import the IL into GenTrees */
15575
15576     struct FilterVerificationExceptionsParam
15577     {
15578         Compiler*   pThis;
15579         BasicBlock* block;
15580     };
15581     FilterVerificationExceptionsParam param;
15582
15583     param.pThis = this;
15584     param.block = block;
15585
15586     PAL_TRY(FilterVerificationExceptionsParam*, pParam, &param)
15587     {
15588         /* @VERIFICATION : For now, the only state propagation from try
15589            to its handler is "thisInit" state (stack is empty at start of try).
15590            In general, for state that we track in verification, we need to
15591            model the possibility that an exception might happen at any IL
15592            instruction, so we really need to merge all states that obtain
15593            between IL instructions in a try block into the start states of
15594            all handlers.
15595
15596            However we do not allow the 'this' pointer to be uninitialized when
15597            entering most kinds of try regions (only try/fault are allowed to have
15598            an uninitialized this pointer on entry to the try)
15599
15600            Fortunately, the stack is thrown away when an exception
15601            leads to a handler, so we don't have to worry about that.
15602            We DO, however, have to worry about the "thisInit" state.
15603            But only for the try/fault case.
15604
15605            The only allowed transition is from TIS_Uninit to TIS_Init.
15606
15607            So for a try/fault region for the fault handler block
15608            we will merge the start state of the try begin
15609            and the post-state of each block that is part of this try region
15610         */
15611
15612         // merge the start state of the try begin
15613         //
15614         if (pParam->block->bbFlags & BBF_TRY_BEG)
15615         {
15616             pParam->pThis->impVerifyEHBlock(pParam->block, true);
15617         }
15618
15619         pParam->pThis->impImportBlockCode(pParam->block);
15620
15621         // As discussed above:
15622         // merge the post-state of each block that is part of this try region
15623         //
15624         if (pParam->block->hasTryIndex())
15625         {
15626             pParam->pThis->impVerifyEHBlock(pParam->block, false);
15627         }
15628     }
15629     PAL_EXCEPT_FILTER(FilterVerificationExceptions)
15630     {
15631         verHandleVerificationFailure(block DEBUGARG(false));
15632     }
15633     PAL_ENDTRY
15634
15635     if (compDonotInline())
15636     {
15637         return;
15638     }
15639
15640     assert(!compDonotInline());
15641
15642     markImport = false;
15643
15644 SPILLSTACK:
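          // If the evaluation stack is non-empty at the end of this block, spill its contents
          // into temps shared with the successor blocks (the spill clique); those successors may
          // then need to be reimported if this changes their expected entry state.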
15645
15646     unsigned    baseTmp             = NO_BASE_TMP; // input temps assigned to successor blocks
15647     bool        reimportSpillClique = false;
15648     BasicBlock* tgtBlock            = nullptr;
15649
15650     /* If the stack is non-empty, we might have to spill its contents */
15651
15652     if (verCurrentState.esStackDepth != 0)
15653     {
15654         impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something
15655                                   // on the stack, its lifetime is hard to determine, simply
15656                                   // don't reuse such temps.
15657
15658         GenTreePtr addStmt = nullptr;
15659
15660         /* Do the successors of 'block' have any other predecessors ?
15661            We do not want to do some of the optimizations related to multiRef
15662            if we can reimport blocks */
15663
15664         unsigned multRef = impCanReimport ? unsigned(~0) : 0;
15665
15666         switch (block->bbJumpKind)
15667         {
15668             case BBJ_COND:
15669
15670                 /* Temporarily remove the 'jtrue' from the end of the tree list */
15671
15672                 assert(impTreeLast);
15673                 assert(impTreeLast->gtOper == GT_STMT);
15674                 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_JTRUE);
15675
15676                 addStmt     = impTreeLast;
15677                 impTreeLast = impTreeLast->gtPrev;
15678
15679                 /* Note if the next block has more than one ancestor */
15680
15681                 multRef |= block->bbNext->bbRefs;
15682
15683                 /* Does the next block have temps assigned? */
15684
15685                 baseTmp  = block->bbNext->bbStkTempsIn;
15686                 tgtBlock = block->bbNext;
15687
15688                 if (baseTmp != NO_BASE_TMP)
15689                 {
15690                     break;
15691                 }
15692
15693                 /* Try the target of the jump then */
15694
15695                 multRef |= block->bbJumpDest->bbRefs;
15696                 baseTmp  = block->bbJumpDest->bbStkTempsIn;
15697                 tgtBlock = block->bbJumpDest;
15698                 break;
15699
15700             case BBJ_ALWAYS:
15701                 multRef |= block->bbJumpDest->bbRefs;
15702                 baseTmp  = block->bbJumpDest->bbStkTempsIn;
15703                 tgtBlock = block->bbJumpDest;
15704                 break;
15705
15706             case BBJ_NONE:
15707                 multRef |= block->bbNext->bbRefs;
15708                 baseTmp  = block->bbNext->bbStkTempsIn;
15709                 tgtBlock = block->bbNext;
15710                 break;
15711
15712             case BBJ_SWITCH:
15713
15714                 BasicBlock** jmpTab;
15715                 unsigned     jmpCnt;
15716
15717                 /* Temporarily remove the GT_SWITCH from the end of the tree list */
15718
15719                 assert(impTreeLast);
15720                 assert(impTreeLast->gtOper == GT_STMT);
15721                 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_SWITCH);
15722
15723                 addStmt     = impTreeLast;
15724                 impTreeLast = impTreeLast->gtPrev;
15725
15726                 jmpCnt = block->bbJumpSwt->bbsCount;
15727                 jmpTab = block->bbJumpSwt->bbsDstTab;
15728
15729                 do
15730                 {
15731                     tgtBlock = (*jmpTab);
15732
15733                     multRef |= tgtBlock->bbRefs;
15734
15735                     // Thanks to spill cliques, we should have assigned all or none
15736                     assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn));
15737                     baseTmp = tgtBlock->bbStkTempsIn;
15738                     if (multRef > 1)
15739                     {
15740                         break;
15741                     }
15742                 } while (++jmpTab, --jmpCnt);
15743
15744                 break;
15745
15746             case BBJ_CALLFINALLY:
15747             case BBJ_EHCATCHRET:
15748             case BBJ_RETURN:
15749             case BBJ_EHFINALLYRET:
15750             case BBJ_EHFILTERRET:
15751             case BBJ_THROW:
15752                 NO_WAY("can't have 'unreached' end of BB with non-empty stack");
15753                 break;
15754
15755             default:
15756                 noway_assert(!"Unexpected bbJumpKind");
15757                 break;
15758         }
15759
15760         assert(multRef >= 1);
15761
15762         /* Do we have a base temp number? */
15763
15764         bool newTemps = (baseTmp == NO_BASE_TMP);
15765
15766         if (newTemps)
15767         {
15768             /* Grab enough temps for the whole stack */
15769             baseTmp = impGetSpillTmpBase(block);
15770         }
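        // Stack entry 'level' is spilled into local (baseTmp + level), so every block in the
        // spill clique agrees on which temp holds which stack slot.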
15771
15772         /* Spill all stack entries into temps */
15773         unsigned level, tempNum;
15774
15775         JITDUMP("\nSpilling stack entries into temps\n");
15776         for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++)
15777         {
15778             GenTreePtr tree = verCurrentState.esStack[level].val;
15779
15780             /* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from
15781                the other. This should merge to a byref in unverifiable code.
15782                However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the
15783                successor would be imported assuming there was a TYP_I_IMPL on
15784                the stack. Thus the value would not get GC-tracked. Hence,
15785                change the temp to TYP_BYREF and reimport the successors.
15786                Note: We should only allow this in unverifiable code.
15787             */
15788             if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL && !verNeedsVerification())
15789             {
15790                 lvaTable[tempNum].lvType = TYP_BYREF;
15791                 impReimportMarkSuccessors(block);
15792                 markImport = true;
15793             }
15794
15795 #ifdef _TARGET_64BIT_
15796             if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT)
15797             {
15798                 if (tiVerificationNeeded && tgtBlock->bbEntryState != nullptr &&
15799                     (tgtBlock->bbFlags & BBF_FAILED_VERIFICATION) == 0)
15800                 {
15801                     // Merge the current state into the entry state of block;
15802                     // the call to verMergeEntryStates must have changed
15803                     // the entry state of the block by merging the int local var
15804                     // and the native-int stack entry.
15805                     bool changed = false;
15806                     if (verMergeEntryStates(tgtBlock, &changed))
15807                     {
15808                         impRetypeEntryStateTemps(tgtBlock);
15809                         impReimportBlockPending(tgtBlock);
15810                         assert(changed);
15811                     }
15812                     else
15813                     {
15814                         tgtBlock->bbFlags |= BBF_FAILED_VERIFICATION;
15815                         break;
15816                     }
15817                 }
15818
15819                 // Some other block in the spill clique set this to "int", but now we have "native int".
15820                 // Change the type and go back to re-import any blocks that used the wrong type.
15821                 lvaTable[tempNum].lvType = TYP_I_IMPL;
15822                 reimportSpillClique      = true;
15823             }
15824             else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL)
15825             {
15826                 // Spill clique has decided this should be "native int", but this block only pushes an "int".
15827                 // Insert a sign-extension to "native int" so we match the clique.
15828                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
15829             }
15830
15831             // Consider the case where one branch left a 'byref' on the stack and the other leaves
15832             // an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same
15833             // size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64
15834             // behavior instead of asserting and then generating bad code (where we save/restore the
15835             // low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been
15836             // imported already, we need to change the type of the local and reimport the spill clique.
15837             // If the 'byref' side has been imported, we insert a cast from int to 'native int' to match
15838             // the 'byref' size.
15839             if (!tiVerificationNeeded)
15840             {
15841                 if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT)
15842                 {
15843                     // Some other block in the spill clique set this to "int", but now we have "byref".
15844                     // Change the type and go back to re-import any blocks that used the wrong type.
15845                     lvaTable[tempNum].lvType = TYP_BYREF;
15846                     reimportSpillClique      = true;
15847                 }
15848                 else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF)
15849                 {
15850                     // Spill clique has decided this should be "byref", but this block only pushes an "int".
15851                     // Insert a sign-extension to "native int" so we match the clique size.
15852                     verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
15853                 }
15854             }
15855 #endif // _TARGET_64BIT_
15856
15857 #if FEATURE_X87_DOUBLES
15858             // X87 stack doesn't differentiate between float/double
15859             // so promoting is no big deal.
15860             // For everybody else, keep it as float until we have a collision and then promote,
15861             // just like for x64's TYP_INT<->TYP_I_IMPL.
15862
15863             if (multRef > 1 && tree->gtType == TYP_FLOAT)
15864             {
15865                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
15866             }
15867
15868 #else // !FEATURE_X87_DOUBLES
15869
15870             if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT)
15871             {
15872                 // Some other block in the spill clique set this to "float", but now we have "double".
15873                 // Change the type and go back to re-import any blocks that used the wrong type.
15874                 lvaTable[tempNum].lvType = TYP_DOUBLE;
15875                 reimportSpillClique      = true;
15876             }
15877             else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE)
15878             {
15879                 // Spill clique has decided this should be "double", but this block only pushes a "float".
15880                 // Insert a cast to "double" so we match the clique.
15881                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
15882             }
15883
15884 #endif // FEATURE_X87_DOUBLES
15885
15886             /* If addStmt has a reference to tempNum (can only happen if we
15887                are spilling to the temps already used by a previous block),
15888                we need to spill addStmt */
15889
15890             if (addStmt && !newTemps && gtHasRef(addStmt->gtStmt.gtStmtExpr, tempNum, false))
15891             {
15892                 GenTreePtr addTree = addStmt->gtStmt.gtStmtExpr;
15893
15894                 if (addTree->gtOper == GT_JTRUE)
15895                 {
15896                     GenTreePtr relOp = addTree->gtOp.gtOp1;
15897                     assert(relOp->OperIsCompare());
15898
15899                     var_types type = genActualType(relOp->gtOp.gtOp1->TypeGet());
15900
15901                     if (gtHasRef(relOp->gtOp.gtOp1, tempNum, false))
15902                     {
15903                         unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1"));
15904                         impAssignTempGen(temp, relOp->gtOp.gtOp1, level);
15905                         type              = genActualType(lvaTable[temp].TypeGet());
15906                         relOp->gtOp.gtOp1 = gtNewLclvNode(temp, type);
15907                     }
15908
15909                     if (gtHasRef(relOp->gtOp.gtOp2, tempNum, false))
15910                     {
15911                         unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2"));
15912                         impAssignTempGen(temp, relOp->gtOp.gtOp2, level);
15913                         type              = genActualType(lvaTable[temp].TypeGet());
15914                         relOp->gtOp.gtOp2 = gtNewLclvNode(temp, type);
15915                     }
15916                 }
15917                 else
15918                 {
15919                     assert(addTree->gtOper == GT_SWITCH && genActualType(addTree->gtOp.gtOp1->gtType) == TYP_I_IMPL);
15920
15921                     unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH"));
15922                     impAssignTempGen(temp, addTree->gtOp.gtOp1, level);
15923                     addTree->gtOp.gtOp1 = gtNewLclvNode(temp, TYP_I_IMPL);
15924                 }
15925             }
15926
15927             /* Spill the stack entry, and replace with the temp */
15928
15929             if (!impSpillStackEntry(level, tempNum
15930 #ifdef DEBUG
15931                                     ,
15932                                     true, "Spill Stack Entry"
15933 #endif
15934                                     ))
15935             {
15936                 if (markImport)
15937                 {
15938                     BADCODE("bad stack state");
15939                 }
15940
15941                 // Oops. Something went wrong when spilling. Bad code.
15942                 verHandleVerificationFailure(block DEBUGARG(true));
15943
15944                 goto SPILLSTACK;
15945             }
15946         }
15947
15948         /* Put back the 'jtrue'/'switch' if we removed it earlier */
15949
15950         if (addStmt)
15951         {
15952             impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE);
15953         }
15954     }
15955
15956     // Some of the append/spill logic works on compCurBB
15957
15958     assert(compCurBB == block);
15959
15960     /* Save the tree list in the block */
15961     impEndTreeList(block);
15962
15963     // impEndTreeList sets BBF_IMPORTED on the block
15964     // We do *NOT* want to set it later than this because
15965     // impReimportSpillClique might clear it if this block is both a
15966     // predecessor and successor in the current spill clique
15967     assert(block->bbFlags & BBF_IMPORTED);
15968
15969     // If we had a int/native int, or float/double collision, we need to re-import
15970     if (reimportSpillClique)
15971     {
15972         // This will re-import all the successors of block (as well as each of their predecessors)
15973         impReimportSpillClique(block);
15974
15975         // For blocks that haven't been imported yet, we still need to mark them as pending import.
15976         for (unsigned i = 0; i < block->NumSucc(); i++)
15977         {
15978             BasicBlock* succ = block->GetSucc(i);
15979             if ((succ->bbFlags & BBF_IMPORTED) == 0)
15980             {
15981                 impImportBlockPending(succ);
15982             }
15983         }
15984     }
15985     else // the normal case
15986     {
15987         // otherwise just import the successors of block
15988
15989         /* Does this block jump to any other blocks? */
15990         for (unsigned i = 0; i < block->NumSucc(); i++)
15991         {
15992             impImportBlockPending(block->GetSucc(i));
15993         }
15994     }
15995 }
15996 #ifdef _PREFAST_
15997 #pragma warning(pop)
15998 #endif
15999
16000 /*****************************************************************************/
16001 //
16002 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16003 // necessary (and ensures that it is a member of the set of BBs on the list, by setting its byte in
16004 // impPendingBlockMembers).  Merges the current verification state into the verification state of "block"
16005 // (its "pre-state").
16006
16007 void Compiler::impImportBlockPending(BasicBlock* block)
16008 {
16009 #ifdef DEBUG
16010     if (verbose)
16011     {
16012         printf("\nimpImportBlockPending for BB%02u\n", block->bbNum);
16013     }
16014 #endif
16015
16016     // We will add a block to the pending set if it has not already been imported (or needs to be re-imported),
16017     // or if it has, but merging in a predecessor's post-state changes the block's pre-state.
16018     // (When we're doing verification, we always attempt the merge to detect verification errors.)
16019
16020     // If the block has not been imported, add to pending set.
16021     bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0);
16022
16023     // Initialize bbEntryState just the first time we try to add this block to the pending list
16024     // Just because bbEntryState is NULL doesn't mean the pre-state wasn't previously set;
16025     // we use NULL to indicate the 'common' state to avoid memory allocation.
16026     if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) &&
16027         (impGetPendingBlockMember(block) == 0))
16028     {
16029         verInitBBEntryState(block, &verCurrentState);
16030         assert(block->bbStkDepth == 0);
16031         block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth);
16032         assert(addToPending);
16033         assert(impGetPendingBlockMember(block) == 0);
16034     }
16035     else
16036     {
16037         // The stack should have the same height on entry to the block from all its predecessors.
16038         if (block->bbStkDepth != verCurrentState.esStackDepth)
16039         {
16040 #ifdef DEBUG
16041             char buffer[400];
16042             sprintf_s(buffer, sizeof(buffer),
16043                       "Block at offset %4.4x to %4.4x in %s entered with different stack depths.\n"
16044                       "Previous depth was %d, current depth is %d",
16045                       block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth,
16046                       verCurrentState.esStackDepth);
16047             buffer[400 - 1] = 0;
16048             NO_WAY(buffer);
16049 #else
16050             NO_WAY("Block entered with different stack depths");
16051 #endif
16052         }
16053
16054         // Additionally, if we need to verify, merge the verification state.
16055         if (tiVerificationNeeded)
16056         {
16057             // Merge the current state into the entry state of block; if this does not change the entry state
16058             // by merging, do not add the block to the pending-list.
16059             bool changed = false;
16060             if (!verMergeEntryStates(block, &changed))
16061             {
16062                 block->bbFlags |= BBF_FAILED_VERIFICATION;
16063                 addToPending = true; // We will pop it off, and check the flag set above.
16064             }
16065             else if (changed)
16066             {
16067                 addToPending = true;
16068
16069                 JITDUMP("Adding BB%02u to pending set due to new merge result\n", block->bbNum);
16070             }
16071         }
16072
16073         if (!addToPending)
16074         {
16075             return;
16076         }
16077
16078         if (block->bbStkDepth > 0)
16079         {
16080             // We need to fix the types of any spill temps that might have changed:
16081             //   int->native int, float->double, int->byref, etc.
16082             impRetypeEntryStateTemps(block);
16083         }
16084
16085         // OK, we must add to the pending list, if it's not already in it.
16086         if (impGetPendingBlockMember(block) != 0)
16087         {
16088             return;
16089         }
16090     }
16091
16092     // Get an entry to add to the pending list
16093
16094     PendingDsc* dsc;
16095
16096     if (impPendingFree)
16097     {
16098         // We can reuse one of the freed up dscs.
16099         dsc            = impPendingFree;
16100         impPendingFree = dsc->pdNext;
16101     }
16102     else
16103     {
16104         // We have to create a new dsc
16105         dsc = new (this, CMK_Unknown) PendingDsc;
16106     }
16107
16108     dsc->pdBB                 = block;
16109     dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth;
16110     dsc->pdThisPtrInit        = verCurrentState.thisInitialized;
16111
16112     // Save the stack trees for later
16113
16114     if (verCurrentState.esStackDepth)
16115     {
16116         impSaveStackState(&dsc->pdSavedStack, false);
16117     }
16118
16119     // Add the entry to the pending list
16120
16121     dsc->pdNext    = impPendingList;
16122     impPendingList = dsc;
16123     impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16124
16125     // Various assertions require us to now consider the block as not imported (at least for
16126     // the final time...)
16127     block->bbFlags &= ~BBF_IMPORTED;
16128
16129 #ifdef DEBUG
16130     if (verbose && 0)
16131     {
16132         printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16133     }
16134 #endif
16135 }
16136
16137 /*****************************************************************************/
16138 //
16139 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16140 // necessary (and ensures that it is a member of the set of BBs on the list, by setting its byte in
16141 // impPendingBlockMembers).  Does *NOT* change the existing "pre-state" of the block.
16142
16143 void Compiler::impReimportBlockPending(BasicBlock* block)
16144 {
16145     JITDUMP("\nimpReimportBlockPending for BB%02u", block->bbNum);
16146
16147     assert(block->bbFlags & BBF_IMPORTED);
16148
16149     // OK, we must add to the pending list, if it's not already in it.
16150     if (impGetPendingBlockMember(block) != 0)
16151     {
16152         return;
16153     }
16154
16155     // Get an entry to add to the pending list
16156
16157     PendingDsc* dsc;
16158
16159     if (impPendingFree)
16160     {
16161         // We can reuse one of the freed up dscs.
16162         dsc            = impPendingFree;
16163         impPendingFree = dsc->pdNext;
16164     }
16165     else
16166     {
16167         // We have to create a new dsc
16168         dsc = new (this, CMK_ImpStack) PendingDsc;
16169     }
16170
16171     dsc->pdBB = block;
16172
16173     if (block->bbEntryState)
16174     {
16175         dsc->pdThisPtrInit        = block->bbEntryState->thisInitialized;
16176         dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth;
16177         dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack;
16178     }
16179     else
16180     {
16181         dsc->pdThisPtrInit        = TIS_Bottom;
16182         dsc->pdSavedStack.ssDepth = 0;
16183         dsc->pdSavedStack.ssTrees = nullptr;
16184     }
16185
16186     // Add the entry to the pending list
16187
16188     dsc->pdNext    = impPendingList;
16189     impPendingList = dsc;
16190     impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16191
16192     // Various assertions require us to now consider the block as not imported (at least for
16193     // the final time...)
16194     block->bbFlags &= ~BBF_IMPORTED;
16195
16196 #ifdef DEBUG
16197     if (verbose && 0)
16198     {
16199         printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16200     }
16201 #endif
16202 }
16203
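// BlockListNode allocations are recycled through a simple free list (impBlockListNodeFreeList)
// so the allocate/free churn in impWalkSpillCliqueFromPred stays cheap.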
16204 void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp)
16205 {
16206     if (comp->impBlockListNodeFreeList == nullptr)
16207     {
16208         return (BlockListNode*)comp->compGetMem(sizeof(BlockListNode), CMK_BasicBlock);
16209     }
16210     else
16211     {
16212         BlockListNode* res             = comp->impBlockListNodeFreeList;
16213         comp->impBlockListNodeFreeList = res->m_next;
16214         return res;
16215     }
16216 }
16217
16218 void Compiler::FreeBlockListNode(Compiler::BlockListNode* node)
16219 {
16220     node->m_next             = impBlockListNodeFreeList;
16221     impBlockListNodeFreeList = node;
16222 }
16223
16224 void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback)
16225 {
16226     bool toDo = true;
16227
16228     noway_assert(!fgComputePredsDone);
16229     if (!fgCheapPredsValid)
16230     {
16231         fgComputeCheapPreds();
16232     }
16233
16234     BlockListNode* succCliqueToDo = nullptr;
16235     BlockListNode* predCliqueToDo = new (this) BlockListNode(block);
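    // Alternate between the two worklists until a fixed point is reached: visit the successors of
    // every known predecessor, then the predecessors of every known successor, adding each block to
    // the corresponding clique set the first time it is seen.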
16236     while (toDo)
16237     {
16238         toDo = false;
16239         // Look at the successors of every member of the predecessor to-do list.
16240         while (predCliqueToDo != nullptr)
16241         {
16242             BlockListNode* node = predCliqueToDo;
16243             predCliqueToDo      = node->m_next;
16244             BasicBlock* blk     = node->m_blk;
16245             FreeBlockListNode(node);
16246
16247             for (unsigned succNum = 0; succNum < blk->NumSucc(); succNum++)
16248             {
16249                 BasicBlock* succ = blk->GetSucc(succNum);
16250                 // If it's not already in the clique, add it, and also add it
16251                 // as a member of the successor "toDo" set.
16252                 if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0)
16253                 {
16254                     callback->Visit(SpillCliqueSucc, succ);
16255                     impSpillCliqueSetMember(SpillCliqueSucc, succ, 1);
16256                     succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo);
16257                     toDo           = true;
16258                 }
16259             }
16260         }
16261         // Look at the predecessors of every member of the successor to-do list.
16262         while (succCliqueToDo != nullptr)
16263         {
16264             BlockListNode* node = succCliqueToDo;
16265             succCliqueToDo      = node->m_next;
16266             BasicBlock* blk     = node->m_blk;
16267             FreeBlockListNode(node);
16268
16269             for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next)
16270             {
16271                 BasicBlock* predBlock = pred->block;
16272                 // If it's not already in the clique, add it, and also add it
16273                 // as a member of the predecessor "toDo" set.
16274                 if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0)
16275                 {
16276                     callback->Visit(SpillCliquePred, predBlock);
16277                     impSpillCliqueSetMember(SpillCliquePred, predBlock, 1);
16278                     predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo);
16279                     toDo           = true;
16280                 }
16281             }
16282         }
16283     }
16284
16285     // If this fails, it means we didn't walk the spill clique properly and somehow managed
16286     // to miss walking back to include the predecessor we started from.
16287     // The most likely cause: missing or out-of-date bbPreds.
16288     assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0);
16289 }
16290
16291 void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
16292 {
16293     if (predOrSucc == SpillCliqueSucc)
16294     {
16295         assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor.
16296         blk->bbStkTempsIn = m_baseTmp;
16297     }
16298     else
16299     {
16300         assert(predOrSucc == SpillCliquePred);
16301         assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor.
16302         blk->bbStkTempsOut = m_baseTmp;
16303     }
16304 }
16305
16306 void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
16307 {
16308     // For Preds we could be a little smarter and just find the existing store
16309     // and re-type it/add a cast, but that is complicated and hopefully very rare, so
16310     // just re-import the whole block (just like we do for successors)
16311
16312     if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0))
16313     {
16314         // If we haven't imported this block and we're not going to (because it isn't on
16315         // the pending list) then just ignore it for now.
16316
16317         // This block has either never been imported (EntryState == NULL) or it failed
16318         // verification. Neither state requires us to force it to be imported now.
16319         assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION));
16320         return;
16321     }
16322
16323     // For successors we have a valid verCurrentState, so just mark them for reimport
16324     // the 'normal' way
16325     // Unlike predecessors, we *DO* need to reimport the current block because the
16326     // initial import had the wrong entry state types.
16327     // Similarly, blocks that are currently on the pending list, still need to call
16328     // impImportBlockPending to fixup their entry state.
16329     if (predOrSucc == SpillCliqueSucc)
16330     {
16331         m_pComp->impReimportMarkBlock(blk);
16332
16333         // Set the current stack state to that of the blk->bbEntryState
16334         m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState);
16335         assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry());
16336
16337         m_pComp->impImportBlockPending(blk);
16338     }
16339     else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0))
16340     {
16341         // As described above, we are only visiting predecessors so they can
16342         // add the appropriate casts. Since we have already done that for the current
16343         // block, it does not need to be reimported.
16344         // Nor do we need to reimport blocks that are still pending, but not yet
16345         // imported.
16346         //
16347         // For predecessors, we have no state to seed the EntryState, so we just have
16348         // to assume the existing one is correct.
16349         // If the block is also a successor, it will get the EntryState properly
16350         // updated when it is visited as a successor in the above "if" block.
16351         assert(predOrSucc == SpillCliquePred);
16352         m_pComp->impReimportBlockPending(blk);
16353     }
16354 }
16355
16356 // Re-type the incoming lclVar nodes to match the varDsc.
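// A spill temp's type may have been widened (e.g. int -> native int, float -> double) after this
// block's entry state was captured, so refresh the cached lclVar types from the lvaTable entries.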
16357 void Compiler::impRetypeEntryStateTemps(BasicBlock* blk)
16358 {
16359     if (blk->bbEntryState != nullptr)
16360     {
16361         EntryState* es = blk->bbEntryState;
16362         for (unsigned level = 0; level < es->esStackDepth; level++)
16363         {
16364             GenTreePtr tree = es->esStack[level].val;
16365             if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD))
16366             {
16367                 unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
16368                 noway_assert(lclNum < lvaCount);
16369                 LclVarDsc* varDsc              = lvaTable + lclNum;
16370                 es->esStack[level].val->gtType = varDsc->TypeGet();
16371             }
16372         }
16373     }
16374 }
16375
16376 unsigned Compiler::impGetSpillTmpBase(BasicBlock* block)
16377 {
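    // If this block's spill clique already has a base temp (assigned when the block was visited as
    // a predecessor in an earlier walk), just reuse it.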
16378     if (block->bbStkTempsOut != NO_BASE_TMP)
16379     {
16380         return block->bbStkTempsOut;
16381     }
16382
16383 #ifdef DEBUG
16384     if (verbose)
16385     {
16386         printf("\n*************** In impGetSpillTmpBase(BB%02u)\n", block->bbNum);
16387     }
16388 #endif // DEBUG
16389
16390     // Otherwise, choose one, and propagate to all members of the spill clique.
16391     // Grab enough temps for the whole stack.
16392     unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries"));
16393     SetSpillTempsBase callback(baseTmp);
16394
16395     // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor
16396     // to one spill clique, and similarly can only be the successor to one spill clique.
16397     impWalkSpillCliqueFromPred(block, &callback);
16398
16399     return baseTmp;
16400 }
16401
16402 void Compiler::impReimportSpillClique(BasicBlock* block)
16403 {
16404 #ifdef DEBUG
16405     if (verbose)
16406     {
16407         printf("\n*************** In impReimportSpillClique(BB%02u)\n", block->bbNum);
16408     }
16409 #endif // DEBUG
16410
16411     // If we get here, it is because this block is already part of a spill clique
16412     // and one predecessor had an outgoing live stack slot of type int, and this
16413     // block has an outgoing live stack slot of type native int.
16414     // We need to reset these before traversal because they have already been set
16415     // by the previous walk to determine all the members of the spill clique.
16416     impInlineRoot()->impSpillCliquePredMembers.Reset();
16417     impInlineRoot()->impSpillCliqueSuccMembers.Reset();
16418
16419     ReimportSpillClique callback(this);
16420
16421     impWalkSpillCliqueFromPred(block, &callback);
16422 }
16423
16424 // Set the pre-state of "block" (which should not have a pre-state allocated) to
16425 // a copy of "srcState", cloning tree pointers as required.
16426 void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState)
16427 {
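    // A null bbEntryState represents the common "empty stack, this-state bottom" case
    // (see impImportBlockPending), so nothing needs to be allocated for it.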
16428     if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom)
16429     {
16430         block->bbEntryState = nullptr;
16431         return;
16432     }
16433
16434     block->bbEntryState = (EntryState*)compGetMemA(sizeof(EntryState));
16435
16436     // block->bbEntryState.esRefcount = 1;
16437
16438     block->bbEntryState->esStackDepth    = srcState->esStackDepth;
16439     block->bbEntryState->thisInitialized = TIS_Bottom;
16440
16441     if (srcState->esStackDepth > 0)
16442     {
16443         block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]);
16444         unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry);
16445
16446         memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize);
16447         for (unsigned level = 0; level < srcState->esStackDepth; level++)
16448         {
16449             GenTreePtr tree                         = srcState->esStack[level].val;
16450             block->bbEntryState->esStack[level].val = gtCloneExpr(tree);
16451         }
16452     }
16453
16454     if (verTrackObjCtorInitState)
16455     {
16456         verSetThisInit(block, srcState->thisInitialized);
16457     }
16458
16459     return;
16460 }
16461
16462 void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis)
16463 {
16464     assert(tis != TIS_Bottom); // Precondition.
16465     if (block->bbEntryState == nullptr)
16466     {
16467         block->bbEntryState = new (this, CMK_Unknown) EntryState();
16468     }
16469
16470     block->bbEntryState->thisInitialized = tis;
16471 }
16472
16473 /*
16474  * Resets the current state to the state at the start of the basic block
16475  */
16476 void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState)
16477 {
16478
16479     if (block->bbEntryState == nullptr)
16480     {
16481         destState->esStackDepth    = 0;
16482         destState->thisInitialized = TIS_Bottom;
16483         return;
16484     }
16485
16486     destState->esStackDepth = block->bbEntryState->esStackDepth;
16487
16488     if (destState->esStackDepth > 0)
16489     {
16490         unsigned stackSize = destState->esStackDepth * sizeof(StackEntry);
16491
16492         memcpy(destState->esStack, block->bbStackOnEntry(), stackSize);
16493     }
16494
16495     destState->thisInitialized = block->bbThisOnEntry();
16496
16497     return;
16498 }
16499
16500 ThisInitState BasicBlock::bbThisOnEntry()
16501 {
16502     return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom;
16503 }
16504
16505 unsigned BasicBlock::bbStackDepthOnEntry()
16506 {
16507     return (bbEntryState ? bbEntryState->esStackDepth : 0);
16508 }
16509
16510 void BasicBlock::bbSetStack(void* stackBuffer)
16511 {
16512     assert(bbEntryState);
16513     assert(stackBuffer);
16514     bbEntryState->esStack = (StackEntry*)stackBuffer;
16515 }
16516
16517 StackEntry* BasicBlock::bbStackOnEntry()
16518 {
16519     assert(bbEntryState);
16520     return bbEntryState->esStack;
16521 }
16522
16523 void Compiler::verInitCurrentState()
16524 {
16525     verTrackObjCtorInitState        = FALSE;
16526     verCurrentState.thisInitialized = TIS_Bottom;
16527
16528     if (tiVerificationNeeded)
16529     {
16530         // Track this ptr initialization
16531         if (!info.compIsStatic && (info.compFlags & CORINFO_FLG_CONSTRUCTOR) && lvaTable[0].lvVerTypeInfo.IsObjRef())
16532         {
16533             verTrackObjCtorInitState        = TRUE;
16534             verCurrentState.thisInitialized = TIS_Uninit;
16535         }
16536     }
16537
16538     // initialize stack info
16539
16540     verCurrentState.esStackDepth = 0;
16541     assert(verCurrentState.esStack != nullptr);
16542
16543     // copy current state to entry state of first BB
16544     verInitBBEntryState(fgFirstBB, &verCurrentState);
16545 }
16546
16547 Compiler* Compiler::impInlineRoot()
16548 {
16549     if (impInlineInfo == nullptr)
16550     {
16551         return this;
16552     }
16553     else
16554     {
16555         return impInlineInfo->InlineRoot;
16556     }
16557 }
16558
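// Spill clique membership is tracked on the root compiler of the inlining tree, so all inlinees
// share a single pair of pred/succ membership tables.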
16559 BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk)
16560 {
16561     if (predOrSucc == SpillCliquePred)
16562     {
16563         return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd());
16564     }
16565     else
16566     {
16567         assert(predOrSucc == SpillCliqueSucc);
16568         return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd());
16569     }
16570 }
16571
16572 void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val)
16573 {
16574     if (predOrSucc == SpillCliquePred)
16575     {
16576         impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val);
16577     }
16578     else
16579     {
16580         assert(predOrSucc == SpillCliqueSucc);
16581         impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val);
16582     }
16583 }
16584
16585 /*****************************************************************************
16586  *
16587  *  Convert the instrs ("import") into our internal format (trees). The
16588  *  basic flowgraph has already been constructed and is passed in.
16589  */
16590
16591 void Compiler::impImport(BasicBlock* method)
16592 {
16593 #ifdef DEBUG
16594     if (verbose)
16595     {
16596         printf("*************** In impImport() for %s\n", info.compFullName);
16597     }
16598 #endif
16599
16600     /* Allocate the stack contents */
16601
16602     if (info.compMaxStack <= sizeof(impSmallStack) / sizeof(impSmallStack[0]))
16603     {
16604         /* Use local variable, don't waste time allocating on the heap */
16605
16606         impStkSize              = sizeof(impSmallStack) / sizeof(impSmallStack[0]);
16607         verCurrentState.esStack = impSmallStack;
16608     }
16609     else
16610     {
16611         impStkSize              = info.compMaxStack;
16612         verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
16613     }
16614
16615     // initialize the entry state at start of method
16616     verInitCurrentState();
16617
16618     // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase).
16619     Compiler* inlineRoot = impInlineRoot();
16620     if (this == inlineRoot) // These are only used on the root of the inlining tree.
16621     {
16622         // We have initialized these previously, but to size 0.  Make them larger.
16623         impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2);
16624         impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2);
16625         impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2);
16626     }
16627     inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2);
16628     inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2);
16629     inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2);
16630     impBlockListNodeFreeList = nullptr;
16631
16632 #ifdef DEBUG
16633     impLastILoffsStmt   = nullptr;
16634     impNestedStackSpill = false;
16635 #endif
16636     impBoxTemp = BAD_VAR_NUM;
16637
16638     impPendingList = impPendingFree = nullptr;
16639
16640     /* Add the entry-point to the worker-list */
16641
16642     // Skip leading internal blocks. There can be one as a leading scratch BB, and more
16643     // from EH normalization.
16644     // NOTE: It might be possible to always just put fgFirstBB on the pending list, and let everything else just fall
16645     // out.
16646     for (; method->bbFlags & BBF_INTERNAL; method = method->bbNext)
16647     {
16648         // Treat these as imported.
16649         assert(method->bbJumpKind == BBJ_NONE); // We assume all the leading ones are fallthrough.
16650         JITDUMP("Marking leading BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", method->bbNum);
16651         method->bbFlags |= BBF_IMPORTED;
16652     }
16653
16654     impImportBlockPending(method);
16655
16656     /* Import blocks in the worker-list until there are no more */
16657
16658     while (impPendingList)
16659     {
16660         /* Remove the entry at the front of the list */
16661
16662         PendingDsc* dsc = impPendingList;
16663         impPendingList  = impPendingList->pdNext;
16664         impSetPendingBlockMember(dsc->pdBB, 0);
16665
16666         /* Restore the stack state */
16667
16668         verCurrentState.thisInitialized = dsc->pdThisPtrInit;
16669         verCurrentState.esStackDepth    = dsc->pdSavedStack.ssDepth;
16670         if (verCurrentState.esStackDepth)
16671         {
16672             impRestoreStackState(&dsc->pdSavedStack);
16673         }
16674
16675         /* Add the entry to the free list for reuse */
16676
16677         dsc->pdNext    = impPendingFree;
16678         impPendingFree = dsc;
16679
16680         /* Now import the block */
16681
16682         if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION)
16683         {
16684
16685 #ifdef _TARGET_64BIT_
16686             // On AMD64, during verification we have to match JIT64 behavior since the VM is very tightly
16687             // coupled with the JIT64 IL Verification logic.  Look inside verHandleVerificationFailure
16688             // method for further explanation on why we raise this exception instead of making the jitted
16689             // code throw the verification exception during execution.
16690             if (tiVerificationNeeded && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
16691             {
16692                 BADCODE("Basic block marked as not verifiable");
16693             }
16694             else
16695 #endif // _TARGET_64BIT_
16696             {
16697                 verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true));
16698                 impEndTreeList(dsc->pdBB);
16699             }
16700         }
16701         else
16702         {
16703             impImportBlock(dsc->pdBB);
16704
16705             if (compDonotInline())
16706             {
16707                 return;
16708             }
16709             if (compIsForImportOnly() && !tiVerificationNeeded)
16710             {
16711                 return;
16712             }
16713         }
16714     }
16715
16716 #ifdef DEBUG
16717     if (verbose && info.compXcptnsCount)
16718     {
16719         printf("\nAfter impImport() added block for try,catch,finally");
16720         fgDispBasicBlocks();
16721         printf("\n");
16722     }
16723
16724     // Used in impImportBlockPending() for STRESS_CHK_REIMPORT
16725     for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
16726     {
16727         block->bbFlags &= ~BBF_VISITED;
16728     }
16729 #endif
16730
16731     assert(!compIsForInlining() || !tiVerificationNeeded);
16732 }
16733
16734 // Checks if a typeinfo (usually stored in the type stack) is a struct.
16735 // The invariant here is that if it's not a ref or a method and has a class handle,
16736 // it's a valuetype.
16737 bool Compiler::impIsValueType(typeInfo* pTypeInfo)
16738 {
16739     if (pTypeInfo && pTypeInfo->IsValueClassWithClsHnd())
16740     {
16741         return true;
16742     }
16743     else
16744     {
16745         return false;
16746     }
16747 }
16748
16749 /*****************************************************************************
16750  *  Check to see if the tree is the address of a local or
16751     the address of a field in a local.
16752
16753     *lclVarTreeOut will contain the GT_LCL_VAR tree when it returns TRUE.
16754
16755  */
16756
16757 BOOL Compiler::impIsAddressInLocal(GenTreePtr tree, GenTreePtr* lclVarTreeOut)
16758 {
16759     if (tree->gtOper != GT_ADDR)
16760     {
16761         return FALSE;
16762     }
16763
16764     GenTreePtr op = tree->gtOp.gtOp1;
16765     while (op->gtOper == GT_FIELD)
16766     {
16767         op = op->gtField.gtFldObj;
16768         if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL.
16769         {
16770             op = op->gtOp.gtOp1;
16771         }
16772         else
16773         {
16774             return false;
16775         }
16776     }
16777
16778     if (op->gtOper == GT_LCL_VAR)
16779     {
16780         *lclVarTreeOut = op;
16781         return TRUE;
16782     }
16783     else
16784     {
16785         return FALSE;
16786     }
16787 }
16788
16789 //------------------------------------------------------------------------
16790 // impMakeDiscretionaryInlineObservations: make observations that help
16791 // determine the profitability of a discretionary inline
16792 //
16793 // Arguments:
16794 //    pInlineInfo -- InlineInfo for the inline, or null for the prejit root
16795 //    inlineResult -- InlineResult accumulating information about this inline
16796 //
16797 // Notes:
16798 //    If inlining or prejitting the root, this method also makes
16799 //    various observations about the method that factor into inline
16800 //    decisions. It sets `compNativeSizeEstimate` as a side effect.
16801
16802 void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult)
16803 {
16804     assert(pInlineInfo != nullptr && compIsForInlining() || // Perform the actual inlining.
16805            pInlineInfo == nullptr && !compIsForInlining()   // Calculate the static inlining hint for ngen.
16806            );
16807
16808     // If we're really inlining, we should just have one result in play.
16809     assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult));
16810
16811     // If this is a "forceinline" method, the JIT probably shouldn't have gone
16812     // to the trouble of estimating the native code size. Even if it did, it
16813     // shouldn't be relying on the result of this method.
16814     assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE);
16815
16816     // Note if the caller contains NEWOBJ or NEWARR.
16817     Compiler* rootCompiler = impInlineRoot();
16818
16819     if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0)
16820     {
16821         inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY);
16822     }
16823
16824     if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0)
16825     {
16826         inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ);
16827     }
16828
16829     bool calleeIsStatic  = (info.compFlags & CORINFO_FLG_STATIC) != 0;
16830     bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0;
16831
16832     if (isSpecialMethod)
16833     {
16834         if (calleeIsStatic)
16835         {
16836             inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR);
16837         }
16838         else
16839         {
16840             inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR);
16841         }
16842     }
16843     else if (!calleeIsStatic)
16844     {
16845         // Callee is an instance method.
16846         //
16847         // Check if the callee has the same 'this' as the root.
16848         if (pInlineInfo != nullptr)
16849         {
16850             GenTreePtr thisArg = pInlineInfo->iciCall->gtCall.gtCallObjp;
16851             assert(thisArg);
16852             bool isSameThis = impIsThis(thisArg);
16853             inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis);
16854         }
16855     }
16856
16857     // Note if the callee's class is a promotable struct
16858     if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0)
16859     {
16860         lvaStructPromotionInfo structPromotionInfo;
16861         lvaCanPromoteStructType(info.compClassHnd, &structPromotionInfo, false);
16862         if (structPromotionInfo.canPromote)
16863         {
16864             inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE);
16865         }
16866     }
16867
16868 #ifdef FEATURE_SIMD
16869
16870     // Note if this method has SIMD args or a SIMD return value
16871     if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn)
16872     {
16873         inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD);
16874     }
16875
16876 #endif // FEATURE_SIMD
16877
16878     // Roughly classify callsite frequency.
16879     InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED;
16880
16881     // If this is a prejit root, or a maximally hot block...
16882     if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->bbWeight >= BB_MAX_WEIGHT))
16883     {
16884         frequency = InlineCallsiteFrequency::HOT;
16885     }
16886     // No training data.  Look for loop-like things.
16887     // We consider a recursive call loop-like.  Do not give the inlining boost to the method itself.
16888     // However, give it to things nearby.
16889     else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) &&
16890              (pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle))
16891     {
16892         frequency = InlineCallsiteFrequency::LOOP;
16893     }
16894     else if ((pInlineInfo->iciBlock->bbFlags & BBF_PROF_WEIGHT) && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT))
16895     {
16896         frequency = InlineCallsiteFrequency::WARM;
16897     }
16898     // Now modify the multiplier based on where we're called from.
16899     else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR))
16900     {
16901         frequency = InlineCallsiteFrequency::RARE;
16902     }
16903     else
16904     {
16905         frequency = InlineCallsiteFrequency::BORING;
16906     }
16907
16908     // Also capture the block weight of the call site.  In the prejit
16909     // root case, assume there's some hot call site for this method.
16910     unsigned weight = 0;
16911
16912     if (pInlineInfo != nullptr)
16913     {
16914         weight = pInlineInfo->iciBlock->bbWeight;
16915     }
16916     else
16917     {
16918         weight = BB_MAX_WEIGHT;
16919     }
16920
16921     inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency));
16922     inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, static_cast<int>(weight));
16923 }
16924
16925 /*****************************************************************************
16926  This method makes a STATIC inlining decision based on the IL code.
16927  It should not make any inlining decision based on the context.
16928  If forceInline is true, then the inlining decision should not depend on
16929  performance heuristics (code size, etc.).
16930  */
16931
16932 void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
16933                               CORINFO_METHOD_INFO*  methInfo,
16934                               bool                  forceInline,
16935                               InlineResult*         inlineResult)
16936 {
16937     unsigned codeSize = methInfo->ILCodeSize;
16938
16939     // We shouldn't have made up our minds yet...
16940     assert(!inlineResult->IsDecided());
16941
16942     if (methInfo->EHcount)
16943     {
16944         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH);
16945         return;
16946     }
16947
16948     if ((methInfo->ILCode == nullptr) || (codeSize == 0))
16949     {
16950         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY);
16951         return;
16952     }
16953
16954     // For now we don't inline varargs (import code can't handle it)
16955
16956     if (methInfo->args.isVarArg())
16957     {
16958         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
16959         return;
16960     }
16961
16962     // Reject if it has too many locals.
16963     // This is currently an implementation limit due to fixed-size arrays in the
16964     // inline info, rather than a performance heuristic.
16965
16966     inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs);
16967
16968     if (methInfo->locals.numArgs > MAX_INL_LCLS)
16969     {
16970         inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS);
16971         return;
16972     }
16973
16974     // Make sure there aren't too many arguments.
16975     // This is currently an implementation limit due to fixed-size arrays in the
16976     // inline info, rather than a performance heuristic.
16977
16978     inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs);
16979
16980     if (methInfo->args.numArgs > MAX_INL_ARGS)
16981     {
16982         inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS);
16983         return;
16984     }
16985
16986     // Note force inline state
16987
16988     inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline);
16989
16990     // Note IL code size
16991
16992     inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize);
16993
16994     if (inlineResult->IsFailure())
16995     {
16996         return;
16997     }
16998
16999     // Make sure maxstack is not too big
17000
17001     inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack);
17002
17003     if (inlineResult->IsFailure())
17004     {
17005         return;
17006     }
17007 }
17008
17009 /*****************************************************************************
17010  */
17011
17012 void Compiler::impCheckCanInline(GenTreePtr             call,
17013                                  CORINFO_METHOD_HANDLE  fncHandle,
17014                                  unsigned               methAttr,
17015                                  CORINFO_CONTEXT_HANDLE exactContextHnd,
17016                                  InlineCandidateInfo**  ppInlineCandidateInfo,
17017                                  InlineResult*          inlineResult)
17018 {
17019     // Either the EE or the JIT might throw exceptions below.
17020     // If that happens, just don't inline the method.
17021
17022     struct Param
17023     {
17024         Compiler*              pThis;
17025         GenTreePtr             call;
17026         CORINFO_METHOD_HANDLE  fncHandle;
17027         unsigned               methAttr;
17028         CORINFO_CONTEXT_HANDLE exactContextHnd;
17029         InlineResult*          result;
17030         InlineCandidateInfo**  ppInlineCandidateInfo;
17031     } param = {nullptr};
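    // The callback passed to eeRunWithErrorTrap below is a capture-less lambda, so everything it
    // needs is marshalled through this Param struct.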
17032
17033     param.pThis                 = this;
17034     param.call                  = call;
17035     param.fncHandle             = fncHandle;
17036     param.methAttr              = methAttr;
17037     param.exactContextHnd       = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle);
17038     param.result                = inlineResult;
17039     param.ppInlineCandidateInfo = ppInlineCandidateInfo;
17040
17041     bool success = eeRunWithErrorTrap<Param>(
17042         [](Param* pParam) {
17043             DWORD                  dwRestrictions = 0;
17044             CorInfoInitClassResult initClassResult;
17045
17046 #ifdef DEBUG
17047             const char* methodName;
17048             const char* className;
17049             methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className);
17050
17051             if (JitConfig.JitNoInline())
17052             {
17053                 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE);
17054                 goto _exit;
17055             }
17056 #endif
17057
17058             /* Try to get the code address/size for the method */
17059
17060             CORINFO_METHOD_INFO methInfo;
17061             if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo))
17062             {
17063                 pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO);
17064                 goto _exit;
17065             }
17066
17067             bool forceInline;
17068             forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE);
17069
17070             pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result);
17071
17072             if (pParam->result->IsFailure())
17073             {
17074                 assert(pParam->result->IsNever());
17075                 goto _exit;
17076             }
17077
17078             // Speculatively check if initClass() can be done.
17079             // If it can be done, we will try to inline the method. If inlining
17080             // succeeds, then we will do the non-speculative initClass() and commit it.
17081             // If this speculative call to initClass() fails, there is no point
17082             // trying to inline this method.
17083             initClassResult =
17084                 pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */,
17085                                                            pParam->exactContextHnd /* context */,
17086                                                            TRUE /* speculative */);
17087
17088             if (initClassResult & CORINFO_INITCLASS_DONT_INLINE)
17089             {
17090                 pParam->result->NoteFatal(InlineObservation::CALLSITE_CLASS_INIT_FAILURE_SPEC);
17091                 goto _exit;
17092             }
17093
17094             // Give the EE the final say in whether to inline or not.
17095             // This should be last since, for verifiable code, this can be expensive.
17096
17097             /* VM Inline check also ensures that the method is verifiable if needed */
17098             CorInfoInline vmResult;
17099             vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle,
17100                                                                   &dwRestrictions);
17101
17102             if (vmResult == INLINE_FAIL)
17103             {
17104                 pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE);
17105             }
17106             else if (vmResult == INLINE_NEVER)
17107             {
17108                 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE);
17109             }
17110
17111             if (pParam->result->IsFailure())
17112             {
17113                 // Make sure not to report this one.  It was already reported by the VM.
17114                 pParam->result->SetReported();
17115                 goto _exit;
17116             }
17117
17118             // check for unsupported inlining restrictions
17119             assert((dwRestrictions & ~(INLINE_RESPECT_BOUNDARY | INLINE_NO_CALLEE_LDSTR | INLINE_SAME_THIS)) == 0);
17120
17121             if (dwRestrictions & INLINE_SAME_THIS)
17122             {
17123                 GenTreePtr thisArg = pParam->call->gtCall.gtCallObjp;
17124                 assert(thisArg);
17125
17126                 if (!pParam->pThis->impIsThis(thisArg))
17127                 {
17128                     pParam->result->NoteFatal(InlineObservation::CALLSITE_REQUIRES_SAME_THIS);
17129                     goto _exit;
17130                 }
17131             }
17132
17133             /* Get the method properties */
17134
17135             CORINFO_CLASS_HANDLE clsHandle;
17136             clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle);
17137             unsigned clsAttr;
17138             clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle);
17139
17140             /* Get the return type */
17141
17142             var_types fncRetType;
17143             fncRetType = pParam->call->TypeGet();
17144
17145 #ifdef DEBUG
17146             var_types fncRealRetType;
17147             fncRealRetType = JITtype2varType(methInfo.args.retType);
17148
17149             assert((genActualType(fncRealRetType) == genActualType(fncRetType)) ||
17150                    // <BUGNUM> VSW 288602 </BUGNUM>
17151                    // In case of IJW, we allow assigning a native pointer to a BYREF.
17152                    (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) ||
17153                    (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT)));
17154 #endif
17155
17156             //
17157             // Allocate an InlineCandidateInfo structure
17158             //
17159             InlineCandidateInfo* pInfo;
17160             pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo;
17161
17162             pInfo->dwRestrictions  = dwRestrictions;
17163             pInfo->methInfo        = methInfo;
17164             pInfo->methAttr        = pParam->methAttr;
17165             pInfo->clsHandle       = clsHandle;
17166             pInfo->clsAttr         = clsAttr;
17167             pInfo->fncRetType      = fncRetType;
17168             pInfo->exactContextHnd = pParam->exactContextHnd;
17169             pInfo->ilCallerHandle  = pParam->pThis->info.compMethodHnd;
17170             pInfo->initClassResult = initClassResult;
17171
17172             *(pParam->ppInlineCandidateInfo) = pInfo;
17173
17174         _exit:;
17175         },
17176         &param);
17177     if (!success)
17178     {
17179         param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR);
17180     }
17181 }
17182
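//-----------------------------------------------------------------------------
// impInlineRecordArgInfo: records information about a single actual argument
// of an inline candidate call in pInlineInfo->inlArgInfo[argNum]: whether it
// is invariant, a caller local, a byref to a struct local, or has global
// references or other side effects. Notes a fatal observation on inlineResult
// if the argument makes the call ineligible for inlining (e.g. mkrefany or a
// constant null 'this').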
17183 void Compiler::impInlineRecordArgInfo(InlineInfo*   pInlineInfo,
17184                                       GenTreePtr    curArgVal,
17185                                       unsigned      argNum,
17186                                       InlineResult* inlineResult)
17187 {
17188     InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum];
17189
17190     if (curArgVal->gtOper == GT_MKREFANY)
17191     {
17192         inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY);
17193         return;
17194     }
17195
17196     inlCurArgInfo->argNode = curArgVal;
17197
17198     GenTreePtr lclVarTree;
17199     if (impIsAddressInLocal(curArgVal, &lclVarTree) && varTypeIsStruct(lclVarTree))
17200     {
17201         inlCurArgInfo->argIsByRefToStructLocal = true;
17202 #ifdef FEATURE_SIMD
17203         if (lvaTable[lclVarTree->AsLclVarCommon()->gtLclNum].lvSIMDType)
17204         {
17205             pInlineInfo->hasSIMDTypeArgLocalOrReturn = true;
17206         }
17207 #endif // FEATURE_SIMD
17208     }
17209
17210     if (curArgVal->gtFlags & GTF_ALL_EFFECT)
17211     {
17212         inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0;
17213         inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0;
17214     }
17215
17216     if (curArgVal->gtOper == GT_LCL_VAR)
17217     {
17218         inlCurArgInfo->argIsLclVar = true;
17219
17220         /* Remember the "original" argument number */
17221         curArgVal->gtLclVar.gtLclILoffs = argNum;
17222     }
17223
17224     if ((curArgVal->OperKind() & GTK_CONST) ||
17225         ((curArgVal->gtOper == GT_ADDR) && (curArgVal->gtOp.gtOp1->gtOper == GT_LCL_VAR)))
17226     {
17227         inlCurArgInfo->argIsInvariant = true;
17228         if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->gtIntCon.gtIconVal == 0))
17229         {
17230             /* Abort, but do not mark as not inlinable */
17231             inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS);
17232             return;
17233         }
17234     }
17235
17236     if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal))
17237     {
17238         inlCurArgInfo->argHasLdargaOp = true;
17239     }
17240
17241 #ifdef DEBUG
17242     if (verbose)
17243     {
17244         if (inlCurArgInfo->argIsThis)
17245         {
17246             printf("thisArg:");
17247         }
17248         else
17249         {
17250             printf("\nArgument #%u:", argNum);
17251         }
17252         if (inlCurArgInfo->argIsLclVar)
17253         {
17254             printf(" is a local var");
17255         }
17256         if (inlCurArgInfo->argIsInvariant)
17257         {
17258             printf(" is a constant");
17259         }
17260         if (inlCurArgInfo->argHasGlobRef)
17261         {
17262             printf(" has global refs");
17263         }
17264         if (inlCurArgInfo->argHasSideEff)
17265         {
17266             printf(" has side effects");
17267         }
17268         if (inlCurArgInfo->argHasLdargaOp)
17269         {
17270             printf(" has ldarga effect");
17271         }
17272         if (inlCurArgInfo->argHasStargOp)
17273         {
17274             printf(" has starg effect");
17275         }
17276         if (inlCurArgInfo->argIsByRefToStructLocal)
17277         {
17278             printf(" is byref to a struct local");
17279         }
17280
17281         printf("\n");
17282         gtDispTree(curArgVal);
17283         printf("\n");
17284     }
17285 #endif
17286 }
17287
17288 /*****************************************************************************
17289  *
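 *  impInlineInitVars: build the inlArgInfo and lclVarInfo tables for an
 *  inline candidate: record information about 'this' and each actual
 *  argument, reconcile the argument tree types with the inlinee signature,
 *  and record the types of the inlinee's locals (noting pinned locals and
 *  structs with GC fields along the way).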
17290  */
17291
17292 void Compiler::impInlineInitVars(InlineInfo* pInlineInfo)
17293 {
17294     assert(!compIsForInlining());
17295
17296     GenTreePtr           call         = pInlineInfo->iciCall;
17297     CORINFO_METHOD_INFO* methInfo     = &pInlineInfo->inlineCandidateInfo->methInfo;
17298     unsigned             clsAttr      = pInlineInfo->inlineCandidateInfo->clsAttr;
17299     InlArgInfo*          inlArgInfo   = pInlineInfo->inlArgInfo;
17300     InlLclVarInfo*       lclVarInfo   = pInlineInfo->lclVarInfo;
17301     InlineResult*        inlineResult = pInlineInfo->inlineResult;
17302
17303     const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo);
17304
17305     /* Init the argument struct */
17306
17307     memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0]));
17308
17309     /* Get hold of the 'this' pointer and the argument list proper */
17310
17311     GenTreePtr thisArg = call->gtCall.gtCallObjp;
17312     GenTreePtr argList = call->gtCall.gtCallArgs;
17313     unsigned   argCnt  = 0; // Count of the arguments
17314
17315     assert((methInfo->args.hasThis()) == (thisArg != nullptr));
17316
17317     if (thisArg)
17318     {
17319         inlArgInfo[0].argIsThis = true;
17320
17321         impInlineRecordArgInfo(pInlineInfo, thisArg, argCnt, inlineResult);
17322
17323         if (inlineResult->IsFailure())
17324         {
17325             return;
17326         }
17327
17328         /* Increment the argument count */
17329         argCnt++;
17330     }
17331
17332     /* Record some information about each of the arguments */
17333     bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0;
17334
17335 #if USER_ARGS_COME_LAST
17336     unsigned typeCtxtArg = thisArg ? 1 : 0;
17337 #else  // USER_ARGS_COME_LAST
17338     unsigned typeCtxtArg = methInfo->args.totalILArgs();
17339 #endif // USER_ARGS_COME_LAST
17340
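    // Walk the actual argument list. The return buffer argument (if present) and
    // the generic context argument are skipped below: they are appended by the
    // jit and are not IL arguments of the inlinee.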
17341     for (GenTreePtr argTmp = argList; argTmp; argTmp = argTmp->gtOp.gtOp2)
17342     {
17343         if (argTmp == argList && hasRetBuffArg)
17344         {
17345             continue;
17346         }
17347
17348         // Ignore the type context argument
17349         if (hasTypeCtxtArg && (argCnt == typeCtxtArg))
17350         {
17351             typeCtxtArg = 0xFFFFFFFF;
17352             continue;
17353         }
17354
17355         assert(argTmp->gtOper == GT_LIST);
17356         GenTreePtr argVal = argTmp->gtOp.gtOp1;
17357
17358         impInlineRecordArgInfo(pInlineInfo, argVal, argCnt, inlineResult);
17359
17360         if (inlineResult->IsFailure())
17361         {
17362             return;
17363         }
17364
17365         /* Increment the argument count */
17366         argCnt++;
17367     }
17368
17369     /* Make sure we got the arg number right */
17370     assert(argCnt == methInfo->args.totalILArgs());
17371
17372 #ifdef FEATURE_SIMD
17373     bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn;
17374 #endif // FEATURE_SIMD
17375
17376     /* We have typeless opcodes, get type information from the signature */
17377
17378     if (thisArg)
17379     {
17380         var_types sigType;
17381
17382         if (clsAttr & CORINFO_FLG_VALUECLASS)
17383         {
17384             sigType = TYP_BYREF;
17385         }
17386         else
17387         {
17388             sigType = TYP_REF;
17389         }
17390
17391         lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle);
17392         lclVarInfo[0].lclHasLdlocaOp = false;
17393
17394 #ifdef FEATURE_SIMD
17395         // We always want to check isSIMDClass, since we want to set foundSIMDType (to increase
17396         // the inlining multiplier) for anything in that assembly.
17397         // But we only need to normalize it if it is a TYP_STRUCT
17398         // (which we need to do even if we have already set foundSIMDType).
17399         if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
17400         {
17401             if (sigType == TYP_STRUCT)
17402             {
17403                 sigType = impNormStructType(lclVarInfo[0].lclVerTypeInfo.GetClassHandle());
17404             }
17405             foundSIMDType = true;
17406         }
17407 #endif // FEATURE_SIMD
17408         lclVarInfo[0].lclTypeInfo = sigType;
17409
17410         assert(varTypeIsGC(thisArg->gtType) ||   // "this" is managed
17411                (thisArg->gtType == TYP_I_IMPL && // "this" is unmanaged but the method's class doesn't care
17412                 (clsAttr & CORINFO_FLG_VALUECLASS)));
17413
17414         if (genActualType(thisArg->gtType) != genActualType(sigType))
17415         {
17416             if (sigType == TYP_REF)
17417             {
17418                 /* The argument cannot be bashed into a ref (see bug 750871) */
17419                 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF);
17420                 return;
17421             }
17422
17423             /* This can only happen with byrefs <-> ints/shorts */
17424
17425             assert(genActualType(sigType) == TYP_I_IMPL || sigType == TYP_BYREF);
17426             assert(genActualType(thisArg->gtType) == TYP_I_IMPL || thisArg->gtType == TYP_BYREF);
17427
17428             if (sigType == TYP_BYREF)
17429             {
17430                 lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17431             }
17432             else if (thisArg->gtType == TYP_BYREF)
17433             {
17434                 assert(sigType == TYP_I_IMPL);
17435
17436                 /* If possible change the BYREF to an int */
17437                 if (thisArg->IsVarAddr())
17438                 {
17439                     thisArg->gtType              = TYP_I_IMPL;
17440                     lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17441                 }
17442                 else
17443                 {
17444                     /* Arguments 'int <- byref' cannot be bashed */
17445                     inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
17446                     return;
17447                 }
17448             }
17449         }
17450     }
17451
17452     /* Init the types of the arguments and make sure the types
17453      * from the trees match the types in the signature */
17454
17455     CORINFO_ARG_LIST_HANDLE argLst;
17456     argLst = methInfo->args.args;
17457
17458     unsigned i;
17459     for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst))
17460     {
17461         var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args);
17462
17463         lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst);
17464
17465 #ifdef FEATURE_SIMD
17466         if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
17467         {
17468             // If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've
17469             // found a SIMD type, even if this may not be a type we recognize (the assumption is that
17470             // it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier).
17471             foundSIMDType = true;
17472             if (sigType == TYP_STRUCT)
17473             {
17474                 var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle());
17475                 sigType              = structType;
17476             }
17477         }
17478 #endif // FEATURE_SIMD
17479
17480         lclVarInfo[i].lclTypeInfo    = sigType;
17481         lclVarInfo[i].lclHasLdlocaOp = false;
17482
17483         /* Does the tree type match the signature type? */
17484
17485         GenTreePtr inlArgNode = inlArgInfo[i].argNode;
17486
17487         if (sigType != inlArgNode->gtType)
17488         {
17489             /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints,
17490                but in bad IL cases with caller-callee signature mismatches we can see other types.
17491                Intentionally reject cases with mismatches so the jit is more flexible when
17492                encountering bad IL. */
17493
17494             bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) ||
17495                                         (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) ||
17496                                         (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType));
17497
17498             if (!isPlausibleTypeMatch)
17499             {
17500                 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE);
17501                 return;
17502             }
17503
17504             /* Is it a narrowing or widening cast?
17505              * Widening casts are ok since the value computed is already
17506              * normalized to an int (on the IL stack) */
17507
17508             if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType))
17509             {
17510                 if (sigType == TYP_BYREF)
17511                 {
17512                     lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17513                 }
17514                 else if (inlArgNode->gtType == TYP_BYREF)
17515                 {
17516                     assert(varTypeIsIntOrI(sigType));
17517
17518                     /* If possible bash the BYREF to an int */
17519                     if (inlArgNode->IsVarAddr())
17520                     {
17521                         inlArgNode->gtType           = TYP_I_IMPL;
17522                         lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17523                     }
17524                     else
17525                     {
17526                         /* Arguments 'int <- byref' cannot be changed */
17527                         inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
17528                         return;
17529                     }
17530                 }
17531                 else if (genTypeSize(sigType) < EA_PTRSIZE)
17532                 {
17533                     /* Narrowing cast */
17534
17535                     if (inlArgNode->gtOper == GT_LCL_VAR &&
17536                         !lvaTable[inlArgNode->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad() &&
17537                         sigType == lvaGetRealType(inlArgNode->gtLclVarCommon.gtLclNum))
17538                     {
17539                         /* We don't need to insert a cast here as the variable
17540                            was assigned a normalized value of the right type */
17541
17542                         continue;
17543                     }
17544
17545                     inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(TYP_INT, inlArgNode, sigType);
17546
17547                     inlArgInfo[i].argIsLclVar = false;
17548
17549                     /* Try to fold the node in case we have constant arguments */
17550
17551                     if (inlArgInfo[i].argIsInvariant)
17552                     {
17553                         inlArgNode            = gtFoldExprConst(inlArgNode);
17554                         inlArgInfo[i].argNode = inlArgNode;
17555                         assert(inlArgNode->OperIsConst());
17556                     }
17557                 }
17558 #ifdef _TARGET_64BIT_
17559                 else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType))
17560                 {
17561                     // This should only happen for int -> native int widening
17562                     inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(genActualType(sigType), inlArgNode, sigType);
17563
17564                     inlArgInfo[i].argIsLclVar = false;
17565
17566                     /* Try to fold the node in case we have constant arguments */
17567
17568                     if (inlArgInfo[i].argIsInvariant)
17569                     {
17570                         inlArgNode            = gtFoldExprConst(inlArgNode);
17571                         inlArgInfo[i].argNode = inlArgNode;
17572                         assert(inlArgNode->OperIsConst());
17573                     }
17574                 }
17575 #endif // _TARGET_64BIT_
17576             }
17577         }
17578     }
17579
17580     /* Init the types of the local variables */
17581
17582     CORINFO_ARG_LIST_HANDLE localsSig;
17583     localsSig = methInfo->locals.args;
17584
17585     for (i = 0; i < methInfo->locals.numArgs; i++)
17586     {
17587         bool      isPinned;
17588         var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned);
17589
17590         lclVarInfo[i + argCnt].lclHasLdlocaOp = false;
17591         lclVarInfo[i + argCnt].lclIsPinned    = isPinned;
17592         lclVarInfo[i + argCnt].lclTypeInfo    = type;
17593
17594         if (isPinned)
17595         {
17596             // Pinned locals may cause inlines to fail.
17597             inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS);
17598             if (inlineResult->IsFailure())
17599             {
17600                 return;
17601             }
17602         }
17603
17604         lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig);
17605
17606         // If this local is a struct type with GC fields, inform the inliner. It may choose to bail
17607         // out on the inline.
17608         if (type == TYP_STRUCT)
17609         {
17610             CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle();
17611             DWORD                typeFlags = info.compCompHnd->getClassAttribs(lclHandle);
17612             if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
17613             {
17614                 inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
17615                 if (inlineResult->IsFailure())
17616                 {
17617                     return;
17618                 }
17619
17620                 // Do further notification in the case where the call site is rare; some policies do
17621                 // not track the relative hotness of call sites for "always" inline cases.
17622                 if (pInlineInfo->iciBlock->isRunRarely())
17623                 {
17624                     inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
17625                     if (inlineResult->IsFailure())
17626                     {
17628                         return;
17629                     }
17630                 }
17631             }
17632         }
17633
17634         localsSig = info.compCompHnd->getArgNext(localsSig);
17635
17636 #ifdef FEATURE_SIMD
17637         if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo)))
17638         {
17639             foundSIMDType = true;
17640             if (featureSIMD && type == TYP_STRUCT)
17641             {
17642                 var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle());
17643                 lclVarInfo[i + argCnt].lclTypeInfo = structType;
17644             }
17645         }
17646 #endif // FEATURE_SIMD
17647     }
17648
17649 #ifdef FEATURE_SIMD
17650     if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDClass(call->AsCall()->gtRetClsHnd))
17651     {
17652         foundSIMDType = true;
17653     }
17654     pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType;
17655 #endif // FEATURE_SIMD
17656 }
17657
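//-----------------------------------------------------------------------------
// impInlineFetchLocal: map an inlinee local to a temp lclVar in the inliner.
// The temp is allocated lazily on first use and remembered in
// impInlineInfo->lclTmpNum; its type, ldloca/pinned state, and struct handle
// are copied from the inlinee's local info.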
17658 unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason))
17659 {
17660     assert(compIsForInlining());
17661
17662     unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum];
17663
17664     if (tmpNum == BAD_VAR_NUM)
17665     {
17666         var_types lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
17667
17668         // The lifetime of this local might span multiple BBs.
17669         // So it is a long lifetime local.
17670         impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason));
17671
17672         lvaTable[tmpNum].lvType = lclTyp;
17673         if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclHasLdlocaOp)
17674         {
17675             lvaTable[tmpNum].lvHasLdAddrOp = 1;
17676         }
17677
17678         if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclIsPinned)
17679         {
17680             lvaTable[tmpNum].lvPinned = 1;
17681
17682             if (!impInlineInfo->hasPinnedLocals)
17683             {
17684                 // If the inlinee returns a value, use a spill temp
17685                 // for the return value to ensure that even in the case
17686                 // where the return expression refers to one of the
17687                 // pinned locals, we can unpin the local right after
17688                 // the inlined method body.
17689                 if ((info.compRetNativeType != TYP_VOID) && (lvaInlineeReturnSpillTemp == BAD_VAR_NUM))
17690                 {
17691                     lvaInlineeReturnSpillTemp =
17692                         lvaGrabTemp(false DEBUGARG("Inline candidate pinned local return spill temp"));
17693                     lvaTable[lvaInlineeReturnSpillTemp].lvType = info.compRetNativeType;
17694                 }
17695             }
17696
17697             impInlineInfo->hasPinnedLocals = true;
17698         }
17699
17700         if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo.IsStruct())
17701         {
17702             if (varTypeIsStruct(lclTyp))
17703             {
17704                 lvaSetStruct(tmpNum,
17705                              impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo.GetClassHandle(),
17706                              true /* unsafe value cls check */);
17707             }
17708             else
17709             {
17710                 // This is a wrapped primitive.  Make sure the verstate knows that
17711                 lvaTable[tmpNum].lvVerTypeInfo =
17712                     impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo;
17713             }
17714         }
17715     }
17716
17717     return tmpNum;
17718 }
17719
17720 // Returns the GenTree (usually a GT_LCL_VAR) representing an argument of the inlined method.
17721 // Only use this method for the arguments of the inlinee method.
17722 // !!! Do not use it for the locals of the inlinee method. !!!
17723
17724 GenTreePtr Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo)
17725 {
17726     /* Get the argument type */
17727     var_types lclTyp = lclVarInfo[lclNum].lclTypeInfo;
17728
17729     GenTreePtr op1 = nullptr;
17730
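    // Four cases below: (1) invariant arguments are cloned in place, (2) caller
    // locals are re-used directly, (3) byrefs to struct locals are cloned, and
    // (4) everything else is spilled to (and read back from) a temp.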
17731     // constant or address of local
17732     if (inlArgInfo[lclNum].argIsInvariant && !inlArgInfo[lclNum].argHasLdargaOp && !inlArgInfo[lclNum].argHasStargOp)
17733     {
17734         /* Clone the constant. Note that we cannot directly use argNode
17735         in the trees even if inlArgInfo[lclNum].argIsUsed==false as this
17736         would introduce aliasing between inlArgInfo[].argNode and
17737         impInlineExpr. Then gtFoldExpr() could change it, causing further
17738         references to the argument working off of the bashed copy. */
17739
17740         op1 = gtCloneExpr(inlArgInfo[lclNum].argNode);
17741         PREFIX_ASSUME(op1 != nullptr);
17742         inlArgInfo[lclNum].argTmpNum = (unsigned)-1; // illegal temp
17743     }
17744     else if (inlArgInfo[lclNum].argIsLclVar && !inlArgInfo[lclNum].argHasLdargaOp && !inlArgInfo[lclNum].argHasStargOp)
17745     {
17746         /* Argument is a local variable (of the caller)
17747          * Can we re-use the passed argument node? */
17748
17749         op1                          = inlArgInfo[lclNum].argNode;
17750         inlArgInfo[lclNum].argTmpNum = op1->gtLclVarCommon.gtLclNum;
17751
17752         if (inlArgInfo[lclNum].argIsUsed)
17753         {
17754             assert(op1->gtOper == GT_LCL_VAR);
17755             assert(lclNum == op1->gtLclVar.gtLclILoffs);
17756
17757             if (!lvaTable[op1->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad())
17758             {
17759                 lclTyp = genActualType(lclTyp);
17760             }
17761
17762             /* Create a new lcl var node - remember the argument lclNum */
17763             op1 = gtNewLclvNode(op1->gtLclVarCommon.gtLclNum, lclTyp, op1->gtLclVar.gtLclILoffs);
17764         }
17765     }
17766     else if (inlArgInfo[lclNum].argIsByRefToStructLocal && !inlArgInfo[lclNum].argHasStargOp)
17767     {
17768         /* Argument is a by-ref address to a struct, a normed struct, or its field.
17769            In these cases, don't spill the byref to a local, simply clone the tree and use it.
17770            This way we will increase the chance for this byref to be optimized away by
17771            a subsequent "dereference" operation.
17772
17773            From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree
17774            (in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal.
17775            For example, if the caller is:
17776                 ldloca.s   V_1  // V_1 is a local struct
17777                 call       void Test.ILPart::RunLdargaOnPointerArg(int32*)
17778            and the callee being inlined has:
17779                 .method public static void  RunLdargaOnPointerArg(int32* ptrToInts) cil managed
17780                     ldarga.s   ptrToInts
17781                     call       void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**)
17782            then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll
17783            soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR.
17784         */
17785         assert(inlArgInfo[lclNum].argNode->TypeGet() == TYP_BYREF ||
17786                inlArgInfo[lclNum].argNode->TypeGet() == TYP_I_IMPL);
17787         op1 = gtCloneExpr(inlArgInfo[lclNum].argNode);
17788     }
17789     else
17790     {
17791         /* Argument is a complex expression - it must be evaluated into a temp */
17792
17793         if (inlArgInfo[lclNum].argHasTmp)
17794         {
17795             assert(inlArgInfo[lclNum].argIsUsed);
17796             assert(inlArgInfo[lclNum].argTmpNum < lvaCount);
17797
17798             /* Create a new lcl var node - remember the argument lclNum */
17799             op1 = gtNewLclvNode(inlArgInfo[lclNum].argTmpNum, genActualType(lclTyp));
17800
17801             /* This is the second or later use of the this argument,
17802             so we have to use the temp (instead of the actual arg) */
17803             inlArgInfo[lclNum].argBashTmpNode = nullptr;
17804         }
17805         else
17806         {
17807             /* First time use */
17808             assert(inlArgInfo[lclNum].argIsUsed == false);
17809
17810             /* Reserve a temp for the expression.
17811             * Use a large size node as we may change it later */
17812
17813             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg"));
17814
17815             lvaTable[tmpNum].lvType = lclTyp;
17816             assert(lvaTable[tmpNum].lvAddrExposed == 0);
17817             if (inlArgInfo[lclNum].argHasLdargaOp)
17818             {
17819                 lvaTable[tmpNum].lvHasLdAddrOp = 1;
17820             }
17821
17822             if (lclVarInfo[lclNum].lclVerTypeInfo.IsStruct())
17823             {
17824                 if (varTypeIsStruct(lclTyp))
17825                 {
17826                     lvaSetStruct(tmpNum, impInlineInfo->lclVarInfo[lclNum].lclVerTypeInfo.GetClassHandle(),
17827                                  true /* unsafe value cls check */);
17828                 }
17829                 else
17830                 {
17831                     // This is a wrapped primitive.  Make sure the verstate knows that
17832                     lvaTable[tmpNum].lvVerTypeInfo = impInlineInfo->lclVarInfo[lclNum].lclVerTypeInfo;
17833                 }
17834             }
17835
17836             inlArgInfo[lclNum].argHasTmp = true;
17837             inlArgInfo[lclNum].argTmpNum = tmpNum;
17838
17839             // If we require strict exception order, then arguments must
17840             // be evaluated in sequence before the body of the inlined method.
17841             // So we need to evaluate them to a temp.
17842             // Also, if arguments have global references, we need to
17843             // evaluate them to a temp before the inlined body as the
17844             // inlined body may be modifying the global ref.
17845             // TODO-1stClassStructs: We currently do not reuse an existing lclVar
17846             // if it is a struct, because it requires some additional handling.
17847
17848             if (!varTypeIsStruct(lclTyp) && (!inlArgInfo[lclNum].argHasSideEff) && (!inlArgInfo[lclNum].argHasGlobRef))
17849             {
17850                 /* Get a *LARGE* LCL_VAR node */
17851                 op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp), lclNum);
17852
17853                 /* Record op1 as the very first use of this argument.
17854                 If there are no further uses of the arg, we may be
17855                 able to use the actual arg node instead of the temp.
17856                 If we do see any further uses, we will clear this. */
17857                 inlArgInfo[lclNum].argBashTmpNode = op1;
17858             }
17859             else
17860             {
17861                 /* Get a small LCL_VAR node */
17862                 op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp));
17863                 /* No bashing of this argument */
17864                 inlArgInfo[lclNum].argBashTmpNode = nullptr;
17865             }
17866         }
17867     }
17868
17869     /* Mark the argument as used */
17870
17871     inlArgInfo[lclNum].argIsUsed = true;
17872
17873     return op1;
17874 }
17875
17876 /******************************************************************************
17877  Is this the original "this" argument to the call being inlined?
17878
17879  Note that we do not inline methods with "starg 0", and so we do not need to
17880  worry about it.
17881 */
17882
17883 BOOL Compiler::impInlineIsThis(GenTreePtr tree, InlArgInfo* inlArgInfo)
17884 {
17885     assert(compIsForInlining());
17886     return (tree->gtOper == GT_LCL_VAR && tree->gtLclVarCommon.gtLclNum == inlArgInfo[0].argTmpNum);
17887 }
17888
17889 //-----------------------------------------------------------------------------
17890 // This function checks if a dereference in the inlinee can guarantee that
17891 // the "this" is non-NULL.
17892 // If we haven't hit a branch or a side effect, and we are dereferencing
17893 // from 'this' to access a field or make a GTF_CALL_NULLCHECK call,
17894 // then we can avoid a separate null pointer check.
17895 //
17896 // "additionalTreesToBeEvaluatedBefore"
17897 // is the set of pending trees that have not yet been added to the statement list,
17898 // and which have been removed from verCurrentState.esStack[]
17899
17900 BOOL Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTreePtr  additionalTreesToBeEvaluatedBefore,
17901                                                                   GenTreePtr  variableBeingDereferenced,
17902                                                                   InlArgInfo* inlArgInfo)
17903 {
17904     assert(compIsForInlining());
17905     assert(opts.OptEnabled(CLFLG_INLINING));
17906
17907     BasicBlock* block = compCurBB;
17908
17909     GenTreePtr stmt;
17910     GenTreePtr expr;
17911
17912     if (block != fgFirstBB)
17913     {
17914         return FALSE;
17915     }
17916
17917     if (!impInlineIsThis(variableBeingDereferenced, inlArgInfo))
17918     {
17919         return FALSE;
17920     }
17921
17922     if (additionalTreesToBeEvaluatedBefore &&
17923         GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTreesToBeEvaluatedBefore->gtFlags))
17924     {
17925         return FALSE;
17926     }
17927
17928     for (stmt = impTreeList->gtNext; stmt; stmt = stmt->gtNext)
17929     {
17930         expr = stmt->gtStmt.gtStmtExpr;
17931
17932         if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags))
17933         {
17934             return FALSE;
17935         }
17936     }
17937
17938     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
17939     {
17940         unsigned stackTreeFlags = verCurrentState.esStack[level].val->gtFlags;
17941         if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags))
17942         {
17943             return FALSE;
17944         }
17945     }
17946
17947     return TRUE;
17948 }
17949
17950 /******************************************************************************/
17951 // Check the inlining eligibility of this GT_CALL node.
17952 // Mark GTF_CALL_INLINE_CANDIDATE on the GT_CALL node
17953
17954 // Todo: find a way to record the failure reasons in the IR (or
17955 // otherwise build tree context) so when we do the inlining pass we
17956 // can capture these reasons
17957
17958 void Compiler::impMarkInlineCandidate(GenTreePtr             callNode,
17959                                       CORINFO_CONTEXT_HANDLE exactContextHnd,
17960                                       CORINFO_CALL_INFO*     callInfo)
17961 {
17962     // Let the strategy know there's another call
17963     impInlineRoot()->m_inlineStrategy->NoteCall();
17964
17965     if (!opts.OptEnabled(CLFLG_INLINING))
17966     {
17967         /* XXX Mon 8/18/2008
17968          * This assert is misleading.  The caller does not ensure that we have CLFLG_INLINING set before
17969          * calling impMarkInlineCandidate.  However, if this assert trips it means that we're an inlinee and
17970          * CLFLG_MINOPT is set.  That doesn't make a lot of sense.  If you hit this assert, work back and
17971          * figure out why we did not set MAXOPT for this compile.
17972          */
17973         assert(!compIsForInlining());
17974         return;
17975     }
17976
17977     if (compIsForImportOnly())
17978     {
17979         // Don't bother creating the inline candidate during verification.
17980         // Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification
17981         // that leads to the creation of multiple instances of Compiler.
17982         return;
17983     }
17984
17985     GenTreeCall* call = callNode->AsCall();
17986     InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate");
17987
17988     // Don't inline if not optimizing root method
17989     if (opts.compDbgCode)
17990     {
17991         inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN);
17992         return;
17993     }
17994
17995     // Don't inline if inlining into root method is disabled.
17996     if (InlineStrategy::IsNoInline(info.compCompHnd, info.compMethodHnd))
17997     {
17998         inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE);
17999         return;
18000     }
18001
18002     // Inlining candidate determination needs to honor only the IL tail prefix.
18003     // Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive).
18004     if (call->IsTailPrefixedCall())
18005     {
18006         inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX);
18007         return;
18008     }
18009
18010     // Tail recursion elimination takes precedence over inlining.
18011     // TODO: We may want to do some of the additional checks from fgMorphCall
18012     // here to reduce the chance we don't inline a call that won't be optimized
18013     // as a fast tail call or turned into a loop.
18014     if (gtIsRecursiveCall(call) && call->IsImplicitTailCall())
18015     {
18016         inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL);
18017         return;
18018     }
18019
18020     if ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT)
18021     {
18022         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT);
18023         return;
18024     }
18025
18026     /* Ignore helper calls */
18027
18028     if (call->gtCallType == CT_HELPER)
18029     {
18030         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER);
18031         return;
18032     }
18033
18034     /* Ignore indirect calls */
18035     if (call->gtCallType == CT_INDIRECT)
18036     {
18037         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED);
18038         return;
18039     }
18040
18041     /* I removed the check for BBJ_THROW.  BBJ_THROW is usually marked as rarely run.  This more or less
18042      * restricts the inliner to non-expanding inlines.  I removed the check to allow for non-expanding
18043      * inlining in throw blocks.  I should consider the same thing for catch and filter regions. */
18044
18045     CORINFO_METHOD_HANDLE fncHandle = call->gtCallMethHnd;
18046     unsigned              methAttr;
18047
18048     // Reuse method flags from the original callInfo if possible
18049     if (fncHandle == callInfo->hMethod)
18050     {
18051         methAttr = callInfo->methodFlags;
18052     }
18053     else
18054     {
18055         methAttr = info.compCompHnd->getMethodAttribs(fncHandle);
18056     }
18057
18058 #ifdef DEBUG
18059     if (compStressCompile(STRESS_FORCE_INLINE, 0))
18060     {
18061         methAttr |= CORINFO_FLG_FORCEINLINE;
18062     }
18063 #endif
18064
18065     // Check for COMPlus_AggressiveInlining
18066     if (compDoAggressiveInlining)
18067     {
18068         methAttr |= CORINFO_FLG_FORCEINLINE;
18069     }
18070
18071     if (!(methAttr & CORINFO_FLG_FORCEINLINE))
18072     {
18073         /* Don't bother inlining blocks that are in the catch handler region */
18074         if (bbInCatchHandlerILRange(compCurBB))
18075         {
18076 #ifdef DEBUG
18077             if (verbose)
18078             {
18079                 printf("\nWill not inline blocks that are in the catch handler region\n");
18080             }
18081
18082 #endif
18083
18084             inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH);
18085             return;
18086         }
18087
18088         if (bbInFilterILRange(compCurBB))
18089         {
18090 #ifdef DEBUG
18091             if (verbose)
18092             {
18093                 printf("\nWill not inline blocks that are in the filter region\n");
18094             }
18095 #endif
18096
18097             inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER);
18098             return;
18099         }
18100     }
18101
18102     /* If the caller's stack frame is marked, then we can't do any inlining. Period. */
18103
18104     if (opts.compNeedSecurityCheck)
18105     {
18106         inlineResult.NoteFatal(InlineObservation::CALLER_NEEDS_SECURITY_CHECK);
18107         return;
18108     }
18109
18110     /* Check if we tried to inline this method before */
18111
18112     if (methAttr & CORINFO_FLG_DONT_INLINE)
18113     {
18114         inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE);
18115         return;
18116     }
18117
18118     /* Cannot inline synchronized methods */
18119
18120     if (methAttr & CORINFO_FLG_SYNCH)
18121     {
18122         inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED);
18123         return;
18124     }
18125
18126     /* Do not inline if callee needs security checks (since they would then mark the wrong frame) */
18127
18128     if (methAttr & CORINFO_FLG_SECURITYCHECK)
18129     {
18130         inlineResult.NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
18131         return;
18132     }
18133
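    // The cheap, local checks above have all passed; run the more expensive
    // EE-dependent checks and, if they succeed, build the candidate info that
    // the inlining phase will consume.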
18134     InlineCandidateInfo* inlineCandidateInfo = nullptr;
18135     impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult);
18136
18137     if (inlineResult.IsFailure())
18138     {
18139         return;
18140     }
18141
18142     // The old value should be NULL
18143     assert(call->gtInlineCandidateInfo == nullptr);
18144
18145     call->gtInlineCandidateInfo = inlineCandidateInfo;
18146
18147     // Mark the call node as inline candidate.
18148     call->gtFlags |= GTF_CALL_INLINE_CANDIDATE;
18149
18150     // Let the strategy know there's another candidate.
18151     impInlineRoot()->m_inlineStrategy->NoteCandidate();
18152
18153     // Since we're not actually inlining yet, and this call site is
18154     // still just an inline candidate, there's nothing to report.
18155     inlineResult.SetReported();
18156 }
18157
18158 /******************************************************************************/
18159 // Returns true if the given intrinsic will be implemented by target-specific
18160 // instructions
18161
18162 bool Compiler::IsTargetIntrinsic(CorInfoIntrinsics intrinsicId)
18163 {
18164 #if defined(_TARGET_AMD64_) || (defined(_TARGET_X86_) && !defined(LEGACY_BACKEND))
18165     switch (intrinsicId)
18166     {
18167         // Amd64 only has SSE2 instructions to directly compute sqrt/abs.
18168         //
18169         // TODO: Because the x86 backend only targets SSE for floating-point code,
18170         //       it does not treat Sine, Cosine, or Round as intrinsics (JIT32
18171         //       implemented those intrinsics as x87 instructions). If this poses
18172         //       a CQ problem, it may be necessary to change the implementation of
18173         //       the helper calls to decrease call overhead or switch back to the
18174         //       x87 instructions. This is tracked by #7097.
18175         case CORINFO_INTRINSIC_Sqrt:
18176         case CORINFO_INTRINSIC_Abs:
18177             return true;
18178
18179         default:
18180             return false;
18181     }
18182 #elif defined(_TARGET_ARM64_)
18183     switch (intrinsicId)
18184     {
18185         case CORINFO_INTRINSIC_Sqrt:
18186         case CORINFO_INTRINSIC_Abs:
18187         case CORINFO_INTRINSIC_Round:
18188             return true;
18189
18190         default:
18191             return false;
18192     }
18193 #elif defined(_TARGET_ARM_)
18194     switch (intrinsicId)
18195     {
18196         case CORINFO_INTRINSIC_Sqrt:
18197         case CORINFO_INTRINSIC_Abs:
18198         case CORINFO_INTRINSIC_Round:
18199             return true;
18200
18201         default:
18202             return false;
18203     }
18204 #elif defined(_TARGET_X86_)
18205     switch (intrinsicId)
18206     {
18207         case CORINFO_INTRINSIC_Sin:
18208         case CORINFO_INTRINSIC_Cos:
18209         case CORINFO_INTRINSIC_Sqrt:
18210         case CORINFO_INTRINSIC_Abs:
18211         case CORINFO_INTRINSIC_Round:
18212             return true;
18213
18214         default:
18215             return false;
18216     }
18217 #else
18218     // TODO: This portion of logic is not implemented for other architectures.
18219     // The reason for returning true is that, on all other architectures, the only
18220     // intrinsics enabled are target intrinsics.
18221     return true;
18222 #endif //_TARGET_AMD64_
18223 }
18224
18225 /******************************************************************************/
18226 // Returns true if the given intrinsic will be implemented by calling System.Math
18227 // methods.
18228
18229 bool Compiler::IsIntrinsicImplementedByUserCall(CorInfoIntrinsics intrinsicId)
18230 {
18231     // Currently, if a math intrinsic is not implemented by target-specific
18232     // instructions, it will be implemented by a System.Math call. In the
18233     // future, if we turn to implementing some of them with helper calls,
18234     // this predicate will need to be revisited.
18235     return !IsTargetIntrinsic(intrinsicId);
18236 }
18237
18238 bool Compiler::IsMathIntrinsic(CorInfoIntrinsics intrinsicId)
18239 {
18240     switch (intrinsicId)
18241     {
18242         case CORINFO_INTRINSIC_Sin:
18243         case CORINFO_INTRINSIC_Sqrt:
18244         case CORINFO_INTRINSIC_Abs:
18245         case CORINFO_INTRINSIC_Cos:
18246         case CORINFO_INTRINSIC_Round:
18247         case CORINFO_INTRINSIC_Cosh:
18248         case CORINFO_INTRINSIC_Sinh:
18249         case CORINFO_INTRINSIC_Tan:
18250         case CORINFO_INTRINSIC_Tanh:
18251         case CORINFO_INTRINSIC_Asin:
18252         case CORINFO_INTRINSIC_Acos:
18253         case CORINFO_INTRINSIC_Atan:
18254         case CORINFO_INTRINSIC_Atan2:
18255         case CORINFO_INTRINSIC_Log10:
18256         case CORINFO_INTRINSIC_Pow:
18257         case CORINFO_INTRINSIC_Exp:
18258         case CORINFO_INTRINSIC_Ceiling:
18259         case CORINFO_INTRINSIC_Floor:
18260             return true;
18261         default:
18262             return false;
18263     }
18264 }
18265
18266 bool Compiler::IsMathIntrinsic(GenTreePtr tree)
18267 {
18268     return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->gtIntrinsic.gtIntrinsicId);
18269 }
18270 /*****************************************************************************/