Merge pull request #8879 from russellhadley/span-intrinsics
[platform/upstream/coreclr.git] src/jit/importer.cpp
1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
4
5 /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
6 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
7 XX                                                                           XX
8 XX                           Importer                                        XX
9 XX                                                                           XX
10 XX   Imports the given method and converts it to semantic trees              XX
11 XX                                                                           XX
12 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
13 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
14 */
15
16 #include "jitpch.h"
17 #ifdef _MSC_VER
18 #pragma hdrstop
19 #endif
20
21 #include "corexcep.h"
22
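// Verification helper macros (summarized; definitions follow):
//   Verify(cond, msg)          - raise a verification exception if needed when 'cond' fails.
//   VerifyOrReturn(cond, msg)  - same, but also return from the calling (void) function.
//   VerifyOrReturnSpeculative(cond, msg, speculative)
//                              - when 'speculative', just return false on failure;
//                                otherwise take the same exception path and return false.
//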
23 #define Verify(cond, msg)                                                                                              \
24     do                                                                                                                 \
25     {                                                                                                                  \
26         if (!(cond))                                                                                                   \
27         {                                                                                                              \
28             verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
29         }                                                                                                              \
30     } while (0)
31
32 #define VerifyOrReturn(cond, msg)                                                                                      \
33     do                                                                                                                 \
34     {                                                                                                                  \
35         if (!(cond))                                                                                                   \
36         {                                                                                                              \
37             verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
38             return;                                                                                                    \
39         }                                                                                                              \
40     } while (0)
41
42 #define VerifyOrReturnSpeculative(cond, msg, speculative)                                                              \
43     do                                                                                                                 \
44     {                                                                                                                  \
45         if (speculative)                                                                                               \
46         {                                                                                                              \
47             if (!(cond))                                                                                               \
48             {                                                                                                          \
49                 return false;                                                                                          \
50             }                                                                                                          \
51         }                                                                                                              \
52         else                                                                                                           \
53         {                                                                                                              \
54             if (!(cond))                                                                                               \
55             {                                                                                                          \
56                 verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                   \
57                 return false;                                                                                          \
58             }                                                                                                          \
59         }                                                                                                              \
60     } while (0)
61
62 /*****************************************************************************/
63
64 void Compiler::impInit()
65 {
66
67 #ifdef DEBUG
68     impTreeList        = nullptr;
69     impTreeLast        = nullptr;
70     impInlinedCodeSize = 0;
71 #endif
72 }
73
74 /*****************************************************************************
75  *
76  *  Pushes the given tree on the stack.
77  */
78
79 void Compiler::impPushOnStack(GenTreePtr tree, typeInfo ti)
80 {
81     /* Check for overflow. If inlining, we may be using a bigger stack */
82
83     if ((verCurrentState.esStackDepth >= info.compMaxStack) &&
84         (verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0)))
85     {
86         BADCODE("stack overflow");
87     }
88
89 #ifdef DEBUG
90     // If we are pushing a struct, make certain we know the precise type!
91     if (tree->TypeGet() == TYP_STRUCT)
92     {
93         assert(ti.IsType(TI_STRUCT));
94         CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle();
95         assert(clsHnd != NO_CLASS_HANDLE);
96     }
97
98     if (tiVerificationNeeded && !ti.IsDead())
99     {
100         assert(typeInfo::AreEquivalent(NormaliseForStack(ti), ti)); // types are normalized
101
102         // The ti type is consistent with the tree type.
103         //
104
105         // On 64-bit systems, nodes whose "proper" type is "native int" get labeled TYP_LONG.
106         // In the verification type system, we always transform "native int" to "TI_INT".
107         // Ideally, we would keep track of which nodes labeled "TYP_LONG" are really "native int", but
108         // attempts to do that have proved too difficult.  Instead, we'll assume that in checks like this,
109         // when there's a mismatch, it's because of this reason -- the typeInfo::AreEquivalentModuloNativeInt
110         // method used in the last disjunct allows exactly this mismatch.
111         assert(ti.IsDead() || ti.IsByRef() && (tree->TypeGet() == TYP_I_IMPL || tree->TypeGet() == TYP_BYREF) ||
112                ti.IsUnboxedGenericTypeVar() && tree->TypeGet() == TYP_REF ||
113                ti.IsObjRef() && tree->TypeGet() == TYP_REF || ti.IsMethod() && tree->TypeGet() == TYP_I_IMPL ||
114                ti.IsType(TI_STRUCT) && tree->TypeGet() != TYP_REF ||
115                typeInfo::AreEquivalentModuloNativeInt(NormaliseForStack(ti),
116                                                       NormaliseForStack(typeInfo(tree->TypeGet()))));
117
118         // If it is a struct type, make certain we normalized the primitive types
119         assert(!ti.IsType(TI_STRUCT) ||
120                info.compCompHnd->getTypeForPrimitiveValueClass(ti.GetClassHandle()) == CORINFO_TYPE_UNDEF);
121     }
122
123 #if VERBOSE_VERIFY
124     if (VERBOSE && tiVerificationNeeded)
125     {
126         printf("\n");
127         printf(TI_DUMP_PADDING);
128         printf("About to push to stack: ");
129         ti.Dump();
130     }
131 #endif // VERBOSE_VERIFY
132
133 #endif // DEBUG
134
135     verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti;
136     verCurrentState.esStack[verCurrentState.esStackDepth++].val      = tree;
137
138     if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
139     {
140         compLongUsed = true;
141     }
142     else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
143     {
144         compFloatingPointUsed = true;
145     }
146 }
147
148 /******************************************************************************/
149 // Used in the inliner, where we can assume type-safe code. Please don't use in the importer!
150 inline void Compiler::impPushOnStackNoType(GenTreePtr tree)
151 {
152     assert(verCurrentState.esStackDepth < impStkSize);
153     INDEBUG(verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = typeInfo());
154     verCurrentState.esStack[verCurrentState.esStackDepth++].val              = tree;
155
156     if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
157     {
158         compLongUsed = true;
159     }
160     else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
161     {
162         compFloatingPointUsed = true;
163     }
164 }
165
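// Push a null object reference (an integer-zero node typed TYP_REF, tagged TI_NULL) on the stack.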
166 inline void Compiler::impPushNullObjRefOnStack()
167 {
168     impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL));
169 }
170
171 // This method gets called when we run into unverifiable code
172 // (and we are verifying the method)
173
174 inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file)
175                                                           DEBUGARG(unsigned line))
176 {
177     // Remember that the code is not verifiable
178     // Note that the method may yet pass canSkipMethodVerification(),
179     // and so the presence of unverifiable code may not be an issue.
180     tiIsVerifiableCode = FALSE;
181
182 #ifdef DEBUG
183     const char* tail = strrchr(file, '\\');
184     if (tail)
185     {
186         file = tail + 1;
187     }
188
189     if (JitConfig.JitBreakOnUnsafeCode())
190     {
191         assert(!"Unsafe code detected");
192     }
193 #endif
194
195     JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
196             msg, info.compFullName, impCurOpcName, impCurOpcOffs));
197
198     if (verNeedsVerification() || compIsForImportOnly())
199     {
200         JITLOG((LL_ERROR, "Verification failure:  %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
201                 msg, info.compFullName, impCurOpcName, impCurOpcOffs));
202         verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line));
203     }
204 }
205
206 inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file)
207                                                                     DEBUGARG(unsigned line))
208 {
209     JITLOG((LL_ERROR, "Verification failure:  %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
210             msg, info.compFullName, impCurOpcName, impCurOpcOffs));
211
212 #ifdef DEBUG
213     //    BreakIfDebuggerPresent();
214     if (getBreakOnBadCode())
215     {
216         assert(!"Typechecking error");
217     }
218 #endif
219
220     RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr);
221     UNREACHABLE();
222 }
223
224 // Helper function that tells us whether the IL instruction at the given code address
225 // consumes an address from the top of the stack. We use it to avoid marking locals
226 // as lvAddrTaken unnecessarily.
227 bool Compiler::impILConsumesAddr(const BYTE* codeAddr, CORINFO_METHOD_HANDLE fncHandle, CORINFO_MODULE_HANDLE scpHandle)
228 {
229     assert(!compIsForInlining());
230
231     OPCODE opcode;
232
233     opcode = (OPCODE)getU1LittleEndian(codeAddr);
234
235     switch (opcode)
236     {
237         // case CEE_LDFLDA: We're leaving this one out because, if you have a sequence
238         // like
239         //
240         //          ldloca.0
241         //          ldflda whatever
242         //
243         // on a primitive-like struct, after morphing you end up with the address of a local
244         // that's not marked as address-taken, which is wrong. Also, ldflda is usually used
245         // for structs that contain other structs, which isn't a case we handle very
246         // well right now for other reasons.
247
248         case CEE_LDFLD:
249         {
250             // We won't collapse small fields. This is probably not the right place to have this
251             // check, but we're only using the function for this purpose, and it is easy to factor
252             // out if we need to do so.
253
254             CORINFO_RESOLVED_TOKEN resolvedToken;
255             impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field);
256
257             CORINFO_CLASS_HANDLE clsHnd;
258             var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField, &clsHnd));
259
260             // Preserve 'small' int types
261             if (lclTyp > TYP_INT)
262             {
263                 lclTyp = genActualType(lclTyp);
264             }
265
266             if (varTypeIsSmall(lclTyp))
267             {
268                 return false;
269             }
270
271             return true;
272         }
273         default:
274             break;
275     }
276
277     return false;
278 }
279
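// impResolveToken: read a 4-byte metadata token from the IL stream at 'addr' and resolve it.
// Outside of verification we resolve it directly through the EE; under verification we use
// eeTryResolveToken so that a resolution failure becomes a verification error instead.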
280 void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind)
281 {
282     pResolvedToken->tokenContext = impTokenLookupContextHandle;
283     pResolvedToken->tokenScope   = info.compScopeHnd;
284     pResolvedToken->token        = getU4LittleEndian(addr);
285     pResolvedToken->tokenType    = kind;
286
287     if (!tiVerificationNeeded)
288     {
289         info.compCompHnd->resolveToken(pResolvedToken);
290     }
291     else
292     {
293         Verify(eeTryResolveToken(pResolvedToken), "Token resolution failed");
294     }
295 }
296
297 /*****************************************************************************
298  *
299  *  Pop one tree from the stack.
300  */
301
302 StackEntry Compiler::impPopStack()
303 {
304     if (verCurrentState.esStackDepth == 0)
305     {
306         BADCODE("stack underflow");
307     }
308
309 #ifdef DEBUG
310 #if VERBOSE_VERIFY
311     if (VERBOSE && tiVerificationNeeded)
312     {
313         JITDUMP("\n");
314         printf(TI_DUMP_PADDING);
315         printf("About to pop from the stack: ");
316         const typeInfo& ti = verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo;
317         ti.Dump();
318     }
319 #endif // VERBOSE_VERIFY
320 #endif // DEBUG
321
322     return verCurrentState.esStack[--verCurrentState.esStackDepth];
323 }
324
325 StackEntry Compiler::impPopStack(CORINFO_CLASS_HANDLE& structType)
326 {
327     StackEntry ret = impPopStack();
328     structType     = verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo.GetClassHandle();
329     return (ret);
330 }
331
332 GenTreePtr Compiler::impPopStack(typeInfo& ti)
333 {
334     StackEntry ret = impPopStack();
335     ti             = ret.seTypeInfo;
336     return (ret.val);
337 }
338
339 /*****************************************************************************
340  *
341  *  Peek at the n'th (0-based) tree from the top of the stack.
342  */
343
344 StackEntry& Compiler::impStackTop(unsigned n)
345 {
346     if (verCurrentState.esStackDepth <= n)
347     {
348         BADCODE("stack underflow");
349     }
350
351     return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1];
352 }
353 /*****************************************************************************
354  *  Some trees are spilled in a special way. When unspilling them, or
355  *  making a copy, they need special handling. This function
356  *  enumerates the operators that can appear after spilling.
357  */
358
359 #ifdef DEBUG // only used in asserts
360 static bool impValidSpilledStackEntry(GenTreePtr tree)
361 {
362     if (tree->gtOper == GT_LCL_VAR)
363     {
364         return true;
365     }
366
367     if (tree->OperIsConst())
368     {
369         return true;
370     }
371
372     return false;
373 }
374 #endif
375
376 /*****************************************************************************
377  *
378  *  The following logic is used to save/restore stack contents.
379  *  If 'copy' is true, then we make a copy of the trees on the stack. These
380  *  all have to be cloneable/spilled values.
381  */
382
383 void Compiler::impSaveStackState(SavedStack* savePtr, bool copy)
384 {
385     savePtr->ssDepth = verCurrentState.esStackDepth;
386
387     if (verCurrentState.esStackDepth)
388     {
389         savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth];
390         size_t saveSize  = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees);
391
392         if (copy)
393         {
394             StackEntry* table = savePtr->ssTrees;
395
396             /* Make a fresh copy of all the stack entries */
397
398             for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++)
399             {
400                 table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo;
401                 GenTreePtr tree   = verCurrentState.esStack[level].val;
402
403                 assert(impValidSpilledStackEntry(tree));
404
405                 switch (tree->gtOper)
406                 {
407                     case GT_CNS_INT:
408                     case GT_CNS_LNG:
409                     case GT_CNS_DBL:
410                     case GT_CNS_STR:
411                     case GT_LCL_VAR:
412                         table->val = gtCloneExpr(tree);
413                         break;
414
415                     default:
416                         assert(!"Bad oper - Not covered by impValidSpilledStackEntry()");
417                         break;
418                 }
419             }
420         }
421         else
422         {
423             memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize);
424         }
425     }
426 }
427
428 void Compiler::impRestoreStackState(SavedStack* savePtr)
429 {
430     verCurrentState.esStackDepth = savePtr->ssDepth;
431
432     if (verCurrentState.esStackDepth)
433     {
434         memcpy(verCurrentState.esStack, savePtr->ssTrees,
435                verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack));
436     }
437 }
438
439 /*****************************************************************************
440  *
441  *  Get the tree list started for a new basic block.
442  */
443 inline void Compiler::impBeginTreeList()
444 {
445     assert(impTreeList == nullptr && impTreeLast == nullptr);
446
447     impTreeList = impTreeLast = new (this, GT_BEG_STMTS) GenTree(GT_BEG_STMTS, TYP_VOID);
448 }
449
450 /*****************************************************************************
451  *
452  *  Store the given start and end stmt in the given basic block. This is
453  *  mostly called by impEndTreeList(BasicBlock *block). It is called
454  *  directly only for handling CEE_LEAVEs out of finally-protected try blocks.
455  */
456
457 inline void Compiler::impEndTreeList(BasicBlock* block, GenTreePtr firstStmt, GenTreePtr lastStmt)
458 {
459     assert(firstStmt->gtOper == GT_STMT);
460     assert(lastStmt->gtOper == GT_STMT);
461
462     /* Make the list circular, so that we can easily walk it backwards */
463
464     firstStmt->gtPrev = lastStmt;
465
466     /* Store the tree list in the basic block */
467
468     block->bbTreeList = firstStmt;
469
470     /* The block should not already be marked as imported */
471     assert((block->bbFlags & BBF_IMPORTED) == 0);
472
473     block->bbFlags |= BBF_IMPORTED;
474 }
475
476 /*****************************************************************************
477  *
478  *  Store the current tree list in the given basic block.
479  */
480
481 inline void Compiler::impEndTreeList(BasicBlock* block)
482 {
483     assert(impTreeList->gtOper == GT_BEG_STMTS);
484
485     GenTreePtr firstTree = impTreeList->gtNext;
486
487     if (!firstTree)
488     {
489         /* The block should not already be marked as imported */
490         assert((block->bbFlags & BBF_IMPORTED) == 0);
491
492         // Empty block. Just mark it as imported
493         block->bbFlags |= BBF_IMPORTED;
494     }
495     else
496     {
497         // Ignore the GT_BEG_STMTS
498         assert(firstTree->gtPrev == impTreeList);
499
500         impEndTreeList(block, firstTree, impTreeLast);
501     }
502
503 #ifdef DEBUG
504     if (impLastILoffsStmt != nullptr)
505     {
506         impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
507         impLastILoffsStmt                          = nullptr;
508     }
509
510     impTreeList = impTreeLast = nullptr;
511 #endif
512 }
513
514 /*****************************************************************************
515  *
516  *  Check that storing the given tree doesn't mess up the semantic order. Note
517  *  that this has only limited value as we can only check [0..chkLevel).
518  */
519
520 inline void Compiler::impAppendStmtCheck(GenTreePtr stmt, unsigned chkLevel)
521 {
522 #ifndef DEBUG
523     return;
524 #else
525     assert(stmt->gtOper == GT_STMT);
526
527     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
528     {
529         chkLevel = verCurrentState.esStackDepth;
530     }
531
532     if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE)
533     {
534         return;
535     }
536
537     GenTreePtr tree = stmt->gtStmt.gtStmtExpr;
538
539     // Calls can only be appended if there are no GTF_GLOB_EFFECT trees on the stack
540
541     if (tree->gtFlags & GTF_CALL)
542     {
543         for (unsigned level = 0; level < chkLevel; level++)
544         {
545             assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0);
546         }
547     }
548
549     if (tree->gtOper == GT_ASG)
550     {
551         // For an assignment to a local variable, all references to that
552         // variable have to be spilled. If it is aliased, all calls and
553         // indirect accesses have to be spilled.
554
555         if (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR)
556         {
557             unsigned lclNum = tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
558             for (unsigned level = 0; level < chkLevel; level++)
559             {
560                 assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum, false));
561                 assert(!lvaTable[lclNum].lvAddrExposed ||
562                        (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0);
563             }
564         }
565
566         // If the access may be to global memory, all side effects have to be spilled.
567
568         else if (tree->gtOp.gtOp1->gtFlags & GTF_GLOB_REF)
569         {
570             for (unsigned level = 0; level < chkLevel; level++)
571             {
572                 assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0);
573             }
574         }
575     }
576 #endif
577 }
578
579 /*****************************************************************************
580  *
581  *  Append the given GT_STMT node to the current block's tree list.
582  *  [0..chkLevel) is the portion of the stack which we will check for
583  *    interference with stmt and spill if needed.
584  */
585
586 inline void Compiler::impAppendStmt(GenTreePtr stmt, unsigned chkLevel)
587 {
588     assert(stmt->gtOper == GT_STMT);
589     noway_assert(impTreeLast != nullptr);
590
591     /* If the statement being appended has any side-effects, check the stack
592        to see if anything needs to be spilled to preserve correct ordering. */
593
594     GenTreePtr expr  = stmt->gtStmt.gtStmtExpr;
595     unsigned   flags = expr->gtFlags & GTF_GLOB_EFFECT;
596
597     // Assignments to (unaliased) locals don't count as a side-effect as
598     // we handle them specially using impSpillLclRefs(). Temp locals should
599     // be fine too.
600
601     if ((expr->gtOper == GT_ASG) && (expr->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
602         !(expr->gtOp.gtOp1->gtFlags & GTF_GLOB_REF) && !gtHasLocalsWithAddrOp(expr->gtOp.gtOp2))
603     {
604         unsigned op2Flags = expr->gtOp.gtOp2->gtFlags & GTF_GLOB_EFFECT;
605         assert(flags == (op2Flags | GTF_ASG));
606         flags = op2Flags;
607     }
608
609     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
610     {
611         chkLevel = verCurrentState.esStackDepth;
612     }
613
614     if (chkLevel && chkLevel != (unsigned)CHECK_SPILL_NONE)
615     {
616         assert(chkLevel <= verCurrentState.esStackDepth);
617
618         if (flags)
619         {
620             // If there is a call, we have to spill global refs
621             bool spillGlobEffects = (flags & GTF_CALL) ? true : false;
622
623             if (expr->gtOper == GT_ASG)
624             {
625                 GenTree* lhs = expr->gtGetOp1();
626                 // If we are assigning to a global ref, we have to spill global refs on stack.
627                 // TODO-1stClassStructs: Previously, spillGlobEffects was set to true for
628                 // GT_INITBLK and GT_COPYBLK, but this is overly conservative, and should be
629                 // revisited. (Note that it was NOT set to true for GT_COPYOBJ.)
630                 if (!expr->OperIsBlkOp())
631                 {
632                     // If we are assigning to a global ref, we have to spill global refs on stack
633                     if ((lhs->gtFlags & GTF_GLOB_REF) != 0)
634                     {
635                         spillGlobEffects = true;
636                     }
637                 }
638                 else if ((lhs->OperIsBlk() && !lhs->AsBlk()->HasGCPtr()) ||
639                          ((lhs->OperGet() == GT_LCL_VAR) &&
640                           (lvaTable[lhs->AsLclVarCommon()->gtLclNum].lvStructGcCount == 0)))
641                 {
642                     spillGlobEffects = true;
643                 }
644             }
645
646             impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt"));
647         }
648         else
649         {
650             impSpillSpecialSideEff();
651         }
652     }
653
654     impAppendStmtCheck(stmt, chkLevel);
655
656     /* Point 'prev' at the previous node, so that we can walk backwards */
657
658     stmt->gtPrev = impTreeLast;
659
660     /* Append the expression statement to the list */
661
662     impTreeLast->gtNext = stmt;
663     impTreeLast         = stmt;
664
665 #ifdef FEATURE_SIMD
666     impMarkContiguousSIMDFieldAssignments(stmt);
667 #endif
668
669     /* Once we set impCurStmtOffs in an appended tree, we are ready to
670        report the following offsets. So reset impCurStmtOffs */
671
672     if (impTreeLast->gtStmt.gtStmtILoffsx == impCurStmtOffs)
673     {
674         impCurStmtOffsSet(BAD_IL_OFFSET);
675     }
676
677 #ifdef DEBUG
678     if (impLastILoffsStmt == nullptr)
679     {
680         impLastILoffsStmt = stmt;
681     }
682
683     if (verbose)
684     {
685         printf("\n\n");
686         gtDispTree(stmt);
687     }
688 #endif
689 }
690
691 /*****************************************************************************
692  *
693  *  Insert the given GT_STMT "stmt" before GT_STMT "stmtBefore"
694  */
695
696 inline void Compiler::impInsertStmtBefore(GenTreePtr stmt, GenTreePtr stmtBefore)
697 {
698     assert(stmt->gtOper == GT_STMT);
699     assert(stmtBefore->gtOper == GT_STMT);
700
701     GenTreePtr stmtPrev = stmtBefore->gtPrev;
702     stmt->gtPrev        = stmtPrev;
703     stmt->gtNext        = stmtBefore;
704     stmtPrev->gtNext    = stmt;
705     stmtBefore->gtPrev  = stmt;
706 }
707
708 /*****************************************************************************
709  *
710  *  Append the given expression tree to the current block's tree list.
711  *  Return the newly created statement.
712  */
713
714 GenTreePtr Compiler::impAppendTree(GenTreePtr tree, unsigned chkLevel, IL_OFFSETX offset)
715 {
716     assert(tree);
717
718     /* Allocate an 'expression statement' node */
719
720     GenTreePtr expr = gtNewStmt(tree, offset);
721
722     /* Append the statement to the current block's stmt list */
723
724     impAppendStmt(expr, chkLevel);
725
726     return expr;
727 }
728
729 /*****************************************************************************
730  *
731  *  Insert the given expression tree before GT_STMT "stmtBefore"
732  */
733
734 void Compiler::impInsertTreeBefore(GenTreePtr tree, IL_OFFSETX offset, GenTreePtr stmtBefore)
735 {
736     assert(stmtBefore->gtOper == GT_STMT);
737
738     /* Allocate an 'expression statement' node */
739
740     GenTreePtr expr = gtNewStmt(tree, offset);
741
742     /* Append the statement to the current block's stmt list */
743
744     impInsertStmtBefore(expr, stmtBefore);
745 }
746
747 /*****************************************************************************
748  *
749  *  Append an assignment of the given value to a temp to the current tree list.
750  *  curLevel is the stack level for which the spill to the temp is being done.
751  */
752
753 void Compiler::impAssignTempGen(unsigned    tmp,
754                                 GenTreePtr  val,
755                                 unsigned    curLevel,
756                                 GenTreePtr* pAfterStmt, /* = NULL */
757                                 IL_OFFSETX  ilOffset,   /* = BAD_IL_OFFSET */
758                                 BasicBlock* block       /* = NULL */
759                                 )
760 {
761     GenTreePtr asg = gtNewTempAssign(tmp, val);
762
763     if (!asg->IsNothingNode())
764     {
765         if (pAfterStmt)
766         {
767             GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
768             *pAfterStmt        = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
769         }
770         else
771         {
772             impAppendTree(asg, curLevel, impCurStmtOffs);
773         }
774     }
775 }
776
777 /*****************************************************************************
778  * Same as above, but handles the value-class case too.
779  */
780
781 void Compiler::impAssignTempGen(unsigned             tmpNum,
782                                 GenTreePtr           val,
783                                 CORINFO_CLASS_HANDLE structType,
784                                 unsigned             curLevel,
785                                 GenTreePtr*          pAfterStmt, /* = NULL */
786                                 IL_OFFSETX           ilOffset,   /* = BAD_IL_OFFSET */
787                                 BasicBlock*          block       /* = NULL */
788                                 )
789 {
790     GenTreePtr asg;
791
792     if (varTypeIsStruct(val))
793     {
794         assert(tmpNum < lvaCount);
795         assert(structType != NO_CLASS_HANDLE);
796
797         // If the method is non-verifiable the assert may not hold, so
798         // at least ignore it when verification is turned on, since any
799         // block that tries to use the temp would have failed verification.
800         var_types varType = lvaTable[tmpNum].lvType;
801         assert(tiVerificationNeeded || varType == TYP_UNDEF || varTypeIsStruct(varType));
802         lvaSetStruct(tmpNum, structType, false);
803
804         // Now, set the type of the struct value. Note that lvaSetStruct may modify the type
805         // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType)
806         // that has been passed in for the value being assigned to the temp, in which case we
807         // need to set 'val' to that same type.
808         // Note also that if we always normalized the types of any node that might be a struct
809         // type, this would not be necessary - but that requires additional JIT/EE interface
810         // calls that may not actually be required - e.g. if we only access a field of a struct.
811
812         val->gtType = lvaTable[tmpNum].lvType;
813
814         GenTreePtr dst = gtNewLclvNode(tmpNum, val->gtType);
815         asg            = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, block);
816     }
817     else
818     {
819         asg = gtNewTempAssign(tmpNum, val);
820     }
821
822     if (!asg->IsNothingNode())
823     {
824         if (pAfterStmt)
825         {
826             GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
827             *pAfterStmt        = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
828         }
829         else
830         {
831             impAppendTree(asg, curLevel, impCurStmtOffs);
832         }
833     }
834 }
835
836 /*****************************************************************************
837  *
838  *  Pop the given number of values from the stack and return a list node with
839  *  their values.
840  *  The 'prefixTree' argument may optionally contain an argument
841  *  list that is prepended to the list returned from this function.
842  *
843  *  The notion of prepended is a bit misleading in that the list is backwards
844  *  from the way I would expect: The first element popped is at the end of
845  *  the returned list, and prefixTree is 'before' that, meaning closer to
846  *  the end of the list.  To get to prefixTree, you have to walk to the
847  *  end of the list.
848  *
849  *  For ARG_ORDER_R2L prefixTree is only used to insert extra arguments, as
850  *  such we reverse its meaning such that returnValue has a reversed
851  *  prefixTree at the head of the list.
852  */
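// Example (ARG_ORDER_R2L): with prefixTree == [P1, P2] and three values on the stack
// (S3 on top), the returned list is P2 -> P1 -> S1 -> S2 -> S3; S3, the first value
// popped, ends up at the tail of the list.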
853
854 GenTreeArgList* Compiler::impPopList(unsigned          count,
855                                      unsigned*         flagsPtr,
856                                      CORINFO_SIG_INFO* sig,
857                                      GenTreeArgList*   prefixTree)
858 {
859     assert(sig == nullptr || count == sig->numArgs);
860
861     unsigned             flags = 0;
862     CORINFO_CLASS_HANDLE structType;
863     GenTreeArgList*      treeList;
864
865     if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
866     {
867         treeList = nullptr;
868     }
869     else
870     { // ARG_ORDER_L2R
871         treeList = prefixTree;
872     }
873
874     while (count--)
875     {
876         StackEntry se   = impPopStack();
877         typeInfo   ti   = se.seTypeInfo;
878         GenTreePtr temp = se.val;
879
880         if (varTypeIsStruct(temp))
881         {
882             // Morph trees that aren't already OBJs or MKREFANY to be OBJs
883             assert(ti.IsType(TI_STRUCT));
884             structType = ti.GetClassHandleForValueClass();
885             temp       = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL);
886         }
887
888         /* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */
889         flags |= temp->gtFlags;
890         treeList = gtNewListNode(temp, treeList);
891     }
892
893     *flagsPtr = flags;
894
895     if (sig != nullptr)
896     {
897         if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
898             sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR)
899         {
900             // Make sure that all valuetypes (including enums) that we push are loaded.
901             // This is to guarantee that if a GC is triggered from the prestub of this method,
902             // all valuetypes in the method signature are already loaded.
903             // We need to be able to find the size of the valuetypes, but we cannot
904             // do a class-load from within GC.
905             info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass);
906         }
907
908         CORINFO_ARG_LIST_HANDLE argLst = sig->args;
909         CORINFO_CLASS_HANDLE    argClass;
910         CORINFO_CLASS_HANDLE    argRealClass;
911         GenTreeArgList*         args;
912         unsigned                sigSize;
913
914         for (args = treeList, count = sig->numArgs; count > 0; args = args->Rest(), count--)
915         {
916             PREFIX_ASSUME(args != nullptr);
917
918             CorInfoType corType = strip(info.compCompHnd->getArgType(sig, argLst, &argClass));
919
920             // insert implied casts (from float to double or double to float)
921
922             if (corType == CORINFO_TYPE_DOUBLE && args->Current()->TypeGet() == TYP_FLOAT)
923             {
924                 args->Current() = gtNewCastNode(TYP_DOUBLE, args->Current(), TYP_DOUBLE);
925             }
926             else if (corType == CORINFO_TYPE_FLOAT && args->Current()->TypeGet() == TYP_DOUBLE)
927             {
928                 args->Current() = gtNewCastNode(TYP_FLOAT, args->Current(), TYP_FLOAT);
929             }
930
931             // insert any widening or narrowing casts for backwards compatibility
932
933             args->Current() = impImplicitIorI4Cast(args->Current(), JITtype2varType(corType));
934
935             if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR &&
936                 corType != CORINFO_TYPE_VAR && (argRealClass = info.compCompHnd->getArgClass(sig, argLst)) != nullptr)
937             {
938                 // Everett MC++ could generate IL with mismatched valuetypes. It used to work with the Everett JIT,
939                 // but it stopped working in Whidbey when we started passing simple valuetypes as their underlying
940                 // primitive types.
941                 // We will try to adjust for this case here to avoid breaking customers code (see VSW 485789 for
942                 // details).
943                 if (corType == CORINFO_TYPE_VALUECLASS && !varTypeIsStruct(args->Current()))
944                 {
945                     args->Current() = impNormStructVal(args->Current(), argRealClass, (unsigned)CHECK_SPILL_ALL, true);
946                 }
947
948                 // Make sure that all valuetypes (including enums) that we push are loaded.
949                 // This is to guarantee that if a GC is triggered from the prestub of this method,
950                 // all valuetypes in the method signature are already loaded.
951                 // We need to be able to find the size of the valuetypes, but we cannot
952                 // do a class-load from within GC.
953                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass);
954             }
955
956             argLst = info.compCompHnd->getArgNext(argLst);
957         }
958     }
959
960     if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
961     {
962         // Prepend the prefixTree
963
964         // Simple in-place reversal to place treeList
965         // at the end of a reversed prefixTree
966         while (prefixTree != nullptr)
967         {
968             GenTreeArgList* next = prefixTree->Rest();
969             prefixTree->Rest()   = treeList;
970             treeList             = prefixTree;
971             prefixTree           = next;
972         }
973     }
974     return treeList;
975 }
976
977 /*****************************************************************************
978  *
979  *  Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.)
980  *  The first "skipReverseCount" items are not reversed.
981  */
982
983 GenTreeArgList* Compiler::impPopRevList(unsigned          count,
984                                         unsigned*         flagsPtr,
985                                         CORINFO_SIG_INFO* sig,
986                                         unsigned          skipReverseCount)
987
988 {
989     assert(skipReverseCount <= count);
990
991     GenTreeArgList* list = impPopList(count, flagsPtr, sig);
992
993     // reverse the list
994     if (list == nullptr || skipReverseCount == count)
995     {
996         return list;
997     }
998
999     GenTreeArgList* ptr          = nullptr; // Initialized to the first node that needs to be reversed
1000     GenTreeArgList* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed
1001
1002     if (skipReverseCount == 0)
1003     {
1004         ptr = list;
1005     }
1006     else
1007     {
1008         lastSkipNode = list;
1009         // Get to the first node that needs to be reversed
1010         for (unsigned i = 0; i < skipReverseCount - 1; i++)
1011         {
1012             lastSkipNode = lastSkipNode->Rest();
1013         }
1014
1015         PREFIX_ASSUME(lastSkipNode != nullptr);
1016         ptr = lastSkipNode->Rest();
1017     }
1018
1019     GenTreeArgList* reversedList = nullptr;
1020
1021     do
1022     {
1023         GenTreeArgList* tmp = ptr->Rest();
1024         ptr->Rest()         = reversedList;
1025         reversedList        = ptr;
1026         ptr                 = tmp;
1027     } while (ptr != nullptr);
1028
1029     if (skipReverseCount)
1030     {
1031         lastSkipNode->Rest() = reversedList;
1032         return list;
1033     }
1034     else
1035     {
1036         return reversedList;
1037     }
1038 }
1039
1040 /*****************************************************************************
1041    Assign (copy) the structure from 'src' to 'dest'.  The structure is a value
1042    class of type 'structHnd'.  It returns the tree that should be appended to the
1043    statement list that represents the assignment.
1044    Temp assignments may be appended to impTreeList if spilling is necessary.
1045    curLevel is the stack level for which a spill may be done.
1046  */
1047
1048 GenTreePtr Compiler::impAssignStruct(GenTreePtr           dest,
1049                                      GenTreePtr           src,
1050                                      CORINFO_CLASS_HANDLE structHnd,
1051                                      unsigned             curLevel,
1052                                      GenTreePtr*          pAfterStmt, /* = NULL */
1053                                      BasicBlock*          block       /* = NULL */
1054                                      )
1055 {
1056     assert(varTypeIsStruct(dest));
1057
1058     while (dest->gtOper == GT_COMMA)
1059     {
1060         assert(varTypeIsStruct(dest->gtOp.gtOp2)); // Second thing is the struct
1061
1062         // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree.
1063         if (pAfterStmt)
1064         {
1065             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(dest->gtOp.gtOp1, impCurStmtOffs));
1066         }
1067         else
1068         {
1069             impAppendTree(dest->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1070         }
1071
1072         // set dest to the second thing
1073         dest = dest->gtOp.gtOp2;
1074     }
1075
1076     assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD ||
1077            dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX);
1078
1079     if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR &&
1080         src->gtLclVarCommon.gtLclNum == dest->gtLclVarCommon.gtLclNum)
1081     {
1082         // Make this a NOP
1083         return gtNewNothingNode();
1084     }
1085
1086     // TODO-1stClassStructs: Avoid creating an address if it is not needed,
1087     // or re-creating a Blk node if it is.
1088     GenTreePtr destAddr;
1089
1090     if (dest->gtOper == GT_IND || dest->OperIsBlk())
1091     {
1092         destAddr = dest->gtOp.gtOp1;
1093     }
1094     else
1095     {
1096         destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
1097     }
1098
1099     return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, block));
1100 }
1101
1102 /*****************************************************************************/
1103
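// impAssignStructPtr: build the assignment of the struct value 'src' to the location
// addressed by 'destAddr'. Special-cases calls returning structs (hidden return buffer
// or multi-register returns), GT_RET_EXPR, GT_MKREFANY, and GT_COMMA sources; otherwise
// it creates a block/indirection destination and returns the assignment node for the
// caller to append.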
1104 GenTreePtr Compiler::impAssignStructPtr(GenTreePtr           destAddr,
1105                                         GenTreePtr           src,
1106                                         CORINFO_CLASS_HANDLE structHnd,
1107                                         unsigned             curLevel,
1108                                         GenTreePtr*          pAfterStmt, /* = NULL */
1109                                         BasicBlock*          block       /* = NULL */
1110                                         )
1111 {
1112     var_types  destType;
1113     GenTreePtr dest      = nullptr;
1114     unsigned   destFlags = 0;
1115
1116 #if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1117     assert(varTypeIsStruct(src) || (src->gtOper == GT_ADDR && src->TypeGet() == TYP_BYREF));
1118     // TODO-ARM-BUG: Does ARM need this?
1119     // TODO-ARM64-BUG: Does ARM64 need this?
1120     assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1121            src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1122            src->gtOper == GT_COMMA || src->gtOper == GT_ADDR ||
1123            (src->TypeGet() != TYP_STRUCT && (GenTree::OperIsSIMD(src->gtOper) || src->gtOper == GT_LCL_FLD)));
1124 #else  // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1125     assert(varTypeIsStruct(src));
1126
1127     assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1128            src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1129            src->gtOper == GT_COMMA ||
1130            (src->TypeGet() != TYP_STRUCT && (GenTree::OperIsSIMD(src->gtOper) || src->gtOper == GT_LCL_FLD)));
1131 #endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1132     if (destAddr->OperGet() == GT_ADDR)
1133     {
1134         GenTree* destNode = destAddr->gtGetOp1();
1135         // If the actual destination is a local (for non-LEGACY_BACKEND), or already a block node, or is a node that
1136         // will be morphed, don't insert an OBJ(ADDR).
1137         if (destNode->gtOper == GT_INDEX || destNode->OperIsBlk()
1138 #ifndef LEGACY_BACKEND
1139             || ((destNode->OperGet() == GT_LCL_VAR) && (destNode->TypeGet() == src->TypeGet()))
1140 #endif // !LEGACY_BACKEND
1141                 )
1142         {
1143             dest = destNode;
1144         }
1145         destType = destNode->TypeGet();
1146     }
1147     else
1148     {
1149         destType = src->TypeGet();
1150     }
1151
1152     var_types asgType = src->TypeGet();
1153
1154     if (src->gtOper == GT_CALL)
1155     {
1156         if (src->AsCall()->TreatAsHasRetBufArg(this))
1157         {
1158             // Case of call returning a struct via hidden retbuf arg
1159
1160             // insert the return value buffer into the argument list as first byref parameter
1161             src->gtCall.gtCallArgs = gtNewListNode(destAddr, src->gtCall.gtCallArgs);
1162
1163             // now returns void, not a struct
1164             src->gtType = TYP_VOID;
1165
1166             // return the morphed call node
1167             return src;
1168         }
1169         else
1170         {
1171             // Case of call returning a struct in one or more registers.
1172
1173             var_types returnType = (var_types)src->gtCall.gtReturnType;
1174
1175             // We won't use a return buffer, so change src->gtType to 'returnType'
1176             src->gtType = genActualType(returnType);
1177
1178             // First we try to change this to "LclVar/LclFld = call"
1179             //
1180             if ((destAddr->gtOper == GT_ADDR) && (destAddr->gtOp.gtOp1->gtOper == GT_LCL_VAR))
1181             {
1182                 // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD.
1183                 // That is, the IR will be of the form lclVar = call for multi-reg return
1184                 //
1185                 GenTreePtr lcl = destAddr->gtOp.gtOp1;
1186                 if (src->AsCall()->HasMultiRegRetVal())
1187                 {
1188                     // Mark the struct LclVar as used in a MultiReg return context
1189                     //  which currently makes it non promotable.
1190                     // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1191                     // handle multireg returns.
1192                     lcl->gtFlags |= GTF_DONT_CSE;
1193                     lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1194                 }
1195                 else // The call result is not a multireg return
1196                 {
1197                     // We change this to a GT_LCL_FLD (from a GT_ADDR of a GT_LCL_VAR)
1198                     lcl->ChangeOper(GT_LCL_FLD);
1199                     fgLclFldAssign(lcl->gtLclVarCommon.gtLclNum);
1200                 }
1201
1202                 lcl->gtType = src->gtType;
1203                 asgType     = src->gtType;
1204                 dest        = lcl;
1205
1206 #if defined(_TARGET_ARM_)
1207                 // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case,
1208                 // but that method has not been updated to include ARM.
1209                 impMarkLclDstNotPromotable(lcl->gtLclVarCommon.gtLclNum, src, structHnd);
1210                 lcl->gtFlags |= GTF_DONT_CSE;
1211 #elif defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1212                 // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
1213                 assert(!src->gtCall.IsVarargs() && "varargs not allowed for System V OSs.");
1214
1215                 // Make the struct non promotable. The eightbytes could contain multiple fields.
1216                 // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1217                 // handle multireg returns.
1218                 // TODO-Cleanup: Why is this needed here? This seems that it will set this even for
1219                 // non-multireg returns.
1220                 lcl->gtFlags |= GTF_DONT_CSE;
1221                 lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1222 #endif
1223             }
1224             else // we don't have a GT_ADDR of a GT_LCL_VAR
1225             {
1226                 // !!! The destination could be on stack. !!!
1227                 // This flag will let us choose the correct write barrier.
1228                 asgType   = returnType;
1229                 destFlags = GTF_IND_TGTANYWHERE;
1230             }
1231         }
1232     }
1233     else if (src->gtOper == GT_RET_EXPR)
1234     {
1235         GenTreePtr call = src->gtRetExpr.gtInlineCandidate;
1236         noway_assert(call->gtOper == GT_CALL);
1237
1238         if (call->AsCall()->HasRetBufArg())
1239         {
1240             // insert the return value buffer into the argument list as first byref parameter
1241             call->gtCall.gtCallArgs = gtNewListNode(destAddr, call->gtCall.gtCallArgs);
1242
1243             // now returns void, not a struct
1244             src->gtType  = TYP_VOID;
1245             call->gtType = TYP_VOID;
1246
1247             // We have already appended the write to 'dest' via the GT_CALL's args,
1248             // so now we just return an empty node (pruning the GT_RET_EXPR)
1249             return src;
1250         }
1251         else
1252         {
1253             // Case of inline method returning a struct in one or more registers.
1254             //
1255             var_types returnType = (var_types)call->gtCall.gtReturnType;
1256
1257             // We won't need a return buffer
1258             asgType      = returnType;
1259             src->gtType  = genActualType(returnType);
1260             call->gtType = src->gtType;
1261
1262             // If we've changed the type, and it no longer matches a local destination,
1263             // we must use an indirection.
1264             if ((dest != nullptr) && (dest->OperGet() == GT_LCL_VAR) && (dest->TypeGet() != asgType))
1265             {
1266                 dest = nullptr;
1267             }
1268
1269             // !!! The destination could be on stack. !!!
1270             // This flag will let us choose the correct write barrier.
1271             destFlags = GTF_IND_TGTANYWHERE;
1272         }
1273     }
1274     else if (src->OperIsBlk())
1275     {
1276         asgType = impNormStructType(structHnd);
1277         if (src->gtOper == GT_OBJ)
1278         {
1279             assert(src->gtObj.gtClass == structHnd);
1280         }
1281     }
1282     else if (src->gtOper == GT_INDEX)
1283     {
1284         asgType = impNormStructType(structHnd);
1285         assert(src->gtIndex.gtStructElemClass == structHnd);
1286     }
1287     else if (src->gtOper == GT_MKREFANY)
1288     {
1289         // Since we are assigning the result of a GT_MKREFANY,
1290         // "destAddr" must point to a refany.
1291
1292         GenTreePtr destAddrClone;
1293         destAddr =
1294             impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment"));
1295
1296         assert(offsetof(CORINFO_RefAny, dataPtr) == 0);
1297         assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF);
1298         GetZeroOffsetFieldMap()->Set(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));
1299         GenTreePtr     ptrSlot         = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr);
1300         GenTreeIntCon* typeFieldOffset = gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL);
1301         typeFieldOffset->gtFieldSeq    = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField());
1302         GenTreePtr typeSlot =
1303             gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset));
1304
1305         // append the assign of the pointer value
1306         GenTreePtr asg = gtNewAssignNode(ptrSlot, src->gtOp.gtOp1);
1307         if (pAfterStmt)
1308         {
1309             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(asg, impCurStmtOffs));
1310         }
1311         else
1312         {
1313             impAppendTree(asg, curLevel, impCurStmtOffs);
1314         }
1315
1316         // return the assign of the type value, to be appended
1317         return gtNewAssignNode(typeSlot, src->gtOp.gtOp2);
1318     }
1319     else if (src->gtOper == GT_COMMA)
1320     {
1321         // The second thing is the struct or its address.
1322         assert(varTypeIsStruct(src->gtOp.gtOp2) || src->gtOp.gtOp2->gtType == TYP_BYREF);
1323         if (pAfterStmt)
1324         {
1325             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(src->gtOp.gtOp1, impCurStmtOffs));
1326         }
1327         else
1328         {
1329             impAppendTree(src->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1330         }
1331
1332         // Evaluate the second thing using recursion.
1333         return impAssignStructPtr(destAddr, src->gtOp.gtOp2, structHnd, curLevel, pAfterStmt, block);
1334     }
1335     else if (src->IsLocal())
1336     {
1337         asgType = src->TypeGet();
1338     }
1339     else if (asgType == TYP_STRUCT)
1340     {
1341         asgType     = impNormStructType(structHnd);
1342         src->gtType = asgType;
1343 #ifdef LEGACY_BACKEND
1344         if (asgType == TYP_STRUCT)
1345         {
1346             GenTree* srcAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, src);
1347             src              = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
1348         }
1349 #endif
1350     }
1351     if (dest == nullptr)
1352     {
1353         // TODO-1stClassStructs: We shouldn't really need a block node as the destination
1354         // if this is a known struct type.
1355         if (asgType == TYP_STRUCT)
1356         {
1357             dest = gtNewObjNode(structHnd, destAddr);
1358             gtSetObjGcInfo(dest->AsObj());
1359             // Although an obj as a call argument was always assumed to be a globRef
1360             // (which is itself overly conservative), that is not true of the operands
1361             // of a block assignment.
1362             dest->gtFlags &= ~GTF_GLOB_REF;
1363             dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF);
1364         }
1365         else if (varTypeIsStruct(asgType))
1366         {
1367             dest = new (this, GT_BLK) GenTreeBlk(GT_BLK, asgType, destAddr, genTypeSize(asgType));
1368         }
1369         else
1370         {
1371             dest = gtNewOperNode(GT_IND, asgType, destAddr);
1372         }
1373     }
1374     else
1375     {
1376         dest->gtType = asgType;
1377     }
1378
1379     dest->gtFlags |= destFlags;
1380     destFlags = dest->gtFlags;
1381
1382     // return an assignment node, to be appended
1383     GenTree* asgNode = gtNewAssignNode(dest, src);
1384     gtBlockOpInit(asgNode, dest, src, false);
1385
1386     // TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs
1387     // of assignments.
1388     if ((destFlags & GTF_DONT_CSE) == 0)
1389     {
1390         dest->gtFlags &= ~(GTF_DONT_CSE);
1391     }
1392     return asgNode;
1393 }
1394
1395 /*****************************************************************************
1396    Given a struct value and the class handle for that struct, return
1397    an expression for the address of that struct value.
1398
1399    willDeref - true if the caller guarantees it will dereference the returned address.
1400 */
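/* For illustration, a summary of the cases handled below (a sketch, not an additional contract):
     - GT_OBJ with willDeref: return the OBJ's existing address operand.
     - GT_CALL, GT_RET_EXPR, GT_OBJ or GT_MKREFANY: spill the value to a fresh temp and return ADDR(LCL_VAR temp).
     - GT_COMMA: recurse on the second operand, spilling the first operand's statement if needed.
     - otherwise: wrap the value as GT_ADDR(structVal) typed as TYP_BYREF.
*/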
1401
1402 GenTreePtr Compiler::impGetStructAddr(GenTreePtr           structVal,
1403                                       CORINFO_CLASS_HANDLE structHnd,
1404                                       unsigned             curLevel,
1405                                       bool                 willDeref)
1406 {
1407     assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd));
1408
1409     var_types type = structVal->TypeGet();
1410
1411     genTreeOps oper = structVal->gtOper;
1412
1413     if (oper == GT_OBJ && willDeref)
1414     {
1415         assert(structVal->gtObj.gtClass == structHnd);
1416         return (structVal->gtObj.Addr());
1417     }
1418     else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY)
1419     {
1420         unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1421
1422         impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1423
1424         // The 'return value' is now the temp itself
1425
1426         type            = genActualType(lvaTable[tmpNum].TypeGet());
1427         GenTreePtr temp = gtNewLclvNode(tmpNum, type);
1428         temp            = gtNewOperNode(GT_ADDR, TYP_BYREF, temp);
1429         return temp;
1430     }
1431     else if (oper == GT_COMMA)
1432     {
1433         assert(structVal->gtOp.gtOp2->gtType == type); // Second thing is the struct
1434
1435         GenTreePtr oldTreeLast = impTreeLast;
1436         structVal->gtOp.gtOp2  = impGetStructAddr(structVal->gtOp.gtOp2, structHnd, curLevel, willDeref);
1437         structVal->gtType      = TYP_BYREF;
1438
1439         if (oldTreeLast != impTreeLast)
1440         {
1441             // Some temp assignment statement was placed on the statement list
1442             // for Op2, but that would be out of order with op1, so we need to
1443             // spill op1 onto the statement list after whatever was last
1444             // before we recursed on Op2 (i.e. before whatever Op2 appended).
1445             impInsertTreeBefore(structVal->gtOp.gtOp1, impCurStmtOffs, oldTreeLast->gtNext);
1446             structVal->gtOp.gtOp1 = gtNewNothingNode();
1447         }
1448
1449         return (structVal);
1450     }
1451
1452     return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1453 }
1454
1455 //------------------------------------------------------------------------
1456 // impNormStructType: Given a (known to be) struct class handle structHnd, normalize its type,
1457 //                    and optionally determine the GC layout of the struct.
1458 //
1459 // Arguments:
1460 //    structHnd       - The class handle for the struct type of interest.
1461 //    gcLayout        - (optional, default nullptr) - a BYTE pointer, allocated by the caller,
1462 //                      into which the gcLayout will be written.
1463 //    pNumGCVars      - (optional, default nullptr) - if non-null, a pointer to an unsigned,
1464 //                      which will be set to the number of GC fields in the struct.
1465 //    pSimdBaseType   - (optional, default nullptr) - if non-null, and the struct is a SIMD
1466 //                      type, set to the SIMD base type
1467 //
1468 // Return Value:
1469 //    The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*).
1470 //    The gcLayout will be returned using the pointers provided by the caller, if non-null.
1471 //    It may also modify the compFloatingPointUsed flag if the type is a SIMD type.
1472 //
1473 // Assumptions:
1474 //    The caller must set gcLayout to nullptr OR ensure that it is large enough
1475 //    (see ICorStaticInfo::getClassGClayout in corinfo.h).
1476 //
1477 // Notes:
1478 //    Normalizing the type involves examining the struct type to determine if it should
1479 //    be modified to one that is handled specially by the JIT, possibly being a candidate
1480 //    for full enregistration, e.g. TYP_SIMD16.
1481
1482 var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
1483                                       BYTE*                gcLayout,
1484                                       unsigned*            pNumGCVars,
1485                                       var_types*           pSimdBaseType)
1486 {
1487     assert(structHnd != NO_CLASS_HANDLE);
1488
1489     const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd);
1490     var_types   structType  = TYP_STRUCT;
1491
1492     // On CoreCLR the check for GC pointers includes a "may" to account for the special
1493     // ByRef-like span structs; CORINFO_FLG_CONTAINS_STACK_PTR is the bit that marks them.
1494     // When that flag is set the struct contains a ByRef that could point to either GC memory
1495     // or native memory.
1496     const bool mayContainGCPtrs =
1497         ((structFlags & CORINFO_FLG_CONTAINS_STACK_PTR) != 0 || ((structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0));
1498
1499 #ifdef FEATURE_SIMD
1500     // Check to see if this is a SIMD type.
1501     if (featureSIMD && !mayContainGCPtrs)
1502     {
1503         unsigned originalSize = info.compCompHnd->getClassSize(structHnd);
1504
1505         if ((originalSize >= minSIMDStructBytes()) && (originalSize <= maxSIMDStructBytes()))
1506         {
1507             unsigned int sizeBytes;
1508             var_types    simdBaseType = getBaseTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
1509             if (simdBaseType != TYP_UNKNOWN)
1510             {
1511                 assert(sizeBytes == originalSize);
1512                 structType = getSIMDTypeForSize(sizeBytes);
1513                 if (pSimdBaseType != nullptr)
1514                 {
1515                     *pSimdBaseType = simdBaseType;
1516                 }
1517 #ifdef _TARGET_AMD64_
1518                 // Amd64: also indicate that we use floating point registers
1519                 compFloatingPointUsed = true;
1520 #endif
1521             }
1522         }
1523     }
1524 #endif // FEATURE_SIMD
1525
1526     // Fetch GC layout info if requested
1527     if (gcLayout != nullptr)
1528     {
1529         unsigned numGCVars = info.compCompHnd->getClassGClayout(structHnd, gcLayout);
1530
1531         // Verify that the quick test up above via the class attributes gave a
1532         // safe view of the type's GCness.
1533         //
1534         // Note there are cases where mayContainGCPtrs is true but getClassGClayout
1535         // does not report any gc fields.
1536
1537         assert(mayContainGCPtrs || (numGCVars == 0));
1538
1539         if (pNumGCVars != nullptr)
1540         {
1541             *pNumGCVars = numGCVars;
1542         }
1543     }
1544     else
1545     {
1546         // Can't safely ask for number of GC pointers without also
1547         // asking for layout.
1548         assert(pNumGCVars == nullptr);
1549     }
1550
1551     return structType;
1552 }
1553
1554 //****************************************************************************
1555 //  Given TYP_STRUCT value 'structVal', make sure it is 'canonical', that is
1556 //  it is either an OBJ or a MKREFANY node, or a node (e.g. GT_INDEX) that will be morphed.
1557 //
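//  For example (a sketch of the cases below): a bare struct-typed GT_LCL_VAR is wrapped as
//  OBJ(ADDR(LCL_VAR)), and a struct-returning GT_CALL is spilled to a temp whose address is
//  then wrapped in an OBJ.
//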
1558 GenTreePtr Compiler::impNormStructVal(GenTreePtr           structVal,
1559                                       CORINFO_CLASS_HANDLE structHnd,
1560                                       unsigned             curLevel,
1561                                       bool                 forceNormalization /*=false*/)
1562 {
1563     assert(forceNormalization || varTypeIsStruct(structVal));
1564     assert(structHnd != NO_CLASS_HANDLE);
1565     var_types structType = structVal->TypeGet();
1566     bool      makeTemp   = false;
1567     if (structType == TYP_STRUCT)
1568     {
1569         structType = impNormStructType(structHnd);
1570     }
1571     bool                 alreadyNormalized = false;
1572     GenTreeLclVarCommon* structLcl         = nullptr;
1573
1574     genTreeOps oper = structVal->OperGet();
1575     switch (oper)
1576     {
1577         // GT_RETURN and GT_MKREFANY don't capture the handle.
1578         case GT_RETURN:
1579             break;
1580         case GT_MKREFANY:
1581             alreadyNormalized = true;
1582             break;
1583
1584         case GT_CALL:
1585             structVal->gtCall.gtRetClsHnd = structHnd;
1586             makeTemp                      = true;
1587             break;
1588
1589         case GT_RET_EXPR:
1590             structVal->gtRetExpr.gtRetClsHnd = structHnd;
1591             makeTemp                         = true;
1592             break;
1593
1594         case GT_ARGPLACE:
1595             structVal->gtArgPlace.gtArgPlaceClsHnd = structHnd;
1596             break;
1597
1598         case GT_INDEX:
1599             // This will be transformed to an OBJ later.
1600             alreadyNormalized                    = true;
1601             structVal->gtIndex.gtStructElemClass = structHnd;
1602             structVal->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(structHnd);
1603             break;
1604
1605         case GT_FIELD:
1606             // Wrap it in a GT_OBJ.
1607             structVal->gtType = structType;
1608             structVal         = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1609             break;
1610
1611         case GT_LCL_VAR:
1612         case GT_LCL_FLD:
1613             structLcl = structVal->AsLclVarCommon();
1614             // Wrap it in a GT_OBJ.
1615             structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1616             __fallthrough;
1617
1618         case GT_OBJ:
1619         case GT_BLK:
1620         case GT_DYN_BLK:
1621         case GT_ASG:
1622             // These should already have the appropriate type.
1623             assert(structVal->gtType == structType);
1624             alreadyNormalized = true;
1625             break;
1626
1627         case GT_IND:
1628             assert(structVal->gtType == structType);
1629             structVal         = gtNewObjNode(structHnd, structVal->gtGetOp1());
1630             alreadyNormalized = true;
1631             break;
1632
1633 #ifdef FEATURE_SIMD
1634         case GT_SIMD:
1635             assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
1636             break;
1637 #endif // FEATURE_SIMD
1638
1639         case GT_COMMA:
1640         {
1641             // The second operand could be a block node, a GT_SIMD node, or a GT_COMMA node.
1642             GenTree* blockNode = structVal->gtOp.gtOp2;
1643             assert(blockNode->gtType == structType);
1644
1645             // Is this GT_COMMA(op1, GT_COMMA())?
1646             GenTree* parent = structVal;
1647             if (blockNode->OperGet() == GT_COMMA)
1648             {
1649                 // Find the last node in the comma chain.
1650                 do
1651                 {
1652                     assert(blockNode->gtType == structType);
1653                     parent    = blockNode;
1654                     blockNode = blockNode->gtOp.gtOp2;
1655                 } while (blockNode->OperGet() == GT_COMMA);
1656             }
1657
1658 #ifdef FEATURE_SIMD
1659             if (blockNode->OperGet() == GT_SIMD)
1660             {
1661                 parent->gtOp.gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization);
1662                 alreadyNormalized  = true;
1663             }
1664             else
1665 #endif
1666             {
1667                 assert(blockNode->OperIsBlk());
1668
1669                 // Sink the GT_COMMA below the blockNode addr.
1670                 // That is, GT_COMMA(op1, op2=blockNode) is transformed into
1671                 // blockNode(GT_COMMA(TYP_BYREF, op1, op2's op1)).
1672                 //
1673                 // In case of a chained GT_COMMA case, we sink the last
1674                 // GT_COMMA below the blockNode addr.
1675                 GenTree* blockNodeAddr = blockNode->gtOp.gtOp1;
1676                 assert(blockNodeAddr->gtType == TYP_BYREF);
1677                 GenTree* commaNode    = parent;
1678                 commaNode->gtType     = TYP_BYREF;
1679                 commaNode->gtOp.gtOp2 = blockNodeAddr;
1680                 blockNode->gtOp.gtOp1 = commaNode;
1681                 if (parent == structVal)
1682                 {
1683                     structVal = blockNode;
1684                 }
1685                 alreadyNormalized = true;
1686             }
1687         }
1688         break;
1689
1690         default:
1691             assert(!"Unexpected node in impNormStructVal()");
1692             break;
1693     }
1694     structVal->gtType  = structType;
1695     GenTree* structObj = structVal;
1696
1697     if (!alreadyNormalized || forceNormalization)
1698     {
1699         if (makeTemp)
1700         {
1701             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1702
1703             impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1704
1705             // The structVal is now the temp itself
1706
1707             structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon();
1708             // TODO-1stClassStructs: Avoid always wrapping in GT_OBJ.
1709             structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structLcl));
1710         }
1711         else if (varTypeIsStruct(structType) && !structVal->OperIsBlk())
1712         {
1713             // Wrap it in a GT_OBJ
1714             structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1715         }
1716     }
1717
1718     if (structLcl != nullptr)
1719     {
1720         // An OBJ on an ADDR(LCL_VAR) can never raise an exception
1721         // so we don't set GTF_EXCEPT here.
1722         if (!lvaIsImplicitByRefLocal(structLcl->gtLclNum))
1723         {
1724             structObj->gtFlags &= ~GTF_GLOB_REF;
1725         }
1726     }
1727     else
1728     {
1729         // In general an OBJ is an indirection and could raise an exception.
1730         structObj->gtFlags |= GTF_EXCEPT;
1731     }
1732     return (structObj);
1733 }
1734
1735 /******************************************************************************/
1736 // Given a type token, generate code that will evaluate to the correct
1737 // handle representation of that token (type handle, field handle, or method handle)
1738 //
1739 // For most cases, the handle is determined at compile-time, and the code
1740 // generated is simply an embedded handle.
1741 //
1742 // Run-time lookup is required if the enclosing method is shared between instantiations
1743 // and the token refers to formal type parameters whose instantiation is not known
1744 // at compile-time.
1745 //
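// For example (illustrative): in shared generic code, such as a method on List<T> compiled once
// for all reference-type instantiations, a token that mentions T cannot be resolved to a fixed
// handle at compile time, so a runtime dictionary lookup is generated instead.
//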
1746 GenTreePtr Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1747                                       BOOL*                   pRuntimeLookup /* = NULL */,
1748                                       BOOL                    mustRestoreHandle /* = FALSE */,
1749                                       BOOL                    importParent /* = FALSE */)
1750 {
1751     assert(!fgGlobalMorph);
1752
1753     CORINFO_GENERICHANDLE_RESULT embedInfo;
1754     info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo);
1755
1756     if (pRuntimeLookup)
1757     {
1758         *pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup;
1759     }
1760
1761     if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup)
1762     {
1763         switch (embedInfo.handleType)
1764         {
1765             case CORINFO_HANDLETYPE_CLASS:
1766                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle);
1767                 break;
1768
1769             case CORINFO_HANDLETYPE_METHOD:
1770                 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle);
1771                 break;
1772
1773             case CORINFO_HANDLETYPE_FIELD:
1774                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
1775                     info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle));
1776                 break;
1777
1778             default:
1779                 break;
1780         }
1781     }
1782
1783     return impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
1784                            embedInfo.compileTimeHandle);
1785 }
1786
1787 GenTreePtr Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1788                                      CORINFO_LOOKUP*         pLookup,
1789                                      unsigned                handleFlags,
1790                                      void*                   compileTimeHandle)
1791 {
1792     if (!pLookup->lookupKind.needsRuntimeLookup)
1793     {
1794         // No runtime lookup is required.
1795         // Access is direct or memory-indirect (of a fixed address) reference
1796
1797         CORINFO_GENERIC_HANDLE handle       = nullptr;
1798         void*                  pIndirection = nullptr;
1799         assert(pLookup->constLookup.accessType != IAT_PPVALUE);
1800
1801         if (pLookup->constLookup.accessType == IAT_VALUE)
1802         {
1803             handle = pLookup->constLookup.handle;
1804         }
1805         else if (pLookup->constLookup.accessType == IAT_PVALUE)
1806         {
1807             pIndirection = pLookup->constLookup.addr;
1808         }
1809         return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, 0, nullptr, compileTimeHandle);
1810     }
1811     else if (compIsForInlining())
1812     {
1813         // Don't import runtime lookups when inlining
1814         // Inlining has to be aborted in such a case
1815         compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1816         return nullptr;
1817     }
1818     else
1819     {
1820         // Need to use dictionary-based access which depends on the typeContext
1821         // which is only available at runtime, not at compile-time.
1822
1823         return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle);
1824     }
1825 }
1826
1827 #ifdef FEATURE_READYTORUN_COMPILER
1828 GenTreePtr Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup,
1829                                                unsigned              handleFlags,
1830                                                void*                 compileTimeHandle)
1831 {
1832     CORINFO_GENERIC_HANDLE handle       = nullptr;
1833     void*                  pIndirection = nullptr;
1834     assert(pLookup->accessType != IAT_PPVALUE);
1835
1836     if (pLookup->accessType == IAT_VALUE)
1837     {
1838         handle = pLookup->handle;
1839     }
1840     else if (pLookup->accessType == IAT_PVALUE)
1841     {
1842         pIndirection = pLookup->addr;
1843     }
1844     return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, 0, nullptr, compileTimeHandle);
1845 }
1846
1847 GenTreePtr Compiler::impReadyToRunHelperToTree(
1848     CORINFO_RESOLVED_TOKEN* pResolvedToken,
1849     CorInfoHelpFunc         helper,
1850     var_types               type,
1851     GenTreeArgList*         args /* =NULL*/,
1852     CORINFO_LOOKUP_KIND*    pGenericLookupKind /* =NULL. Only used with generics */)
1853 {
1854     CORINFO_CONST_LOOKUP lookup;
1855 #if COR_JIT_EE_VERSION > 460
1856     if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup))
1857     {
1858         return nullptr;
1859     }
1860 #else
1861     info.compCompHnd->getReadyToRunHelper(pResolvedToken, helper, &lookup);
1862 #endif
1863
1864     GenTreePtr op1 = gtNewHelperCallNode(helper, type, GTF_EXCEPT, args);
1865
1866     op1->gtCall.setEntryPoint(lookup);
1867
1868     return op1;
1869 }
1870 #endif
1871
1872 GenTreePtr Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
1873 {
1874     GenTreePtr op1 = nullptr;
1875
1876     switch (pCallInfo->kind)
1877     {
1878         case CORINFO_CALL:
1879             op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod);
1880
1881 #ifdef FEATURE_READYTORUN_COMPILER
1882             if (opts.IsReadyToRun())
1883             {
1884                 op1->gtFptrVal.gtEntryPoint          = pCallInfo->codePointerLookup.constLookup;
1885                 op1->gtFptrVal.gtLdftnResolvedToken  = new (this, CMK_Unknown) CORINFO_RESOLVED_TOKEN;
1886                 *op1->gtFptrVal.gtLdftnResolvedToken = *pResolvedToken;
1887             }
1888             else
1889             {
1890                 op1->gtFptrVal.gtEntryPoint.addr = nullptr;
1891             }
1892 #endif
1893             break;
1894
1895         case CORINFO_CALL_CODE_POINTER:
1896             if (compIsForInlining())
1897             {
1898                 // Don't import runtime lookups when inlining
1899                 // Inlining has to be aborted in such a case
1900                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1901                 return nullptr;
1902             }
1903
1904             op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod);
1905             break;
1906
1907         default:
1908             noway_assert(!"unknown call kind");
1909             break;
1910     }
1911
1912     return op1;
1913 }
1914
1915 //------------------------------------------------------------------------
1916 // getRuntimeContextTree: find pointer to context for runtime lookup.
1917 //
1918 // Arguments:
1919 //    kind - lookup kind.
1920 //
1921 // Return Value:
1922 //    Return GenTree pointer to generic shared context.
1923 //
1924 // Notes:
1925 //    Reports about generic context using.
1926
1927 GenTreePtr Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind)
1928 {
1929     GenTreePtr ctxTree = nullptr;
1930
1931     // Collectible types require that, for shared generic code, if we use the generic context parameter
1932     // we report it. (This is a conservative approach; we could detect some cases, particularly when the
1933     // context parameter is 'this', where we don't need the eager reporting logic.)
1934     lvaGenericsContextUsed = true;
1935
1936     if (kind == CORINFO_LOOKUP_THISOBJ)
1937     {
1938         // this Object
1939         ctxTree = gtNewLclvNode(info.compThisArg, TYP_REF);
1940
1941         // Vtable pointer of this object
1942         ctxTree = gtNewOperNode(GT_IND, TYP_I_IMPL, ctxTree);
1943         ctxTree->gtFlags |= GTF_EXCEPT; // Null-pointer exception
1944         ctxTree->gtFlags |= GTF_IND_INVARIANT;
1945     }
1946     else
1947     {
1948         assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM);
1949
1950         ctxTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); // Exact method descriptor as passed in as last arg
1951     }
1952     return ctxTree;
1953 }
1954
1955 /*****************************************************************************/
1956 /* Import a dictionary lookup to access a handle in code shared between
1957    generic instantiations.
1958    The lookup depends on the typeContext which is only available at
1959    runtime, and not at compile-time.
1960    pLookup->token1 and pLookup->token2 specify the handle that is needed.
1961    The cases are:
1962
1963    1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the
1964       instantiation-specific handle, and the tokens to look up the handle.
1965    2. pLookup->indirections != CORINFO_USEHELPER :
1966       2a. pLookup->testForNull == false : Dereference the instantiation-specific handle
1967           to get the handle.
1968       2b. pLookup->testForNull == true : Dereference the instantiation-specific handle.
1969           If it is non-NULL, it is the handle required. Else, call a helper
1970           to lookup the handle.
1971  */
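/* A rough sketch of the tree built for case 2b (illustrative, not the exact node shapes):

       handle = *( *(ctx + offsets[0]) + offsets[1] );        // the repeated indirections
       result = (handle != 0) ? handle : HELPER(ctx, token);  // fall back to the helper on null
 */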
1972
1973 GenTreePtr Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1974                                             CORINFO_LOOKUP*         pLookup,
1975                                             void*                   compileTimeHandle)
1976 {
1977
1978     // This method can only be called from the importer instance of the Compiler.
1979     // In other words, it cannot be called by the Compiler instance created for the inlinee.
1980     assert(!compIsForInlining());
1981
1982     GenTreePtr ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);
1983
1984 #ifdef FEATURE_READYTORUN_COMPILER
1985     if (opts.IsReadyToRun())
1986     {
1987         return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
1988                                          gtNewArgList(ctxTree), &pLookup->lookupKind);
1989     }
1990 #endif
1991
1992     CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;
1993     // It's available only via the run-time helper function
1994     if (pRuntimeLookup->indirections == CORINFO_USEHELPER)
1995     {
1996         GenTreeArgList* helperArgs =
1997             gtNewArgList(ctxTree, gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, 0,
1998                                                       nullptr, compileTimeHandle));
1999
2000         return gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, GTF_EXCEPT, helperArgs);
2001     }
2002
2003     // Slot pointer
2004     GenTreePtr slotPtrTree = ctxTree;
2005
2006     if (pRuntimeLookup->testForNull)
2007     {
2008         slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2009                                    nullptr DEBUGARG("impRuntimeLookup slot"));
2010     }
2011
2012     // Apply the repeated indirections
2013     for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
2014     {
2015         if (i != 0)
2016         {
2017             slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2018             slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2019             slotPtrTree->gtFlags |= GTF_IND_INVARIANT;
2020         }
2021         if (pRuntimeLookup->offsets[i] != 0)
2022         {
2023             slotPtrTree =
2024                 gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
2025         }
2026     }
2027
2028     // No null test required
2029     if (!pRuntimeLookup->testForNull)
2030     {
2031         if (pRuntimeLookup->indirections == 0)
2032         {
2033             return slotPtrTree;
2034         }
2035
2036         slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2037         slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2038
2039         if (!pRuntimeLookup->testForFixup)
2040         {
2041             return slotPtrTree;
2042         }
2043
2044         impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0"));
2045
2046         GenTreePtr op1 = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2047                                       nullptr DEBUGARG("impRuntimeLookup test"));
2048         op1 = impImplicitIorI4Cast(op1, TYP_INT); // downcast the pointer to a TYP_INT on 64-bit targets
2049
2050         // Use a GT_AND to check for the lowest bit and indirect if it is set
2051         GenTreePtr testTree = gtNewOperNode(GT_AND, TYP_INT, op1, gtNewIconNode(1));
2052         GenTreePtr relop    = gtNewOperNode(GT_EQ, TYP_INT, testTree, gtNewIconNode(0));
2053         relop->gtFlags |= GTF_RELOP_QMARK;
2054
2055         op1 = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2056                            nullptr DEBUGARG("impRuntimeLookup indir"));
2057         op1 = gtNewOperNode(GT_ADD, TYP_I_IMPL, op1, gtNewIconNode(-1, TYP_I_IMPL)); // subtract 1 from the pointer
2058         GenTreePtr indirTree = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
2059         GenTreePtr colon     = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL, slotPtrTree, indirTree);
2060
2061         GenTreePtr qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2062
2063         unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark0"));
2064         impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2065         return gtNewLclvNode(tmp, TYP_I_IMPL);
2066     }
2067
2068     assert(pRuntimeLookup->indirections != 0);
2069
2070     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1"));
2071
2072     // Extract the handle
2073     GenTreePtr handle = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2074     handle->gtFlags |= GTF_IND_NONFAULTING;
2075
2076     GenTreePtr handleCopy = impCloneExpr(handle, &handle, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2077                                          nullptr DEBUGARG("impRuntimeLookup typehandle"));
2078
2079     // Call to helper
2080     GenTreeArgList* helperArgs =
2081         gtNewArgList(ctxTree, gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, 0, nullptr,
2082                                                   compileTimeHandle));
2083     GenTreePtr helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, GTF_EXCEPT, helperArgs);
2084
2085     // Check for null and possibly call helper
2086     GenTreePtr relop = gtNewOperNode(GT_NE, TYP_INT, handle, gtNewIconNode(0, TYP_I_IMPL));
2087     relop->gtFlags |= GTF_RELOP_QMARK;
2088
2089     GenTreePtr colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL,
2090                                                          gtNewNothingNode(), // do nothing if nonnull
2091                                                          helperCall);
2092
2093     GenTreePtr qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2094
2095     unsigned tmp;
2096     if (handleCopy->IsLocal())
2097     {
2098         tmp = handleCopy->gtLclVarCommon.gtLclNum;
2099     }
2100     else
2101     {
2102         tmp = lvaGrabTemp(true DEBUGARG("spilling QMark1"));
2103     }
2104
2105     impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2106     return gtNewLclvNode(tmp, TYP_I_IMPL);
2107 }
2108
2109 /******************************************************************************
2110  *  Spills the stack at verCurrentState.esStack[level] and replaces it with a temp.
2111  *  If tnum!=BAD_VAR_NUM, the temp var used to replace the tree is tnum,
2112  *     else, grab a new temp.
2113  *  For structs (which can be pushed on the stack using obj, etc),
2114  *  special handling is needed
2115  */
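/* A sketch of the effect for the entry at 'level' (illustrative):

       tmpN = <tree at esStack[level].val>;      // appended to the statement list
       esStack[level].val = LCL_VAR tmpN;        // the stack entry now refers to the temp
 */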
2116
2117 struct RecursiveGuard
2118 {
2119 public:
2120     RecursiveGuard()
2121     {
2122         m_pAddress = nullptr;
2123     }
2124
2125     ~RecursiveGuard()
2126     {
2127         if (m_pAddress)
2128         {
2129             *m_pAddress = false;
2130         }
2131     }
2132
2133     void Init(bool* pAddress, bool bInitialize)
2134     {
2135         assert(pAddress && *pAddress == false && "Recursive guard violation");
2136         m_pAddress = pAddress;
2137
2138         if (bInitialize)
2139         {
2140             *m_pAddress = true;
2141         }
2142     }
2143
2144 protected:
2145     bool* m_pAddress;
2146 };
2147
2148 bool Compiler::impSpillStackEntry(unsigned level,
2149                                   unsigned tnum
2150 #ifdef DEBUG
2151                                   ,
2152                                   bool        bAssertOnRecursion,
2153                                   const char* reason
2154 #endif
2155                                   )
2156 {
2157
2158 #ifdef DEBUG
2159     RecursiveGuard guard;
2160     guard.Init(&impNestedStackSpill, bAssertOnRecursion);
2161 #endif
2162
2163     GenTreePtr tree = verCurrentState.esStack[level].val;
2164
2165     /* Allocate a temp if we haven't been asked to use a particular one */
2166
2167     if (tiVerificationNeeded)
2168     {
2169         // Ignore bad temp requests (they will happen with bad code and will be
2170         // caught when importing the destination block)
2171         if ((tnum != BAD_VAR_NUM && tnum >= lvaCount) && verNeedsVerification())
2172         {
2173             return false;
2174         }
2175     }
2176     else
2177     {
2178         if (tnum != BAD_VAR_NUM && (tnum >= lvaCount))
2179         {
2180             return false;
2181         }
2182     }
2183
2184     if (tnum == BAD_VAR_NUM)
2185     {
2186         tnum = lvaGrabTemp(true DEBUGARG(reason));
2187     }
2188     else if (tiVerificationNeeded && lvaTable[tnum].TypeGet() != TYP_UNDEF)
2189     {
2190         // if verification is needed and tnum's type is incompatible with
2191         // the type on the stack, we grab a new temp. This is safe since
2192         // we will throw a verification exception in the dest block.
2193
2194         var_types valTyp = tree->TypeGet();
2195         var_types dstTyp = lvaTable[tnum].TypeGet();
2196
2197         // if the two types are different, we return. This will only happen with bad code and will
2198         // be caught when importing the destination block. We still allow int/byref and float/double differences.
2199         if ((genActualType(valTyp) != genActualType(dstTyp)) &&
2200             !(
2201 #ifndef _TARGET_64BIT_
2202                 (valTyp == TYP_I_IMPL && dstTyp == TYP_BYREF) || (valTyp == TYP_BYREF && dstTyp == TYP_I_IMPL) ||
2203 #endif // !_TARGET_64BIT_
2204                 (varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp))))
2205         {
2206             if (verNeedsVerification())
2207             {
2208                 return false;
2209             }
2210         }
2211     }
2212
2213     /* Assign the spilled entry to the temp */
2214     impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level);
2215
2216     // The tree type may be modified by impAssignTempGen, so use the type of the lclVar.
2217     var_types  type                    = genActualType(lvaTable[tnum].TypeGet());
2218     GenTreePtr temp                    = gtNewLclvNode(tnum, type);
2219     verCurrentState.esStack[level].val = temp;
2220
2221     return true;
2222 }
2223
2224 /*****************************************************************************
2225  *
2226  *  Ensure that the stack has only spilled values
2227  */
2228
2229 void Compiler::impSpillStackEnsure(bool spillLeaves)
2230 {
2231     assert(!spillLeaves || opts.compDbgCode);
2232
2233     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2234     {
2235         GenTreePtr tree = verCurrentState.esStack[level].val;
2236
2237         if (!spillLeaves && tree->OperIsLeaf())
2238         {
2239             continue;
2240         }
2241
2242         // Temps introduced by the importer itself don't need to be spilled
2243
2244         bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) && (tree->gtLclVarCommon.gtLclNum >= info.compLocalsCount);
2245
2246         if (isTempLcl)
2247         {
2248             continue;
2249         }
2250
2251         impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure"));
2252     }
2253 }
2254
2255 void Compiler::impSpillEvalStack()
2256 {
2257     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2258     {
2259         impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack"));
2260     }
2261 }
2262
2263 /*****************************************************************************
2264  *
2265  *  If the stack contains any trees with side effects in them, assign those
2266  *  trees to temps and append the assignments to the statement list.
2267  *  On return the stack is guaranteed to be empty.
2268  */
2269
2270 inline void Compiler::impEvalSideEffects()
2271 {
2272     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects"));
2273     verCurrentState.esStackDepth = 0;
2274 }
2275
2276 /*****************************************************************************
2277  *
2278  *  If the stack contains any trees with side effects in them, assign those
2279  *  trees to temps and replace them on the stack with refs to their temps.
2280  *  [0..chkLevel) is the portion of the stack which will be checked and spilled.
2281  */
2282
2283 inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason))
2284 {
2285     assert(chkLevel != (unsigned)CHECK_SPILL_NONE);
2286
2287     /* Before we make any appends to the tree list we must spill the
2288      * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */
2289
2290     impSpillSpecialSideEff();
2291
2292     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
2293     {
2294         chkLevel = verCurrentState.esStackDepth;
2295     }
2296
2297     assert(chkLevel <= verCurrentState.esStackDepth);
2298
2299     unsigned spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT;
2300
2301     for (unsigned i = 0; i < chkLevel; i++)
2302     {
2303         GenTreePtr tree = verCurrentState.esStack[i].val;
2304
2305         GenTreePtr lclVarTree;
2306
2307         if ((tree->gtFlags & spillFlags) != 0 ||
2308             (spillGlobEffects &&                        // Only consider the following when spillGlobEffects == TRUE
2309              !impIsAddressInLocal(tree, &lclVarTree) && // No need to spill the GT_ADDR node on a local.
2310              gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or
2311                                            // lvAddrTaken flag.
2312         {
2313             impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason));
2314         }
2315     }
2316 }
2317
2318 /*****************************************************************************
2319  *
2320  *  If the stack contains any trees with special side effects in them, assign
2321  *  those trees to temps and replace them on the stack with refs to their temps.
2322  */
2323
2324 inline void Compiler::impSpillSpecialSideEff()
2325 {
2326     // Only exception objects need to be carefully handled
2327
2328     if (!compCurBB->bbCatchTyp)
2329     {
2330         return;
2331     }
2332
2333     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2334     {
2335         GenTreePtr tree = verCurrentState.esStack[level].val;
2336         // If the subtree contains an exception object (GT_CATCH_ARG), spill this stack entry.
2337         if (gtHasCatchArg(tree))
2338         {
2339             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff"));
2340         }
2341     }
2342 }
2343
2344 /*****************************************************************************
2345  *
2346  *  Spill all stack references to value classes (TYP_STRUCT nodes)
2347  */
2348
2349 void Compiler::impSpillValueClasses()
2350 {
2351     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2352     {
2353         GenTreePtr tree = verCurrentState.esStack[level].val;
2354
2355         if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT)
2356         {
2357             // Tree walk was aborted, which means that we found a
2358             // value class on the stack.  Need to spill that
2359             // stack entry.
2360
2361             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses"));
2362         }
2363     }
2364 }
2365
2366 /*****************************************************************************
2367  *
2368  *  Callback that checks if a tree node is TYP_STRUCT
2369  */
2370
2371 Compiler::fgWalkResult Compiler::impFindValueClasses(GenTreePtr* pTree, fgWalkData* data)
2372 {
2373     fgWalkResult walkResult = WALK_CONTINUE;
2374
2375     if ((*pTree)->gtType == TYP_STRUCT)
2376     {
2377         // Abort the walk and indicate that we found a value class
2378
2379         walkResult = WALK_ABORT;
2380     }
2381
2382     return walkResult;
2383 }
2384
2385 /*****************************************************************************
2386  *
2387  *  If the stack contains any trees with references to local #lclNum, assign
2388  *  those trees to temps and replace them on the stack with refs to
2389  *  their temps.
2390  */
2391
2392 void Compiler::impSpillLclRefs(ssize_t lclNum)
2393 {
2394     /* Before we make any appends to the tree list we must spill the
2395      * "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */
2396
2397     impSpillSpecialSideEff();
2398
2399     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2400     {
2401         GenTreePtr tree = verCurrentState.esStack[level].val;
2402
2403         /* If the tree may throw an exception, and the block has a handler,
2404            then we need to spill assignments to the local if the local is
2405            live on entry to the handler.
2406            Just spill 'em all without considering the liveness */
2407
2408         bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT));
2409
2410         /* Skip the tree if it doesn't have an affected reference,
2411            unless xcptnCaught */
2412
2413         if (xcptnCaught || gtHasRef(tree, lclNum, false))
2414         {
2415             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs"));
2416         }
2417     }
2418 }
2419
2420 /*****************************************************************************
2421  *
2422  *  Push catch arg onto the stack.
2423  *  If there are jumps to the beginning of the handler, insert basic block
2424  *  and spill catch arg to a temp. Update the handler block if necessary.
2425  *
2426  *  Returns the basic block of the actual handler.
2427  */
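/* A sketch of the spill case (illustrative): when several flow edges target the handler's first
   block, a new block containing 'tmpN = GT_CATCH_ARG' is inserted before it, and the handler
   then starts with 'LCL_VAR tmpN' pushed on the stack instead of the raw catch arg node.
 */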
2428
2429 BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd)
2430 {
2431     // Do not inject the basic block twice on reimport. This should be
2432     // hit only under JIT stress. See if the block is the one we injected.
2433     // Note that EH canonicalization can inject internal blocks here. We might
2434     // be able to re-use such a block (but we don't, right now).
2435     if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET)) ==
2436         (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET))
2437     {
2438         GenTreePtr tree = hndBlk->bbTreeList;
2439
2440         if (tree != nullptr && tree->gtOper == GT_STMT)
2441         {
2442             tree = tree->gtStmt.gtStmtExpr;
2443             assert(tree != nullptr);
2444
2445             if ((tree->gtOper == GT_ASG) && (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
2446                 (tree->gtOp.gtOp2->gtOper == GT_CATCH_ARG))
2447             {
2448                 tree = gtNewLclvNode(tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum, TYP_REF);
2449
2450                 impPushOnStack(tree, typeInfo(TI_REF, clsHnd));
2451
2452                 return hndBlk->bbNext;
2453             }
2454         }
2455
2456         // If we get here, it must have been some other kind of internal block. It's possible that
2457         // someone prepended something to our injected block, but that's unlikely.
2458     }
2459
2460     /* Push the exception address value on the stack */
2461     GenTreePtr arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF);
2462
2463     /* Mark the node as having a side-effect - i.e. cannot be
2464      * moved around since it is tied to a fixed location (EAX) */
2465     arg->gtFlags |= GTF_ORDER_SIDEEFF;
2466
2467     /* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */
2468     if (hndBlk->bbRefs > 1 || compStressCompile(STRESS_CATCH_ARG, 5))
2469     {
2470         if (hndBlk->bbRefs == 1)
2471         {
2472             hndBlk->bbRefs++;
2473         }
2474
2475         /* Create extra basic block for the spill */
2476         BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true);
2477         newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET;
2478         newBlk->setBBWeight(hndBlk->bbWeight);
2479         newBlk->bbCodeOffs = hndBlk->bbCodeOffs;
2480
2481         /* Account for the new link we are about to create */
2482         hndBlk->bbRefs++;
2483
2484         /* Spill into a temp */
2485         unsigned tempNum         = lvaGrabTemp(false DEBUGARG("SpillCatchArg"));
2486         lvaTable[tempNum].lvType = TYP_REF;
2487         arg                      = gtNewTempAssign(tempNum, arg);
2488
2489         hndBlk->bbStkTempsIn = tempNum;
2490
2491         /* Report the debug info. impImportBlockCode won't treat
2492          * the actual handler as an exception block and thus won't do it for us. */
2493         if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
2494         {
2495             impCurStmtOffs = newBlk->bbCodeOffs | IL_OFFSETX_STKBIT;
2496             arg            = gtNewStmt(arg, impCurStmtOffs);
2497         }
2498
2499         fgInsertStmtAtEnd(newBlk, arg);
2500
2501         arg = gtNewLclvNode(tempNum, TYP_REF);
2502     }
2503
2504     impPushOnStack(arg, typeInfo(TI_REF, clsHnd));
2505
2506     return hndBlk;
2507 }
2508
2509 /*****************************************************************************
2510  *
2511  *  Given a tree, clone it. *pClone is set to the cloned tree.
2512  *  Returns the original tree if the cloning was easy,
2513  *   else returns the temp to which the tree had to be spilled.
2514  *  If the tree has side-effects, it will be spilled to a temp.
2515  */
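/* A usage sketch (variable names are illustrative): to use 'addr' twice without duplicating
   its side effects,

       GenTreePtr addrUse2;
       addr = impCloneExpr(addr, &addrUse2, structHnd, curLevel, pAfterStmt DEBUGARG("example clone"));

   after which both 'addr' and 'addrUse2' can be appended independently.
 */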
2516
2517 GenTreePtr Compiler::impCloneExpr(GenTreePtr           tree,
2518                                   GenTreePtr*          pClone,
2519                                   CORINFO_CLASS_HANDLE structHnd,
2520                                   unsigned             curLevel,
2521                                   GenTreePtr* pAfterStmt DEBUGARG(const char* reason))
2522 {
2523     if (!(tree->gtFlags & GTF_GLOB_EFFECT))
2524     {
2525         GenTreePtr clone = gtClone(tree, true);
2526
2527         if (clone)
2528         {
2529             *pClone = clone;
2530             return tree;
2531         }
2532     }
2533
2534     /* Store the operand in a temp and return the temp */
2535
2536     unsigned temp = lvaGrabTemp(true DEBUGARG(reason));
2537
2538     // impAssignTempGen() may change tree->gtType to TYP_VOID for calls which
2539     // return a struct type. It also may modify the struct type to a more
2540     // specialized type (e.g. a SIMD type).  So we will get the type from
2541     // the lclVar AFTER calling impAssignTempGen().
2542
2543     impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtOffs);
2544     var_types type = genActualType(lvaTable[temp].TypeGet());
2545
2546     *pClone = gtNewLclvNode(temp, type);
2547     return gtNewLclvNode(temp, type);
2548 }
2549
2550 /*****************************************************************************
2551  * Remember the IL offset (including stack-empty info) for the trees we will
2552  * generate now.
2553  */
2554
2555 inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs)
2556 {
2557     if (compIsForInlining())
2558     {
2559         GenTreePtr callStmt = impInlineInfo->iciStmt;
2560         assert(callStmt->gtOper == GT_STMT);
2561         impCurStmtOffs = callStmt->gtStmt.gtStmtILoffsx;
2562     }
2563     else
2564     {
2565         assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2566         IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2567         impCurStmtOffs    = offs | stkBit;
2568     }
2569 }
2570
2571 /*****************************************************************************
2572  * Returns current IL offset with stack-empty and call-instruction info incorporated
2573  */
2574 inline IL_OFFSETX Compiler::impCurILOffset(IL_OFFSET offs, bool callInstruction)
2575 {
2576     if (compIsForInlining())
2577     {
2578         return BAD_IL_OFFSET;
2579     }
2580     else
2581     {
2582         assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2583         IL_OFFSETX stkBit             = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2584         IL_OFFSETX callInstructionBit = callInstruction ? IL_OFFSETX_CALLINSTRUCTIONBIT : 0;
2585         return offs | stkBit | callInstructionBit;
2586     }
2587 }
2588
2589 /*****************************************************************************
2590  *
2591  *  Remember the instr offset for the statements
2592  *
2593  *  When we do impAppendTree(tree), we can't set tree->gtStmtLastILoffs to
2594  *  impCurOpcOffs, if the append was done because of a partial stack spill,
2595  *  as some of the trees corresponding to code up to impCurOpcOffs might
2596  *  still be sitting on the stack.
2597  *  So we delay marking of gtStmtLastILoffs until impNoteLastILoffs().
2598  *  This should be called when an opcode finally/explicitly causes
2599  *  impAppendTree(tree) to be called (as opposed to being called because of
2600  *  a spill caused by the opcode)
2601  */
2602
2603 #ifdef DEBUG
2604
2605 void Compiler::impNoteLastILoffs()
2606 {
2607     if (impLastILoffsStmt == nullptr)
2608     {
2609         // We should have added a statement for the current basic block
2610         // Is this assert correct?
2611
2612         assert(impTreeLast);
2613         assert(impTreeLast->gtOper == GT_STMT);
2614
2615         impTreeLast->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2616     }
2617     else
2618     {
2619         impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2620         impLastILoffsStmt                          = nullptr;
2621     }
2622 }
2623
2624 #endif // DEBUG
2625
2626 /*****************************************************************************
2627  * We don't create any GenTree (excluding spills) for a branch.
2628  * For debugging info, we need a placeholder so that we can note
2629  * the IL offset in gtStmt.gtStmtOffs. So append an empty statement.
2630  */
2631
2632 void Compiler::impNoteBranchOffs()
2633 {
2634     if (opts.compDbgCode)
2635     {
2636         impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
2637     }
2638 }
2639
2640 /*****************************************************************************
2641  * Locate the next stmt boundary for which we need to record info.
2642  * We will have to spill the stack at such boundaries if it is not
2643  * already empty.
2644  * Returns the next stmt boundary (after the start of the block)
2645  */
2646
2647 unsigned Compiler::impInitBlockLineInfo()
2648 {
2649     /* Assume the block does not correspond with any IL offset. This prevents
2650        us from reporting extra offsets. Extra mappings can cause confusing
2651        stepping, especially if the extra mapping is a jump-target, and the
2652        debugger does not ignore extra mappings, but instead rewinds to the
2653        nearest known offset */
2654
2655     impCurStmtOffsSet(BAD_IL_OFFSET);
2656
2657     if (compIsForInlining())
2658     {
2659         return ~0;
2660     }
2661
2662     IL_OFFSET blockOffs = compCurBB->bbCodeOffs;
2663
2664     if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES))
2665     {
2666         impCurStmtOffsSet(blockOffs);
2667     }
2668
2669     if (false && (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES))
2670     {
2671         impCurStmtOffsSet(blockOffs);
2672     }
2673
2674     /* Always report IL offset 0 or some tests get confused.
2675        Probably a good idea anyway. */
2676
2677     if (blockOffs == 0)
2678     {
2679         impCurStmtOffsSet(blockOffs);
2680     }
2681
2682     if (!info.compStmtOffsetsCount)
2683     {
2684         return ~0;
2685     }
2686
2687     /* Find the lowest explicit stmt boundary within the block */
2688
2689     /* Start looking at an entry that is based on our instr offset */
2690
2691     unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize;
2692
2693     if (index >= info.compStmtOffsetsCount)
2694     {
2695         index = info.compStmtOffsetsCount - 1;
2696     }
2697
2698     /* If we've guessed too far, back up */
2699
2700     while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs)
2701     {
2702         index--;
2703     }
2704
2705     /* If we guessed short, advance ahead */
2706
2707     while (info.compStmtOffsets[index] < blockOffs)
2708     {
2709         index++;
2710
2711         if (index == info.compStmtOffsetsCount)
2712         {
2713             return info.compStmtOffsetsCount;
2714         }
2715     }
2716
2717     assert(index < info.compStmtOffsetsCount);
2718
2719     if (info.compStmtOffsets[index] == blockOffs)
2720     {
2721         /* There is an explicit boundary for the start of this basic block.
2722            So we will start with bbCodeOffs. Else we will wait until we
2723            get to the next explicit boundary */
2724
2725         impCurStmtOffsSet(blockOffs);
2726
2727         index++;
2728     }
2729
2730     return index;
2731 }
2732
2733 /*****************************************************************************/
2734
2735 static inline bool impOpcodeIsCallOpcode(OPCODE opcode)
2736 {
2737     switch (opcode)
2738     {
2739         case CEE_CALL:
2740         case CEE_CALLI:
2741         case CEE_CALLVIRT:
2742             return true;
2743
2744         default:
2745             return false;
2746     }
2747 }
2748
2749 /*****************************************************************************/
2750
2751 static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode)
2752 {
2753     switch (opcode)
2754     {
2755         case CEE_CALL:
2756         case CEE_CALLI:
2757         case CEE_CALLVIRT:
2758         case CEE_JMP:
2759         case CEE_NEWOBJ:
2760         case CEE_NEWARR:
2761             return true;
2762
2763         default:
2764             return false;
2765     }
2766 }
2767
2768 /*****************************************************************************/
2769
2770 // One might think it is worth caching these values, but results indicate
2771 // that it isn't.
2772 // In addition, caching them causes SuperPMI to be unable to completely
2773 // encapsulate an individual method context.
2774 CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass()
2775 {
2776     CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
2777     assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr);
2778     return refAnyClass;
2779 }
2780
2781 CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass()
2782 {
2783     CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE);
2784     assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr);
2785     return typeHandleClass;
2786 }
2787
2788 CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle()
2789 {
2790     CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE);
2791     assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr);
2792     return argIteratorClass;
2793 }
2794
2795 CORINFO_CLASS_HANDLE Compiler::impGetStringClass()
2796 {
2797     CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING);
2798     assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr);
2799     return stringClass;
2800 }
2801
2802 CORINFO_CLASS_HANDLE Compiler::impGetObjectClass()
2803 {
2804     CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT);
2805     assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr);
2806     return objectClass;
2807 }
2808
2809 /*****************************************************************************
2810  *  "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we
2811  *  set its type to TYP_BYREF when we create it. We only know whether it can be
2812  *  changed to TYP_I_IMPL at the point where we use it.
2813  */
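// For example (illustrative): an address produced by ldloca is created as TYP_BYREF; if it is
// then consumed purely as a native int (say, compared against or stored into a TYP_I_IMPL
// value), the helper below simply retypes the address node to TYP_I_IMPL.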
2814
2815 /* static */
2816 void Compiler::impBashVarAddrsToI(GenTreePtr tree1, GenTreePtr tree2)
2817 {
2818     if (tree1->IsVarAddr())
2819     {
2820         tree1->gtType = TYP_I_IMPL;
2821     }
2822
2823     if (tree2 && tree2->IsVarAddr())
2824     {
2825         tree2->gtType = TYP_I_IMPL;
2826     }
2827 }
2828
2829 /*****************************************************************************
2830  *  TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want
2831  *  to make that an explicit cast in our trees, so any implicit casts that
2832  *  exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are
2833  *  turned into explicit casts here.
2834  *  We also allow an implicit conversion of an ldnull into a TYP_I_IMPL(0)
2835  */
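// A minimal example of the 64-bit case handled below (illustrative): IL that adds a 32-bit
// constant index to a native-sized pointer leaves the int operand as TYP_INT; here we either
// retype a constant directly to TYP_I_IMPL or wrap the operand in an explicit GT_CAST to
// TYP_I_IMPL so downstream phases always see a consistent width.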
2836
2837 GenTreePtr Compiler::impImplicitIorI4Cast(GenTreePtr tree, var_types dstTyp)
2838 {
2839     var_types currType   = genActualType(tree->gtType);
2840     var_types wantedType = genActualType(dstTyp);
2841
2842     if (wantedType != currType)
2843     {
2844         // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
2845         if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp))
2846         {
2847             if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->gtIntCon.gtIconVal == 0)))
2848             {
2849                 tree->gtType = TYP_I_IMPL;
2850             }
2851         }
2852 #ifdef _TARGET_64BIT_
2853         else if (varTypeIsI(wantedType) && (currType == TYP_INT))
2854         {
2855             // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF
2856             tree = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
2857         }
2858         else if ((wantedType == TYP_INT) && varTypeIsI(currType))
2859         {
2860             // Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT
2861             tree = gtNewCastNode(TYP_INT, tree, TYP_INT);
2862         }
2863 #endif // _TARGET_64BIT_
2864     }
2865
2866     return tree;
2867 }
2868
2869 /*****************************************************************************
2870  *  TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases,
2871  *  but we want to make that an explicit cast in our trees, so any implicit casts
2872  *  that exist in the IL are turned into explicit casts here.
2873  */
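// For instance (illustrative): IL may pass a float value where the callee's signature expects
// double without an explicit conv.r8; on the non-legacy backend we insert the missing GT_CAST
// here so float/double widths are always explicit in the trees.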
2874
2875 GenTreePtr Compiler::impImplicitR4orR8Cast(GenTreePtr tree, var_types dstTyp)
2876 {
2877 #ifndef LEGACY_BACKEND
2878     if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType))
2879     {
2880         tree = gtNewCastNode(dstTyp, tree, dstTyp);
2881     }
2882 #endif // !LEGACY_BACKEND
2883
2884     return tree;
2885 }
2886
2887 //------------------------------------------------------------------------
2888 // impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray
2889 //    with a GT_COPYBLK node.
2890 //
2891 // Arguments:
2892 //    sig - The InitializeArray signature.
2893 //
2894 // Return Value:
2895 //    A pointer to the newly created GT_COPYBLK node if the replacement succeeds or
2896 //    nullptr otherwise.
2897 //
2898 // Notes:
2899 //    The function recognizes the following IL pattern:
2900 //      ldc <length> or a list of ldc <lower bound>/<length>
2901 //      newarr or newobj
2902 //      dup
2903 //      ldtoken <field handle>
2904 //      call InitializeArray
2905 //    The lower bounds need not be constant except when the array rank is 1.
2906 //    The function recognizes all kinds of arrays thus enabling a small runtime
2907 //    such as CoreRT to skip providing an implementation for InitializeArray.
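//    For example (illustrative), a C# field initializer such as
//        static readonly int[] Primes = new int[] { 2, 3, 5, 7 };
//    is typically compiled by Roslyn into exactly this newarr/dup/ldtoken/call InitializeArray
//    sequence, with the ldtoken referencing a data field on <PrivateImplementationDetails>.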
2908
2909 GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
2910 {
2911     assert(sig->numArgs == 2);
2912
2913     GenTreePtr fieldTokenNode = impStackTop(0).val;
2914     GenTreePtr arrayLocalNode = impStackTop(1).val;
2915
2916     //
2917     // Verify that the field token is known and valid.  Note that it's also
2918     // possible for the token to come from reflection, in which case we cannot do
2919     // the optimization and must therefore revert to calling the helper.  You can
2920     // see an example of this in bvt\DynIL\initarray2.exe (in Main).
2921     //
2922
2923     // Check to see if the ldtoken helper call is what we see here.
2924     if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->gtCall.gtCallType != CT_HELPER) ||
2925         (fieldTokenNode->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
2926     {
2927         return nullptr;
2928     }
2929
2930     // Strip helper call away
2931     fieldTokenNode = fieldTokenNode->gtCall.gtCallArgs->Current();
2932
2933     if (fieldTokenNode->gtOper == GT_IND)
2934     {
2935         fieldTokenNode = fieldTokenNode->gtOp.gtOp1;
2936     }
2937
2938     // Check for constant
2939     if (fieldTokenNode->gtOper != GT_CNS_INT)
2940     {
2941         return nullptr;
2942     }
2943
2944     CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->gtIntCon.gtCompileTimeHandle;
2945     if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
2946     {
2947         return nullptr;
2948     }
2949
2950     //
2951     // We need to get the number of elements in the array and the size of each element.
2952     // We verify that the newarr statement is exactly what we expect it to be.
2953     // If it's not, then we just return nullptr and don't optimize this call.
2954     //
2955
2956     //
2957     // It is possible that we don't have any statements in the block yet
2958     //
2959     if (impTreeLast->gtOper != GT_STMT)
2960     {
2961         assert(impTreeLast->gtOper == GT_BEG_STMTS);
2962         return nullptr;
2963     }
2964
2965     //
2966     // We start by looking at the last statement, making sure it's an assignment, and
2967     // that the target of the assignment is the array passed to InitializeArray.
2968     //
2969     GenTreePtr arrayAssignment = impTreeLast->gtStmt.gtStmtExpr;
2970     if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->gtOp.gtOp1->gtOper != GT_LCL_VAR) ||
2971         (arrayLocalNode->gtOper != GT_LCL_VAR) ||
2972         (arrayAssignment->gtOp.gtOp1->gtLclVarCommon.gtLclNum != arrayLocalNode->gtLclVarCommon.gtLclNum))
2973     {
2974         return nullptr;
2975     }
2976
2977     //
2978     // Make sure that the object being assigned is a helper call.
2979     //
2980
2981     GenTreePtr newArrayCall = arrayAssignment->gtOp.gtOp2;
2982     if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->gtCall.gtCallType != CT_HELPER))
2983     {
2984         return nullptr;
2985     }
2986
2987     //
2988     // Verify that it is one of the new array helpers.
2989     //
2990
2991     bool isMDArray = false;
2992
2993     if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) &&
2994         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) &&
2995         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) &&
2996         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8)
2997 #ifdef FEATURE_READYTORUN_COMPILER
2998         && newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)
2999 #endif
3000             )
3001     {
3002 #if COR_JIT_EE_VERSION > 460
3003         if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR_NONVARARG))
3004         {
3005             return nullptr;
3006         }
3007
3008         isMDArray = true;
3009 #endif
3010     }
3011
3012     CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->gtCall.compileTimeHelperArgumentHandle;
3013
3014     //
3015     // Make sure we found a compile time handle to the array
3016     //
3017
3018     if (!arrayClsHnd)
3019     {
3020         return nullptr;
3021     }
3022
3023     unsigned rank = 0;
3024     S_UINT32 numElements;
3025
3026     if (isMDArray)
3027     {
3028         rank = info.compCompHnd->getArrayRank(arrayClsHnd);
3029
3030         if (rank == 0)
3031         {
3032             return nullptr;
3033         }
3034
3035         GenTreeArgList* tokenArg = newArrayCall->gtCall.gtCallArgs;
3036         assert(tokenArg != nullptr);
3037         GenTreeArgList* numArgsArg = tokenArg->Rest();
3038         assert(numArgsArg != nullptr);
3039         GenTreeArgList* argsArg = numArgsArg->Rest();
3040         assert(argsArg != nullptr);
3041
3042         //
3043         // The number of arguments should be a constant between 1 and 64. The rank can't be 0
3044         // The number of arguments should be a constant between 1 and 64. The rank can't be 0,
3045         // so at least one length must be present, and the rank can't exceed 32, so there can
3046         // be at most 64 arguments: 32 lengths and 32 lower bounds.
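        // For example (illustrative): a rank-2 array created with explicit lower bounds passes
        // numArgs == 4 in per-dimension order (lowerBound0, length0, lowerBound1, length1),
        // which is the order the matching loop below walks them in.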
3047
3048         if ((!numArgsArg->Current()->IsCnsIntOrI()) || (numArgsArg->Current()->AsIntCon()->IconValue() < 1) ||
3049             (numArgsArg->Current()->AsIntCon()->IconValue() > 64))
3050         {
3051             return nullptr;
3052         }
3053
3054         unsigned numArgs = static_cast<unsigned>(numArgsArg->Current()->AsIntCon()->IconValue());
3055         bool     lowerBoundsSpecified;
3056
3057         if (numArgs == rank * 2)
3058         {
3059             lowerBoundsSpecified = true;
3060         }
3061         else if (numArgs == rank)
3062         {
3063             lowerBoundsSpecified = false;
3064
3065             //
3066             // If the rank is 1 and a lower bound isn't specified then the runtime creates
3067             // a SDArray. Note that even if a lower bound is specified it can be 0 and then
3068             // we get a SDArray as well, see the for loop below.
3069             //
3070
3071             if (rank == 1)
3072             {
3073                 isMDArray = false;
3074             }
3075         }
3076         else
3077         {
3078             return nullptr;
3079         }
3080
3081         //
3082         // The rank is known to be at least 1 so we can start with numElements being 1
3083         // to avoid the need to special case the first dimension.
3084         //
3085
3086         numElements = S_UINT32(1);
3087
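        // The newobj helper's arguments were spilled by the importer into consecutive INT32
        // fields of the lvaNewObjArrayArgs temp via a comma chain of assignments; the Match
        // helpers below recognize that exact tree shape so the constant bounds and lengths
        // can be read back out of it.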
3088         struct Match
3089         {
3090             static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3091             {
3092                 return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) &&
3093                        IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3094             }
3095
3096             static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3097             {
3098                 return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) &&
3099                        (tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) &&
3100                        IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3101             }
3102
3103             static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs)
3104             {
3105                 return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) &&
3106                        (tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs);
3107             }
3108
3109             static bool IsComma(GenTree* tree)
3110             {
3111                 return (tree != nullptr) && (tree->OperGet() == GT_COMMA);
3112             }
3113         };
3114
3115         unsigned argIndex = 0;
3116         GenTree* comma;
3117
3118         for (comma = argsArg->Current(); Match::IsComma(comma); comma = comma->gtGetOp2())
3119         {
3120             if (lowerBoundsSpecified)
3121             {
3122                 //
3123                 // In general lower bounds can be ignored because they're not needed to
3124                 // calculate the total number of elements. But for single dimensional arrays
3125                 // we need to know if the lower bound is 0 because in this case the runtime
3126                 // creates a SDArray and this affects the way the array data offset is calculated.
3127                 //
3128
3129                 if (rank == 1)
3130                 {
3131                     GenTree* lowerBoundAssign = comma->gtGetOp1();
3132                     assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs));
3133                     GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2();
3134
3135                     if (lowerBoundNode->IsIntegralConst(0))
3136                     {
3137                         isMDArray = false;
3138                     }
3139                 }
3140
3141                 comma = comma->gtGetOp2();
3142                 argIndex++;
3143             }
3144
3145             GenTree* lengthNodeAssign = comma->gtGetOp1();
3146             assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs));
3147             GenTree* lengthNode = lengthNodeAssign->gtGetOp2();
3148
3149             if (!lengthNode->IsCnsIntOrI())
3150             {
3151                 return nullptr;
3152             }
3153
3154             numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue());
3155             argIndex++;
3156         }
3157
3158         assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs));
3159
3160         if (argIndex != numArgs)
3161         {
3162             return nullptr;
3163         }
3164     }
3165     else
3166     {
3167         //
3168         // Make sure there are exactly two arguments:  the array class and
3169         // the number of elements.
3170         //
3171
3172         GenTreePtr arrayLengthNode;
3173
3174         GenTreeArgList* args = newArrayCall->gtCall.gtCallArgs;
3175 #ifdef FEATURE_READYTORUN_COMPILER
3176         if (newArrayCall->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1))
3177         {
3178             // Array length is 1st argument for readytorun helper
3179             arrayLengthNode = args->Current();
3180         }
3181         else
3182 #endif
3183         {
3184             // Array length is 2nd argument for regular helper
3185             arrayLengthNode = args->Rest()->Current();
3186         }
3187
3188         //
3189     // Make sure that the number of elements looks valid.
3190         //
3191         if (arrayLengthNode->gtOper != GT_CNS_INT)
3192         {
3193             return nullptr;
3194         }
3195
3196         numElements = S_SIZE_T(arrayLengthNode->gtIntCon.gtIconVal);
3197
3198         if (!info.compCompHnd->isSDArray(arrayClsHnd))
3199         {
3200             return nullptr;
3201         }
3202     }
3203
3204     CORINFO_CLASS_HANDLE elemClsHnd;
3205     var_types            elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd));
3206
3207     //
3208     // Note that genTypeSize will return zero for non-primitive types, which is exactly
3209     // what we want (size will then be 0, and we will catch this in the conditional below).
3210     // Note that we don't expect this to fail for valid binaries, so we assert in the
3211     // non-verification case (the verification case should not assert but rather correctly
3212     // handle bad binaries).  This assert is not guarding any specific invariant, but rather
3213     // saying that we don't expect this to happen, and if it is hit, we need to investigate
3214     // why.
3215     //
3216
3217     S_UINT32 elemSize(genTypeSize(elementType));
3218     S_UINT32 size = elemSize * S_UINT32(numElements);
3219
3220     if (size.IsOverflow())
3221     {
3222         return nullptr;
3223     }
3224
3225     if ((size.Value() == 0) || (varTypeIsGC(elementType)))
3226     {
3227         assert(verNeedsVerification());
3228         return nullptr;
3229     }
3230
3231     void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value());
3232     if (!initData)
3233     {
3234         return nullptr;
3235     }
3236
3237     //
3238     // At this point we are ready to commit to implementing the InitializeArray
3239     // intrinsic using a struct assignment.  Pop the arguments from the stack and
3240     // return the struct assignment node.
3241     //
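    // In rough terms (a sketch of the shape, not the exact node layout), the tree built below
    // copies blkSize bytes from the static initialization data returned by
    // getArrayInitializationData into the first element of the destination array.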
3242
3243     impPopStack();
3244     impPopStack();
3245
3246     const unsigned blkSize = size.Value();
3247     GenTreePtr     dst;
3248
3249     if (isMDArray)
3250     {
3251         unsigned dataOffset = eeGetMDArrayDataOffset(elementType, rank);
3252
3253         dst = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
3254     }
3255     else
3256     {
3257         dst = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewIndexRef(elementType, arrayLocalNode, gtNewIconNode(0)));
3258     }
3259     GenTreePtr blk     = gtNewBlockVal(dst, blkSize);
3260     GenTreePtr srcAddr = gtNewIconHandleNode((size_t)initData, GTF_ICON_STATIC_HDL);
3261     GenTreePtr src     = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
3262
3263     return gtNewBlkOpNode(blk,     // dst
3264                           src,     // src
3265                           blkSize, // size
3266                           false,   // volatil
3267                           true);   // copyBlock
3268 }
3269
3270 /*****************************************************************************/
3271 // Returns the GenTree that should be used to do the intrinsic instead of the call.
3272 // Returns NULL if an intrinsic cannot be used
3273
3274 GenTreePtr Compiler::impIntrinsic(GenTreePtr            newobjThis,
3275                                   CORINFO_CLASS_HANDLE  clsHnd,
3276                                   CORINFO_METHOD_HANDLE method,
3277                                   CORINFO_SIG_INFO*     sig,
3278                                   int                   memberRef,
3279                                   bool                  readonlyCall,
3280                                   bool                  tailCall,
3281                                   CorInfoIntrinsics*    pIntrinsicID)
3282 {
3283     bool mustExpand = false;
3284 #if COR_JIT_EE_VERSION > 460
3285     CorInfoIntrinsics intrinsicID = info.compCompHnd->getIntrinsicID(method, &mustExpand);
3286 #else
3287     CorInfoIntrinsics intrinsicID                                      = info.compCompHnd->getIntrinsicID(method);
3288 #endif
3289     *pIntrinsicID = intrinsicID;
3290
3291 #ifndef _TARGET_ARM_
3292     genTreeOps interlockedOperator;
3293 #endif
3294
3295     if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContext)
3296     {
3297         // must be done regardless of DbgCode and MinOpts
3298         return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL);
3299     }
3300 #ifdef _TARGET_64BIT_
3301     if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr)
3302     {
3303         // must be done regardless of DbgCode and MinOpts
3304         return gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL));
3305     }
3306 #else
3307     assert(intrinsicID != CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr);
3308 #endif
3309
3310     GenTreePtr retNode = nullptr;
3311
3312     //
3313     // We disable the inlining of intrinsics for MinOpts.
3314     //
3315     if (!mustExpand && (opts.compDbgCode || opts.MinOpts()))
3316     {
3317         *pIntrinsicID = CORINFO_INTRINSIC_Illegal;
3318         return retNode;
3319     }
3320
3321     // Currently we don't have CORINFO_INTRINSIC_Exp because it does not
3322     // seem to work properly for Infinity values; we don't do
3323     // CORINFO_INTRINSIC_Pow because it needs a helper that we currently don't have.
3324
3325     var_types callType = JITtype2varType(sig->retType);
3326
3327     /* First do the intrinsics which are always smaller than a call */
3328
3329     switch (intrinsicID)
3330     {
3331         GenTreePtr op1, op2;
3332
3333         case CORINFO_INTRINSIC_Sin:
3334         case CORINFO_INTRINSIC_Sqrt:
3335         case CORINFO_INTRINSIC_Abs:
3336         case CORINFO_INTRINSIC_Cos:
3337         case CORINFO_INTRINSIC_Round:
3338         case CORINFO_INTRINSIC_Cosh:
3339         case CORINFO_INTRINSIC_Sinh:
3340         case CORINFO_INTRINSIC_Tan:
3341         case CORINFO_INTRINSIC_Tanh:
3342         case CORINFO_INTRINSIC_Asin:
3343         case CORINFO_INTRINSIC_Acos:
3344         case CORINFO_INTRINSIC_Atan:
3345         case CORINFO_INTRINSIC_Atan2:
3346         case CORINFO_INTRINSIC_Log10:
3347         case CORINFO_INTRINSIC_Pow:
3348         case CORINFO_INTRINSIC_Exp:
3349         case CORINFO_INTRINSIC_Ceiling:
3350         case CORINFO_INTRINSIC_Floor:
3351
3352             // These are math intrinsics
3353
3354             assert(callType != TYP_STRUCT);
3355
3356             op1 = nullptr;
3357
3358 #if defined(LEGACY_BACKEND)
3359             if (IsTargetIntrinsic(intrinsicID))
3360 #elif !defined(_TARGET_X86_)
3361             // Intrinsics that are not implemented directly by target instructions will
3362             // be re-materialized as user calls in rationalizer. For prefixed tail calls,
3363             // don't do this optimization, because
3364             //  a) For backward compatibility reasons on desktop .NET 4.6 / 4.6.1
3365             //  b) It would be a non-trivial task, or too late, to re-materialize a surviving
3366             //     tail-prefixed GT_INTRINSIC as a tail call in rationalizer.
3367             if (!IsIntrinsicImplementedByUserCall(intrinsicID) || !tailCall)
3368 #else
3369             // On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation
3370             // of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad
3371             // code generation for certain EH constructs.
3372             if (!IsIntrinsicImplementedByUserCall(intrinsicID))
3373 #endif
3374             {
3375                 switch (sig->numArgs)
3376                 {
3377                     case 1:
3378                         op1 = impPopStack().val;
3379
3380 #if FEATURE_X87_DOUBLES
3381
3382                         // X87 stack doesn't differentiate between float/double
3383                         // so it doesn't need a cast, but everybody else does
3384                         // Just double check it is at least a FP type
3385                         noway_assert(varTypeIsFloating(op1));
3386
3387 #else // FEATURE_X87_DOUBLES
3388
3389                         if (op1->TypeGet() != callType)
3390                         {
3391                             op1 = gtNewCastNode(callType, op1, callType);
3392                         }
3393
3394 #endif // FEATURE_X87_DOUBLES
3395
3396                         op1 = new (this, GT_INTRINSIC)
3397                             GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3398                         break;
3399
3400                     case 2:
3401                         op2 = impPopStack().val;
3402                         op1 = impPopStack().val;
3403
3404 #if FEATURE_X87_DOUBLES
3405
3406                         // X87 stack doesn't differentiate between float/double
3407                         // so it doesn't need a cast, but everybody else does
3408                         // Just double check it is at least a FP type
3409                         noway_assert(varTypeIsFloating(op2));
3410                         noway_assert(varTypeIsFloating(op1));
3411
3412 #else // FEATURE_X87_DOUBLES
3413
3414                         if (op2->TypeGet() != callType)
3415                         {
3416                             op2 = gtNewCastNode(callType, op2, callType);
3417                         }
3418                         if (op1->TypeGet() != callType)
3419                         {
3420                             op1 = gtNewCastNode(callType, op1, callType);
3421                         }
3422
3423 #endif // FEATURE_X87_DOUBLES
3424
3425                         op1 = new (this, GT_INTRINSIC)
3426                             GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicID, method);
3427                         break;
3428
3429                     default:
3430                         NO_WAY("Unsupported number of args for Math Intrinsic");
3431                 }
3432
3433 #ifndef LEGACY_BACKEND
3434                 if (IsIntrinsicImplementedByUserCall(intrinsicID))
3435                 {
3436                     op1->gtFlags |= GTF_CALL;
3437                 }
3438 #endif
3439             }
3440
3441             retNode = op1;
3442             break;
3443
3444 #ifdef _TARGET_XARCH_
3445         // TODO-ARM-CQ: reenable treating Interlocked operation as intrinsic
3446         case CORINFO_INTRINSIC_InterlockedAdd32:
3447             interlockedOperator = GT_LOCKADD;
3448             goto InterlockedBinOpCommon;
3449         case CORINFO_INTRINSIC_InterlockedXAdd32:
3450             interlockedOperator = GT_XADD;
3451             goto InterlockedBinOpCommon;
3452         case CORINFO_INTRINSIC_InterlockedXchg32:
3453             interlockedOperator = GT_XCHG;
3454             goto InterlockedBinOpCommon;
3455
3456 #ifdef _TARGET_AMD64_
3457         case CORINFO_INTRINSIC_InterlockedAdd64:
3458             interlockedOperator = GT_LOCKADD;
3459             goto InterlockedBinOpCommon;
3460         case CORINFO_INTRINSIC_InterlockedXAdd64:
3461             interlockedOperator = GT_XADD;
3462             goto InterlockedBinOpCommon;
3463         case CORINFO_INTRINSIC_InterlockedXchg64:
3464             interlockedOperator = GT_XCHG;
3465             goto InterlockedBinOpCommon;
3466 #endif // _TARGET_AMD64_
3467
3468         InterlockedBinOpCommon:
3469             assert(callType != TYP_STRUCT);
3470             assert(sig->numArgs == 2);
3471
3472             op2 = impPopStack().val;
3473             op1 = impPopStack().val;
3474
3475             // This creates:
3476             //   val
3477             // XAdd
3478             //   addr
3479             //     field (for example)
3480             //
3481             // In the case where the first argument is the address of a local, we might
3482             // want to make this *not* make the var address-taken -- but atomic instructions
3483             // on a local are probably pretty useless anyway, so we probably don't care.
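            // An illustrative mapping (an assumption, not verified here): an Interlocked.Add-style
            // operation on an int that reaches this path as CORINFO_INTRINSIC_InterlockedAdd32
            // becomes a GT_LOCKADD(addr, value) node with global side effects.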
3484
3485             op1 = gtNewOperNode(interlockedOperator, genActualType(callType), op1, op2);
3486             op1->gtFlags |= GTF_GLOB_EFFECT;
3487             retNode = op1;
3488             break;
3489 #endif // _TARGET_XARCH_
3490
3491         case CORINFO_INTRINSIC_MemoryBarrier:
3492
3493             assert(sig->numArgs == 0);
3494
3495             op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
3496             op1->gtFlags |= GTF_GLOB_EFFECT;
3497             retNode = op1;
3498             break;
3499
3500 #ifdef _TARGET_XARCH_
3501         // TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic
3502         case CORINFO_INTRINSIC_InterlockedCmpXchg32:
3503 #ifdef _TARGET_AMD64_
3504         case CORINFO_INTRINSIC_InterlockedCmpXchg64:
3505 #endif
3506         {
3507             assert(callType != TYP_STRUCT);
3508             assert(sig->numArgs == 3);
3509             GenTreePtr op3;
3510
3511             op3 = impPopStack().val; // comparand
3512             op2 = impPopStack().val; // value
3513             op1 = impPopStack().val; // location
3514
3515             GenTreePtr node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3);
3516
3517             node->gtCmpXchg.gtOpLocation->gtFlags |= GTF_DONT_CSE;
3518             retNode = node;
3519             break;
3520         }
3521 #endif
3522
3523         case CORINFO_INTRINSIC_StringLength:
3524             op1 = impPopStack().val;
3525             if (!opts.MinOpts() && !opts.compDbgCode)
3526             {
3527                 GenTreeArrLen* arrLen =
3528                     new (this, GT_ARR_LENGTH) GenTreeArrLen(TYP_INT, op1, offsetof(CORINFO_String, stringLen));
3529                 op1 = arrLen;
3530             }
3531             else
3532             {
3533                 /* Create the expression "*(str_addr + stringLengthOffset)" */
3534                 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
3535                                     gtNewIconNode(offsetof(CORINFO_String, stringLen), TYP_I_IMPL));
3536                 op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
3537             }
3538             retNode = op1;
3539             break;
3540
3541         case CORINFO_INTRINSIC_StringGetChar:
3542             op2 = impPopStack().val;
3543             op1 = impPopStack().val;
3544             op1 = gtNewIndexRef(TYP_CHAR, op1, op2);
3545             op1->gtFlags |= GTF_INX_STRING_LAYOUT;
3546             retNode = op1;
3547             break;
3548
3549         case CORINFO_INTRINSIC_InitializeArray:
3550             retNode = impInitializeArrayIntrinsic(sig);
3551             break;
3552
3553         case CORINFO_INTRINSIC_Array_Address:
3554         case CORINFO_INTRINSIC_Array_Get:
3555         case CORINFO_INTRINSIC_Array_Set:
3556             retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, intrinsicID);
3557             break;
3558
3559         case CORINFO_INTRINSIC_GetTypeFromHandle:
3560             op1 = impStackTop(0).val;
3561             if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3562                 gtIsTypeHandleToRuntimeTypeHelper(op1))
3563             {
3564                 op1 = impPopStack().val;
3565                 // Change call to return RuntimeType directly.
3566                 op1->gtType = TYP_REF;
3567                 retNode     = op1;
3568             }
3569             // Call the regular function.
3570             break;
3571
3572         case CORINFO_INTRINSIC_RTH_GetValueInternal:
3573             op1 = impStackTop(0).val;
3574             if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3575                 gtIsTypeHandleToRuntimeTypeHelper(op1))
3576             {
3577                 // Old tree
3578                 // Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle
3579                 //
3580                 // New tree
3581                 // TreeToGetNativeTypeHandle
3582
3583                 // Remove call to helper and return the native TypeHandle pointer that was the parameter
3584                 // to that helper.
3585
3586                 op1 = impPopStack().val;
3587
3588                 // Get native TypeHandle argument to old helper
3589                 op1 = op1->gtCall.gtCallArgs;
3590                 assert(op1->OperIsList());
3591                 assert(op1->gtOp.gtOp2 == nullptr);
3592                 op1     = op1->gtOp.gtOp1;
3593                 retNode = op1;
3594             }
3595             // Call the regular function.
3596             break;
3597
3598 #ifndef LEGACY_BACKEND
3599         case CORINFO_INTRINSIC_Object_GetType:
3600
3601             op1 = impPopStack().val;
3602             op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3603
3604             // Set the CALL flag to indicate that the operator is implemented by a call.
3605             // Set also the EXCEPTION flag because the native implementation of
3606             // CORINFO_INTRINSIC_Object_GetType intrinsic can throw NullReferenceException.
3607             op1->gtFlags |= (GTF_CALL | GTF_EXCEPT);
3608             retNode = op1;
3609             break;
3610 #endif
3611         // Implement ByReference Ctor.  This wraps the assignment of the ref into a byref-like field
3612         // in a value type.  The canonical example of this is Span<T>. In effect this is just a
3613         // substitution.  The parameter byref will be assigned into the newly allocated object.
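        // For example (illustrative): constructing a Span<T> from a ref ends up calling the
        // ByReference<T> constructor; instead of emitting that call, we store the incoming
        // byref directly into the struct's single field and push the struct back on the stack.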
3614         case CORINFO_INTRINSIC_ByReference_Ctor:
3615         {
3616             // Remove call to constructor and directly assign the byref passed
3617             // to the call to the first slot of the ByReference struct.
3618             op1                                    = impPopStack().val;
3619             GenTreePtr           thisptr           = newobjThis;
3620             CORINFO_FIELD_HANDLE fldHnd            = info.compCompHnd->getFieldInClass(clsHnd, 0);
3621             GenTreePtr           field             = gtNewFieldRef(TYP_BYREF, fldHnd, thisptr, 0, false);
3622             GenTreePtr           assign            = gtNewAssignNode(field, op1);
3623             GenTreePtr           byReferenceStruct = gtCloneExpr(thisptr->gtGetOp1());
3624             assert(byReferenceStruct != nullptr);
3625             impPushOnStack(byReferenceStruct, typeInfo(TI_STRUCT, clsHnd));
3626             retNode = assign;
3627             break;
3628         }
3629         // Implement ptr value getter for ByReference struct.
3630         case CORINFO_INTRINSIC_ByReference_Value:
3631         {
3632             op1                         = impPopStack().val;
3633             CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
3634             GenTreePtr           field  = gtNewFieldRef(TYP_BYREF, fldHnd, op1, 0, false);
3635             retNode                     = field;
3636             break;
3637         }
3638         default:
3639             /* Unknown intrinsic */
3640             break;
3641     }
3642
3643     if (mustExpand)
3644     {
3645         if (retNode == nullptr)
3646         {
3647             NO_WAY("JIT must expand the intrinsic!");
3648         }
3649     }
3650
3651     return retNode;
3652 }
3653
3654 /*****************************************************************************/
3655
3656 GenTreePtr Compiler::impArrayAccessIntrinsic(
3657     CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, CorInfoIntrinsics intrinsicID)
3658 {
3659     /* If we are generating SMALL_CODE, we don't want to use intrinsics for
3660        the following, as it generates fatter code.
3661     */
3662
3663     if (compCodeOpt() == SMALL_CODE)
3664     {
3665         return nullptr;
3666     }
3667
3668     /* These intrinsics generate fatter (but faster) code and are only
3669        done if we don't need SMALL_CODE */
3670
3671     unsigned rank = (intrinsicID == CORINFO_INTRINSIC_Array_Set) ? (sig->numArgs - 1) : sig->numArgs;
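    // For Array.Set the final signature argument is the value being stored, so it does not
    // count toward the rank; the remaining arguments are one index per dimension.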
3672
3673     // The rank 1 case is special because it has to handle two array formats;
3674     // we will simply not do that case
3675     if (rank > GT_ARR_MAX_RANK || rank <= 1)
3676     {
3677         return nullptr;
3678     }
3679
3680     CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr;
3681     var_types            elemType      = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd));
3682
3683     // For the ref case, we will only be able to inline if the types match
3684     // (the verifier checks for this; we don't care about the non-verified case) and the
3685     // type is final (so we don't need to do the cast).
3686     if ((intrinsicID != CORINFO_INTRINSIC_Array_Get) && !readonlyCall && varTypeIsGC(elemType))
3687     {
3688         // Get the call site signature
3689         CORINFO_SIG_INFO LocalSig;
3690         eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig);
3691         assert(LocalSig.hasThis());
3692
3693         CORINFO_CLASS_HANDLE actualElemClsHnd;
3694
3695         if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3696         {
3697             // Fetch the last argument, the one that indicates the type we are setting.
3698             CORINFO_ARG_LIST_HANDLE argType = LocalSig.args;
3699             for (unsigned r = 0; r < rank; r++)
3700             {
3701                 argType = info.compCompHnd->getArgNext(argType);
3702             }
3703
3704             typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType);
3705             actualElemClsHnd = argInfo.GetClassHandle();
3706         }
3707         else
3708         {
3709             assert(intrinsicID == CORINFO_INTRINSIC_Array_Address);
3710
3711             // Fetch the return type
3712             typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass);
3713             assert(retInfo.IsByRef());
3714             actualElemClsHnd = retInfo.GetClassHandle();
3715         }
3716
3717         // if it's not final, we can't do the optimization
3718         if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL))
3719         {
3720             return nullptr;
3721         }
3722     }
3723
3724     unsigned arrayElemSize;
3725     if (elemType == TYP_STRUCT)
3726     {
3727         assert(arrElemClsHnd);
3728
3729         arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd);
3730     }
3731     else
3732     {
3733         arrayElemSize = genTypeSize(elemType);
3734     }
3735
3736     if ((unsigned char)arrayElemSize != arrayElemSize)
3737     {
3738         // arrayElemSize would be truncated as an unsigned char.
3739         // This means the array element is too large. Don't do the optimization.
3740         return nullptr;
3741     }
3742
3743     GenTreePtr val = nullptr;
3744
3745     if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3746     {
3747         // Assignment of a struct is more work, and there are more gets than sets.
3748         if (elemType == TYP_STRUCT)
3749         {
3750             return nullptr;
3751         }
3752
3753         val = impPopStack().val;
3754         assert(genActualType(elemType) == genActualType(val->gtType) ||
3755                (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) ||
3756                (elemType == TYP_INT && val->gtType == TYP_BYREF) ||
3757                (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT));
3758     }
3759
3760     noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK);
3761
3762     GenTreePtr inds[GT_ARR_MAX_RANK];
3763     for (unsigned k = rank; k > 0; k--)
3764     {
3765         inds[k - 1] = impPopStack().val;
3766     }
3767
3768     GenTreePtr arr = impPopStack().val;
3769     assert(arr->gtType == TYP_REF);
3770
3771     GenTreePtr arrElem =
3772         new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank),
3773                                                static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]);
3774
3775     if (intrinsicID != CORINFO_INTRINSIC_Array_Address)
3776     {
3777         arrElem = gtNewOperNode(GT_IND, elemType, arrElem);
3778     }
3779
3780     if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3781     {
3782         assert(val != nullptr);
3783         return gtNewAssignNode(arrElem, val);
3784     }
3785     else
3786     {
3787         return arrElem;
3788     }
3789 }
3790
3791 BOOL Compiler::verMergeEntryStates(BasicBlock* block, bool* changed)
3792 {
3793     unsigned i;
3794
3795     // do some basic checks first
3796     if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth)
3797     {
3798         return FALSE;
3799     }
3800
3801     if (verCurrentState.esStackDepth > 0)
3802     {
3803         // merge stack types
3804         StackEntry* parentStack = block->bbStackOnEntry();
3805         StackEntry* childStack  = verCurrentState.esStack;
3806
3807         for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++)
3808         {
3809             if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == FALSE)
3810             {
3811                 return FALSE;
3812             }
3813         }
3814     }
3815
3816     // merge initialization status of this ptr
3817
3818     if (verTrackObjCtorInitState)
3819     {
3820         // If we're tracking the CtorInitState, then it must not be unknown in the current state.
3821         assert(verCurrentState.thisInitialized != TIS_Bottom);
3822
3823         // If the successor block's thisInit state is unknown, copy it from the current state.
3824         if (block->bbThisOnEntry() == TIS_Bottom)
3825         {
3826             *changed = true;
3827             verSetThisInit(block, verCurrentState.thisInitialized);
3828         }
3829         else if (verCurrentState.thisInitialized != block->bbThisOnEntry())
3830         {
3831             if (block->bbThisOnEntry() != TIS_Top)
3832             {
3833                 *changed = true;
3834                 verSetThisInit(block, TIS_Top);
3835
3836                 if (block->bbFlags & BBF_FAILED_VERIFICATION)
3837                 {
3838                     // The block is bad. Control can flow through the block to any handler that catches the
3839                     // verification exception, but the importer ignores bad blocks and therefore won't model
3840                     // this flow in the normal way. To complete the merge into the bad block, the new state
3841                     // needs to be manually pushed to the handlers that may be reached after the verification
3842                     // exception occurs.
3843                     //
3844                     // Usually, the new state was already propagated to the relevant handlers while processing
3845                     // the predecessors of the bad block. The exception is when the bad block is at the start
3846                     // of a try region, meaning it is protected by additional handlers that do not protect its
3847                     // predecessors.
3848                     //
3849                     if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0))
3850                     {
3851                         // Push TIS_Top to the handlers that protect the bad block. Note that this can cause
3852                         // recursive calls back into this code path (if successors of the current bad block are
3853                         // also bad blocks).
3854                         //
3855                         ThisInitState origTIS           = verCurrentState.thisInitialized;
3856                         verCurrentState.thisInitialized = TIS_Top;
3857                         impVerifyEHBlock(block, true);
3858                         verCurrentState.thisInitialized = origTIS;
3859                     }
3860                 }
3861             }
3862         }
3863     }
3864     else
3865     {
3866         assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom);
3867     }
3868
3869     return TRUE;
3870 }
3871
3872 /*****************************************************************************
3873  * 'logMsg' is true if a log message needs to be logged; false if the caller has
3874  *   already logged it (presumably in a more detailed fashion than done here).
3875  * 'bVerificationException' is true for a verification exception, false for a
3876  *   "call unauthorized by host" exception.
3877  */
3878
3879 void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg))
3880 {
3881     block->bbJumpKind = BBJ_THROW;
3882     block->bbFlags |= BBF_FAILED_VERIFICATION;
3883
3884     impCurStmtOffsSet(block->bbCodeOffs);
3885
3886 #ifdef DEBUG
3887     // we need this since BeginTreeList asserts otherwise
3888     impTreeList = impTreeLast = nullptr;
3889     block->bbFlags &= ~BBF_IMPORTED;
3890
3891     if (logMsg)
3892     {
3893         JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName,
3894                 block->bbCodeOffs, block->bbCodeOffsEnd));
3895         if (verbose)
3896         {
3897             printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs);
3898         }
3899     }
3900
3901     if (JitConfig.DebugBreakOnVerificationFailure())
3902     {
3903         DebugBreak();
3904     }
3905 #endif
3906
3907     impBeginTreeList();
3908
3909     // if the stack is non-empty evaluate all the side-effects
3910     if (verCurrentState.esStackDepth > 0)
3911     {
3912         impEvalSideEffects();
3913     }
3914     assert(verCurrentState.esStackDepth == 0);
3915
3916     GenTreePtr op1 = gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, GTF_EXCEPT,
3917                                          gtNewArgList(gtNewIconNode(block->bbCodeOffs)));
3918     // verCurrentState.esStackDepth = 0;
3919     impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
3920
3921     // The inliner is not able to handle methods that require a throw block, so
3922     // make sure this method never gets inlined.
3923     info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE);
3924 }
3925
3926 /*****************************************************************************
3927  *
3928  */
3929 void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg))
3930
3931 {
3932     // In AMD64, for historical reasons involving design limitations of JIT64, the VM has a
3933     // slightly different mechanism in which it calls the JIT to perform IL verification:
3934     // in the case of transparent methods the VM calls for a predicate IsVerifiable()
3935     // that consists of calling the JIT with the IMPORT_ONLY flag and with the IL verify flag on.
3936     // If the JIT determines the method is not verifiable, it should raise the exception to the VM and let
3937     // it bubble up until reported by the runtime.  Currently in RyuJIT, this method doesn't bubble
3938     // up the exception; instead it embeds a throw inside the offending basic block and lets it
3939     // fail at runtime of the jitted method.
3940     //
3941     // For AMD64 we don't want this behavior when the JIT has been called only for verification (i.e.
3942     // with the IMPORT_ONLY and IL Verification flag set) because this won't actually generate code,
3943     // just try to find out whether to fail this method before even actually jitting it.  So, in case
3944     // we detect these two conditions, instead of generating a throw statement inside the offending
3945     // basic block, we immediately fail to JIT and notify the VM to make the IsVerifiable() predicate
3946     // to return false and make RyuJIT behave the same way JIT64 does.
3947     //
3948     // The rationale behind this workaround is to avoid modifying the VM and maintain compatibility between JIT64 and
3949     // RyuJIT for the time being until we completely replace JIT64.
3950     // TODO-ARM64-Cleanup:  We probably want to actually modify the VM in the future to avoid the unnecessary two passes.
3951
3952     // In AMD64 we must make sure we're behaving the same way as JIT64, meaning we should only raise the verification
3953     // exception if we are only importing and verifying.  The method verNeedsVerification() can also modify the
3954     // tiVerificationNeeded flag in the case it determines it can 'skip verification' during importation and defer it
3955     // to a runtime check. That's why we must assert one or the other (since the flag tiVerificationNeeded can
3956     // be turned off during importation).
3957     CLANG_FORMAT_COMMENT_ANCHOR;
3958
3959 #ifdef _TARGET_64BIT_
3960
3961 #ifdef DEBUG
3962     bool canSkipVerificationResult =
3963         info.compCompHnd->canSkipMethodVerification(info.compMethodHnd) != CORINFO_VERIFICATION_CANNOT_SKIP;
3964     assert(tiVerificationNeeded || canSkipVerificationResult);
3965 #endif // DEBUG
3966
3967     // Add the non verifiable flag to the compiler
3968     if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
3969     {
3970         tiIsVerifiableCode = FALSE;
3971     }
3972 #endif //_TARGET_64BIT_
3973     verResetCurrentState(block, &verCurrentState);
3974     verConvertBBToThrowVerificationException(block DEBUGARG(logMsg));
3975
3976 #ifdef DEBUG
3977     impNoteLastILoffs(); // Remember at which BC offset the tree was finished
3978 #endif                   // DEBUG
3979 }
3980
3981 /******************************************************************************/
3982 typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd)
3983 {
3984     assert(ciType < CORINFO_TYPE_COUNT);
3985
3986     typeInfo tiResult;
3987     switch (ciType)
3988     {
3989         case CORINFO_TYPE_STRING:
3990         case CORINFO_TYPE_CLASS:
3991             tiResult = verMakeTypeInfo(clsHnd);
3992             if (!tiResult.IsType(TI_REF))
3993             { // type must be consistent with element type
3994                 return typeInfo();
3995             }
3996             break;
3997
3998 #ifdef _TARGET_64BIT_
3999         case CORINFO_TYPE_NATIVEINT:
4000         case CORINFO_TYPE_NATIVEUINT:
4001             if (clsHnd)
4002             {
4003                 // If we have more precise information, use it
4004                 return verMakeTypeInfo(clsHnd);
4005             }
4006             else
4007             {
4008                 return typeInfo::nativeInt();
4009             }
4010             break;
4011 #endif // _TARGET_64BIT_
4012
4013         case CORINFO_TYPE_VALUECLASS:
4014         case CORINFO_TYPE_REFANY:
4015             tiResult = verMakeTypeInfo(clsHnd);
4016             // type must be consistent with element type;
4017             if (!tiResult.IsValueClass())
4018             {
4019                 return typeInfo();
4020             }
4021             break;
4022         case CORINFO_TYPE_VAR:
4023             return verMakeTypeInfo(clsHnd);
4024
4025         case CORINFO_TYPE_PTR: // for now, pointers are treated as an error
4026         case CORINFO_TYPE_VOID:
4027             return typeInfo();
4028             break;
4029
4030         case CORINFO_TYPE_BYREF:
4031         {
4032             CORINFO_CLASS_HANDLE childClassHandle;
4033             CorInfoType          childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle);
4034             return ByRef(verMakeTypeInfo(childType, childClassHandle));
4035         }
4036         break;
4037
4038         default:
4039             if (clsHnd)
4040             { // If we have more precise information, use it
4041                 return typeInfo(TI_STRUCT, clsHnd);
4042             }
4043             else
4044             {
4045                 return typeInfo(JITtype2tiType(ciType));
4046             }
4047     }
4048     return tiResult;
4049 }
4050
4051 /******************************************************************************/
4052
4053 typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */)
4054 {
4055     if (clsHnd == nullptr)
4056     {
4057         return typeInfo();
4058     }
4059
4060     // Byrefs should only occur in method and local signatures, which are accessed
4061     // using ICorClassInfo and ICorClassInfo.getChildType.
4062     // So findClass() and getClassAttribs() should not be called for byrefs
4063
4064     if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF)
4065     {
4066         assert(!"Did findClass() return a Byref?");
4067         return typeInfo();
4068     }
4069
4070     unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd);
4071
4072     if (attribs & CORINFO_FLG_VALUECLASS)
4073     {
4074         CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd);
4075
4076         // Meta-data validation should ensure that CORINFO_TYPE_BYREF does
4077         // not occur here, so we may want to change this to an assert instead.
4078         if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR)
4079         {
4080             return typeInfo();
4081         }
4082
4083 #ifdef _TARGET_64BIT_
4084         if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT)
4085         {
4086             return typeInfo::nativeInt();
4087         }
4088 #endif // _TARGET_64BIT_
4089
4090         if (t != CORINFO_TYPE_UNDEF)
4091         {
4092             return (typeInfo(JITtype2tiType(t)));
4093         }
4094         else if (bashStructToRef)
4095         {
4096             return (typeInfo(TI_REF, clsHnd));
4097         }
4098         else
4099         {
4100             return (typeInfo(TI_STRUCT, clsHnd));
4101         }
4102     }
4103     else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE)
4104     {
4105         // See comment in _typeInfo.h for why we do it this way.
4106         return (typeInfo(TI_REF, clsHnd, true));
4107     }
4108     else
4109     {
4110         return (typeInfo(TI_REF, clsHnd));
4111     }
4112 }
4113
4114 /******************************************************************************/
4115 BOOL Compiler::verIsSDArray(typeInfo ti)
4116 {
4117     if (ti.IsNullObjRef())
4118     { // nulls are SD arrays
4119         return TRUE;
4120     }
4121
4122     if (!ti.IsType(TI_REF))
4123     {
4124         return FALSE;
4125     }
4126
4127     if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef()))
4128     {
4129         return FALSE;
4130     }
4131     return TRUE;
4132 }
4133
4134 /******************************************************************************/
4135 /* Given 'arrayObjectType' which is an array type, fetch the element type. */
4136 /* Returns an error type if anything goes wrong */
4137
4138 typeInfo Compiler::verGetArrayElemType(typeInfo arrayObjectType)
4139 {
4140     assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case
4141
4142     if (!verIsSDArray(arrayObjectType))
4143     {
4144         return typeInfo();
4145     }
4146
4147     CORINFO_CLASS_HANDLE childClassHandle = nullptr;
4148     CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle);
4149
4150     return verMakeTypeInfo(ciType, childClassHandle);
4151 }
4152
4153 /*****************************************************************************
4154  */
4155 typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args)
4156 {
4157     CORINFO_CLASS_HANDLE classHandle;
4158     CorInfoType          ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle));
4159
4160     var_types type = JITtype2varType(ciType);
4161     if (varTypeIsGC(type))
4162     {
4163         // For efficiency, getArgType only returns something in classHandle for
4164         // value types.  For other types that have additional type info, you
4165         // have to call back explicitly
4166         classHandle = info.compCompHnd->getArgClass(sig, args);
4167         if (!classHandle)
4168         {
4169             NO_WAY("Could not figure out Class specified in argument or local signature");
4170         }
4171     }
4172
4173     return verMakeTypeInfo(ciType, classHandle);
4174 }
4175
4176 /*****************************************************************************/
4177
4178 // This does the expensive check to figure out whether the method
4179 // needs to be verified. It is called only when we fail verification,
4180 // just before throwing the verification exception.
4181
4182 BOOL Compiler::verNeedsVerification()
4183 {
4184     // If we have previously determined that verification is NOT needed
4185     // (for example in Compiler::compCompile), that means verification is really not needed.
4186     // Return the same decision we made before.
4187     // (Note: This literally means that tiVerificationNeeded can never go from 0 to 1.)
4188
4189     if (!tiVerificationNeeded)
4190     {
4191         return tiVerificationNeeded;
4192     }
4193
4194     assert(tiVerificationNeeded);
4195
4196     // Ok, we haven't concluded that verification is NOT needed. Consult the EE now to
4197     // obtain the answer.
4198     CorInfoCanSkipVerificationResult canSkipVerificationResult =
4199         info.compCompHnd->canSkipMethodVerification(info.compMethodHnd);
4200
4201     // canSkipVerification will return one of the following three values:
4202     //    CORINFO_VERIFICATION_CANNOT_SKIP = 0,       // Cannot skip verification during jit time.
4203     //    CORINFO_VERIFICATION_CAN_SKIP = 1,          // Can skip verification during jit time.
4204     //    CORINFO_VERIFICATION_RUNTIME_CHECK = 2,     // Skip verification during jit time,
4205     //     but need to insert a callout to the VM to ask during runtime
4206     //     whether to skip verification or not.
4207
4208     // Set tiRuntimeCalloutNeeded if canSkipVerification() instructs us to insert a callout for runtime check
4209     if (canSkipVerificationResult == CORINFO_VERIFICATION_RUNTIME_CHECK)
4210     {
4211         tiRuntimeCalloutNeeded = true;
4212     }
4213
4214     if (canSkipVerificationResult == CORINFO_VERIFICATION_DONT_JIT)
4215     {
4216         // Dev10 706080 - Testers don't like the assert, so just silence it
4217         // by not using the macros that invoke debugAssert.
4218         badCode();
4219     }
4220
4221     // When tiVerificationNeeded is true, JIT will do the verification during JIT time.
4222     // The following line means we will NOT do jit time verification if canSkipVerification
4223     // returns CORINFO_VERIFICATION_CAN_SKIP or CORINFO_VERIFICATION_RUNTIME_CHECK.
4224     tiVerificationNeeded = (canSkipVerificationResult == CORINFO_VERIFICATION_CANNOT_SKIP);
4225     return tiVerificationNeeded;
4226 }
4227
4228 BOOL Compiler::verIsByRefLike(const typeInfo& ti)
4229 {
4230     if (ti.IsByRef())
4231     {
4232         return TRUE;
4233     }
4234     if (!ti.IsType(TI_STRUCT))
4235     {
4236         return FALSE;
4237     }
4238     return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR;
4239 }
4240
4241 BOOL Compiler::verIsSafeToReturnByRef(const typeInfo& ti)
4242 {
4243     if (ti.IsPermanentHomeByRef())
4244     {
4245         return TRUE;
4246     }
4247     else
4248     {
4249         return FALSE;
4250     }
4251 }
4252
4253 BOOL Compiler::verIsBoxable(const typeInfo& ti)
4254 {
4255     return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables
4256             || ti.IsUnboxedGenericTypeVar() ||
4257             (ti.IsType(TI_STRUCT) &&
4258              // exclude byreflike structs
4259              !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR)));
4260 }
4261
4262 // Is it a boxed value type?
4263 bool Compiler::verIsBoxedValueType(typeInfo ti)
4264 {
4265     if (ti.GetType() == TI_REF)
4266     {
4267         CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef();
4268         return !!eeIsValueClass(clsHnd);
4269     }
4270     else
4271     {
4272         return false;
4273     }
4274 }
4275
4276 /*****************************************************************************
4277  *
4278  *  Check if a TailCall is legal.
4279  */
4280
4281 bool Compiler::verCheckTailCallConstraint(
4282     OPCODE                  opcode,
4283     CORINFO_RESOLVED_TOKEN* pResolvedToken,
4284     CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter?
4285     bool                    speculative                // If true, won't throw if verification fails. Instead it will
4286                                                        // return false to the caller.
4287                                                        // If false, it will throw.
4288     )
4289 {
4290     DWORD            mflags;
4291     CORINFO_SIG_INFO sig;
4292     unsigned int     popCount = 0; // we can't pop the stack since impImportCall needs it, so
4293                                    // this counter is used to keep track of how many items have been
4294                                    // virtually popped
4295
4296     CORINFO_METHOD_HANDLE methodHnd       = nullptr;
4297     CORINFO_CLASS_HANDLE  methodClassHnd  = nullptr;
4298     unsigned              methodClassFlgs = 0;
4299
4300     assert(impOpcodeIsCallOpcode(opcode));
4301
4302     if (compIsForInlining())
4303     {
4304         return false;
4305     }
4306
4307     // for calli, VerifyOrReturn that this is not a virtual method
4308     if (opcode == CEE_CALLI)
4309     {
4310         /* Get the call sig */
4311         eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4312
4313         // We don't know the target method, so we have to infer the flags, or
4314         // assume the worst-case.
4315         mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
4316     }
4317     else
4318     {
4319         methodHnd = pResolvedToken->hMethod;
4320
4321         mflags = info.compCompHnd->getMethodAttribs(methodHnd);
4322
4323         // When verifying generic code we pair the method handle with its
4324         // owning class to get the exact method signature.
4325         methodClassHnd = pResolvedToken->hClass;
4326         assert(methodClassHnd);
4327
4328         eeGetMethodSig(methodHnd, &sig, methodClassHnd);
4329
4330         // opcode specific check
4331         methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd);
4332     }
4333
4334     // We must have got the methodClassHnd if opcode is not CEE_CALLI
4335     assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI);
4336
4337     if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4338     {
4339         eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4340     }
4341
4342     // check compatibility of the arguments
4343     unsigned int argCount;
4344     argCount = sig.numArgs;
4345     CORINFO_ARG_LIST_HANDLE args;
4346     args = sig.args;
4347     while (argCount--)
4348     {
4349         typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack();
4350
4351         // check that the argument is not a byref for tailcalls
4352         VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative);
4353
4354         // For unsafe code, we might have parameters containing pointer to the stack location.
4355         // Disallow the tailcall for this kind.
4356         CORINFO_CLASS_HANDLE classHandle;
4357         CorInfoType          ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle));
4358         VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative);
4359
4360         args = info.compCompHnd->getArgNext(args);
4361     }
4362
4363     // update popCount
4364     popCount += sig.numArgs;
4365
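    // (Editorial note, not in the original source.)  Nothing has actually been popped
    // from the evaluation stack at this point; with N = sig.numArgs the layout is:
    //    impStackTop(0) .. impStackTop(N-1)  - the N declared arguments (top of stack = last pushed)
    //    impStackTop(N)                      - the 'this' pointer, for instance (non-NEWOBJ) calls
    // which is why 'this' is fetched below as impStackTop(popCount) once popCount has
    // been advanced past the arguments.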
4366     // check for 'this' which is on non-static methods, not called via NEWOBJ
4367     if (!(mflags & CORINFO_FLG_STATIC))
4368     {
4369         // Always update the popCount.
4370         // This is crucial for the stack calculation to be correct.
4371         typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4372         popCount++;
4373
4374         if (opcode == CEE_CALLI)
4375         {
4376             // For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object
4377             // on the stack.
4378             if (tiThis.IsValueClass())
4379             {
4380                 tiThis.MakeByRef();
4381             }
4382             VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative);
4383         }
4384         else
4385         {
4386             // Check type compatibility of the this argument
4387             typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd);
4388             if (tiDeclaredThis.IsValueClass())
4389             {
4390                 tiDeclaredThis.MakeByRef();
4391             }
4392
4393             VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative);
4394         }
4395     }
4396
4397     // Tail calls on constrained calls should be illegal too:
4398     // when instantiated at a value type, a constrained call may pass the address of a stack allocated value
4399     VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative);
4400
4401     // Get the exact view of the signature for an array method
4402     if (sig.retType != CORINFO_TYPE_VOID)
4403     {
4404         if (methodClassFlgs & CORINFO_FLG_ARRAY)
4405         {
4406             assert(opcode != CEE_CALLI);
4407             eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4408         }
4409     }
4410
4411     typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass);
4412     typeInfo tiCallerRetType =
4413         verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
4414
4415     // void return type gets morphed into the error type, so we have to treat them specially here
4416     if (sig.retType == CORINFO_TYPE_VOID)
4417     {
4418         VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch",
4419                                   speculative);
4420     }
4421     else
4422     {
4423         VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType),
4424                                                    NormaliseForStack(tiCallerRetType), true),
4425                                   "tailcall return mismatch", speculative);
4426     }
4427
4428     // for tailcall, stack must be empty
4429     VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative);
4430
4431     return true; // Yes, tailcall is legal
4432 }
4433
4434 /*****************************************************************************
4435  *
4436  *  Checks the IL verification rules for the call
4437  */
4438
4439 void Compiler::verVerifyCall(OPCODE                  opcode,
4440                              CORINFO_RESOLVED_TOKEN* pResolvedToken,
4441                              CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
4442                              bool                    tailCall,
4443                              bool                    readonlyCall,
4444                              const BYTE*             delegateCreateStart,
4445                              const BYTE*             codeAddr,
4446                              CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName))
4447 {
4448     DWORD             mflags;
4449     CORINFO_SIG_INFO* sig      = nullptr;
4450     unsigned int      popCount = 0; // we can't pop the stack since impImportCall needs it, so
4451                                     // this counter is used to keep track of how many items have been
4452                                     // virtually popped
4453
4454     // for calli, VerifyOrReturn that this is not a virtual method
4455     if (opcode == CEE_CALLI)
4456     {
4457         Verify(false, "Calli not verifiable");
4458         return;
4459     }
4460
4461     //<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item.
4462     mflags = callInfo->verMethodFlags;
4463
4464     sig = &callInfo->verSig;
4465
4466     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4467     {
4468         eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
4469     }
4470
4471     // opcode specific check
4472     unsigned methodClassFlgs = callInfo->classFlags;
4473     switch (opcode)
4474     {
4475         case CEE_CALLVIRT:
4476             // cannot do callvirt on valuetypes
4477             VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class");
4478             VerifyOrReturn(sig->hasThis(), "CallVirt on static method");
4479             break;
4480
4481         case CEE_NEWOBJ:
4482         {
4483             assert(!tailCall); // Importer should not allow this
4484             VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC),
4485                            "newobj must be on instance");
4486
4487             if (methodClassFlgs & CORINFO_FLG_DELEGATE)
4488             {
4489                 VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor");
4490                 typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
4491                 typeInfo tiDeclaredFtn =
4492                     verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack();
4493                 VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type");
4494
4495                 assert(popCount == 0);
4496                 typeInfo tiActualObj = impStackTop(1).seTypeInfo;
4497                 typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
4498
4499                 VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg");
4500                 VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch");
4501                 VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF),
4502                                "delegate object type mismatch");
4503
4504                 CORINFO_CLASS_HANDLE objTypeHandle =
4505                     tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef();
4506
4507                 // the method signature must be compatible with the delegate's invoke method
4508
4509                 // check that for virtual functions, the type of the object used to get the
4510                 // ftn ptr is the same as the type of the object passed to the delegate ctor.
4511                 // since this is a bit of work to determine in general, we pattern match stylized
4512                 // code sequences
4513
4514                 // the delegate creation code check, which used to be done later, is now done here
4515                 // so we can read delegateMethodRef directly from
4516                 // the preceding LDFTN or CEE_LDVIRTFN instruction sequence;
4517                 // we then use it in our call to isCompatibleDelegate().
4518
4519                 mdMemberRef delegateMethodRef = mdMemberRefNil;
4520                 VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef),
4521                                "must create delegates with certain IL");
4522
4523                 CORINFO_RESOLVED_TOKEN delegateResolvedToken;
4524                 delegateResolvedToken.tokenContext = impTokenLookupContextHandle;
4525                 delegateResolvedToken.tokenScope   = info.compScopeHnd;
4526                 delegateResolvedToken.token        = delegateMethodRef;
4527                 delegateResolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
4528                 info.compCompHnd->resolveToken(&delegateResolvedToken);
4529
4530                 CORINFO_CALL_INFO delegateCallInfo;
4531                 eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */,
4532                               addVerifyFlag(CORINFO_CALLINFO_SECURITYCHECKS), &delegateCallInfo);
4533
4534                 BOOL isOpenDelegate = FALSE;
4535                 VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass,
4536                                                                       tiActualFtn.GetMethod(), pResolvedToken->hClass,
4537                                                                       &isOpenDelegate),
4538                                "function incompatible with delegate");
4539
4540                 // check the constraints on the target method
4541                 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass),
4542                                "delegate target has unsatisfied class constraints");
4543                 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass,
4544                                                                             tiActualFtn.GetMethod()),
4545                                "delegate target has unsatisfied method constraints");
4546
4547                 // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch)
4548                 // for additional verification rules for delegates
4549                 CORINFO_METHOD_HANDLE actualMethodHandle  = tiActualFtn.GetMethod();
4550                 DWORD                 actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle);
4551                 if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
4552                 {
4553
4554                     if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0)
4555 #ifdef DEBUG
4556                         && StrictCheckForNonVirtualCallToVirtualMethod()
4557 #endif
4558                             )
4559                     {
4560                         if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
4561                         {
4562                             VerifyOrReturn(tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly() ||
4563                                                verIsBoxedValueType(tiActualObj),
4564                                            "The 'this' parameter to the call must be either the calling method's "
4565                                            "'this' parameter or "
4566                                            "a boxed value type.");
4567                         }
4568                     }
4569                 }
4570
4571                 if (actualMethodAttribs & CORINFO_FLG_PROTECTED)
4572                 {
4573                     BOOL targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC;
4574
4575                     Verify(targetIsStatic || !isOpenDelegate,
4576                            "Unverifiable creation of an open instance delegate for a protected member.");
4577
4578                     CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic)
4579                                                                 ? info.compClassHnd
4580                                                                 : tiActualObj.GetClassHandleForObjRef();
4581
4582                     // In the case of protected methods, it is a requirement that the 'this'
4583                     // pointer be a subclass of the current context.  Perform this check.
4584                     Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
4585                            "Accessing protected method through wrong type.");
4586                 }
4587                 goto DONE_ARGS;
4588             }
4589         }
4590         // fall thru to default checks
4591         default:
4592             VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract");
4593     }
4594     VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)),
4595                    "can only newobj a delegate constructor");
4596
4597     // check compatibility of the arguments
4598     unsigned int argCount;
4599     argCount = sig->numArgs;
4600     CORINFO_ARG_LIST_HANDLE args;
4601     args = sig->args;
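    // (Editorial note.)  The arguments are still on the evaluation stack, so
    // impStackTop(popCount + argCount) in the loop below indexes the declared
    // arguments from the first (deepest on the stack, since it was pushed first)
    // down to the last (at the top), in step with 'args' walking the signature
    // from left to right.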
4602     while (argCount--)
4603     {
4604         typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo;
4605
4606         typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack();
4607         VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch");
4608
4609         args = info.compCompHnd->getArgNext(args);
4610     }
4611
4612 DONE_ARGS:
4613
4614     // update popCount
4615     popCount += sig->numArgs;
4616
4617     // check for 'this' which is on non-static methods, not called via NEWOBJ
4618     CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
4619     if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ))
4620     {
4621         typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4622         popCount++;
4623
4624         // If it is null, we assume we can access it (since it will AV shortly)
4625         // If it is anything but a reference class, there is no hierarchy, so
4626         // again, we don't need the precise instance class to compute 'protected' access
4627         if (tiThis.IsType(TI_REF))
4628         {
4629             instanceClassHnd = tiThis.GetClassHandleForObjRef();
4630         }
4631
4632         // Check type compatibility of the this argument
4633         typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass);
4634         if (tiDeclaredThis.IsValueClass())
4635         {
4636             tiDeclaredThis.MakeByRef();
4637         }
4638
4639         // If this is a call to the base class .ctor, set thisPtr Init for
4640         // this block.
4641         if (mflags & CORINFO_FLG_CONSTRUCTOR)
4642         {
4643             if (verTrackObjCtorInitState && tiThis.IsThisPtr() &&
4644                 verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass))
4645             {
4646                 assert(verCurrentState.thisInitialized !=
4647                        TIS_Bottom); // This should never be the case just from the logic of the verifier.
4648                 VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit,
4649                                "Call to base class constructor when 'this' is possibly initialized");
4650                 // Otherwise, 'this' is now initialized.
4651                 verCurrentState.thisInitialized = TIS_Init;
4652                 tiThis.SetInitialisedObjRef();
4653             }
4654             else
4655             {
4656                 // We allow direct calls to value type constructors
4657                 // NB: we have to check that the contents of tiThis is a value type, otherwise we could use a
4658                 // constrained callvirt to illegally re-enter a .ctor on a value of reference type.
4659                 VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(),
4660                                "Bad call to a constructor");
4661             }
4662         }
4663
4664         if (pConstrainedResolvedToken != nullptr)
4665         {
4666             VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call");
4667
4668             typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass);
4669
4670             // We just dereference this and test for equality
4671             tiThis.DereferenceByRef();
4672             VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint),
4673                            "this type mismatch with constrained type operand");
4674
4675             // Now pretend the this type is the boxed constrained type, for the sake of subsequent checks
4676             tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass);
4677         }
4678
4679         // To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too
4680         if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef())
4681         {
4682             tiDeclaredThis.SetIsReadonlyByRef();
4683         }
4684
4685         VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch");
4686
4687         if (tiThis.IsByRef())
4688         {
4689             // Find the actual type where the method exists (as opposed to what is declared
4690             // in the metadata). This is to prevent passing a byref as the "this" argument
4691             // while calling methods like System.ValueType.GetHashCode() which expect boxed objects.
4692
4693             CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod);
4694             VerifyOrReturn(eeIsValueClass(actualClassHnd),
4695                            "Call to base type of valuetype (which is never a valuetype)");
4696         }
4697
4698         // Rules for non-virtual call to a non-final virtual method:
4699
4700         // Define:
4701         // The "this" pointer is considered to be "possibly written" if
4702         //   1. Its address has been taken (LDARGA 0) anywhere in the method.
4703         //   (or)
4704         //   2. It has been stored to (STARG.0) anywhere in the method.
4705
4706         // A non-virtual call to a non-final virtual method is only allowed if
4707         //   1. The this pointer passed to the callee is an instance of a boxed value type.
4708         //   (or)
4709         //   2. The this pointer passed to the callee is the current method's this pointer.
4710         //      (and) The current method's this pointer is not "possibly written".
4711
4712         // Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to
4713         // virtual methods.  (Luckily this does not affect .ctors, since they are not virtual).
4714         // This is stronger than is strictly needed, but implementing a laxer rule is significantly
4715         // harder and more error prone.
4716
4717         if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0)
4718 #ifdef DEBUG
4719             && StrictCheckForNonVirtualCallToVirtualMethod()
4720 #endif
4721                 )
4722         {
4723             if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
4724             {
4725                 VerifyOrReturn(
4726                     tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly() || verIsBoxedValueType(tiThis),
4727                     "The 'this' parameter to the call must be either the calling method's 'this' parameter or "
4728                     "a boxed value type.");
4729             }
4730         }
4731     }
4732
4733     // check any constraints on the callee's class and type parameters
4734     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass),
4735                    "method has unsatisfied class constraints");
4736     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod),
4737                    "method has unsatisfied method constraints");
4738
4739     if (mflags & CORINFO_FLG_PROTECTED)
4740     {
4741         VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
4742                        "Can't access protected method");
4743     }
4744
4745     // Get the exact view of the signature for an array method
4746     if (sig->retType != CORINFO_TYPE_VOID)
4747     {
4748         eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass);
4749     }
4750
4751     // "readonly." prefixed calls only allowed for the Address operation on arrays.
4752     // The methods supported by array types are under the control of the EE
4753     // so we can trust that only the Address operation returns a byref.
4754     if (readonlyCall)
4755     {
4756         typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass);
4757         VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(),
4758                        "unexpected use of readonly prefix");
4759     }
4760
4761     // Verify the tailcall
4762     if (tailCall)
4763     {
4764         verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false);
4765     }
4766 }
4767
4768 /*****************************************************************************
4769  *  Checks that a delegate creation is done using the following pattern:
4770  *     dup
4771  *     ldvirtftn targetMemberRef
4772  *  OR
4773  *     ldftn targetMemberRef
4774  *
4775  * 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if
4776  *  not in this basic block)
4777  *
4778  *  targetMemberRef is read from the code sequence.
4779  *  targetMemberRef is validated iff verificationNeeded.
4780  */
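// (Editorial note, not part of the original source.)  The byte offsets used in the
// body below follow from the IL encodings: ldftn is the two-byte opcode FE 06 and
// ldvirtftn is FE 07, while dup is the single byte 25.  Hence the 4-byte metadata
// token starts at offset 2 from delegateCreateStart for the plain "ldftn <token>"
// pattern, and at offset 3 for the "dup; ldvirtftn <token>" pattern.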
4781
4782 BOOL Compiler::verCheckDelegateCreation(const BYTE*  delegateCreateStart,
4783                                         const BYTE*  codeAddr,
4784                                         mdMemberRef& targetMemberRef)
4785 {
4786     if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
4787     {
4788         targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]);
4789         return TRUE;
4790     }
4791     else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr))
4792     {
4793         targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]);
4794         return TRUE;
4795     }
4796
4797     return FALSE;
4798 }
4799
4800 typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType)
4801 {
4802     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
4803     typeInfo ptrVal     = verVerifyLDIND(tiTo, instrType);
4804     typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack();
4805     if (!tiCompatibleWith(value, normPtrVal, true))
4806     {
4807         Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch");
4808         compUnsafeCastUsed = true;
4809     }
4810     return ptrVal;
4811 }
4812
4813 typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType)
4814 {
4815     assert(!instrType.IsStruct());
4816
4817     typeInfo ptrVal;
4818     if (ptr.IsByRef())
4819     {
4820         ptrVal = DereferenceByRef(ptr);
4821         if (instrType.IsObjRef() && !ptrVal.IsObjRef())
4822         {
4823             Verify(false, "bad pointer");
4824             compUnsafeCastUsed = true;
4825         }
4826         else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal))
4827         {
4828             Verify(false, "pointer not consistent with instr");
4829             compUnsafeCastUsed = true;
4830         }
4831     }
4832     else
4833     {
4834         Verify(false, "pointer not byref");
4835         compUnsafeCastUsed = true;
4836     }
4837
4838     return ptrVal;
4839 }
4840
4841 // Verify that the field is used properly.  'tiThis' is NULL for statics,
4842     // 'fieldFlags' is the field's attributes, and mutator is TRUE if it is a
4843 // ld*flda or a st*fld.
4844 // 'enclosingClass' is given if we are accessing a field in some specific type.
4845
4846 void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN*   pResolvedToken,
4847                               const CORINFO_FIELD_INFO& fieldInfo,
4848                               const typeInfo*           tiThis,
4849                               BOOL                      mutator,
4850                               BOOL                      allowPlainStructAsThis)
4851 {
4852     CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass;
4853     unsigned             fieldFlags     = fieldInfo.fieldFlags;
4854     CORINFO_CLASS_HANDLE instanceClass =
4855         info.compClassHnd; // for statics, we imagine the instance is the current class.
4856
4857     bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0);
4858     if (mutator)
4859     {
4860         Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA based static");
4861         if ((fieldFlags & CORINFO_FLG_FIELD_FINAL))
4862         {
4863             Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd &&
4864                        info.compIsStatic == isStaticField,
4865                    "bad use of initonly field (set or address taken)");
4866         }
4867     }
4868
4869     if (tiThis == nullptr)
4870     {
4871         Verify(isStaticField, "used static opcode with non-static field");
4872     }
4873     else
4874     {
4875         typeInfo tThis = *tiThis;
4876
4877         if (allowPlainStructAsThis && tThis.IsValueClass())
4878         {
4879             tThis.MakeByRef();
4880         }
4881
4882         // If it is null, we assume we can access it (since it will AV shortly)
4883         // If it is anything but a reference class, there is no hierarchy, so
4884         // again, we don't need the precise instance class to compute 'protected' access
4885         if (tiThis->IsType(TI_REF))
4886         {
4887             instanceClass = tiThis->GetClassHandleForObjRef();
4888         }
4889
4890         // Note that even if the field is static, we require that the this pointer
4891         // satisfy the same constraints as a non-static field.  This happens to
4892         // be simpler and seems reasonable.
4893         typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass);
4894         if (tiDeclaredThis.IsValueClass())
4895         {
4896             tiDeclaredThis.MakeByRef();
4897
4898             // we allow read-only tThis, on any field access (even stores!), because if the
4899             // class implementor wants to prohibit stores he should make the field private.
4900             // we do this by setting the read-only bit on the type we compare tThis to.
4901             tiDeclaredThis.SetIsReadonlyByRef();
4902         }
4903         else if (verTrackObjCtorInitState && tThis.IsThisPtr())
4904         {
4905             // Any field access is legal on "uninitialized" this pointers.
4906             // The easiest way to implement this is to simply set the
4907             // initialized bit for the duration of the type check on the
4908             // field access only.  It does not change the state of the "this"
4909             // for the function as a whole. Note that the "tThis" is a copy
4910             // of the original "this" type (*tiThis) passed in.
4911             tThis.SetInitialisedObjRef();
4912         }
4913
4914         Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch");
4915     }
4916
4917     // Presently the JIT does not check that we don't store or take the address of init-only fields
4918     // since we cannot guarantee their immutability and it is not a security issue.
4919
4920     // check any constraints on the field's class --- accessing the field might cause a class constructor to run.
4921     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass),
4922                    "field has unsatisfied class constraints");
4923     if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED)
4924     {
4925         Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass),
4926                "Accessing protected method through wrong type.");
4927     }
4928 }
4929
4930 void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode)
4931 {
4932     if (tiOp1.IsNumberType())
4933     {
4934 #ifdef _TARGET_64BIT_
4935         Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch");
4936 #else  // !_TARGET_64BIT_
4937         // [10/17/2013] Consider changing this: to put on my verification lawyer hat,
4938         // this is non-conforming to the ECMA Spec: types don't have to be equivalent,
4939         // but compatible, since we can coalesce native int with int32 (see section III.1.5).
4940         Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch");
4941 #endif // !_TARGET_64BIT_
4942     }
4943     else if (tiOp1.IsObjRef())
4944     {
4945         switch (opcode)
4946         {
4947             case CEE_BEQ_S:
4948             case CEE_BEQ:
4949             case CEE_BNE_UN_S:
4950             case CEE_BNE_UN:
4951             case CEE_CEQ:
4952             case CEE_CGT_UN:
4953                 break;
4954             default:
4955                 Verify(FALSE, "Cond not allowed on object types");
4956         }
4957         Verify(tiOp2.IsObjRef(), "Cond type mismatch");
4958     }
4959     else if (tiOp1.IsByRef())
4960     {
4961         Verify(tiOp2.IsByRef(), "Cond type mismatch");
4962     }
4963     else
4964     {
4965         Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch");
4966     }
4967 }
4968
4969 void Compiler::verVerifyThisPtrInitialised()
4970 {
4971     if (verTrackObjCtorInitState)
4972     {
4973         Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized");
4974     }
4975 }
4976
4977 BOOL Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target)
4978 {
4979     // Either target == context, in this case calling an alternate .ctor
4980     // Or target is the immediate parent of context
4981
4982     return ((target == context) || (target == info.compCompHnd->getParentType(context)));
4983 }
4984
4985 GenTreePtr Compiler::impImportLdvirtftn(GenTreePtr              thisPtr,
4986                                         CORINFO_RESOLVED_TOKEN* pResolvedToken,
4987                                         CORINFO_CALL_INFO*      pCallInfo)
4988 {
4989     if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE))
4990     {
4991         NO_WAY("Virtual call to a function added via EnC is not supported");
4992     }
4993
4994 #ifdef FEATURE_READYTORUN_COMPILER
4995     if (opts.IsReadyToRun())
4996     {
4997         if (!pCallInfo->exactContextNeedsRuntimeLookup)
4998         {
4999             GenTreeCall* call = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, GTF_EXCEPT,
5000                                                     gtNewArgList(thisPtr));
5001
5002             call->setEntryPoint(pCallInfo->codePointerLookup.constLookup);
5003
5004             return call;
5005         }
5006
5007         // We need a runtime lookup. CoreRT has a ReadyToRun helper for that too.
5008         if (IsTargetAbi(CORINFO_CORERT_ABI))
5009         {
5010             GenTreePtr ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind);
5011
5012             return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
5013                                              gtNewArgList(ctxTree), &pCallInfo->codePointerLookup.lookupKind);
5014         }
5015     }
5016 #endif
5017
5018     // Get the exact descriptor for the static callsite
5019     GenTreePtr exactTypeDesc = impParentClassTokenToHandle(pResolvedToken);
5020     if (exactTypeDesc == nullptr)
5021     { // compDonotInline()
5022         return nullptr;
5023     }
5024
5025     GenTreePtr exactMethodDesc = impTokenToHandle(pResolvedToken);
5026     if (exactMethodDesc == nullptr)
5027     { // compDonotInline()
5028         return nullptr;
5029     }
5030
5031     GenTreeArgList* helpArgs = gtNewArgList(exactMethodDesc);
5032
5033     helpArgs = gtNewListNode(exactTypeDesc, helpArgs);
5034
5035     helpArgs = gtNewListNode(thisPtr, helpArgs);
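    // (Editorial note.)  gtNewListNode prepends, so after the three statements above
    // the helper argument list reads (thisPtr, exactTypeDesc, exactMethodDesc): the
    // object first, then the class handle, then the method handle.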
5036
5037     // Call helper function.  This gets the target address of the final destination callsite.
5038
5039     return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, GTF_EXCEPT, helpArgs);
5040 }
5041
5042 /*****************************************************************************
5043  *
5044  *  Build and import a box node
5045  */
5046
5047 void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
5048 {
5049     // Get the tree for the type handle for the boxed object.  In the case
5050     // of shared generic code or ngen'd code this might be an embedded
5051     // computation.
5052     // Note we can only do it if the class constructor has been called;
5053     // we can always do it on primitive types.
5054
5055     GenTreePtr op1 = nullptr;
5056     GenTreePtr op2 = nullptr;
5057     var_types  lclTyp;
5058
5059     impSpillSpecialSideEff();
5060
5061     // Now get the expression to box from the stack.
5062     CORINFO_CLASS_HANDLE operCls;
5063     GenTreePtr           exprToBox = impPopStack(operCls).val;
5064
5065     CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
5066     if (boxHelper == CORINFO_HELP_BOX)
5067     {
5068         // we are doing 'normal' boxing.  This means that we can inline the box operation
5069         // Box(expr) gets morphed into
5070         // temp = new(clsHnd)
5071         // cpobj(temp+4, expr, clsHnd)
5072         // push temp
5073         // The code paths differ slightly below for structs and primitives because
5074         // "cpobj" differs in these cases.  In one case you get
5075         //    impAssignStructPtr(temp+4, expr, clsHnd)
5076         // and the other you get
5077         //    *(temp+4) = expr
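        //
        // (Editorial sketch, not in the original comments.)  For the primitive case the
        // code below ends up producing roughly:
        //
        //     boxTemp = <allocation helper>(clsHnd);              // separate statement (asgStmt)
        //     GT_COMMA( *(boxTemp + sizeof(void*)) = exprToBox, boxTemp )
        //
        // wrapped in a GT_BOX node that remembers asgStmt, so later phases can locate
        // the allocation when optimising the box patterns listed further below.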
5078
5079         if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM)
5080         {
5081             impBoxTemp = lvaGrabTemp(true DEBUGARG("Box Helper"));
5082         }
5083
5084         // The box temp needs to stay in use until this box expression is appended to
5085         // some other node.  We approximate this by keeping it alive until
5086         // the opcode stack becomes empty
5087         impBoxTempInUse = true;
5088
5089 #ifdef FEATURE_READYTORUN_COMPILER
5090         bool usingReadyToRunHelper = false;
5091
5092         if (opts.IsReadyToRun())
5093         {
5094             op1                   = impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
5095             usingReadyToRunHelper = (op1 != nullptr);
5096         }
5097
5098         if (!usingReadyToRunHelper)
5099 #endif
5100         {
5101             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
5102             // and the newfast call with a single call to a dynamic R2R cell that will:
5103             //      1) Load the context
5104             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
5105             //      3) Allocate and return the new object for boxing
5106             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
5107
5108             // Ensure that the value class is restored
5109             op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5110             if (op2 == nullptr)
5111             { // compDonotInline()
5112                 return;
5113             }
5114
5115             op1 = gtNewHelperCallNode(info.compCompHnd->getNewHelper(pResolvedToken, info.compMethodHnd), TYP_REF, 0,
5116                                       gtNewArgList(op2));
5117         }
5118
5119         /* Remember that this basic block contains 'new' of an array */
5120         compCurBB->bbFlags |= BBF_HAS_NEWOBJ;
5121
5122         GenTreePtr asg = gtNewTempAssign(impBoxTemp, op1);
5123
5124         GenTreePtr asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
5125
5126         op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
5127         op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
5128         op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2);
5129
5130         if (varTypeIsStruct(exprToBox))
5131         {
5132             assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls));
5133             op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL);
5134         }
5135         else
5136         {
5137             lclTyp = exprToBox->TypeGet();
5138             if (lclTyp == TYP_BYREF)
5139             {
5140                 lclTyp = TYP_I_IMPL;
5141             }
5142             CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass);
5143             if (impIsPrimitive(jitType))
5144             {
5145                 lclTyp = JITtype2varType(jitType);
5146             }
5147             assert(genActualType(exprToBox->TypeGet()) == genActualType(lclTyp) ||
5148                    varTypeIsFloating(lclTyp) == varTypeIsFloating(exprToBox->TypeGet()));
5149             var_types srcTyp = exprToBox->TypeGet();
5150             var_types dstTyp = lclTyp;
5151
5152             if (srcTyp != dstTyp)
5153             {
5154                 assert((varTypeIsFloating(srcTyp) && varTypeIsFloating(dstTyp)) ||
5155                        (varTypeIsIntegral(srcTyp) && varTypeIsIntegral(dstTyp)));
5156                 exprToBox = gtNewCastNode(dstTyp, exprToBox, dstTyp);
5157             }
5158             op1 = gtNewAssignNode(gtNewOperNode(GT_IND, lclTyp, op1), exprToBox);
5159         }
5160
5161         op2 = gtNewLclvNode(impBoxTemp, TYP_REF);
5162         op1 = gtNewOperNode(GT_COMMA, TYP_REF, op1, op2);
5163
5164         // Record that this is a "box" node.
5165         op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt);
5166
5167         // If it is a value class, mark the "box" node.  We can use this information
5168         // to optimise several cases:
5169         //    "box(x) == null" --> false
5170         //    "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod"
5171         //    "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod"
5172
5173         op1->gtFlags |= GTF_BOX_VALUE;
5174         assert(op1->IsBoxedValue());
5175         assert(asg->gtOper == GT_ASG);
5176     }
5177     else
5178     {
5179         // Don't optimize, just call the helper and be done with it
5180
5181         // Ensure that the value class is restored
5182         op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5183         if (op2 == nullptr)
5184         { // compDonotInline()
5185             return;
5186         }
5187
5188         GenTreeArgList* args = gtNewArgList(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true));
5189         op1                  = gtNewHelperCallNode(boxHelper, TYP_REF, GTF_EXCEPT, args);
5190     }
5191
5192     /* Push the result back on the stack, */
5193     /* even if clsHnd is a value class we want the TI_REF */
5194     typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass));
5195     impPushOnStack(op1, tiRetVal);
5196 }
5197
5198 //------------------------------------------------------------------------
5199 // impImportNewObjArray: Build and import `new` of multi-dimensional array
5200 //
5201 // Arguments:
5202 //    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5203 //                     by a call to CEEInfo::resolveToken().
5204 //    pCallInfo - The CORINFO_CALL_INFO that has been initialized
5205 //                by a call to CEEInfo::getCallInfo().
5206 //
5207 // Assumptions:
5208 //    The multi-dimensional array constructor arguments (array dimensions) are
5209 //    pushed on the IL stack on entry to this method.
5210 //
5211 // Notes:
5212 //    Multi-dimensional array constructors are imported as calls to a JIT
5213 //    helper, not as regular calls.
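//
// Example (editorial, illustrative only):
//    A C# "new int[2, 3]" site pushes the two dimensions and then emits
//        newobj instance void int32[,]::.ctor(int32, int32)
//    and this method rewrites that constructor call into one of the MDARR
//    helper calls described in the body below.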
5214
5215 void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
5216 {
5217     GenTreePtr classHandle = impParentClassTokenToHandle(pResolvedToken);
5218     if (classHandle == nullptr)
5219     { // compDonotInline()
5220         return;
5221     }
5222
5223     assert(pCallInfo->sig.numArgs);
5224
5225     GenTreePtr      node;
5226     GenTreeArgList* args;
5227
5228     //
5229     // There are two different JIT helpers that can be used to allocate
5230     // multi-dimensional arrays:
5231     //
5232     // - CORINFO_HELP_NEW_MDARR - takes the array dimensions as varargs.
5233     //      This variant is deprecated. It should be eventually removed.
5234     //
5235     // - CORINFO_HELP_NEW_MDARR_NONVARARG - takes the array dimensions as
5236     //      pointer to block of int32s. This variant is more portable.
5237     //
5238     // The non-varargs helper is enabled for CoreRT only for now. Enabling this
5239     // unconditionally would require ReadyToRun version bump.
5240     //
5241     CLANG_FORMAT_COMMENT_ANCHOR;
5242
5243 #if COR_JIT_EE_VERSION > 460
5244     if (!opts.IsReadyToRun() || IsTargetAbi(CORINFO_CORERT_ABI))
5245     {
5246         LclVarDsc* newObjArrayArgsVar;
5247
5248         // Reuse the temp used to pass the array dimensions to avoid bloating
5249         // the stack frame in case there are multiple calls to multi-dim array
5250         // constructors within a single method.
5251         if (lvaNewObjArrayArgs == BAD_VAR_NUM)
5252         {
5253             lvaNewObjArrayArgs                       = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs"));
5254             lvaTable[lvaNewObjArrayArgs].lvType      = TYP_BLK;
5255             lvaTable[lvaNewObjArrayArgs].lvExactSize = 0;
5256         }
5257
5258         // Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers
5259         // for our call to CORINFO_HELP_NEW_MDARR_NONVARARG.
5260         lvaTable[lvaNewObjArrayArgs].lvExactSize =
5261             max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32));
5262
5263         // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects
5264         // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments
5265         // to one allocation at a time.
5266         impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray"));
5267
5268         //
5269         // The arguments of the CORINFO_HELP_NEW_MDARR_NONVARARG helper are:
5270         //  - Array class handle
5271         //  - Number of dimension arguments
5272         //  - Pointer to block of int32 dimensions - address  of lvaNewObjArrayArgs temp.
5273         //
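        // For example (editorial note): importing the constructor of a rank-3 array
        // finds three dimension values on the IL stack; the loop below stores them as
        // three consecutive INT32s in lvaNewObjArrayArgs, and the helper is then called
        // roughly as CORINFO_HELP_NEW_MDARR_NONVARARG(clsHnd, 3, &lvaNewObjArrayArgs).
        //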
5274
5275         node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5276         node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node);
5277
5278         // Pop the dimension arguments from the stack one at a time and store them
5279         // into the lvaNewObjArrayArgs temp.
5280         for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--)
5281         {
5282             GenTreePtr arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT);
5283
5284             GenTreePtr dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5285             dest            = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest);
5286             dest            = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest,
5287                                  new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i));
5288             dest = gtNewOperNode(GT_IND, TYP_INT, dest);
5289
5290             node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node);
5291         }
5292
5293         args = gtNewArgList(node);
5294
5295         // pass number of arguments to the helper
5296         args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5297
5298         args = gtNewListNode(classHandle, args);
5299
5300         node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR_NONVARARG, TYP_REF, 0, args);
5301     }
5302     else
5303 #endif
5304     {
5305         //
5306         // The varargs helper needs the type and method handles as last
5307         // and  last-1 param (this is a cdecl call, so args will be
5308         // pushed in reverse order on the CPU stack)
5309         //
5310
5311         args = gtNewArgList(classHandle);
5312
5313         // pass number of arguments to the helper
5314         args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5315
5316         unsigned argFlags = 0;
5317         args              = impPopList(pCallInfo->sig.numArgs, &argFlags, &pCallInfo->sig, args);
5318
5319         node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, 0, args);
5320
5321         // varargs, so we pop the arguments
5322         node->gtFlags |= GTF_CALL_POP_ARGS;
5323
5324 #ifdef DEBUG
5325         // At the present time we don't track Caller pop arguments
5326         // that have GC references in them
5327         for (GenTreeArgList* temp = args; temp; temp = temp->Rest())
5328         {
5329             assert(temp->Current()->gtType != TYP_REF);
5330         }
5331 #endif
5332     }
5333
5334     node->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5335     node->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass;
5336
5337     // Remember that this basic block contains 'new' of a md array
5338     compCurBB->bbFlags |= BBF_HAS_NEWARRAY;
5339
5340     impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass));
5341 }
5342
5343 GenTreePtr Compiler::impTransformThis(GenTreePtr              thisPtr,
5344                                       CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
5345                                       CORINFO_THIS_TRANSFORM  transform)
5346 {
5347     switch (transform)
5348     {
5349         case CORINFO_DEREF_THIS:
5350         {
5351             GenTreePtr obj = thisPtr;
5352
5353             // This does a LDIND on the obj, which should be a byref pointing to a ref
5354             impBashVarAddrsToI(obj);
5355             assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF);
5356             CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5357
5358             obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj);
5359             // ldind could point anywhere, example a boxed class static int
5360             obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
5361
5362             return obj;
5363         }
5364
5365         case CORINFO_BOX_THIS:
5366         {
5367             // Constraint calls where there might be no
5368             // unboxed entry point require us to implement the call via helper.
5369             // These only occur when a possible target of the call
5370             // may have inherited an implementation of an interface
5371             // method from System.Object or System.ValueType.  The EE does not provide us with
5372             // "unboxed" versions of these methods.
5373
5374             GenTreePtr obj = thisPtr;
5375
5376             assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL);
5377             obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj);
5378             obj->gtFlags |= GTF_EXCEPT;
5379
5380             CorInfoType jitTyp  = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5381             var_types   objType = JITtype2varType(jitTyp);
5382             if (impIsPrimitive(jitTyp))
5383             {
5384                 if (obj->OperIsBlk())
5385                 {
5386                     obj->ChangeOperUnchecked(GT_IND);
5387
5388                     // Obj could point anywhere, example a boxed class static int
5389                     obj->gtFlags |= GTF_IND_TGTANYWHERE;
5390                     obj->gtOp.gtOp2 = nullptr; // must be zero for tree walkers
5391                 }
5392
5393                 obj->gtType = JITtype2varType(jitTyp);
5394                 assert(varTypeIsArithmetic(obj->gtType));
5395             }
5396
5397             // This pushes on the dereferenced byref
5398             // This is then used immediately to box.
5399             impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack());
5400
5401             // This pops off the byref-to-a-value-type remaining on the stack and
5402             // replaces it with a boxed object.
5403             // This is then used as the object to the virtual call immediately below.
5404             impImportAndPushBox(pConstrainedResolvedToken);
5405             if (compDonotInline())
5406             {
5407                 return nullptr;
5408             }
5409
5410             obj = impPopStack().val;
5411             return obj;
5412         }
5413         case CORINFO_NO_THIS_TRANSFORM:
5414         default:
5415             return thisPtr;
5416     }
5417 }
5418
5419 //------------------------------------------------------------------------
5420 // impCanPInvokeInline: check whether PInvoke inlining should be enabled in the current method.
5421 //
5422 // Return Value:
5423 //    true if PInvoke inlining should be enabled in the current method, false otherwise
5424 //
5425 // Notes:
5426 //    Checks a number of ambient conditions where we could pinvoke but choose not to
5427
5428 bool Compiler::impCanPInvokeInline()
5429 {
5430     return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (compCodeOpt() != SMALL_CODE) &&
5431            (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
5432         ;
5433 }
5434
5435 //------------------------------------------------------------------------
5436 // impCanPInvokeInlineCallSite: basic legality checks using information
5437 // from a call to see if the call qualifies as an inline pinvoke.
5438 //
5439 // Arguments:
5440 //    block      - block containing the call, or for inlinees, block
5441 //                 containing the call being inlined
5442 //
5443 // Return Value:
5444 //    true if this call can legally qualify as an inline pinvoke, false otherwise
5445 //
5446 // Notes:
5447 //    For runtimes that support exception handling interop there are
5448 //    restrictions on using inline pinvoke in handler regions.
5449 //
5450 //    * We have to disable pinvoke inlining inside of filters because
5451 //    in case the main execution (i.e. in the try block) is inside
5452 //    unmanaged code, we cannot reuse the inlined stub (we still need
5453 //    the original state until we are in the catch handler)
5454 //
5455 //    * We disable pinvoke inlining inside handlers since the GSCookie
5456 //    is in the inlined Frame (see
5457 //    CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but
5458 //    this would not protect framelets/return-address of handlers.
5459 //
5460 //    These restrictions are currently also in place for CoreCLR but
5461 //    can be relaxed when coreclr/#8459 is addressed.
5462
5463 bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block)
5464 {
5465     if (block->hasHndIndex())
5466     {
5467         return false;
5468     }
5469
5470     // The remaining limitations do not apply to CoreRT
5471     if (IsTargetAbi(CORINFO_CORERT_ABI))
5472     {
5473         return true;
5474     }
5475
5476 #ifdef _TARGET_AMD64_
5477     // On x64, we disable pinvoke inlining inside of try regions.
5478     // Here is the comment from JIT64 explaining why:
5479     //
5480     //   [VSWhidbey: 611015] - because the jitted code links in the
5481     //   Frame (instead of the stub) we rely on the Frame not being
5482     //   'active' until inside the stub.  This normally happens by the
5483     //   stub setting the return address pointer in the Frame object
5484     //   inside the stub.  On a normal return, the return address
5485     //   pointer is zeroed out so the Frame can be safely re-used, but
5486     //   if an exception occurs, nobody zeros out the return address
5487     //   pointer.  Thus if we re-used the Frame object, it would go
5488     //   'active' as soon as we link it into the Frame chain.
5489     //
5490     //   Technically we only need to disable PInvoke inlining if we're
5491     //   in a handler or if we're in a try body with a catch or
5492     //   filter/except where other non-handler code in this method
5493     //   might run and try to re-use the dirty Frame object.
5494     //
5495     //   A desktop test case where this seems to matter is
5496     //   jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe
5497     if (block->hasTryIndex())
5498     {
5499         return false;
5500     }
5501 #endif // _TARGET_AMD64_
5502
5503     return true;
5504 }
5505
5506 //------------------------------------------------------------------------
5507 // impCheckForPInvokeCall: examine a call to see if it is a pinvoke and, if so,
5508 // whether it can be expressed as an inline pinvoke.
5509 //
5510 // Arguments:
5511 //    call       - tree for the call
5512 //    methHnd    - handle for the method being called (may be null)
5513 //    sig        - signature of the method being called
5514 //    mflags     - method flags for the method being called
5515 //    block      - block containing the call, or for inlinees, block
5516 //                 containing the call being inlined
5517 //
5518 // Notes:
5519 //   Sets GTF_CALL_M_PINVOKE on the call for pinvokes.
5520 //
5521 //   Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the
5522 //   call passes a combination of legality and profitability checks.
5523 //
5524 //   If GTF_CALL_UNMANAGED is set, increments info.compCallUnmanaged
5525
5526 void Compiler::impCheckForPInvokeCall(
5527     GenTreePtr call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block)
5528 {
5529     CorInfoUnmanagedCallConv unmanagedCallConv;
5530
5531     // If VM flagged it as Pinvoke, flag the call node accordingly
5532     if ((mflags & CORINFO_FLG_PINVOKE) != 0)
5533     {
5534         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_PINVOKE;
5535     }
5536
5537     if (methHnd)
5538     {
5539         if ((mflags & CORINFO_FLG_PINVOKE) == 0 || (mflags & CORINFO_FLG_NOSECURITYWRAP) == 0)
5540         {
5541             return;
5542         }
5543
5544         unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd);
5545     }
5546     else
5547     {
5548         CorInfoCallConv callConv = CorInfoCallConv(sig->callConv & CORINFO_CALLCONV_MASK);
5549         if (callConv == CORINFO_CALLCONV_NATIVEVARARG)
5550         {
5551             // Used by the IL Stubs.
5552             callConv = CORINFO_CALLCONV_C;
5553         }
5554         static_assert_no_msg((unsigned)CORINFO_CALLCONV_C == (unsigned)CORINFO_UNMANAGED_CALLCONV_C);
5555         static_assert_no_msg((unsigned)CORINFO_CALLCONV_STDCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_STDCALL);
5556         static_assert_no_msg((unsigned)CORINFO_CALLCONV_THISCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_THISCALL);
5557         unmanagedCallConv = CorInfoUnmanagedCallConv(callConv);
5558
5559         assert(!call->gtCall.gtCallCookie);
5560     }
5561
5562     if (unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_C && unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_STDCALL &&
5563         unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_THISCALL)
5564     {
5565         return;
5566     }
5567     optNativeCallCount++;
5568
5569     if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && methHnd == nullptr)
5570     {
5571         // PInvoke CALLI in IL stubs must be inlined
5572     }
5573     else
5574     {
5575         // Check legality
5576         if (!impCanPInvokeInlineCallSite(block))
5577         {
5578             return;
5579         }
5580
5581         // PInvoke CALLs in IL stubs must be inlined on CoreRT. Skip the ambient condition and
5582         // profitability checks.
5583         if (!(opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && IsTargetAbi(CORINFO_CORERT_ABI)))
5584         {
5585             if (impCanPInvokeInline())
5586             {
5587                 return;
5588             }
5589
5590             // Size-speed tradeoff: don't use inline pinvoke at rarely
5591             // executed call sites.  The non-inline version is more
5592             // compact.
5593             if (block->isRunRarely())
5594             {
5595                 return;
5596             }
5597         }
5598
5599         // The expensive check should be last
5600         if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig))
5601         {
5602             return;
5603         }
5604     }
5605
5606     JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s", info.compFullName));
5607
5608     call->gtFlags |= GTF_CALL_UNMANAGED;
5609     info.compCallUnmanaged++;
5610
5611     // The AMD64 convention is the same for native and managed
5612     if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_C)
5613     {
5614         call->gtFlags |= GTF_CALL_POP_ARGS;
5615     }
5616
5617     if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_THISCALL)
5618     {
5619         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL;
5620     }
5621 }
5622
5623 GenTreePtr Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, IL_OFFSETX ilOffset)
5624 {
5625     var_types callRetTyp = JITtype2varType(sig->retType);
5626
5627     /* The function pointer is on top of the stack - It may be a
5628      * complex expression. As it is evaluated after the args,
5629      * it may cause registered args to be spilled. Simply spill it.
5630      */
5631
5632     // Ignore this trivial case.
5633     if (impStackTop().val->gtOper != GT_LCL_VAR)
5634     {
5635         impSpillStackEntry(verCurrentState.esStackDepth - 1,
5636                            BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall"));
5637     }
5638
5639     /* Get the function pointer */
5640
5641     GenTreePtr fptr = impPopStack().val;
5642     assert(genActualType(fptr->gtType) == TYP_I_IMPL);
5643
5644 #ifdef DEBUG
5645     // This temporary must never be converted to a double in stress mode,
5646     // because that can introduce a call to the cast helper after the
5647     // arguments have already been evaluated.
5648
5649     if (fptr->OperGet() == GT_LCL_VAR)
5650     {
5651         lvaTable[fptr->gtLclVarCommon.gtLclNum].lvKeepType = 1;
5652     }
5653 #endif
5654
5655     /* Create the call node */
5656
5657     GenTreePtr call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
5658
5659     call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
5660
5661     return call;
5662 }
5663
5664 /*****************************************************************************/
5665
5666 void Compiler::impPopArgsForUnmanagedCall(GenTreePtr call, CORINFO_SIG_INFO* sig)
5667 {
5668     assert(call->gtFlags & GTF_CALL_UNMANAGED);
5669
5670     /* Since we push the arguments in reverse order (i.e. right -> left),
5671      * spill any side effects from the stack.
5672      *
5673      * OBS: If there is only one side effect we do not need to spill it,
5674      *      thus we have to spill all side effects except the last one.
5675      */
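    /* Illustrative example (added commentary, not from the original source): for an x86 stdcall
     * pinvoke f(a, b, c) where a and b both contain calls, popping the args in reverse order would
     * evaluate c, then b, then a. Spilling a to a temp here (every side-effecting argument except
     * the last one) preserves the original left-to-right evaluation order. */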
5676
5677     unsigned lastLevelWithSideEffects = UINT_MAX;
5678
5679     unsigned argsToReverse = sig->numArgs;
5680
5681     // For "thiscall", the first argument goes in a register. Since its
5682     // order does not need to be changed, we do not need to spill it
5683
5684     if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
5685     {
5686         assert(argsToReverse);
5687         argsToReverse--;
5688     }
5689
5690 #ifndef _TARGET_X86_
5691     // Don't reverse args on ARM or x64 - first four args always placed in regs in order
5692     argsToReverse = 0;
5693 #endif
5694
5695     for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++)
5696     {
5697         if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF)
5698         {
5699             assert(lastLevelWithSideEffects == UINT_MAX);
5700
5701             impSpillStackEntry(level,
5702                                BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect"));
5703         }
5704         else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT)
5705         {
5706             if (lastLevelWithSideEffects != UINT_MAX)
5707             {
5708                 /* We had a previous side effect - must spill it */
5709                 impSpillStackEntry(lastLevelWithSideEffects,
5710                                    BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect"));
5711
5712                 /* Record the level for the current side effect in case we will spill it */
5713                 lastLevelWithSideEffects = level;
5714             }
5715             else
5716             {
5717                 /* This is the first side effect encountered - record its level */
5718
5719                 lastLevelWithSideEffects = level;
5720             }
5721         }
5722     }
5723
5724     /* The argument list is now "clean" - no out-of-order side effects
5725      * Pop the argument list in reverse order */
5726
5727     unsigned   argFlags = 0;
5728     GenTreePtr args     = call->gtCall.gtCallArgs =
5729         impPopRevList(sig->numArgs, &argFlags, sig, sig->numArgs - argsToReverse);
5730
5731     if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
5732     {
5733         GenTreePtr thisPtr = args->Current();
5734         impBashVarAddrsToI(thisPtr);
5735         assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF);
5736     }
5737
5738     if (args)
5739     {
5740         call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5741     }
5742 }
5743
5744 //------------------------------------------------------------------------
5745 // impInitClass: Build a node to initialize the class before accessing the
5746 //               field if necessary
5747 //
5748 // Arguments:
5749 //    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5750 //                     by a call to CEEInfo::resolveToken().
5751 //
5752 // Return Value: If needed, a pointer to the node that will perform the class
5753 //               initialization.  Otherwise, nullptr.
5754 //
5755
5756 GenTreePtr Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken)
5757 {
5758     CorInfoInitClassResult initClassResult =
5759         info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle);
5760
5761     if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0)
5762     {
5763         return nullptr;
5764     }
5765     BOOL runtimeLookup;
5766
5767     GenTreePtr node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup);
5768
5769     if (node == nullptr)
5770     {
5771         assert(compDonotInline());
5772         return nullptr;
5773     }
5774
5775     if (runtimeLookup)
5776     {
5777         node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, 0, gtNewArgList(node));
5778     }
5779     else
5780     {
5781         // Call the shared non-gc static helper, as it's the fastest
5782         node = fgGetSharedCCtor(pResolvedToken->hClass);
5783     }
5784
5785     return node;
5786 }
5787
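// Illustrative sketch (added commentary, not from the original source): given the address of a
// static readonly field whose value is known at jit time, this helper folds the load into a
// constant node. For example, a field holding the 32-bit value 42 yields gtNewIconNode(42), and a
// double field yields a gtNewDconNode carrying the field's value.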
5788 GenTreePtr Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp)
5789 {
5790     GenTreePtr op1 = nullptr;
5791
5792     switch (lclTyp)
5793     {
5794         int     ival;
5795         __int64 lval;
5796         double  dval;
5797
5798         case TYP_BOOL:
5799             ival = *((bool*)fldAddr);
5800             goto IVAL_COMMON;
5801
5802         case TYP_BYTE:
5803             ival = *((signed char*)fldAddr);
5804             goto IVAL_COMMON;
5805
5806         case TYP_UBYTE:
5807             ival = *((unsigned char*)fldAddr);
5808             goto IVAL_COMMON;
5809
5810         case TYP_SHORT:
5811             ival = *((short*)fldAddr);
5812             goto IVAL_COMMON;
5813
5814         case TYP_CHAR:
5815         case TYP_USHORT:
5816             ival = *((unsigned short*)fldAddr);
5817             goto IVAL_COMMON;
5818
5819         case TYP_UINT:
5820         case TYP_INT:
5821             ival = *((int*)fldAddr);
5822         IVAL_COMMON:
5823             op1 = gtNewIconNode(ival);
5824             break;
5825
5826         case TYP_LONG:
5827         case TYP_ULONG:
5828             lval = *((__int64*)fldAddr);
5829             op1  = gtNewLconNode(lval);
5830             break;
5831
5832         case TYP_FLOAT:
5833             dval = *((float*)fldAddr);
5834             op1  = gtNewDconNode(dval);
5835 #if !FEATURE_X87_DOUBLES
5836             // X87 stack doesn't differentiate between float/double
5837             // so R4 is treated as R8, but everybody else does
5838             op1->gtType = TYP_FLOAT;
5839 #endif // FEATURE_X87_DOUBLES
5840             break;
5841
5842         case TYP_DOUBLE:
5843             dval = *((double*)fldAddr);
5844             op1  = gtNewDconNode(dval);
5845             break;
5846
5847         default:
5848             assert(!"Unexpected lclTyp");
5849             break;
5850     }
5851
5852     return op1;
5853 }
5854
5855 GenTreePtr Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
5856                                                 CORINFO_ACCESS_FLAGS    access,
5857                                                 CORINFO_FIELD_INFO*     pFieldInfo,
5858                                                 var_types               lclTyp)
5859 {
5860     GenTreePtr op1;
5861
5862     switch (pFieldInfo->fieldAccessor)
5863     {
5864         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
5865         {
5866             assert(!compIsForInlining());
5867
5868             // We first call a special helper to get the statics base pointer
5869             op1 = impParentClassTokenToHandle(pResolvedToken);
5870
5871             // compIsForInlining() is false so we should never get NULL here
5872             assert(op1 != nullptr);
5873
5874             var_types type = TYP_BYREF;
5875
5876             switch (pFieldInfo->helper)
5877             {
5878                 case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
5879                     type = TYP_I_IMPL;
5880                     break;
5881                 case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
5882                 case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
5883                 case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
5884                     break;
5885                 default:
5886                     assert(!"unknown generic statics helper");
5887                     break;
5888             }
5889
5890             op1 = gtNewHelperCallNode(pFieldInfo->helper, type, 0, gtNewArgList(op1));
5891
5892             FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5893             op1              = gtNewOperNode(GT_ADD, type, op1,
5894                                 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
5895         }
5896         break;
5897
5898         case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
5899         {
5900 #ifdef FEATURE_READYTORUN_COMPILER
5901             if (opts.IsReadyToRun())
5902             {
5903                 unsigned callFlags = 0;
5904
5905                 if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
5906                 {
5907                     callFlags |= GTF_CALL_HOISTABLE;
5908                 }
5909
5910                 op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF, callFlags);
5911
5912                 op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
5913             }
5914             else
5915 #endif
5916             {
5917                 op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper);
5918             }
5919
5920             {
5921                 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5922                 op1              = gtNewOperNode(GT_ADD, op1->TypeGet(), op1,
5923                                     new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, pFieldInfo->offset, fs));
5924             }
5925             break;
5926         }
5927 #if COR_JIT_EE_VERSION > 460
5928         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
5929         {
5930 #ifdef FEATURE_READYTORUN_COMPILER
5931             noway_assert(opts.IsReadyToRun());
5932             CORINFO_LOOKUP_KIND kind = info.compCompHnd->getLocationOfThisType(info.compMethodHnd);
5933             assert(kind.needsRuntimeLookup);
5934
5935             GenTreePtr      ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);
5936             GenTreeArgList* args    = gtNewArgList(ctxTree);
5937
5938             unsigned callFlags = 0;
5939
5940             if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
5941             {
5942                 callFlags |= GTF_CALL_HOISTABLE;
5943             }
5944             var_types type = TYP_BYREF;
5945             op1            = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, callFlags, args);
5946
5947             op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
5948             FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5949             op1              = gtNewOperNode(GT_ADD, type, op1,
5950                                 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
5951 #else
5952             unreached();
5953 #endif // FEATURE_READYTORUN_COMPILER
5954         }
5955         break;
5956 #endif // COR_JIT_EE_VERSION > 460
5957         default:
5958         {
5959             if (!(access & CORINFO_ACCESS_ADDRESS))
5960             {
5961                 // In future, it may be better to just create the right tree here instead of folding it later.
5962                 op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField);
5963
5964                 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
5965                 {
5966                     op1->gtType = TYP_REF; // points at boxed object
5967                     FieldSeqNode* firstElemFldSeq =
5968                         GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
5969                     op1 =
5970                         gtNewOperNode(GT_ADD, TYP_BYREF, op1,
5971                                       new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), firstElemFldSeq));
5972
5973                     if (varTypeIsStruct(lclTyp))
5974                     {
5975                         // Constructor adds GTF_GLOB_REF.  Note that this is *not* GTF_EXCEPT.
5976                         op1 = gtNewObjNode(pFieldInfo->structType, op1);
5977                     }
5978                     else
5979                     {
5980                         op1 = gtNewOperNode(GT_IND, lclTyp, op1);
5981                         op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING;
5982                     }
5983                 }
5984
5985                 return op1;
5986             }
5987             else
5988             {
5989                 void** pFldAddr = nullptr;
5990                 void*  fldAddr  = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr);
5991
5992                 FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5993
5994                 /* Create the data member node */
5995                 if (pFldAddr == nullptr)
5996                 {
5997                     op1 = gtNewIconHandleNode((size_t)fldAddr, GTF_ICON_STATIC_HDL, fldSeq);
5998                 }
5999                 else
6000                 {
6001                     op1 = gtNewIconHandleNode((size_t)pFldAddr, GTF_ICON_STATIC_HDL, fldSeq);
6002
6003                     // There are two cases here, either the static is RVA based,
6004                     // in which case the type of the FIELD node is not a GC type
6005                     // and the handle to the RVA is a TYP_I_IMPL.  Or the FIELD node is
6006                     // a GC type and the handle to it is a TYP_BYREF in the GC heap
6007                     // because handles to statics now go into the large object heap
6008
6009                     var_types handleTyp = (var_types)(varTypeIsGC(lclTyp) ? TYP_BYREF : TYP_I_IMPL);
6010                     op1                 = gtNewOperNode(GT_IND, handleTyp, op1);
6011                     op1->gtFlags |= GTF_IND_INVARIANT | GTF_IND_NONFAULTING;
6012                 }
6013             }
6014             break;
6015         }
6016     }
6017
6018     if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
6019     {
6020         op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
6021
6022         FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
6023
6024         op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
6025                             new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), fldSeq));
6026     }
6027
6028     if (!(access & CORINFO_ACCESS_ADDRESS))
6029     {
6030         op1 = gtNewOperNode(GT_IND, lclTyp, op1);
6031         op1->gtFlags |= GTF_GLOB_REF;
6032     }
6033
6034     return op1;
6035 }
6036
6037 // In general, try to call this before most of the verification work.  Most people expect the access
6038 // exceptions before the verification exceptions.  If you do this after, that usually doesn't happen.  It turns
6039 // out that if you can't access something, we also consider you unverifiable for other reasons.
6040 void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6041 {
6042     if (result != CORINFO_ACCESS_ALLOWED)
6043     {
6044         impHandleAccessAllowedInternal(result, helperCall);
6045     }
6046 }
6047
6048 void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6049 {
6050     switch (result)
6051     {
6052         case CORINFO_ACCESS_ALLOWED:
6053             break;
6054         case CORINFO_ACCESS_ILLEGAL:
6055             // if we're verifying, then we need to reject the illegal access to ensure that we don't think the
6056             // method is verifiable.  Otherwise, delay the exception to runtime.
6057             if (compIsForImportOnly())
6058             {
6059                 info.compCompHnd->ThrowExceptionForHelper(helperCall);
6060             }
6061             else
6062             {
6063                 impInsertHelperCall(helperCall);
6064             }
6065             break;
6066         case CORINFO_ACCESS_RUNTIME_CHECK:
6067             impInsertHelperCall(helperCall);
6068             break;
6069     }
6070 }
6071
6072 void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo)
6073 {
6074     // Construct the argument list
6075     GenTreeArgList* args = nullptr;
6076     assert(helperInfo->helperNum != CORINFO_HELP_UNDEF);
6077     for (unsigned i = helperInfo->numArgs; i > 0; --i)
6078     {
6079         const CORINFO_HELPER_ARG& helperArg  = helperInfo->args[i - 1];
6080         GenTreePtr                currentArg = nullptr;
6081         switch (helperArg.argType)
6082         {
6083             case CORINFO_HELPER_ARG_TYPE_Field:
6084                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
6085                     info.compCompHnd->getFieldClass(helperArg.fieldHandle));
6086                 currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle);
6087                 break;
6088             case CORINFO_HELPER_ARG_TYPE_Method:
6089                 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle);
6090                 currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle);
6091                 break;
6092             case CORINFO_HELPER_ARG_TYPE_Class:
6093                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle);
6094                 currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle);
6095                 break;
6096             case CORINFO_HELPER_ARG_TYPE_Module:
6097                 currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle);
6098                 break;
6099             case CORINFO_HELPER_ARG_TYPE_Const:
6100                 currentArg = gtNewIconNode(helperArg.constant);
6101                 break;
6102             default:
6103                 NO_WAY("Illegal helper arg type");
6104         }
6105         args = (args == nullptr) ? gtNewArgList(currentArg) : gtNewListNode(currentArg, args);
6106     }
6107
6108     /* TODO-Review:
6109      * Mark as CSE'able, and hoistable.  Consider marking hoistable unless you're in the inlinee.
6110      * Also, consider sticking this in the first basic block.
6111      */
6112     GenTreePtr callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, GTF_EXCEPT, args);
6113     impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6114 }
6115
6116 void Compiler::impInsertCalloutForDelegate(CORINFO_METHOD_HANDLE callerMethodHnd,
6117                                            CORINFO_METHOD_HANDLE calleeMethodHnd,
6118                                            CORINFO_CLASS_HANDLE  delegateTypeHnd)
6119 {
6120 #ifdef FEATURE_CORECLR
6121     if (!info.compCompHnd->isDelegateCreationAllowed(delegateTypeHnd, calleeMethodHnd))
6122     {
6123         // Call the JIT_DelegateSecurityCheck helper before calling the actual function.
6124         // This helper throws an exception if the CLR host disallows the call.
6125
6126         GenTreePtr helper = gtNewHelperCallNode(CORINFO_HELP_DELEGATE_SECURITY_CHECK, TYP_VOID, GTF_EXCEPT,
6127                                                 gtNewArgList(gtNewIconEmbClsHndNode(delegateTypeHnd),
6128                                                              gtNewIconEmbMethHndNode(calleeMethodHnd)));
6129         // Append the callout statement
6130         impAppendTree(helper, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6131     }
6132 #endif // FEATURE_CORECLR
6133 }
6134
6135 // Checks whether the return types of caller and callee are compatible
6136 // so that callee can be tail called. Note that here we don't check
6137 // compatibility in IL Verifier sense, but on the lines of return type
6138 // sizes are equal and get returned in the same return register.
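// For example (added commentary, not from the original source): on AMD64, a caller returning 'uint'
// may tail call a callee returning 'int' (same size, returned in the same register), whereas a
// caller returning 'float' may not tail call a callee returning 'double', since the caller would
// have to convert the returned value.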
6139 bool Compiler::impTailCallRetTypeCompatible(var_types            callerRetType,
6140                                             CORINFO_CLASS_HANDLE callerRetTypeClass,
6141                                             var_types            calleeRetType,
6142                                             CORINFO_CLASS_HANDLE calleeRetTypeClass)
6143 {
6144     // Note that we cannot relax this condition with genActualType() as the
6145     // calling convention dictates that the caller of a function with a small
6146     // typed return value is responsible for normalizing the return value.
6147     if (callerRetType == calleeRetType)
6148     {
6149         return true;
6150     }
6151
6152 #if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
6153     // Jit64 compat:
6154     if (callerRetType == TYP_VOID)
6155     {
6156         // This needs to be allowed to support the following IL pattern that Jit64 allows:
6157         //     tail.call
6158         //     pop
6159         //     ret
6160         //
6161         // Note that the above IL pattern is not valid as per IL verification rules.
6162         // Therefore, only full trust code can take advantage of this pattern.
6163         return true;
6164     }
6165
6166     // These checks return true if the return value type sizes are the same and
6167     // get returned in the same return register i.e. caller doesn't need to normalize
6168     // return value. Some of the tail calls permitted by below checks would have
6169     // been rejected by IL Verifier before we reached here.  Therefore, only full
6170     // trust code can make those tail calls.
6171     unsigned callerRetTypeSize = 0;
6172     unsigned calleeRetTypeSize = 0;
6173     bool     isCallerRetTypMBEnreg =
6174         VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize, true);
6175     bool isCalleeRetTypMBEnreg =
6176         VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize, true);
6177
6178     if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg)
6179     {
6180         return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
6181     }
6182 #endif // _TARGET_AMD64_ || _TARGET_ARM64_
6183
6184     return false;
6185 }
6186
6187 // For prefixFlags
6188 enum
6189 {
6190     PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix
6191     PREFIX_TAILCALL_IMPLICIT =
6192         0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix
6193     PREFIX_TAILCALL    = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT),
6194     PREFIX_VOLATILE    = 0x00000100,
6195     PREFIX_UNALIGNED   = 0x00001000,
6196     PREFIX_CONSTRAINED = 0x00010000,
6197     PREFIX_READONLY    = 0x00100000
6198 };
6199
6200 /********************************************************************************
6201  *
6202  * Returns true if the current opcode and the opcodes following it correspond
6203  * to a supported tail call IL pattern.
6204  *
6205  */
6206 bool Compiler::impIsTailCallILPattern(bool        tailPrefixed,
6207                                       OPCODE      curOpcode,
6208                                       const BYTE* codeAddrOfNextOpcode,
6209                                       const BYTE* codeEnd,
6210                                       bool        isRecursive,
6211                                       bool*       isCallPopAndRet /* = nullptr */)
6212 {
6213     // Bail out if the current opcode is not a call.
6214     if (!impOpcodeIsCallOpcode(curOpcode))
6215     {
6216         return false;
6217     }
6218
6219 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6220     // If shared ret tail opt is not enabled, we will enable
6221     // it for recursive methods.
6222     if (isRecursive)
6223 #endif
6224     {
6225         // We can actually handle the case where the ret is in a fall-through block, as long as that is the only part of the
6226         // sequence. Make sure we don't go past the end of the IL, however.
6227         codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize);
6228     }
6229
6230     // Bail out if there is no next opcode after call
6231     if (codeAddrOfNextOpcode >= codeEnd)
6232     {
6233         return false;
6234     }
6235
6236     // Scan the opcodes to look for the following IL patterns if either
6237     //   i) the call is not tail prefixed (i.e. implicit tail call) or
6238     //  ii) if tail prefixed, IL verification is not needed for the method.
6239     //
6240     // Only in the above two cases we can allow the below tail call patterns
6241     // violating ECMA spec.
6242     //
6243     // Pattern1:
6244     //       call
6245     //       nop*
6246     //       ret
6247     //
6248     // Pattern2:
6249     //       call
6250     //       nop*
6251     //       pop
6252     //       nop*
6253     //       ret
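    //
    // For illustration (added commentary, not from the original source), the following IL matches
    // Pattern2 and is accepted on AMD64 provided the caller's return type is void:
    //
    //       call   int32 C::M()
    //       pop
    //       ret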
6254     int    cntPop = 0;
6255     OPCODE nextOpcode;
6256
6257 #ifdef _TARGET_AMD64_
6258     do
6259     {
6260         nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6261         codeAddrOfNextOpcode += sizeof(__int8);
6262     } while ((codeAddrOfNextOpcode < codeEnd) &&         // Haven't reached end of method
6263              (!tailPrefixed || !tiVerificationNeeded) && // Not ".tail" prefixed or method requires no IL verification
6264              ((nextOpcode == CEE_NOP) || ((nextOpcode == CEE_POP) && (++cntPop == 1)))); // Next opcode = nop or exactly
6265                                                                                          // one pop seen so far.
6266 #else
6267     nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6268 #endif
6269
6270     if (isCallPopAndRet)
6271     {
6272         // Allow call+pop+ret to be tail call optimized if caller ret type is void
6273         *isCallPopAndRet = (nextOpcode == CEE_RET) && (cntPop == 1);
6274     }
6275
6276 #ifdef _TARGET_AMD64_
6277     // Jit64 Compat:
6278     // Tail call IL pattern could be either of the following
6279     // 1) call/callvirt/calli + ret
6280     // 2) call/callvirt/calli + pop + ret in a method returning void.
6281     return (nextOpcode == CEE_RET) && ((cntPop == 0) || ((cntPop == 1) && (info.compRetType == TYP_VOID)));
6282 #else //!_TARGET_AMD64_
6283     return (nextOpcode == CEE_RET) && (cntPop == 0);
6284 #endif
6285 }
6286
6287 /*****************************************************************************
6288  *
6289  * Determine whether the call could be converted to an implicit tail call
6290  *
6291  */
6292 bool Compiler::impIsImplicitTailCallCandidate(
6293     OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive)
6294 {
6295
6296 #if FEATURE_TAILCALL_OPT
6297     if (!opts.compTailCallOpt)
6298     {
6299         return false;
6300     }
6301
6302     if (opts.compDbgCode || opts.MinOpts())
6303     {
6304         return false;
6305     }
6306
6307     // must not be tail prefixed
6308     if (prefixFlags & PREFIX_TAILCALL_EXPLICIT)
6309     {
6310         return false;
6311     }
6312
6313 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6314     // the block containing call is marked as BBJ_RETURN
6315     // We allow shared ret tail call optimization on recursive calls even under
6316     // !FEATURE_TAILCALL_OPT_SHARED_RETURN.
6317     if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN))
6318         return false;
6319 #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN
6320
6321     // must be call+ret or call+pop+ret
6322     if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive))
6323     {
6324         return false;
6325     }
6326
6327     return true;
6328 #else
6329     return false;
6330 #endif // FEATURE_TAILCALL_OPT
6331 }
6332
6333 //------------------------------------------------------------------------
6334 // impImportCall: import a call-inspiring opcode
6335 //
6336 // Arguments:
6337 //    opcode                    - opcode that inspires the call
6338 //    pResolvedToken            - resolved token for the call target
6339 //    pConstrainedResolvedToken - resolved constraint token (or nullptr)
6340 //    newobjThis                - tree for this pointer or uninitialized newobj temp (or nullptr)
6341 //    prefixFlags               - IL prefix flags for the call
6342 //    callInfo                  - EE supplied info for the call
6343 //    rawILOffset               - IL offset of the opcode
6344 //
6345 // Returns:
6346 //    Type of the call's return value.
6347 //
6348 // Notes:
6349 //    opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ.
6350 //
6351 //    For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated
6352 //    uninitalized object.
6353
6354 #ifdef _PREFAST_
6355 #pragma warning(push)
6356 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
6357 #endif
6358
6359 var_types Compiler::impImportCall(OPCODE                  opcode,
6360                                   CORINFO_RESOLVED_TOKEN* pResolvedToken,
6361                                   CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
6362                                   GenTreePtr              newobjThis,
6363                                   int                     prefixFlags,
6364                                   CORINFO_CALL_INFO*      callInfo,
6365                                   IL_OFFSET               rawILOffset)
6366 {
6367     assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI);
6368
6369     IL_OFFSETX             ilOffset                       = impCurILOffset(rawILOffset, true);
6370     var_types              callRetTyp                     = TYP_COUNT;
6371     CORINFO_SIG_INFO*      sig                            = nullptr;
6372     CORINFO_METHOD_HANDLE  methHnd                        = nullptr;
6373     CORINFO_CLASS_HANDLE   clsHnd                         = nullptr;
6374     unsigned               clsFlags                       = 0;
6375     unsigned               mflags                         = 0;
6376     unsigned               argFlags                       = 0;
6377     GenTreePtr             call                           = nullptr;
6378     GenTreeArgList*        args                           = nullptr;
6379     CORINFO_THIS_TRANSFORM constraintCallThisTransform    = CORINFO_NO_THIS_TRANSFORM;
6380     CORINFO_CONTEXT_HANDLE exactContextHnd                = nullptr;
6381     BOOL                   exactContextNeedsRuntimeLookup = FALSE;
6382     bool                   canTailCall                    = true;
6383     const char*            szCanTailCallFailReason        = nullptr;
6384     int                    tailCall                       = prefixFlags & PREFIX_TAILCALL;
6385     bool                   readonlyCall                   = (prefixFlags & PREFIX_READONLY) != 0;
6386
6387     // Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could
6388     // do that before tailcalls, but that is probably not the intended
6389     // semantic. So just disallow tailcalls from synchronized methods.
6390     // Also, popping arguments in a varargs function is more work and NYI
6391     // If we have a security object, we have to keep our frame around for callers
6392     // to see any imperative security.
6393     if (info.compFlags & CORINFO_FLG_SYNCH)
6394     {
6395         canTailCall             = false;
6396         szCanTailCallFailReason = "Caller is synchronized";
6397     }
6398 #if !FEATURE_FIXED_OUT_ARGS
6399     else if (info.compIsVarArgs)
6400     {
6401         canTailCall             = false;
6402         szCanTailCallFailReason = "Caller is varargs";
6403     }
6404 #endif // FEATURE_FIXED_OUT_ARGS
6405     else if (opts.compNeedSecurityCheck)
6406     {
6407         canTailCall             = false;
6408         szCanTailCallFailReason = "Caller requires a security check.";
6409     }
6410
6411     // We only need to cast the return value of pinvoke inlined calls that return small types
6412
6413     // TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop
6414     // widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there.
6415     // The existing x64 JIT doesn't bother widening all types to int, so we have to assume for
6416     // the time being that the callee might be compiled by the other JIT and thus the return
6417     // value will need to be widened by us (or not widened at all...)
6418
6419     // ReadyToRun code sticks with default calling convention that does not widen small return types.
6420
6421     bool checkForSmallType  = opts.IsJit64Compat() || opts.IsReadyToRun();
6422     bool bIntrinsicImported = false;
6423
6424     CORINFO_SIG_INFO calliSig;
6425     GenTreeArgList*  extraArg = nullptr;
6426
6427     /*-------------------------------------------------------------------------
6428      * First create the call node
6429      */
6430
6431     if (opcode == CEE_CALLI)
6432     {
6433         /* Get the call site sig */
6434         eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &calliSig);
6435
6436         callRetTyp = JITtype2varType(calliSig.retType);
6437
6438         call = impImportIndirectCall(&calliSig, ilOffset);
6439
6440         // We don't know the target method, so we have to infer the flags, or
6441         // assume the worst-case.
6442         mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
6443
6444 #ifdef DEBUG
6445         if (verbose)
6446         {
6447             unsigned structSize =
6448                 (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0;
6449             printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
6450                    opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
6451         }
6452 #endif
6453         // This should be checked in impImportBlockCode.
6454         assert(!compIsForInlining() || !(impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY));
6455
6456         sig = &calliSig;
6457
6458 #ifdef DEBUG
6459         // We cannot lazily obtain the signature of a CALLI call because it has no method
6460         // handle that we can use, so we need to save its full call signature here.
6461         assert(call->gtCall.callSig == nullptr);
6462         call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
6463         *call->gtCall.callSig = calliSig;
6464 #endif // DEBUG
6465     }
6466     else // (opcode != CEE_CALLI)
6467     {
6468         CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Count;
6469
6470         // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to
6471         // supply the instantiation parameters necessary to make direct calls to underlying
6472         // shared generic code, rather than calling through instantiating stubs.  If the
6473         // returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT
6474         // must indeed pass an instantiation parameter.
6475
6476         methHnd = callInfo->hMethod;
6477
6478         sig        = &(callInfo->sig);
6479         callRetTyp = JITtype2varType(sig->retType);
6480
6481         mflags = callInfo->methodFlags;
6482
6483 #ifdef DEBUG
6484         if (verbose)
6485         {
6486             unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0;
6487             printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
6488                    opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
6489         }
6490 #endif
6491         if (compIsForInlining())
6492         {
6493             /* Does this call site have security boundary restrictions? */
6494
6495             if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
6496             {
6497                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
6498                 return callRetTyp;
6499             }
6500
6501             /* Does the inlinee need a security check token on the frame */
6502
6503             if (mflags & CORINFO_FLG_SECURITYCHECK)
6504             {
6505                 compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
6506                 return callRetTyp;
6507             }
6508
6509             /* Does the inlinee use StackCrawlMark */
6510
6511             if (mflags & CORINFO_FLG_DONT_INLINE_CALLER)
6512             {
6513                 compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK);
6514                 return callRetTyp;
6515             }
6516
6517             /* For now ignore delegate invoke */
6518
6519             if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
6520             {
6521                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_DELEGATE_INVOKE);
6522                 return callRetTyp;
6523             }
6524
6525             /* For now ignore varargs */
6526             if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6527             {
6528                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS);
6529                 return callRetTyp;
6530             }
6531
6532             if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
6533             {
6534                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
6535                 return callRetTyp;
6536             }
6537
6538             if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT))
6539             {
6540                 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL);
6541                 return callRetTyp;
6542             }
6543         }
6544
6545         clsHnd = pResolvedToken->hClass;
6546
6547         clsFlags = callInfo->classFlags;
6548
6549 #ifdef DEBUG
6550         // If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute.
6551
6552         // This recognition should really be done by knowing the methHnd of the relevant Mark method(s).
6553         // These should be in mscorlib.h, and available through a JIT/EE interface call.
6554         const char* modName;
6555         const char* className;
6556         const char* methodName;
6557         if ((className = eeGetClassName(clsHnd)) != nullptr &&
6558             strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 &&
6559             (methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0)
6560         {
6561             return impImportJitTestLabelMark(sig->numArgs);
6562         }
6563 #endif // DEBUG
6564
6565         // <NICE> Factor this into getCallInfo </NICE>
6566         if ((mflags & CORINFO_FLG_INTRINSIC) && !pConstrainedResolvedToken)
6567         {
6568             call = impIntrinsic(newobjThis, clsHnd, methHnd, sig, pResolvedToken->token, readonlyCall,
6569                                 (canTailCall && (tailCall != 0)), &intrinsicID);
6570
6571             if (call != nullptr)
6572             {
6573                 assert(!(mflags & CORINFO_FLG_VIRTUAL) || (mflags & CORINFO_FLG_FINAL) ||
6574                        (clsFlags & CORINFO_FLG_FINAL));
6575
6576 #ifdef FEATURE_READYTORUN_COMPILER
6577                 if (call->OperGet() == GT_INTRINSIC)
6578                 {
6579                     if (opts.IsReadyToRun())
6580                     {
6581                         noway_assert(callInfo->kind == CORINFO_CALL);
6582                         call->gtIntrinsic.gtEntryPoint = callInfo->codePointerLookup.constLookup;
6583                     }
6584                     else
6585                     {
6586                         call->gtIntrinsic.gtEntryPoint.addr = nullptr;
6587                     }
6588                 }
6589 #endif
6590
6591                 bIntrinsicImported = true;
6592                 goto DONE_CALL;
6593             }
6594         }
6595
6596 #ifdef FEATURE_SIMD
6597         if (featureSIMD)
6598         {
6599             call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, pResolvedToken->token);
6600             if (call != nullptr)
6601             {
6602                 bIntrinsicImported = true;
6603                 goto DONE_CALL;
6604             }
6605         }
6606 #endif // FEATURE_SIMD
6607
6608         if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT))
6609         {
6610             NO_WAY("Virtual call to a function added via EnC is not supported");
6611             goto DONE_CALL;
6612         }
6613
6614         if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT &&
6615             (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6616             (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG)
6617         {
6618             BADCODE("Bad calling convention");
6619         }
6620
6621         //-------------------------------------------------------------------------
6622         //  Construct the call node
6623         //
6624         // Work out what sort of call we're making.
6625         // Dispense with virtual calls implemented via LDVIRTFTN immediately.
6626
6627         constraintCallThisTransform = callInfo->thisTransform;
6628
6629         exactContextHnd                = callInfo->contextHandle;
6630         exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup;
6631
6632         // A recursive call is treated as a loop back to the beginning of the method.
6633         if (methHnd == info.compMethodHnd)
6634         {
6635 #ifdef DEBUG
6636             if (verbose)
6637             {
6638                 JITDUMP("\nFound recursive call in the method. Mark BB%02u to BB%02u as having a backward branch.\n",
6639                         fgFirstBB->bbNum, compCurBB->bbNum);
6640             }
6641 #endif
6642             fgMarkBackwardJump(fgFirstBB, compCurBB);
6643         }
6644
6645         switch (callInfo->kind)
6646         {
6647
6648             case CORINFO_VIRTUALCALL_STUB:
6649             {
6650                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6651                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6652                 if (callInfo->stubLookup.lookupKind.needsRuntimeLookup)
6653                 {
6654
6655                     if (compIsForInlining())
6656                     {
6657                         // Don't import runtime lookups when inlining
6658                         // Inlining has to be aborted in such a case
6659                         /* XXX Fri 3/20/2009
6660                          * By the way, this would never succeed.  If the handle lookup is into the generic
6661                          * dictionary for a candidate, you'll generate different dictionary offsets and the
6662                          * inlined code will crash.
6663                          *
6664                          * To anyone reviewing this code, when could this ever succeed in the future?  It'll
6665                          * always have a handle lookup.  These lookups are safe intra-module, but we're just
6666                          * failing here.
6667                          */
6668                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE);
6669                         return callRetTyp;
6670                     }
6671
6672                     GenTreePtr stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd);
6673                     assert(!compDonotInline());
6674
6675                     // This is the rough code to set up an indirect stub call
6676                     assert(stubAddr != nullptr);
6677
6678                     // The stubAddr may be a
6679                     // complex expression. As it is evaluated after the args,
6680                     // it may cause registered args to be spilled. Simply spill it.
6681
6682                     unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup"));
6683                     impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_ALL);
6684                     stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6685
6686                     // Create the actual call node
6687
6688                     assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6689                            (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6690
6691                     call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr);
6692
6693                     call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT);
6694                     call->gtFlags |= GTF_CALL_VIRT_STUB;
6695
6696 #ifdef _TARGET_X86_
6697                     // No tailcalls allowed for these yet...
6698                     canTailCall             = false;
6699                     szCanTailCallFailReason = "VirtualCall with runtime lookup";
6700 #endif
6701                 }
6702                 else
6703                 {
6704                     // OK, the stub is available at compile time.
6705
6706                     call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6707                     call->gtCall.gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr;
6708                     call->gtFlags |= GTF_CALL_VIRT_STUB;
6709                     assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE);
6710                     if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE)
6711                     {
6712                         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
6713                     }
6714                 }
6715
6716 #ifdef FEATURE_READYTORUN_COMPILER
6717                 if (opts.IsReadyToRun())
6718                 {
6719                     // Null check is sometimes needed for ready to run to handle
6720                     // non-virtual <-> virtual changes between versions
6721                     if (callInfo->nullInstanceCheck)
6722                     {
6723                         call->gtFlags |= GTF_CALL_NULLCHECK;
6724                     }
6725                 }
6726 #endif
6727
6728                 break;
6729             }
6730
6731             case CORINFO_VIRTUALCALL_VTABLE:
6732             {
6733                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6734                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6735                 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6736                 call->gtFlags |= GTF_CALL_VIRT_VTABLE;
6737                 break;
6738             }
6739
6740             case CORINFO_VIRTUALCALL_LDVIRTFTN:
6741             {
6742                 if (compIsForInlining())
6743                 {
6744                     compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN);
6745                     return callRetTyp;
6746                 }
6747
6748                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6749                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6750                 // OK, we've been told to call via LDVIRTFTN, so just
6751                 // make the call now.
6752
6753                 args = impPopList(sig->numArgs, &argFlags, sig);
6754
6755                 GenTreePtr thisPtr = impPopStack().val;
6756                 thisPtr            = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
6757                 if (compDonotInline())
6758                 {
6759                     return callRetTyp;
6760                 }
6761
6762                 // Clone the (possibly transformed) "this" pointer
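                // One copy feeds the ldvirtftn lookup below; the other becomes the
                // 'this' argument of the call.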
6763                 GenTreePtr thisPtrCopy;
6764                 thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
6765                                        nullptr DEBUGARG("LDVIRTFTN this pointer"));
6766
6767                 GenTreePtr fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
6768                 if (compDonotInline())
6769                 {
6770                     return callRetTyp;
6771                 }
6772
6773                 thisPtr = nullptr; // can't reuse it
6774
6775                 // Now make an indirect call through the function pointer
6776
6777                 unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer"));
6778                 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
6779                 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6780
6781                 // Create the actual call node
6782
6783                 call                    = gtNewIndCallNode(fptr, callRetTyp, args, ilOffset);
6784                 call->gtCall.gtCallObjp = thisPtrCopy;
6785                 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6786
6787 #ifdef FEATURE_READYTORUN_COMPILER
6788                 if (opts.IsReadyToRun())
6789                 {
6790                     // Null check is needed for ready to run to handle
6791                     // non-virtual <-> virtual changes between versions
6792                     call->gtFlags |= GTF_CALL_NULLCHECK;
6793                 }
6794 #endif
6795
6796                 // Since we are jumping over some code, check that it's OK to skip that code.
6797                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6798                        (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6799                 goto DONE;
6800             }
6801
6802             case CORINFO_CALL:
6803             {
6804                 // This is for a non-virtual, non-interface etc. call
6805                 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6806
6807                 // We remove the nullcheck for the GetType call intrinsic.
6808                 // TODO-CQ: JIT64 does not introduce the null check for many more helper calls
6809                 // and intrinsics.
6810                 if (callInfo->nullInstanceCheck &&
6811                     !((mflags & CORINFO_FLG_INTRINSIC) != 0 && (intrinsicID == CORINFO_INTRINSIC_Object_GetType)))
6812                 {
6813                     call->gtFlags |= GTF_CALL_NULLCHECK;
6814                 }
6815
6816 #ifdef FEATURE_READYTORUN_COMPILER
6817                 if (opts.IsReadyToRun())
6818                 {
6819                     call->gtCall.setEntryPoint(callInfo->codePointerLookup.constLookup);
6820                 }
6821 #endif
6822                 break;
6823             }
6824
6825             case CORINFO_CALL_CODE_POINTER:
6826             {
6827                 // The EE has asked us to call by computing a code pointer and then doing an
6828                 // indirect call.  This is because a runtime lookup is required to get the code entry point.
6829
6830                 // These calls always follow a uniform calling convention, i.e. no extra hidden params
6831                 assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0);
6832
6833                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG);
6834                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6835
6836                 GenTreePtr fptr =
6837                     impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod);
6838
6839                 if (compDonotInline())
6840                 {
6841                     return callRetTyp;
6842                 }
6843
6844                 // Now make an indirect call through the function pointer
6845
6846                 unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer"));
6847                 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
6848                 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6849
6850                 call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
6851                 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6852                 if (callInfo->nullInstanceCheck)
6853                 {
6854                     call->gtFlags |= GTF_CALL_NULLCHECK;
6855                 }
6856
6857                 break;
6858             }
6859
6860             default:
6861                 assert(!"unknown call kind");
6862                 break;
6863         }
6864
6865         //-------------------------------------------------------------------------
6866         // Set more flags
6867
6868         PREFIX_ASSUME(call != nullptr);
6869
6870         if (mflags & CORINFO_FLG_NOGCCHECK)
6871         {
6872             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK;
6873         }
6874
6875         // Mark call if it's one of the ones we will maybe treat as an intrinsic
6876         if (intrinsicID == CORINFO_INTRINSIC_Object_GetType || intrinsicID == CORINFO_INTRINSIC_TypeEQ ||
6877             intrinsicID == CORINFO_INTRINSIC_TypeNEQ || intrinsicID == CORINFO_INTRINSIC_GetCurrentManagedThread ||
6878             intrinsicID == CORINFO_INTRINSIC_GetManagedThreadId)
6879         {
6880             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC;
6881         }
6882     }
6883     assert(sig);
6884     assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set.
6885
6886     /* Some sanity checks */
6887
6888     // CALL_VIRT and NEWOBJ must have a THIS pointer
6889     assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS));
6890     // static bit and hasThis are negations of one another
6891     assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0));
6892     assert(call != nullptr);
6893
6894     /*-------------------------------------------------------------------------
6895      * Check special-cases etc
6896      */
6897
6898     /* Special case - Check if it is a call to Delegate.Invoke(). */
6899
6900     if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
6901     {
6902         assert(!compIsForInlining());
6903         assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6904         assert(mflags & CORINFO_FLG_FINAL);
6905
6906         /* Set the delegate flag */
6907         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV;
6908
6909         if (callInfo->secureDelegateInvoke)
6910         {
6911             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SECURE_DELEGATE_INV;
6912         }
6913
6914         if (opcode == CEE_CALLVIRT)
6915         {
6916             assert(mflags & CORINFO_FLG_FINAL);
6917
6918             /* It should have the GTF_CALL_NULLCHECK flag set. Reset it */
6919             assert(call->gtFlags & GTF_CALL_NULLCHECK);
6920             call->gtFlags &= ~GTF_CALL_NULLCHECK;
6921         }
6922     }
6923
6924     CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass;
6925     actualMethodRetTypeSigClass = sig->retTypeSigClass;
6926     if (varTypeIsStruct(callRetTyp))
6927     {
6928         callRetTyp   = impNormStructType(actualMethodRetTypeSigClass);
6929         call->gtType = callRetTyp;
6930     }
6931
6932 #if !FEATURE_VARARG
6933     /* Check for varargs */
6934     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
6935         (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6936     {
6937         BADCODE("Varargs not supported.");
6938     }
6939 #endif // !FEATURE_VARARG
6940
6941     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
6942         (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6943     {
6944         assert(!compIsForInlining());
6945
6946         /* Set the right flags */
6947
6948         call->gtFlags |= GTF_CALL_POP_ARGS;
6949         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VARARGS;
6950
6951         /* Can't allow tailcall for varargs as it is caller-pop. The caller
6952            will be expecting to pop a certain number of arguments, but if we
6953            tailcall to a function with a different number of arguments, we
6954            are hosed. There are ways around this (caller remembers esp value,
6955            varargs is not caller-pop, etc), but not worth it. */
6956         CLANG_FORMAT_COMMENT_ANCHOR;
6957
6958 #ifdef _TARGET_X86_
6959         if (canTailCall)
6960         {
6961             canTailCall             = false;
6962             szCanTailCallFailReason = "Callee is varargs";
6963         }
6964 #endif
6965
6966         /* Get the total number of arguments - this is already correct
6967          * for CALLI - for methods we have to get it from the call site */
6968
6969         if (opcode != CEE_CALLI)
6970         {
6971 #ifdef DEBUG
6972             unsigned numArgsDef = sig->numArgs;
6973 #endif
6974             eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, sig);
6975
6976 #ifdef DEBUG
6977             // We cannot lazily obtain the signature of a vararg call because using its method
6978             // handle will give us only the declared argument list, not the full argument list.
6979             assert(call->gtCall.callSig == nullptr);
6980             call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
6981             *call->gtCall.callSig = *sig;
6982 #endif
6983
6984             // For vararg calls we must be sure to load the return type of the
6985             // method actually being called, as well as the return type specified
6986             // in the vararg signature. With type equivalence, these types may not
6987             // be the same.
6988             if (sig->retTypeSigClass != actualMethodRetTypeSigClass)
6989             {
6990                 if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
6991                     sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR &&
6992                     sig->retType != CORINFO_TYPE_VAR)
6993                 {
6994                     // Make sure that all valuetypes (including enums) that we push are loaded.
6995                     // This is to guarantee that if a GC is triggered from the prestub of this method,
6996                     // all valuetypes in the method signature are already loaded.
6997                     // We need to be able to find the size of the valuetypes, but we cannot
6998                     // do a class-load from within GC.
6999                     info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass);
7000                 }
7001             }
7002
7003             assert(numArgsDef <= sig->numArgs);
7004         }
7005
7006         /* We will have "cookie" as the last argument but we cannot push
7007          * it on the operand stack because we may overflow, so we append it
7008          * to the arg list after we pop the other arguments */
7009     }
7010
7011     if (mflags & CORINFO_FLG_SECURITYCHECK)
7012     {
7013         assert(!compIsForInlining());
7014
7015         // Need security prolog/epilog callouts when there is
7016         // imperative security in the method. This is to give security a
7017         // chance to do any setup in the prolog and cleanup in the epilog if needed.
7018
7019         if (compIsForInlining())
7020         {
7021             // Cannot handle this if the method being imported is itself an inlinee,
7022             // because an inlinee method does not have its own frame.
7023
7024             compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
7025             return callRetTyp;
7026         }
7027         else
7028         {
7029             tiSecurityCalloutNeeded = true;
7030
7031             // If the current method calls a method which needs a security check,
7032             // (i.e. the method being compiled has imperative security)
7033             // we need to reserve a slot for the security object in
7034             // the current method's stack frame
7035             opts.compNeedSecurityCheck = true;
7036         }
7037     }
7038
7039     //--------------------------- Inline NDirect ------------------------------
7040
7041     // For inline cases we technically should look at both the current
7042     // block and the call site block (or just the latter if we've
7043     // fused the EH trees). However the block-related checks pertain to
7044     // EH and we currently won't inline a method with EH. So for
7045     // inlinees, just checking the call site block is sufficient.
7046     {
7047         // New lexical block here to avoid compilation errors because of GOTOs.
7048         BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
7049         impCheckForPInvokeCall(call, methHnd, sig, mflags, block);
7050     }
7051
7052     if (call->gtFlags & GTF_CALL_UNMANAGED)
7053     {
7054         // We set up the unmanaged call by linking the frame, disabling GC, etc
7055         // This needs to be cleaned up on return
7056         if (canTailCall)
7057         {
7058             canTailCall             = false;
7059             szCanTailCallFailReason = "Callee is native";
7060         }
7061
7062         checkForSmallType = true;
7063
7064         impPopArgsForUnmanagedCall(call, sig);
7065
7066         goto DONE;
7067     }
7068     else if ((opcode == CEE_CALLI) && (((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_STDCALL) ||
7069                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_C) ||
7070                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_THISCALL) ||
7071                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_FASTCALL)))
7072     {
7073         if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig))
7074         {
7075             // Normally this only happens with inlining.
7076             // However, a generic method (or type) being NGENd into another module
7077             // can run into this issue as well.  There's not an easy fall-back for NGEN
7078             // so instead we fall back to JIT.
7079             if (compIsForInlining())
7080             {
7081                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE);
7082             }
7083             else
7084             {
7085                 IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)");
7086             }
7087
7088             return callRetTyp;
7089         }
7090
7091         GenTreePtr cookie = eeGetPInvokeCookie(sig);
7092
7093         // This cookie is required to be either a simple GT_CNS_INT or
7094         // an indirection of a GT_CNS_INT
7095         //
7096         GenTreePtr cookieConst = cookie;
7097         if (cookie->gtOper == GT_IND)
7098         {
7099             cookieConst = cookie->gtOp.gtOp1;
7100         }
7101         assert(cookieConst->gtOper == GT_CNS_INT);
7102
7103         // Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that
7104         // we won't allow this tree to participate in any CSE logic
7105         //
7106         cookie->gtFlags |= GTF_DONT_CSE;
7107         cookieConst->gtFlags |= GTF_DONT_CSE;
7108
7109         call->gtCall.gtCallCookie = cookie;
7110
7111         if (canTailCall)
7112         {
7113             canTailCall             = false;
7114             szCanTailCallFailReason = "PInvoke calli";
7115         }
7116     }
7117
7118     /*-------------------------------------------------------------------------
7119      * Create the argument list
7120      */
7121
7122     //-------------------------------------------------------------------------
7123     // Special case - for varargs we have an implicit last argument
7124
7125     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
7126     {
7127         assert(!compIsForInlining());
7128
7129         void *varCookie, *pVarCookie;
7130         if (!info.compCompHnd->canGetVarArgsHandle(sig))
7131         {
7132             compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE);
7133             return callRetTyp;
7134         }
7135
7136         varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie);
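        // Exactly one of the direct handle and the indirection pointer is expected
        // to be non-null, as asserted below.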
7137         assert((!varCookie) != (!pVarCookie));
7138         GenTreePtr cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL);
7139
7140         assert(extraArg == nullptr);
7141         extraArg = gtNewArgList(cookie);
7142     }
7143
7144     //-------------------------------------------------------------------------
7145     // Extra arg for shared generic code and array methods
7146     //
7147     // Extra argument containing instantiation information is passed in the
7148     // following circumstances:
7149     // (a) To the "Address" method on array classes; the extra parameter is
7150     //     the array's type handle (a TypeDesc)
7151     // (b) To shared-code instance methods in generic structs; the extra parameter
7152     //     is the struct's type handle (a vtable ptr)
7153     // (c) To shared-code per-instantiation non-generic static methods in generic
7154     //     classes and structs; the extra parameter is the type handle
7155     // (d) To shared-code generic methods; the extra parameter is an
7156     //     exact-instantiation MethodDesc
7157     //
7158     // We also set the exact type context associated with the call so we can
7159     // inline the call correctly later on.
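    //
    // For example, for case (d) a shared generic method compiled for a __Canon
    // instantiation typically receives the MethodDesc of the exact instantiation
    // being invoked as this extra argument.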
7160
7161     if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE)
7162     {
7163         assert(call->gtCall.gtCallType == CT_USER_FUNC);
7164         if (clsHnd == nullptr)
7165         {
7166             NO_WAY("CALLI on parameterized type");
7167         }
7168
7169         assert(opcode != CEE_CALLI);
7170
7171         GenTreePtr instParam;
7172         BOOL       runtimeLookup;
7173
7174         // Instantiated generic method
7175         if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD)
7176         {
7177             CORINFO_METHOD_HANDLE exactMethodHandle =
7178                 (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7179
7180             if (!exactContextNeedsRuntimeLookup)
7181             {
7182 #ifdef FEATURE_READYTORUN_COMPILER
7183                 if (opts.IsReadyToRun())
7184                 {
7185                     instParam =
7186                         impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle);
7187                     if (instParam == nullptr)
7188                     {
7189                         return callRetTyp;
7190                     }
7191                 }
7192                 else
7193 #endif
7194                 {
7195                     instParam = gtNewIconEmbMethHndNode(exactMethodHandle);
7196                     info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle);
7197                 }
7198             }
7199             else
7200             {
7201                 instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7202                 if (instParam == nullptr)
7203                 {
7204                     return callRetTyp;
7205                 }
7206             }
7207         }
7208
7209         // otherwise must be an instance method in a generic struct,
7210         // a static method in a generic type, or a runtime-generated array method
7211         else
7212         {
7213             assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS);
7214             CORINFO_CLASS_HANDLE exactClassHandle =
7215                 (CORINFO_CLASS_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7216
7217             if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0)
7218             {
7219                 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD);
7220                 return callRetTyp;
7221             }
7222
7223             if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall)
7224             {
7225                 // We indicate "readonly" to the Address operation by using a null
7226                 // instParam.
7227                 instParam = gtNewIconNode(0, TYP_REF);
7228             }
7229
7230             if (!exactContextNeedsRuntimeLookup)
7231             {
7232 #ifdef FEATURE_READYTORUN_COMPILER
7233                 if (opts.IsReadyToRun())
7234                 {
7235                     instParam =
7236                         impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle);
7237                     if (instParam == nullptr)
7238                     {
7239                         return callRetTyp;
7240                     }
7241                 }
7242                 else
7243 #endif
7244                 {
7245                     instParam = gtNewIconEmbClsHndNode(exactClassHandle);
7246                     info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle);
7247                 }
7248             }
7249             else
7250             {
7251                 instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7252                 if (instParam == nullptr)
7253                 {
7254                     return callRetTyp;
7255                 }
7256             }
7257         }
7258
7259         assert(extraArg == nullptr);
7260         extraArg = gtNewArgList(instParam);
7261     }
7262
7263     // Inlining may need the exact type context (exactContextHnd) if we're inlining shared generic code, in particular
7264     // to inline 'polytypic' operations such as static field accesses, type tests and method calls which
7265     // rely on the exact context. The exactContextHnd is passed back to the JitInterface at appropriate points.
7266     // exactContextHnd is not currently required when inlining shared generic code into shared
7267     // generic code, since the inliner aborts whenever shared code polytypic operations are encountered
7268     // (e.g. anything marked needsRuntimeLookup)
7269     if (exactContextNeedsRuntimeLookup)
7270     {
7271         exactContextHnd = nullptr;
7272     }
7273
7274     //-------------------------------------------------------------------------
7275     // The main group of arguments
7276
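    // Pop the declared arguments off the stack; the extra cookie/instantiation
    // argument, if any, is appended at the end of the argument list.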
7277     args = call->gtCall.gtCallArgs = impPopList(sig->numArgs, &argFlags, sig, extraArg);
7278
7279     if (args)
7280     {
7281         call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
7282     }
7283
7284     //-------------------------------------------------------------------------
7285     // The "this" pointer
7286
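    // Instance calls need a 'this' argument. For newobj, newobjThis is nullptr when
    // the 'constructor' itself returns the object (variable-sized objects), in which
    // case there is nothing to pass here.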
7287     if (!(mflags & CORINFO_FLG_STATIC) && !((opcode == CEE_NEWOBJ) && (newobjThis == nullptr)))
7288     {
7289         GenTreePtr obj;
7290
7291         if (opcode == CEE_NEWOBJ)
7292         {
7293             obj = newobjThis;
7294         }
7295         else
7296         {
7297             obj = impPopStack().val;
7298             obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform);
7299             if (compDonotInline())
7300             {
7301                 return callRetTyp;
7302             }
7303         }
7304
7305         /* Is this a virtual or interface call? */
7306
7307         if ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT)
7308         {
7309             /* only true object pointers can be virtual */
7310
7311             assert(obj->gtType == TYP_REF);
7312         }
7313         else
7314         {
7315             if (impIsThis(obj))
7316             {
7317                 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS;
7318             }
7319         }
7320
7321         /* Store the "this" value in the call */
7322
7323         call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT;
7324         call->gtCall.gtCallObjp = obj;
7325     }
7326
7327     //-------------------------------------------------------------------------
7328     // The "this" pointer for "newobj"
7329
7330     if (opcode == CEE_NEWOBJ)
7331     {
7332         if (clsFlags & CORINFO_FLG_VAROBJSIZE)
7333         {
7334             assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately
7335             // This is a 'new' of a variable sized object, where
7336             // the constructor is to return the object.  In this case
7337             // the constructor claims to return VOID but we know it
7338             // actually returns the new object.
7339             assert(callRetTyp == TYP_VOID);
7340             callRetTyp   = TYP_REF;
7341             call->gtType = TYP_REF;
7342             impSpillSpecialSideEff();
7343
7344             impPushOnStack(call, typeInfo(TI_REF, clsHnd));
7345         }
7346         else
7347         {
7348             if (clsFlags & CORINFO_FLG_DELEGATE)
7349             {
7350                 // The new inliner morphs it in impImportCall.
7351                 // This will allow us to inline the call to the delegate constructor.
7352                 call = fgOptimizeDelegateConstructor(call, &exactContextHnd);
7353             }
7354
7355             if (!bIntrinsicImported)
7356             {
7357
7358 #if defined(DEBUG) || defined(INLINE_DATA)
7359
7360                 // Keep track of the raw IL offset of the call
7361                 call->gtCall.gtRawILOffset = rawILOffset;
7362
7363 #endif // defined(DEBUG) || defined(INLINE_DATA)
7364
7365                 // Is it an inline candidate?
7366                 impMarkInlineCandidate(call, exactContextHnd, callInfo);
7367             }
7368
7369             // append the call node.
7370             impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7371
7372             // Now push the value of the 'new' onto the stack.
7373
7374             // This is a 'new' of a non-variable sized object.
7375             // Append the new node (op1) to the statement list,
7376             // and then push the local holding the value of this
7377             // new instruction on the stack.
7378
7379             if (clsFlags & CORINFO_FLG_VALUECLASS)
7380             {
7381                 assert(newobjThis->gtOper == GT_ADDR && newobjThis->gtOp.gtOp1->gtOper == GT_LCL_VAR);
7382
7383                 unsigned tmp = newobjThis->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
7384                 impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack());
7385             }
7386             else
7387             {
7388                 if (newobjThis->gtOper == GT_COMMA)
7389                 {
7390                     // In coreclr the callout can be inserted even if verification is disabled
7391                     // so we cannot rely on tiVerificationNeeded alone
7392
7393                     // We must have inserted the callout. Get the real newobj.
7394                     newobjThis = newobjThis->gtOp.gtOp2;
7395                 }
7396
7397                 assert(newobjThis->gtOper == GT_LCL_VAR);
7398                 impPushOnStack(gtNewLclvNode(newobjThis->gtLclVarCommon.gtLclNum, TYP_REF), typeInfo(TI_REF, clsHnd));
7399             }
7400         }
7401         return callRetTyp;
7402     }
7403
7404 DONE:
7405
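    // At this point the call node and its argument list are fully constructed;
    // now determine whether we are allowed to tail call.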
7406     if (tailCall)
7407     {
7408         // This check cannot be performed for implicit tail calls, because
7409         // impIsImplicitTailCallCandidate() does not check whether return
7410         // types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT.
7411         // As a result it is possible that in the following case, we find that
7412         // the type stack is non-empty if Callee() is considered for implicit
7413         // tail calling.
7414         //      int Caller(..) { .... void Callee(); ret val; ... }
7415         //
7416         // Note that we cannot check return type compatibility before impImportCall()
7417         // as we don't have the required info, or we would need to duplicate some of
7418         // the logic of impImportCall().
7419         //
7420         // For implicit tail calls, we perform this check after return types are
7421         // known to be compatible.
7422         if ((tailCall & PREFIX_TAILCALL_EXPLICIT) && (verCurrentState.esStackDepth != 0))
7423         {
7424             BADCODE("Stack should be empty after tailcall");
7425         }
7426
7427         // Note that we cannot relax this condition with genActualType() as
7428         // the calling convention dictates that the caller of a function with
7429         // a small-typed return value is responsible for normalizing the return value.
7430
7431         if (canTailCall &&
7432             !impTailCallRetTypeCompatible(info.compRetType, info.compMethodInfo->args.retTypeClass, callRetTyp,
7433                                           callInfo->sig.retTypeClass))
7434         {
7435             canTailCall             = false;
7436             szCanTailCallFailReason = "Return types are not tail call compatible";
7437         }
7438
7439         // Stack empty check for implicit tail calls.
7440         if (canTailCall && (tailCall & PREFIX_TAILCALL_IMPLICIT) && (verCurrentState.esStackDepth != 0))
7441         {
7442 #ifdef _TARGET_AMD64_
7443             // JIT64 Compatibility:  Opportunistic tail call stack mismatch throws a VerificationException
7444             // in JIT64, not an InvalidProgramException.
7445             Verify(false, "Stack should be empty after tailcall");
7446 #else  // !_TARGET_AMD64_
7447             BADCODE("Stack should be empty after tailcall");
7448 #endif // !_TARGET_AMD64_
7449         }
7450
7451         // assert(compCurBB is not a catch, finally or filter block);
7452         // assert(compCurBB is not a try block protected by a finally block);
7453
7454         // Check for permission to tailcall
7455         bool explicitTailCall = (tailCall & PREFIX_TAILCALL_EXPLICIT) != 0;
7456
7457         assert(!explicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN);
7458
7459         if (canTailCall)
7460         {
7461             // True virtual or indirect calls shouldn't pass in a callee handle.
7462             CORINFO_METHOD_HANDLE exactCalleeHnd = ((call->gtCall.gtCallType != CT_USER_FUNC) ||
7463                                                     ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT))
7464                                                        ? nullptr
7465                                                        : methHnd;
7466             GenTreePtr thisArg = call->gtCall.gtCallObjp;
7467
7468             if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, explicitTailCall))
7469             {
7470                 canTailCall = true;
7471                 if (explicitTailCall)
7472                 {
7473                     // In case of explicit tail calls, mark it so that it is not considered
7474                     // for in-lining.
7475                     call->gtCall.gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL;
7476 #ifdef DEBUG
7477                     if (verbose)
7478                     {
7479                         printf("\nGTF_CALL_M_EXPLICIT_TAILCALL bit set for call ");
7480                         printTreeID(call);
7481                         printf("\n");
7482                     }
7483 #endif
7484                 }
7485                 else
7486                 {
7487 #if FEATURE_TAILCALL_OPT
7488                     // Must be an implicit tail call.
7489                     assert((tailCall & PREFIX_TAILCALL_IMPLICIT) != 0);
7490
7491                     // It is possible that a call node is both an inline candidate and marked
7492                     // for opportunistic tail calling.  In-lining happens before morphing of
7493                     // trees.  If in-lining of an in-line candidate gets aborted for whatever
7494                     // reason, it will survive to the morphing stage at which point it will be
7495                     // transformed into a tail call after performing additional checks.
7496
7497                     call->gtCall.gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL;
7498 #ifdef DEBUG
7499                     if (verbose)
7500                     {
7501                         printf("\nGTF_CALL_M_IMPLICIT_TAILCALL bit set for call ");
7502                         printTreeID(call);
7503                         printf("\n");
7504                     }
7505 #endif
7506
7507 #else //! FEATURE_TAILCALL_OPT
7508                     NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls");
7509
7510 #endif // FEATURE_TAILCALL_OPT
7511                 }
7512
7513                 // we can't report success just yet...
7514             }
7515             else
7516             {
7517                 canTailCall = false;
7518 // canTailCall reported its reasons already
7519 #ifdef DEBUG
7520                 if (verbose)
7521                 {
7522                     printf("\ninfo.compCompHnd->canTailCall returned false for call ");
7523                     printTreeID(call);
7524                     printf("\n");
7525                 }
7526 #endif
7527             }
7528         }
7529         else
7530         {
7531             // If this assert fires it means that canTailCall was set to false without setting a reason!
7532             assert(szCanTailCallFailReason != nullptr);
7533
7534 #ifdef DEBUG
7535             if (verbose)
7536             {
7537                 printf("\nRejecting %splicit tail call for call ", explicitTailCall ? "ex" : "im");
7538                 printTreeID(call);
7539                 printf(": %s\n", szCanTailCallFailReason);
7540             }
7541 #endif
7542             info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, explicitTailCall, TAILCALL_FAIL,
7543                                                      szCanTailCallFailReason);
7544         }
7545     }
7546
7547 // Note: we assume that small return types are already normalized by the managed callee
7548 // or by the pinvoke stub for calls to unmanaged code.
7549
7550 DONE_CALL:
7551
7552     if (!bIntrinsicImported)
7553     {
7554         //
7555         // Things that need to be checked when bIntrinsicImported is false.
7556         //
7557
7558         assert(call->gtOper == GT_CALL);
7559         assert(sig != nullptr);
7560
7561         // Tail calls require us to save the call site's sig info so we can obtain an argument
7562         // copying thunk from the EE later on.
7563         if (call->gtCall.callSig == nullptr)
7564         {
7565             call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7566             *call->gtCall.callSig = *sig;
7567         }
7568
7569         if (compIsForInlining() && opcode == CEE_CALLVIRT)
7570         {
7571             GenTreePtr callObj = call->gtCall.gtCallObjp;
7572             assert(callObj != nullptr);
7573
7574             unsigned callKind = call->gtFlags & GTF_CALL_VIRT_KIND_MASK;
7575
7576             if (((callKind != GTF_CALL_NONVIRT) || (call->gtFlags & GTF_CALL_NULLCHECK)) &&
7577                 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(call->gtCall.gtCallArgs, callObj,
7578                                                                    impInlineInfo->inlArgInfo))
7579             {
7580                 impInlineInfo->thisDereferencedFirst = true;
7581             }
7582         }
7583
7584 #if defined(DEBUG) || defined(INLINE_DATA)
7585
7586         // Keep track of the raw IL offset of the call
7587         call->gtCall.gtRawILOffset = rawILOffset;
7588
7589 #endif // defined(DEBUG) || defined(INLINE_DATA)
7590
7591         // Is it an inline candidate?
7592         impMarkInlineCandidate(call, exactContextHnd, callInfo);
7593     }
7594
7595     // Push or append the result of the call
7596     if (callRetTyp == TYP_VOID)
7597     {
7598         if (opcode == CEE_NEWOBJ)
7599         {
7600             // we actually did push something, so don't spill the thing we just pushed.
7601             assert(verCurrentState.esStackDepth > 0);
7602             impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtOffs);
7603         }
7604         else
7605         {
7606             impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7607         }
7608     }
7609     else
7610     {
7611         impSpillSpecialSideEff();
7612
7613         if (clsFlags & CORINFO_FLG_ARRAY)
7614         {
7615             eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
7616         }
7617
7618         // Find the return type used for verification by interpreting the method signature.
7619         // NB: we are clobbering the already established sig.
7620         if (tiVerificationNeeded)
7621         {
7622             // Actually, we never get the sig for the original method.
7623             sig = &(callInfo->verSig);
7624         }
7625
7626         typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass);
7627         tiRetVal.NormaliseForStack();
7628
7629         // The CEE_READONLY prefix modifies the verification semantics of an Address
7630         // operation on an array type.
7631         if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall && tiRetVal.IsByRef())
7632         {
7633             tiRetVal.SetIsReadonlyByRef();
7634         }
7635
7636         if (tiVerificationNeeded)
7637         {
7638             // We assume all calls return permanent home byrefs. If they
7639             // didn't they wouldn't be verifiable. This is also covering
7640             // the Address() helper for multidimensional arrays.
7641             if (tiRetVal.IsByRef())
7642             {
7643                 tiRetVal.SetIsPermanentHomeByRef();
7644             }
7645         }
7646
7647         if (call->gtOper == GT_CALL)
7648         {
7649             // Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call)
7650             if (varTypeIsStruct(callRetTyp))
7651             {
7652                 call = impFixupCallStructReturn(call, sig->retTypeClass);
7653             }
7654
7655             if ((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0)
7656             {
7657                 assert(opts.OptEnabled(CLFLG_INLINING));
7658
7659                 // Make the call its own tree (spill the stack if needed).
7660                 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7661
7662                 // TODO: Still using the widened type.
7663                 call = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp));
7664             }
7665             else
7666             {
7667                 // For non-candidates we must also spill, since we
7668                 // might have locals live on the eval stack that this
7669                 // call can modify.
7670                 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call"));
7671             }
7672         }
7673
7674         if (!bIntrinsicImported)
7675         {
7676             //-------------------------------------------------------------------------
7677             //
7678             /* If the call is of a small type and the callee is managed, the callee will normalize the result
7679                 before returning.
7680                 However, we need to normalize small type values returned by unmanaged
7681                 functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here
7682                 if we use the shorter inlined pinvoke stub. */
7683
7684             if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT))
7685             {
7686                 call = gtNewCastNode(genActualType(callRetTyp), call, callRetTyp);
7687             }
7688         }
7689
7690         impPushOnStack(call, tiRetVal);
7691     }
7692
7693     // VSD functions get a new call target each time we getCallInfo, so clear the cache.
7694     // Also, the call info cache for CALLI instructions is largely incomplete, so clear it out.
7695     // if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB))
7696     //  callInfoCache.uncacheCallInfo();
7697
7698     return callRetTyp;
7699 }
7700 #ifdef _PREFAST_
7701 #pragma warning(pop)
7702 #endif
7703
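//-----------------------------------------------------------------------------------
//  impMethodInfo_hasRetBuffArg: Check whether the method described by 'methInfo'
//  returns its struct result via a hidden return buffer argument.
//
//  Arguments:
//    methInfo  -  method info of the callee
//
//  Return Value:
//    true if the struct result is returned via a return buffer, false otherwise.
//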
7704 bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo)
7705 {
7706     CorInfoType corType = methInfo->args.retType;
7707
7708     if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY))
7709     {
7710         // We have some kind of STRUCT being returned
7711
7712         structPassingKind howToReturnStruct = SPK_Unknown;
7713
7714         var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, &howToReturnStruct);
7715
7716         if (howToReturnStruct == SPK_ByReference)
7717         {
7718             return true;
7719         }
7720     }
7721
7722     return false;
7723 }
7724
7725 #ifdef DEBUG
7726 //
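// impImportJitTestLabelMark: Pops the constant arguments of a JitTestLabel call,
// builds a TestLabelAndNum annotation from them, and attaches it to the expression
// on top of the stack (which is then pushed back). numArgs must be 2 or 3.
//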
7727 var_types Compiler::impImportJitTestLabelMark(int numArgs)
7728 {
7729     TestLabelAndNum tlAndN;
7730     if (numArgs == 2)
7731     {
7732         tlAndN.m_num  = 0;
7733         StackEntry se = impPopStack();
7734         assert(se.seTypeInfo.GetType() == TI_INT);
7735         GenTreePtr val = se.val;
7736         assert(val->IsCnsIntOrI());
7737         tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
7738     }
7739     else if (numArgs == 3)
7740     {
7741         StackEntry se = impPopStack();
7742         assert(se.seTypeInfo.GetType() == TI_INT);
7743         GenTreePtr val = se.val;
7744         assert(val->IsCnsIntOrI());
7745         tlAndN.m_num = val->AsIntConCommon()->IconValue();
7746         se           = impPopStack();
7747         assert(se.seTypeInfo.GetType() == TI_INT);
7748         val = se.val;
7749         assert(val->IsCnsIntOrI());
7750         tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
7751     }
7752     else
7753     {
7754         assert(false);
7755     }
7756
7757     StackEntry expSe = impPopStack();
7758     GenTreePtr node  = expSe.val;
7759
7760     // There are a small number of special cases, where we actually put the annotation on a subnode.
7761     if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100)
7762     {
7763         // A loop hoist annotation with value >= 100 means that the expression should be a static field access,
7764         // a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some
7765         // offset within the static field block whose address is returned by the helper call.
7766         // The annotation is saying that this address calculation, but not the entire access, should be hoisted.
7767         GenTreePtr helperCall = nullptr;
7768         assert(node->OperGet() == GT_IND);
7769         tlAndN.m_num -= 100;
7770         GetNodeTestData()->Set(node->gtOp.gtOp1, tlAndN);
7771         GetNodeTestData()->Remove(node);
7772     }
7773     else
7774     {
7775         GetNodeTestData()->Set(node, tlAndN);
7776     }
7777
7778     impPushOnStack(node, expSe.seTypeInfo);
7779     return node->TypeGet();
7780 }
7781 #endif // DEBUG
7782
7783 //-----------------------------------------------------------------------------------
7784 //  impFixupCallStructReturn: For a call node that returns a struct type either
7785 //  adjust the return type to an enregisterable type, or set the flag to indicate
7786 //  struct return via retbuf arg.
7787 //
7788 //  Arguments:
7789 //    call       -  GT_CALL GenTree node
7790 //    retClsHnd  -  Class handle of return type of the call
7791 //
7792 //  Return Value:
7793 //    Returns new GenTree node after fixing struct return of call node
7794 //
7795 GenTreePtr Compiler::impFixupCallStructReturn(GenTreePtr call, CORINFO_CLASS_HANDLE retClsHnd)
7796 {
7797     assert(call->gtOper == GT_CALL);
7798
7799     if (!varTypeIsStruct(call))
7800     {
7801         return call;
7802     }
7803
7804     call->gtCall.gtRetClsHnd = retClsHnd;
7805
7806     GenTreeCall* callNode = call->AsCall();
7807
7808 #if FEATURE_MULTIREG_RET
7809     // Initialize Return type descriptor of call node
7810     ReturnTypeDesc* retTypeDesc = callNode->GetReturnTypeDesc();
7811     retTypeDesc->InitializeStructReturnType(this, retClsHnd);
7812 #endif // FEATURE_MULTIREG_RET
7813
7814 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
7815
7816     // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
7817     assert(!callNode->IsVarargs() && "varargs not allowed for System V OSs.");
7818
7819     // The return type will remain as the incoming struct type unless normalized to a
7820     // single eightbyte return type below.
7821     callNode->gtReturnType = call->gtType;
7822
7823     unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7824     if (retRegCount != 0)
7825     {
7826         if (retRegCount == 1)
7827         {
7828             // struct returned in a single register
7829             callNode->gtReturnType = retTypeDesc->GetReturnRegType(0);
7830         }
7831         else
7832         {
7833             // must be a struct returned in two registers
7834             assert(retRegCount == 2);
7835
7836             if ((!callNode->CanTailCall()) && (!callNode->IsInlineCandidate()))
7837             {
7838                 // Force a call returning multi-reg struct to be always of the IR form
7839                 //   tmp = call
7840                 //
7841                 // No need to assign a multi-reg struct to a local var if:
7842                 //  - It is a tail call or
7843                 //  - The call is marked for in-lining later
7844                 return impAssignMultiRegTypeToVar(call, retClsHnd);
7845             }
7846         }
7847     }
7848     else
7849     {
7850         // struct not returned in registers, i.e. returned via hidden retbuf arg.
7851         callNode->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
7852     }
7853
7854 #else // not FEATURE_UNIX_AMD64_STRUCT_PASSING
7855
7856 #if FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
7857     // There is no fixup necessary if the return type is a HFA struct.
7858     // HFA structs are returned in registers for ARM32 and ARM64
7859     //
7860     if (!call->gtCall.IsVarargs() && IsHfa(retClsHnd))
7861     {
7862         if (call->gtCall.CanTailCall())
7863         {
7864             if (info.compIsVarArgs)
7865             {
7866                 // We cannot tail call because control needs to return to fixup the calling
7867                 // convention for result return.
7868                 call->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
7869             }
7870             else
7871             {
7872                 // If we can tail call returning HFA, then don't assign it to
7873                 // a variable back and forth.
7874                 return call;
7875             }
7876         }
7877
7878         if (call->gtFlags & GTF_CALL_INLINE_CANDIDATE)
7879         {
7880             return call;
7881         }
7882
7883         unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7884         if (retRegCount >= 2)
7885         {
7886             return impAssignMultiRegTypeToVar(call, retClsHnd);
7887         }
7888     }
7889 #endif // _TARGET_ARM_
7890
7891     // Check for TYP_STRUCT type that wraps a primitive type
7892     // Such structs are returned using a single register
7893     // and we change the return type on those calls here.
7894     //
7895     structPassingKind howToReturnStruct;
7896     var_types         returnType = getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
7897
7898     if (howToReturnStruct == SPK_ByReference)
7899     {
7900         assert(returnType == TYP_UNKNOWN);
7901         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
7902     }
7903     else
7904     {
7905         assert(returnType != TYP_UNKNOWN);
7906         call->gtCall.gtReturnType = returnType;
7907
7908         // ToDo: Refactor this common code sequence into its own method as it is used 4+ times
7909         if ((returnType == TYP_LONG) && (compLongUsed == false))
7910         {
7911             compLongUsed = true;
7912         }
7913         else if (((returnType == TYP_FLOAT) || (returnType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
7914         {
7915             compFloatingPointUsed = true;
7916         }
7917
7918 #if FEATURE_MULTIREG_RET
7919         unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7920         assert(retRegCount != 0);
7921
7922         if (retRegCount >= 2)
7923         {
7924             if ((!callNode->CanTailCall()) && (!callNode->IsInlineCandidate()))
7925             {
7926                 // Force a call returning multi-reg struct to be always of the IR form
7927                 //   tmp = call
7928                 //
7929                 // No need to assign a multi-reg struct to a local var if:
7930                 //  - It is a tail call or
7931                 //  - The call is marked for in-lining later
7932                 return impAssignMultiRegTypeToVar(call, retClsHnd);
7933             }
7934         }
7935 #endif // FEATURE_MULTIREG_RET
7936     }
7937
7938 #endif // not FEATURE_UNIX_AMD64_STRUCT_PASSING
7939
7940     return call;
7941 }
7942
7943 /*****************************************************************************
7944    For struct return values, re-type the operand in the case where the ABI
7945    does not use a struct return buffer.
7946    Note that this method is only called for !_TARGET_X86_.
7947  */
7948
7949 GenTreePtr Compiler::impFixupStructReturnType(GenTreePtr op, CORINFO_CLASS_HANDLE retClsHnd)
7950 {
7951     assert(varTypeIsStruct(info.compRetType));
7952     assert(info.compRetBuffArg == BAD_VAR_NUM);
7953
7954 #if defined(_TARGET_XARCH_)
7955
7956 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
7957     // No VarArgs for CoreCLR on x64 Unix
7958     assert(!info.compIsVarArgs);
7959
7960     // Is method returning a multi-reg struct?
7961     if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd))
7962     {
7963         // In case of multi-reg struct return, we force IR to be one of the following:
7964         // GT_RETURN(lclvar) or GT_RETURN(call).  If op is anything other than a
7965         // lclvar or call, it is assigned to a temp to create: temp = op and GT_RETURN(tmp).
7966
7967         if (op->gtOper == GT_LCL_VAR)
7968         {
7969             // Make sure that this struct stays in memory and doesn't get promoted.
7970             unsigned lclNum                  = op->gtLclVarCommon.gtLclNum;
7971             lvaTable[lclNum].lvIsMultiRegRet = true;
7972
7973             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
7974             op->gtFlags |= GTF_DONT_CSE;
7975
7976             return op;
7977         }
7978
7979         if (op->gtOper == GT_CALL)
7980         {
7981             return op;
7982         }
7983
7984         return impAssignMultiRegTypeToVar(op, retClsHnd);
7985     }
7986 #else  // !FEATURE_UNIX_AMD64_STRUCT_PASSING
7987     assert(info.compRetNativeType != TYP_STRUCT);
7988 #endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
7989
7990 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
7991
7992     if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd))
7993     {
7994         if (op->gtOper == GT_LCL_VAR)
7995         {
7996             // This LCL_VAR is an HFA return value, it stays as a TYP_STRUCT
7997             unsigned lclNum = op->gtLclVarCommon.gtLclNum;
7998             // Make sure this struct type stays as struct so that we can return it as an HFA
7999             lvaTable[lclNum].lvIsMultiRegRet = true;
8000
8001             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8002             op->gtFlags |= GTF_DONT_CSE;
8003
8004             return op;
8005         }
8006
8007         if (op->gtOper == GT_CALL)
8008         {
8009             if (op->gtCall.IsVarargs())
8010             {
8011                 // We cannot tail call because control needs to return to fixup the calling
8012                 // convention for result return.
8013                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
8014                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8015             }
8016             else
8017             {
8018                 return op;
8019             }
8020         }
8021         return impAssignMultiRegTypeToVar(op, retClsHnd);
8022     }
8023
8024 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM64_)
8025
8026     // Is method returning a multi-reg struct?
8027     if (IsMultiRegReturnedType(retClsHnd))
8028     {
8029         if (op->gtOper == GT_LCL_VAR)
8030         {
8031             // This LCL_VAR stays as a TYP_STRUCT
8032             unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8033
8034             // Make sure this struct type is not struct promoted
8035             lvaTable[lclNum].lvIsMultiRegRet = true;
8036
8037             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8038             op->gtFlags |= GTF_DONT_CSE;
8039
8040             return op;
8041         }
8042
8043         if (op->gtOper == GT_CALL)
8044         {
8045             if (op->gtCall.IsVarargs())
8046             {
8047                 // We cannot tail call because control needs to return to fixup the calling
8048                 // convention for result return.
8049                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
8050                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8051             }
8052             else
8053             {
8054                 return op;
8055             }
8056         }
8057         return impAssignMultiRegTypeToVar(op, retClsHnd);
8058     }
8059
8060 #endif //  FEATURE_MULTIREG_RET && FEATURE_HFA
8061
8062 REDO_RETURN_NODE:
8063     // adjust the type away from struct to integral
8064     // and no normalizing
8065     if (op->gtOper == GT_LCL_VAR)
8066     {
8067         op->ChangeOper(GT_LCL_FLD);
8068     }
8069     else if (op->gtOper == GT_OBJ)
8070     {
8071         GenTreePtr op1 = op->AsObj()->Addr();
8072
8073         // We will fold away OBJ/ADDR
8074         // except for OBJ/ADDR/INDEX
8075         //     as the array type influences the array element's offset
8076         //     Later in this method we change op->gtType to info.compRetNativeType
8077         //     This is not correct when op is a GT_INDEX as the starting offset
8078         //     for the array elements 'elemOffs' is different for an array of
8079         //     TYP_REF than an array of TYP_STRUCT (which simply wraps a TYP_REF)
8080         //     Also refer to the GTF_INX_REFARR_LAYOUT flag
8081         //
8082         if ((op1->gtOper == GT_ADDR) && (op1->gtOp.gtOp1->gtOper != GT_INDEX))
8083         {
8084             // Change '*(&X)' to 'X' and see if we can do better
8085             op = op1->gtOp.gtOp1;
8086             goto REDO_RETURN_NODE;
8087         }
8088         op->gtObj.gtClass = NO_CLASS_HANDLE;
8089         op->ChangeOperUnchecked(GT_IND);
8090         op->gtFlags |= GTF_IND_TGTANYWHERE;
8091     }
8092     else if (op->gtOper == GT_CALL)
8093     {
8094         if (op->AsCall()->TreatAsHasRetBufArg(this))
8095         {
8096             // This must be one of those 'special' helpers that don't
8097             // really have a return buffer, but instead use it as a way
8098             // to keep the trees cleaner with fewer address-taken temps.
8099             //
8100             // Well, now we have to materialize the return buffer as
8101             // an address-taken temp. Then we can return the temp.
8102             //
8103             // NOTE: this code assumes that since the call directly
8104             // feeds the return, then the call must be returning the
8105             // same structure/class/type.
8106             //
8107             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer"));
8108
8109             // No need to spill anything as we're about to return.
8110             impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE);
8111
8112             // Don't create both a GT_ADDR & GT_OBJ just to undo all of that; instead,
8113             // jump directly to a GT_LCL_FLD.
8114             op = gtNewLclvNode(tmpNum, info.compRetNativeType);
8115             op->ChangeOper(GT_LCL_FLD);
8116         }
8117         else
8118         {
8119             assert(info.compRetNativeType == op->gtCall.gtReturnType);
8120
8121             // Don't change the gtType of the node just yet, it will get changed later.
8122             return op;
8123         }
8124     }
8125     else if (op->gtOper == GT_COMMA)
8126     {
8127         op->gtOp.gtOp2 = impFixupStructReturnType(op->gtOp.gtOp2, retClsHnd);
8128     }
8129
8130     op->gtType = info.compRetNativeType;
8131
8132     return op;
8133 }
8134
8135 /*****************************************************************************
8136    CEE_LEAVE may be jumping out of a protected block, viz, a catch or a
8137    finally-protected try. We find the finally blocks protecting the current
8138    offset (in order) by walking over the complete exception table and
8139    finding enclosing clauses. This assumes that the table is sorted.
8140    This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS.
8141
8142    If we are leaving a catch handler, we need to attach the
8143    CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks.
8144
8145    After this function, the BBJ_LEAVE block has been converted to a different type.
8146  */
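/* For illustration only: a leave that exits two nested finally-protected trys on a non-funclet
   target ends up as a chain like

       leave block -> BBJ_CALLFINALLY   (calls the inner finally)
       step        -> BBJ_ALWAYS        (where the inner finally returns)
       callBlock   -> BBJ_CALLFINALLY   (calls the outer finally)
       step        -> BBJ_ALWAYS        (where the outer finally returns)
       finalStep   -> BBJ_ALWAYS        (jumps to the original leave target)

   with the pending GT_END_LFIN / endCatch statements appended along the way.
 */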
8147
8148 #if !FEATURE_EH_FUNCLETS
8149
8150 void Compiler::impImportLeave(BasicBlock* block)
8151 {
8152 #ifdef DEBUG
8153     if (verbose)
8154     {
8155         printf("\nBefore import CEE_LEAVE:\n");
8156         fgDispBasicBlocks();
8157         fgDispHandlerTab();
8158     }
8159 #endif // DEBUG
8160
8161     bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8162     unsigned    blkAddr         = block->bbCodeOffs;
8163     BasicBlock* leaveTarget     = block->bbJumpDest;
8164     unsigned    jmpAddr         = leaveTarget->bbCodeOffs;
8165
8166     // LEAVE clears the stack: spill any side effects and set the stack depth to 0
8167
8168     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8169     verCurrentState.esStackDepth = 0;
8170
8171     assert(block->bbJumpKind == BBJ_LEAVE);
8172     assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary
8173
8174     BasicBlock* step         = DUMMY_INIT(NULL);
8175     unsigned    encFinallies = 0; // Number of enclosing finallies.
8176     GenTreePtr  endCatches   = NULL;
8177     GenTreePtr  endLFin      = NULL; // The statement tree to indicate the end of locally-invoked finally.
8178
8179     unsigned  XTnum;
8180     EHblkDsc* HBtab;
8181
8182     for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8183     {
8184         // Grab the handler offsets
8185
8186         IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8187         IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8188         IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8189         IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8190
8191         /* Is this a catch-handler we are CEE_LEAVEing out of?
8192          * If so, we need to call CORINFO_HELP_ENDCATCH.
8193          */
8194
8195         if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8196         {
8197             // Can't CEE_LEAVE out of a finally/fault handler
8198             if (HBtab->HasFinallyOrFaultHandler())
8199                 BADCODE("leave out of fault/finally block");
8200
8201             // Create the call to CORINFO_HELP_ENDCATCH
8202             GenTreePtr endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID);
8203
8204             // Make a list of all the currently pending endCatches
8205             if (endCatches)
8206                 endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch);
8207             else
8208                 endCatches = endCatch;
8209
8210 #ifdef DEBUG
8211             if (verbose)
8212             {
8213                 printf("impImportLeave - BB%02u jumping out of catch handler EH#%u, adding call to "
8214                        "CORINFO_HELP_ENDCATCH\n",
8215                        block->bbNum, XTnum);
8216             }
8217 #endif
8218         }
8219         else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8220                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8221         {
8222             /* This is a finally-protected try we are jumping out of */
8223
8224             /* If there are any pending endCatches, and we have already
8225                jumped out of a finally-protected try, then the endCatches
8226                have to be put in a block in an outer try for async
8227                exceptions to work correctly.
8228                Else, just append to the original block */
8229
8230             BasicBlock* callBlock;
8231
8232             assert(!encFinallies == !endLFin); // if we have finallies, we better have an endLFin tree, and vice-versa
8233
8234             if (encFinallies == 0)
8235             {
8236                 assert(step == DUMMY_INIT(NULL));
8237                 callBlock             = block;
8238                 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8239
8240                 if (endCatches)
8241                     impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8242
8243 #ifdef DEBUG
8244                 if (verbose)
8245                 {
8246                     printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY "
8247                            "block BB%02u [%08p]\n",
8248                            callBlock->bbNum, dspPtr(callBlock));
8249                 }
8250 #endif
8251             }
8252             else
8253             {
8254                 assert(step != DUMMY_INIT(NULL));
8255
8256                 /* Calling the finally block */
8257                 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step);
8258                 assert(step->bbJumpKind == BBJ_ALWAYS);
8259                 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8260                                               // finally in the chain)
8261                 step->bbJumpDest->bbRefs++;
8262
8263                 /* The new block will inherit this block's weight */
8264                 callBlock->setBBWeight(block->bbWeight);
8265                 callBlock->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8266
8267 #ifdef DEBUG
8268                 if (verbose)
8269                 {
8270                     printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block BB%02u "
8271                            "[%08p]\n",
8272                            callBlock->bbNum, dspPtr(callBlock));
8273                 }
8274 #endif
8275
8276                 GenTreePtr lastStmt;
8277
8278                 if (endCatches)
8279                 {
8280                     lastStmt         = gtNewStmt(endCatches);
8281                     endLFin->gtNext  = lastStmt;
8282                     lastStmt->gtPrev = endLFin;
8283                 }
8284                 else
8285                 {
8286                     lastStmt = endLFin;
8287                 }
8288
8289                 // note that this sets BBF_IMPORTED on the block
8290                 impEndTreeList(callBlock, endLFin, lastStmt);
8291             }
8292
8293             step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8294             /* The new block will inherit this block's weight */
8295             step->setBBWeight(block->bbWeight);
8296             step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8297
8298 #ifdef DEBUG
8299             if (verbose)
8300             {
8301                 printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block "
8302                        "BB%02u [%08p]\n",
8303                        step->bbNum, dspPtr(step));
8304             }
8305 #endif
8306
8307             unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel;
8308             assert(finallyNesting <= compHndBBtabCount);
8309
8310             callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8311             endLFin               = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting);
8312             endLFin               = gtNewStmt(endLFin);
8313             endCatches            = NULL;
8314
8315             encFinallies++;
8316
8317             invalidatePreds = true;
8318         }
8319     }
8320
8321     /* Append any remaining endCatches, if any */
8322
8323     assert(!encFinallies == !endLFin);
8324
8325     if (encFinallies == 0)
8326     {
8327         assert(step == DUMMY_INIT(NULL));
8328         block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8329
8330         if (endCatches)
8331             impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8332
8333 #ifdef DEBUG
8334         if (verbose)
8335         {
8336             printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS "
8337                    "block BB%02u [%08p]\n",
8338                    block->bbNum, dspPtr(block));
8339         }
8340 #endif
8341     }
8342     else
8343     {
8344         // If leaveTarget is the start of another try block, we want to make sure that
8345         // we do not insert finalStep into that try block. Hence, we find the enclosing
8346         // try block.
8347         unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget);
8348
8349         // Insert a new BB either in the try region indicated by tryIndex or
8350         // the handler region indicated by leaveTarget->bbHndIndex,
8351         // depending on which is the inner region.
8352         BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step);
8353         finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS;
8354         step->bbJumpDest = finalStep;
8355
8356         /* The new block will inherit this block's weight */
8357         finalStep->setBBWeight(block->bbWeight);
8358         finalStep->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8359
8360 #ifdef DEBUG
8361         if (verbose)
8362         {
8363             printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block BB%02u [%08p]\n",
8364                    encFinallies, finalStep->bbNum, dspPtr(finalStep));
8365         }
8366 #endif
8367
8368         GenTreePtr lastStmt;
8369
8370         if (endCatches)
8371         {
8372             lastStmt         = gtNewStmt(endCatches);
8373             endLFin->gtNext  = lastStmt;
8374             lastStmt->gtPrev = endLFin;
8375         }
8376         else
8377         {
8378             lastStmt = endLFin;
8379         }
8380
8381         impEndTreeList(finalStep, endLFin, lastStmt);
8382
8383         finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
8384
8385         // Queue up the jump target for importing
8386
8387         impImportBlockPending(leaveTarget);
8388
8389         invalidatePreds = true;
8390     }
8391
8392     if (invalidatePreds && fgComputePredsDone)
8393     {
8394         JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
8395         fgRemovePreds();
8396     }
8397
8398 #ifdef DEBUG
8399     fgVerifyHandlerTab();
8400
8401     if (verbose)
8402     {
8403         printf("\nAfter import CEE_LEAVE:\n");
8404         fgDispBasicBlocks();
8405         fgDispHandlerTab();
8406     }
8407 #endif // DEBUG
8408 }
8409
8410 #else // FEATURE_EH_FUNCLETS
8411
8412 void Compiler::impImportLeave(BasicBlock* block)
8413 {
8414 #ifdef DEBUG
8415     if (verbose)
8416     {
8417         printf("\nBefore import CEE_LEAVE in BB%02u (targeting BB%02u):\n", block->bbNum, block->bbJumpDest->bbNum);
8418         fgDispBasicBlocks();
8419         fgDispHandlerTab();
8420     }
8421 #endif // DEBUG
8422
8423     bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8424     unsigned    blkAddr         = block->bbCodeOffs;
8425     BasicBlock* leaveTarget     = block->bbJumpDest;
8426     unsigned    jmpAddr         = leaveTarget->bbCodeOffs;
8427
8428     // LEAVE clears the stack: spill any side effects and set the stack depth to 0
8429
8430     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8431     verCurrentState.esStackDepth = 0;
8432
8433     assert(block->bbJumpKind == BBJ_LEAVE);
8434     assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary
8435
8436     BasicBlock* step = nullptr;
8437
8438     enum StepType
8439     {
8440         // No step type; step == NULL.
8441         ST_None,
8442
8443         // Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair?
8444         // That is, is step->bbJumpDest where a finally will return to?
8445         ST_FinallyReturn,
8446
8447         // The step block is a catch return.
8448         ST_Catch,
8449
8450         // The step block is in a "try", created as the target for a finally return or the target for a catch return.
8451         ST_Try
8452     };
8453     StepType stepType = ST_None;
8454
8455     unsigned  XTnum;
8456     EHblkDsc* HBtab;
8457
8458     for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8459     {
8460         // Grab the handler offsets
8461
8462         IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8463         IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8464         IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8465         IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8466
8467         /* Is this a catch-handler we are CEE_LEAVEing out of?
8468          */
8469
8470         if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8471         {
8472             // Can't CEE_LEAVE out of a finally/fault handler
8473             if (HBtab->HasFinallyOrFaultHandler())
8474             {
8475                 BADCODE("leave out of fault/finally block");
8476             }
8477
8478             /* We are jumping out of a catch */
8479
8480             if (step == nullptr)
8481             {
8482                 step             = block;
8483                 step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET
8484                 stepType         = ST_Catch;
8485
8486 #ifdef DEBUG
8487                 if (verbose)
8488                 {
8489                     printf("impImportLeave - jumping out of a catch (EH#%u), convert block BB%02u to BBJ_EHCATCHRET "
8490                            "block\n",
8491                            XTnum, step->bbNum);
8492                 }
8493 #endif
8494             }
8495             else
8496             {
8497                 BasicBlock* exitBlock;
8498
8499                 /* Create a new catch exit block in the catch region for the existing step block to jump to in this
8500                  * scope */
8501                 exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step);
8502
8503                 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
8504                 step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch
8505                                               // exit) returns to this block
8506                 step->bbJumpDest->bbRefs++;
8507
8508 #if defined(_TARGET_ARM_)
8509                 if (stepType == ST_FinallyReturn)
8510                 {
8511                     assert(step->bbJumpKind == BBJ_ALWAYS);
8512                     // Mark the target of a finally return
8513                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8514                 }
8515 #endif // defined(_TARGET_ARM_)
8516
8517                 /* The new block will inherit this block's weight */
8518                 exitBlock->setBBWeight(block->bbWeight);
8519                 exitBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8520
8521                 /* This exit block is the new step */
8522                 step     = exitBlock;
8523                 stepType = ST_Catch;
8524
8525                 invalidatePreds = true;
8526
8527 #ifdef DEBUG
8528                 if (verbose)
8529                 {
8530                     printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block BB%02u\n", XTnum,
8531                            exitBlock->bbNum);
8532                 }
8533 #endif
8534             }
8535         }
8536         else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8537                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8538         {
8539             /* We are jumping out of a finally-protected try */
8540
8541             BasicBlock* callBlock;
8542
8543             if (step == nullptr)
8544             {
8545 #if FEATURE_EH_CALLFINALLY_THUNKS
8546
8547                 // Put the call to the finally in the enclosing region.
8548                 unsigned callFinallyTryIndex =
8549                     (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
8550                 unsigned callFinallyHndIndex =
8551                     (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
8552                 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block);
8553
8554                 // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because
8555                 // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE,
8556                 // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the
8557                 // next block, and flow optimizations will remove it.
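                //
                // Rough sketch of the shape built here (no prior step block, with call-to-finally thunks):
                //
                //     block (was BBJ_LEAVE, now BBJ_ALWAYS) --> callBlock (BBJ_CALLFINALLY, in the enclosing EH region)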
8558                 block->bbJumpKind = BBJ_ALWAYS;
8559                 block->bbJumpDest = callBlock;
8560                 block->bbJumpDest->bbRefs++;
8561
8562                 /* The new block will inherit this block's weight */
8563                 callBlock->setBBWeight(block->bbWeight);
8564                 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8565
8566 #ifdef DEBUG
8567                 if (verbose)
8568                 {
8569                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
8570                            "BBJ_ALWAYS, add BBJ_CALLFINALLY block BB%02u\n",
8571                            XTnum, block->bbNum, callBlock->bbNum);
8572                 }
8573 #endif
8574
8575 #else // !FEATURE_EH_CALLFINALLY_THUNKS
8576
8577                 callBlock             = block;
8578                 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8579
8580 #ifdef DEBUG
8581                 if (verbose)
8582                 {
8583                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
8584                            "BBJ_CALLFINALLY block\n",
8585                            XTnum, callBlock->bbNum);
8586                 }
8587 #endif
8588
8589 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
8590             }
8591             else
8592             {
8593                 // Calling the finally block. We already have a step block that is either the call-to-finally from a
8594                 // more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by
8595                 // a 'finally'), or the step block is the return from a catch.
8596                 //
8597                 // Due to ThreadAbortException, we can't have the catch return target the call-to-finally block
8598                 // directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will
8599                 // automatically re-raise the exception, using the return address of the catch (that is, the target
8600                 // block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will
8601                 // refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64,
8602                 // we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a
8603                 // finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a
8604                 // BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly
8605                 // within the 'try' region protected by the finally, since we generate code in such a way that execution
8606                 // never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on
8607                 // stack walks.)
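                //
                // Rough sketch of the shape built below when the step block is a catch return and
                // call-to-finally thunks are in use:
                //
                //     step (BBJ_EHCATCHRET) --> step2 (BBJ_ALWAYS, inside the 'try') --> callBlock (BBJ_CALLFINALLY)
                //
                // so a BBJ_EHCATCHRET never targets the call-to-finally thunk directly.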
8608
8609                 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
8610
8611 #if FEATURE_EH_CALLFINALLY_THUNKS
8612                 if (step->bbJumpKind == BBJ_EHCATCHRET)
8613                 {
8614                     // Need to create another step block in the 'try' region that will actually branch to the
8615                     // call-to-finally thunk.
8616                     BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
8617                     step->bbJumpDest  = step2;
8618                     step->bbJumpDest->bbRefs++;
8619                     step2->setBBWeight(block->bbWeight);
8620                     step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8621
8622 #ifdef DEBUG
8623                     if (verbose)
8624                     {
8625                         printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is "
8626                                "BBJ_EHCATCHRET (BB%02u), new BBJ_ALWAYS step-step block BB%02u\n",
8627                                XTnum, step->bbNum, step2->bbNum);
8628                     }
8629 #endif
8630
8631                     step = step2;
8632                     assert(stepType == ST_Catch); // Leave it as catch type for now.
8633                 }
8634 #endif // FEATURE_EH_CALLFINALLY_THUNKS
8635
8636 #if FEATURE_EH_CALLFINALLY_THUNKS
8637                 unsigned callFinallyTryIndex =
8638                     (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
8639                 unsigned callFinallyHndIndex =
8640                     (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
8641 #else  // !FEATURE_EH_CALLFINALLY_THUNKS
8642                 unsigned callFinallyTryIndex = XTnum + 1;
8643                 unsigned callFinallyHndIndex = 0; // don't care
8644 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
8645
8646                 callBlock        = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step);
8647                 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8648                                               // finally in the chain)
8649                 step->bbJumpDest->bbRefs++;
8650
8651 #if defined(_TARGET_ARM_)
8652                 if (stepType == ST_FinallyReturn)
8653                 {
8654                     assert(step->bbJumpKind == BBJ_ALWAYS);
8655                     // Mark the target of a finally return
8656                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8657                 }
8658 #endif // defined(_TARGET_ARM_)
8659
8660                 /* The new block will inherit this block's weight */
8661                 callBlock->setBBWeight(block->bbWeight);
8662                 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8663
8664 #ifdef DEBUG
8665                 if (verbose)
8666                 {
8667                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY block "
8668                            "BB%02u\n",
8669                            XTnum, callBlock->bbNum);
8670                 }
8671 #endif
8672             }
8673
8674             step     = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8675             stepType = ST_FinallyReturn;
8676
8677             /* The new block will inherit this block's weight */
8678             step->setBBWeight(block->bbWeight);
8679             step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8680
8681 #ifdef DEBUG
8682             if (verbose)
8683             {
8684                 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) "
8685                        "block BB%02u\n",
8686                        XTnum, step->bbNum);
8687             }
8688 #endif
8689
8690             callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8691
8692             invalidatePreds = true;
8693         }
8694         else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8695                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8696         {
8697             // We are jumping out of a catch-protected try.
8698             //
8699             // If we are returning from a call to a finally, then we must have a step block within a try
8700             // that is protected by a catch. This is so when unwinding from that finally (e.g., if code within the
8701             // finally raises an exception), the VM will find this step block, notice that it is in a protected region,
8702             // and invoke the appropriate catch.
8703             //
8704             // We also need to handle a special case with the handling of ThreadAbortException. If a try/catch
8705             // catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception),
8706             // and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM,
8707             // the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target
8708             // address of the catch return as the new exception address. That is, the re-raised exception appears to
8709             // occur at the catch return address. If this exception return address skips an enclosing try/catch that
8710             // catches ThreadAbortException, then the enclosing try/catch will not catch the exception, as it should.
8711             // For example:
8712             //
8713             // try {
8714             //    try {
8715             //       // something here raises ThreadAbortException
8716             //       LEAVE LABEL_1; // no need to stop at LABEL_2
8717             //    } catch (Exception) {
8718             //       // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so
8719             //       // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode.
8720             //       // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised
8721             //       // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only
8722             //       // need to do this transformation if the current EH block is a try/catch that catches
8723             //       // ThreadAbortException (or one of its parents), however we might not be able to find that
8724             //       // information, so currently we do it for all catch types.
8725             //       LEAVE LABEL_1; // Convert this to LEAVE LABEL2;
8726             //    }
8727             //    LABEL_2: LEAVE LABEL_1; // inserted by this step creation code
8728             // } catch (ThreadAbortException) {
8729             // }
8730             // LABEL_1:
8731             //
8732             // Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C#
8733             // compiler.
8734
8735             if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch))
8736             {
8737                 BasicBlock* catchStep;
8738
8739                 assert(step);
8740
8741                 if (stepType == ST_FinallyReturn)
8742                 {
8743                     assert(step->bbJumpKind == BBJ_ALWAYS);
8744                 }
8745                 else
8746                 {
8747                     assert(stepType == ST_Catch);
8748                     assert(step->bbJumpKind == BBJ_EHCATCHRET);
8749                 }
8750
8751                 /* Create a new exit block in the try region for the existing step block to jump to in this scope */
8752                 catchStep        = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
8753                 step->bbJumpDest = catchStep;
8754                 step->bbJumpDest->bbRefs++;
8755
8756 #if defined(_TARGET_ARM_)
8757                 if (stepType == ST_FinallyReturn)
8758                 {
8759                     // Mark the target of a finally return
8760                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8761                 }
8762 #endif // defined(_TARGET_ARM_)
8763
8764                 /* The new block will inherit this block's weight */
8765                 catchStep->setBBWeight(block->bbWeight);
8766                 catchStep->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8767
8768 #ifdef DEBUG
8769                 if (verbose)
8770                 {
8771                     if (stepType == ST_FinallyReturn)
8772                     {
8773                         printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new "
8774                                "BBJ_ALWAYS block BB%02u\n",
8775                                XTnum, catchStep->bbNum);
8776                     }
8777                     else
8778                     {
8779                         assert(stepType == ST_Catch);
8780                         printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new "
8781                                "BBJ_ALWAYS block BB%02u\n",
8782                                XTnum, catchStep->bbNum);
8783                     }
8784                 }
8785 #endif // DEBUG
8786
8787                 /* This block is the new step */
8788                 step     = catchStep;
8789                 stepType = ST_Try;
8790
8791                 invalidatePreds = true;
8792             }
8793         }
8794     }
8795
8796     if (step == nullptr)
8797     {
8798         block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8799
8800 #ifdef DEBUG
8801         if (verbose)
8802         {
8803             printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE "
8804                    "block BB%02u to BBJ_ALWAYS\n",
8805                    block->bbNum);
8806         }
8807 #endif
8808     }
8809     else
8810     {
8811         step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
8812
8813 #if defined(_TARGET_ARM_)
8814         if (stepType == ST_FinallyReturn)
8815         {
8816             assert(step->bbJumpKind == BBJ_ALWAYS);
8817             // Mark the target of a finally return
8818             step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8819         }
8820 #endif // defined(_TARGET_ARM_)
8821
8822 #ifdef DEBUG
8823         if (verbose)
8824         {
8825             printf("impImportLeave - final destination of step blocks set to BB%02u\n", leaveTarget->bbNum);
8826         }
8827 #endif
8828
8829         // Queue up the jump target for importing
8830
8831         impImportBlockPending(leaveTarget);
8832     }
8833
8834     if (invalidatePreds && fgComputePredsDone)
8835     {
8836         JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
8837         fgRemovePreds();
8838     }
8839
8840 #ifdef DEBUG
8841     fgVerifyHandlerTab();
8842
8843     if (verbose)
8844     {
8845         printf("\nAfter import CEE_LEAVE:\n");
8846         fgDispBasicBlocks();
8847         fgDispHandlerTab();
8848     }
8849 #endif // DEBUG
8850 }
8851
8852 #endif // FEATURE_EH_FUNCLETS
8853
8854 /*****************************************************************************/
8855 // This is called when reimporting a leave block. It resets the JumpKind,
8856 // JumpDest, and bbNext to the original values
8857
8858 void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr)
8859 {
8860 #if FEATURE_EH_FUNCLETS
8861     // With EH Funclets, while importing the leave opcode we create another block ending with BBJ_ALWAYS (call it B1),
8862     // and the block containing the leave (say B0) is marked as BBJ_CALLFINALLY. Say that for some reason we reimport B0:
8863     // it is reset (in this routine) by marking it as ending with BBJ_LEAVE, and further down, when B0 is reimported, we
8864     // create another BBJ_ALWAYS (call it B2). In this process B1 gets orphaned, and any blocks for which B1 is the
8865     // only predecessor are also considered orphans and we attempt to delete them.
8866     //
8867     //  try  {
8868     //     ....
8869     //     try
8870     //     {
8871     //         ....
8872     //         leave OUTSIDE;  // B0 is the block containing this leave, following this would be B1
8873     //     } finally { }
8874     //  } finally { }
8875     //  OUTSIDE:
8876     //
8877     // In the above nested try-finally example, we create a step block (call it Bstep) which branches to the block
8878     // that a finally would return to (such a block is marked as a finally target). Block B1 branches to the step block.
8879     // Because of the re-import of B0, Bstep is also orphaned. Since Bstep is a finally target, it cannot be removed. To
8880     // work around this, we duplicate B0 (call it B0Dup) before resetting it. B0Dup is marked as BBJ_CALLFINALLY and
8881     // only serves to pair up with B1 (BBJ_ALWAYS), which got orphaned. Now, during orphan block deletion, B0Dup and B1
8882     // will be treated as a pair and handled correctly.
8883     if (block->bbJumpKind == BBJ_CALLFINALLY)
8884     {
8885         BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind);
8886         dupBlock->bbFlags    = block->bbFlags;
8887         dupBlock->bbJumpDest = block->bbJumpDest;
8888         dupBlock->copyEHRegion(block);
8889         dupBlock->bbCatchTyp = block->bbCatchTyp;
8890
8891         // Mark this block as
8892         //  a) not referenced by any other block to make sure that it gets deleted
8893         //  b) weight zero
8894         //  c) prevent from being imported
8895         //  d) as internal
8896         //  e) as rarely run
8897         dupBlock->bbRefs   = 0;
8898         dupBlock->bbWeight = 0;
8899         dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY;
8900
8901         // Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS
8902         // will be next to each other.
8903         fgInsertBBafter(block, dupBlock);
8904
8905 #ifdef DEBUG
8906         if (verbose)
8907         {
8908             printf("New Basic Block BB%02u duplicate of BB%02u created.\n", dupBlock->bbNum, block->bbNum);
8909         }
8910 #endif
8911     }
8912 #endif // FEATURE_EH_FUNCLETS
8913
8914     block->bbJumpKind = BBJ_LEAVE;
8915     fgInitBBLookup();
8916     block->bbJumpDest = fgLookupBB(jmpAddr);
8917
8918     // We will leave the BBJ_ALWAYS block we introduced. When it's reimported
8919     // the BBJ_ALWAYS block will be unreachable, and will be removed after. The
8920     // reason we don't want to remove the block at this point is that if we call
8921     // fgInitBBLookup() again we will do it wrong as the BBJ_ALWAYS block won't be
8922     // added and the linked list length will be different than fgBBcount.
8923 }
8924
8925 /*****************************************************************************/
8926 // Get the first non-prefix opcode. Used for verification of valid combinations
8927 // of prefixes and actual opcodes.
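//
// For example, given an IL sequence like "volatile. unaligned. 1 ldind.i4", this skips the
// volatile. and unaligned. prefixes (and the unaligned. operand byte) and returns CEE_LDIND_I4;
// if the stream ends before a non-prefix opcode is found, CEE_ILLEGAL is returned.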
8928
8929 static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp)
8930 {
8931     while (codeAddr < codeEndp)
8932     {
8933         OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
8934         codeAddr += sizeof(__int8);
8935
8936         if (opcode == CEE_PREFIX1)
8937         {
8938             if (codeAddr >= codeEndp)
8939             {
8940                 break;
8941             }
8942             opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
8943             codeAddr += sizeof(__int8);
8944         }
8945
8946         switch (opcode)
8947         {
8948             case CEE_UNALIGNED:
8949             case CEE_VOLATILE:
8950             case CEE_TAILCALL:
8951             case CEE_CONSTRAINED:
8952             case CEE_READONLY:
8953                 break;
8954             default:
8955                 return opcode;
8956         }
8957
8958         codeAddr += opcodeSizes[opcode];
8959     }
8960
8961     return CEE_ILLEGAL;
8962 }
8963
8964 /*****************************************************************************/
8965 // Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes
8966
8967 static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix)
8968 {
8969     OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
8970
8971     if (!(
8972             // The opcodes for all the ldind and stind variants happen to be contiguous, except stind.i.
8973             ((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) ||
8974             (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) ||
8975             (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) ||
8976             // The volatile. prefix is also allowed with ldsfld and stsfld
8977             (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD)))))
8978     {
8979         BADCODE("Invalid opcode for unaligned. or volatile. prefix");
8980     }
8981 }
8982
8983 /*****************************************************************************/
8984
8985 #ifdef DEBUG
8986
8987 #undef RETURN // undef contracts RETURN macro
8988
8989 enum controlFlow_t
8990 {
8991     NEXT,
8992     CALL,
8993     RETURN,
8994     THROW,
8995     BRANCH,
8996     COND_BRANCH,
8997     BREAK,
8998     PHI,
8999     META,
9000 };
9001
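// The control-flow kind of every IL opcode, indexed by OPCODE; built by expanding the 'flow'
// column of opcode.def through the OPDEF macro.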
9002 const static controlFlow_t controlFlow[] = {
9003 #define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow,
9004 #include "opcode.def"
9005 #undef OPDEF
9006 };
9007
9008 #endif // DEBUG
9009
9010 /*****************************************************************************
9011  *  Determine the result type of an arithmetic operation
9012  *  On 64-bit inserts upcasts when native int is mixed with int32
9013  */
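/* A summary of the byref cases handled below:
 *     byref - byref        => TYP_I_IMPL (native int)
 *     byref - [native] int => TYP_BYREF
 *     [native] int - byref => TYP_I_IMPL
 *     byref + [native] int => TYP_BYREF   (byref + byref is not allowed)
 * Otherwise the usual integral/floating widening rules below apply.
 */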
9014 var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTreePtr* pOp1, GenTreePtr* pOp2)
9015 {
9016     var_types  type = TYP_UNDEF;
9017     GenTreePtr op1 = *pOp1, op2 = *pOp2;
9018
9019     // Arithmetic operations are generally only allowed with
9020     // primitive types, but certain operations are allowed
9021     // with byrefs
9022
9023     if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9024     {
9025         if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9026         {
9027             // byref1-byref2 => gives a native int
9028             type = TYP_I_IMPL;
9029         }
9030         else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9031         {
9032             // [native] int - byref => gives a native int
9033
9034             //
9035             // The reason is that it is possible, in managed C++,
9036             // to have a tree like this:
9037             //
9038             //              -
9039             //             / \
9040             //            /   \
9041             //           /     \
9042             //          /       \
9043             // const(h) int     addr byref
9044             //
9045             // <BUGNUM> VSW 318822 </BUGNUM>
9046             //
9047             // So here we decide to make the resulting type to be a native int.
9048             CLANG_FORMAT_COMMENT_ANCHOR;
9049
9050 #ifdef _TARGET_64BIT_
9051             if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9052             {
9053                 // insert an explicit upcast
9054                 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9055             }
9056 #endif // _TARGET_64BIT_
9057
9058             type = TYP_I_IMPL;
9059         }
9060         else
9061         {
9062             // byref - [native] int => gives a byref
9063             assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet()));
9064
9065 #ifdef _TARGET_64BIT_
9066             if ((genActualType(op2->TypeGet()) != TYP_I_IMPL))
9067             {
9068                 // insert an explicit upcast
9069                 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9070             }
9071 #endif // _TARGET_64BIT_
9072
9073             type = TYP_BYREF;
9074         }
9075     }
9076     else if ((oper == GT_ADD) &&
9077              (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9078     {
9079         // byref + [native] int => gives a byref
9080         // (or)
9081         // [native] int + byref => gives a byref
9082
9083         // only one can be a byref : byref op byref not allowed
9084         assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF);
9085         assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet()));
9086
9087 #ifdef _TARGET_64BIT_
9088         if (genActualType(op2->TypeGet()) == TYP_BYREF)
9089         {
9090             if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9091             {
9092                 // insert an explicit upcast
9093                 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9094             }
9095         }
9096         else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9097         {
9098             // insert an explicit upcast
9099             op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9100         }
9101 #endif // _TARGET_64BIT_
9102
9103         type = TYP_BYREF;
9104     }
9105 #ifdef _TARGET_64BIT_
9106     else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL)
9107     {
9108         assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9109
9110         // int + native int => gives a native int
9111         // native int + int => gives a native int
9112         // (the operand typed TYP_I_IMPL here comes from IL 'native int' / IntPtr, not Int64)
9113
9114         if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9115         {
9116             // insert an explicit upcast
9117             op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9118         }
9119         else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9120         {
9121             // insert an explicit upcast
9122             op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9123         }
9124
9125         type = TYP_I_IMPL;
9126     }
9127 #else  // 32-bit TARGET
9128     else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG)
9129     {
9130         assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9131
9132         // int + long => gives long
9133         // long + int => gives long
9134
9135         type = TYP_LONG;
9136     }
9137 #endif // _TARGET_64BIT_
9138     else
9139     {
9140         // int + int => gives an int
9141         assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF);
9142
9143         assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
9144                varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
9145
9146         type = genActualType(op1->gtType);
9147
9148 #if FEATURE_X87_DOUBLES
9149
9150         // For x87, since we only have 1 size of registers, prefer double
9151         // For everybody else, be more precise
9152         if (type == TYP_FLOAT)
9153             type = TYP_DOUBLE;
9154
9155 #else // !FEATURE_X87_DOUBLES
9156
9157         // If both operands are TYP_FLOAT, then leave it as TYP_FLOAT.
9158         // Otherwise, turn floats into doubles
9159         if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT))
9160         {
9161             assert(genActualType(op2->gtType) == TYP_DOUBLE);
9162             type = TYP_DOUBLE;
9163         }
9164
9165 #endif // FEATURE_X87_DOUBLES
9166     }
9167
9168 #if FEATURE_X87_DOUBLES
9169     assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_LONG || type == TYP_INT);
9170 #else  // FEATURE_X87_DOUBLES
9171     assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT);
9172 #endif // FEATURE_X87_DOUBLES
9173
9174     return type;
9175 }
9176
9177 /*****************************************************************************
9178  * Casting Helper Function to service both CEE_CASTCLASS and CEE_ISINST
9179  *
9180  * typeRef contains the token, op1 to contain the value being cast,
9181  * and op2 to contain code that creates the type handle corresponding to typeRef
9182  * isCastClass = true means CEE_CASTCLASS, false means CEE_ISINST
9183  */
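/* When expanded inline, the QMARK trees built below are roughly equivalent to this pseudo-code
 * (slowPath is a name used here only for illustration):
 *
 *     tmp = (op1 == null) ? op1
 *                         : (*op1 == op2) ? op1                 // method table matches
 *                                         : slowPath(op2, op1); // CHKCASTCLASS_SPECIAL for castclass,
 *                                                               // a null constant for isinst
 *
 * and tmp (a new local) is returned.
 */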
9184 GenTreePtr Compiler::impCastClassOrIsInstToTree(GenTreePtr              op1,
9185                                                 GenTreePtr              op2,
9186                                                 CORINFO_RESOLVED_TOKEN* pResolvedToken,
9187                                                 bool                    isCastClass)
9188 {
9189     bool expandInline;
9190
9191     assert(op1->TypeGet() == TYP_REF);
9192
9193     CorInfoHelpFunc helper = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass);
9194
9195     if (isCastClass)
9196     {
9197         // We only want to expand inline the normal CHKCASTCLASS helper;
9198         expandInline = (helper == CORINFO_HELP_CHKCASTCLASS);
9199     }
9200     else
9201     {
9202         if (helper == CORINFO_HELP_ISINSTANCEOFCLASS)
9203         {
9204             // Get the Class Handle and class attributes for the type we are casting to
9205             //
9206             DWORD flags = info.compCompHnd->getClassAttribs(pResolvedToken->hClass);
9207
9208             //
9209             // If the class handle is marked as final we can also expand the IsInst check inline
9210             //
9211             expandInline = ((flags & CORINFO_FLG_FINAL) != 0);
9212
9213             //
9214             // But don't expand inline these two cases
9215             //
9216             if (flags & CORINFO_FLG_MARSHAL_BYREF)
9217             {
9218                 expandInline = false;
9219             }
9220             else if (flags & CORINFO_FLG_CONTEXTFUL)
9221             {
9222                 expandInline = false;
9223             }
9224         }
9225         else
9226         {
9227             //
9228             // We can't expand inline any other helpers
9229             //
9230             expandInline = false;
9231         }
9232     }
9233
9234     if (expandInline)
9235     {
9236         if (compCurBB->isRunRarely())
9237         {
9238             expandInline = false; // not worth the code expansion in a rarely run block
9239         }
9240
9241         if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals())
9242         {
9243             expandInline = false; // not worth creating an untracked local variable
9244         }
9245     }
9246
9247     if (!expandInline)
9248     {
9249         // If we CSE this class handle we prevent assertionProp from making SubType assertions
9250         // so instead we force the CSE logic to not consider CSE-ing this class handle.
9251         //
9252         op2->gtFlags |= GTF_DONT_CSE;
9253
9254         return gtNewHelperCallNode(helper, TYP_REF, 0, gtNewArgList(op2, op1));
9255     }
9256
9257     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2"));
9258
9259     GenTreePtr temp;
9260     GenTreePtr condMT;
9261     //
9262     // expand the methodtable match:
9263     //
9264     //  condMT ==>   GT_NE
9265     //               /    \
9266     //           GT_IND   op2 (typically CNS_INT)
9267     //              |
9268     //           op1Copy
9269     //
9270
9271     // This can replace op1 with a GT_COMMA that evaluates op1 into a local
9272     //
9273     op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1"));
9274     //
9275     // op1 is now known to be a non-complex tree
9276     // thus we can use gtClone(op1) from now on
9277     //
9278
9279     GenTreePtr op2Var = op2;
9280     if (isCastClass)
9281     {
9282         op2Var                                                  = fgInsertCommaFormTemp(&op2);
9283         lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true;
9284     }
9285     temp = gtNewOperNode(GT_IND, TYP_I_IMPL, temp);
9286     temp->gtFlags |= GTF_EXCEPT;
9287     condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2);
9288
9289     GenTreePtr condNull;
9290     //
9291     // expand the null check:
9292     //
9293     //  condNull ==>   GT_EQ
9294     //                 /    \
9295     //             op1Copy CNS_INT
9296     //                      null
9297     //
9298     condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF));
9299
9300     //
9301     // expand the true and false trees for the condMT
9302     //
9303     GenTreePtr condFalse = gtClone(op1);
9304     GenTreePtr condTrue;
9305     if (isCastClass)
9306     {
9307         //
9308         // use the special helper that skips the cases checked by our inlined cast
9309         //
9310         helper = CORINFO_HELP_CHKCASTCLASS_SPECIAL;
9311
9312         condTrue = gtNewHelperCallNode(helper, TYP_REF, 0, gtNewArgList(op2Var, gtClone(op1)));
9313     }
9314     else
9315     {
9316         condTrue = gtNewIconNode(0, TYP_REF);
9317     }
9318
9319 #define USE_QMARK_TREES
9320
9321 #ifdef USE_QMARK_TREES
9322     GenTreePtr qmarkMT;
9323     //
9324     // Generate first QMARK - COLON tree
9325     //
9326     //  qmarkMT ==>   GT_QMARK
9327     //                 /     \
9328     //            condMT   GT_COLON
9329     //                      /     \
9330     //                condFalse  condTrue
9331     //
9332     temp    = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse);
9333     qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp);
9334     condMT->gtFlags |= GTF_RELOP_QMARK;
9335
9336     GenTreePtr qmarkNull;
9337     //
9338     // Generate second QMARK - COLON tree
9339     //
9340     //  qmarkNull ==>  GT_QMARK
9341     //                 /     \
9342     //           condNull  GT_COLON
9343     //                      /     \
9344     //                qmarkMT   op1Copy
9345     //
9346     temp      = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT);
9347     qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp);
9348     qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF;
9349     condNull->gtFlags |= GTF_RELOP_QMARK;
9350
9351     // Make QMark node a top level node by spilling it.
9352     unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2"));
9353     impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE);
9354     return gtNewLclvNode(tmp, TYP_REF);
9355 #endif
9356 }
9357
9358 #ifndef DEBUG
9359 #define assertImp(cond) ((void)0)
9360 #else
9361 #define assertImp(cond)                                                                                                \
9362     do                                                                                                                 \
9363     {                                                                                                                  \
9364         if (!(cond))                                                                                                   \
9365         {                                                                                                              \
9366             const int cchAssertImpBuf = 600;                                                                           \
9367             char*     assertImpBuf    = (char*)alloca(cchAssertImpBuf);                                                \
9368             _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1,                                            \
9369                         "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond,         \
9370                         impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL",                      \
9371                         op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth);                     \
9372             assertAbort(assertImpBuf, __FILE__, __LINE__);                                                             \
9373         }                                                                                                              \
9374     } while (0)
9375 #endif // DEBUG
9376
9377 #ifdef _PREFAST_
9378 #pragma warning(push)
9379 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
9380 #endif
9381 /*****************************************************************************
9382  *  Import the instr for the given basic block
9383  */
9384 void Compiler::impImportBlockCode(BasicBlock* block)
9385 {
9386 #define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind)
9387
9388 #ifdef DEBUG
9389
9390     if (verbose)
9391     {
9392         printf("\nImporting BB%02u (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName);
9393     }
9394 #endif
9395
9396     unsigned  nxtStmtIndex = impInitBlockLineInfo();
9397     IL_OFFSET nxtStmtOffs;
9398
9399     GenTreePtr                   arrayNodeFrom, arrayNodeTo, arrayNodeToIndex;
9400     bool                         expandInline;
9401     CorInfoHelpFunc              helper;
9402     CorInfoIsAccessAllowedResult accessAllowedResult;
9403     CORINFO_HELPER_DESC          calloutHelper;
9404     const BYTE*                  lastLoadToken = nullptr;
9405
9406     // reject cyclic constraints
9407     if (tiVerificationNeeded)
9408     {
9409         Verify(!info.hasCircularClassConstraints, "Method parent has circular class type parameter constraints.");
9410         Verify(!info.hasCircularMethodConstraints, "Method has circular method type parameter constraints.");
9411     }
9412
9413     /* Get the tree list started */
9414
9415     impBeginTreeList();
9416
9417     /* Walk the opcodes that comprise the basic block */
9418
9419     const BYTE* codeAddr = info.compCode + block->bbCodeOffs;
9420     const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd;
9421
9422     IL_OFFSET opcodeOffs    = block->bbCodeOffs;
9423     IL_OFFSET lastSpillOffs = opcodeOffs;
9424
9425     signed jmpDist;
9426
9427     /* remember the start of the delegate creation sequence (used for verification) */
9428     const BYTE* delegateCreateStart = nullptr;
9429
9430     int  prefixFlags = 0;
9431     bool explicitTailCall, constraintCall, readonlyCall;
9432
9433     bool     insertLdloc = false; // set by CEE_DUP and cleared by following store
9434     typeInfo tiRetVal;
9435
9436     unsigned numArgs = info.compArgsCount;
9437
9438     /* Now process all the opcodes in the block */
9439
9440     var_types callTyp    = TYP_COUNT;
9441     OPCODE    prevOpcode = CEE_ILLEGAL;
9442
9443     if (block->bbCatchTyp)
9444     {
9445         if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
9446         {
9447             impCurStmtOffsSet(block->bbCodeOffs);
9448         }
9449
9450         // We will spill the GT_CATCH_ARG and the input of the BB_QMARK block
9451         // to a temp. This is a trade-off for code simplicity.
9452         impSpillSpecialSideEff();
9453     }
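    // For example, the first block of a catch handler receives the caught exception object
    // as GT_CATCH_ARG; spilling it (and the BB_QMARK input) to a temp up front keeps the
    // rest of this block's import simple.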
9454
9455     while (codeAddr < codeEndp)
9456     {
9457         bool                   usingReadyToRunHelper = false;
9458         CORINFO_RESOLVED_TOKEN resolvedToken;
9459         CORINFO_RESOLVED_TOKEN constrainedResolvedToken;
9460         CORINFO_CALL_INFO      callInfo;
9461         CORINFO_FIELD_INFO     fieldInfo;
9462
9463         tiRetVal = typeInfo(); // Default type info
9464
9465         //---------------------------------------------------------------------
9466
9467         /* We need to restrict the max tree depth as many of the Compiler
9468            functions are recursive. We do this by spilling the stack */
9469
9470         if (verCurrentState.esStackDepth)
9471         {
9472             /* Has it been a while since the stack was last empty (or last spilled)?
9473                If so, the tree depth may be accumulating, so spill it below. */
9474
9475             if ((opcodeOffs - lastSpillOffs) > 200)
9476             {
9477                 impSpillStackEnsure();
9478                 lastSpillOffs = opcodeOffs;
9479             }
9480         }
9481         else
9482         {
9483             lastSpillOffs   = opcodeOffs;
9484             impBoxTempInUse = false; // nothing on the stack, box temp OK to use again
9485         }
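        // The 200-byte IL window above is a heuristic: if the evaluation stack has stayed
        // non-empty for that long, intermediate values are spilled to temps so that the
        // trees being built (and the recursive routines that process them) stay shallow.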
9486
9487         /* Compute the current instr offset */
9488
9489         opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
9490
9491 #ifndef DEBUG
9492         if (opts.compDbgInfo)
9493 #endif
9494         {
9495             if (!compIsForInlining())
9496             {
9497                 nxtStmtOffs =
9498                     (nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET;
9499
9500                 /* Have we reached the next stmt boundary ? */
9501
9502                 if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs)
9503                 {
9504                     assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]);
9505
9506                     if (verCurrentState.esStackDepth != 0 && opts.compDbgCode)
9507                     {
9508                         /* We need to provide accurate IP-mapping at this point.
9509                            So spill anything on the stack so that it will form
9510                            gtStmts with the correct stmt offset noted */
9511
9512                         impSpillStackEnsure(true);
9513                     }
9514
9515                     // Has impCurStmtOffs been reported in any tree?
9516
9517                     if (impCurStmtOffs != BAD_IL_OFFSET && opts.compDbgCode)
9518                     {
9519                         GenTreePtr placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
9520                         impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9521
9522                         assert(impCurStmtOffs == BAD_IL_OFFSET);
9523                     }
9524
9525                     if (impCurStmtOffs == BAD_IL_OFFSET)
9526                     {
9527                         /* Make sure that nxtStmtIndex is in sync with opcodeOffs.
9528                            If opcodeOffs has gone past nxtStmtIndex, catch up */
9529
9530                         while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount &&
9531                                info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs)
9532                         {
9533                             nxtStmtIndex++;
9534                         }
9535
9536                         /* Go to the new stmt */
9537
9538                         impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]);
9539
9540                         /* Update the stmt boundary index */
9541
9542                         nxtStmtIndex++;
9543                         assert(nxtStmtIndex <= info.compStmtOffsetsCount);
9544
9545                         /* Are there any more line# entries after this one? */
9546
9547                         if (nxtStmtIndex < info.compStmtOffsetsCount)
9548                         {
9549                             /* Remember where the next line# starts */
9550
9551                             nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex];
9552                         }
9553                         else
9554                         {
9555                             /* No more line# entries */
9556
9557                             nxtStmtOffs = BAD_IL_OFFSET;
9558                         }
9559                     }
9560                 }
9561                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) &&
9562                          (verCurrentState.esStackDepth == 0))
9563                 {
9564                     /* At stack-empty locations, we have already added the tree to
9565                        the stmt list with the last offset. We just need to update
9566                        impCurStmtOffs
9567                      */
9568
9569                     impCurStmtOffsSet(opcodeOffs);
9570                 }
9571                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) &&
9572                          impOpcodeIsCallSiteBoundary(prevOpcode))
9573                 {
9574                     /* Make sure we have a type cached */
9575                     assert(callTyp != TYP_COUNT);
9576
9577                     if (callTyp == TYP_VOID)
9578                     {
9579                         impCurStmtOffsSet(opcodeOffs);
9580                     }
9581                     else if (opts.compDbgCode)
9582                     {
9583                         impSpillStackEnsure(true);
9584                         impCurStmtOffsSet(opcodeOffs);
9585                     }
9586                 }
9587                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP))
9588                 {
9589                     if (opts.compDbgCode)
9590                     {
9591                         impSpillStackEnsure(true);
9592                     }
9593
9594                     impCurStmtOffsSet(opcodeOffs);
9595                 }
9596
9597                 assert(impCurStmtOffs == BAD_IL_OFFSET || nxtStmtOffs == BAD_IL_OFFSET ||
9598                        jitGetILoffs(impCurStmtOffs) <= nxtStmtOffs);
9599             }
9600         }
9601
9602         CORINFO_CLASS_HANDLE clsHnd       = DUMMY_INIT(NULL);
9603         CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL);
9604         CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL);
9605
9606         var_types       lclTyp, ovflType = TYP_UNKNOWN;
9607         GenTreePtr      op1           = DUMMY_INIT(NULL);
9608         GenTreePtr      op2           = DUMMY_INIT(NULL);
9609         GenTreeArgList* args          = nullptr; // What good do these "DUMMY_INIT"s do?
9610         GenTreePtr      newObjThisPtr = DUMMY_INIT(NULL);
9611         bool            uns           = DUMMY_INIT(false);
9612
9613         /* Get the next opcode and the size of its parameters */
9614
9615         OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
9616         codeAddr += sizeof(__int8);
9617
9618 #ifdef DEBUG
9619         impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
9620         JITDUMP("\n    [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs);
9621 #endif
9622
9623     DECODE_OPCODE:
9624
9625         // Return if any previous code has caused inline to fail.
9626         if (compDonotInline())
9627         {
9628             return;
9629         }
9630
9631         /* Get the size of additional parameters */
9632
9633         signed int sz = opcodeSizes[opcode];
9634
9635 #ifdef DEBUG
9636         clsHnd  = NO_CLASS_HANDLE;
9637         lclTyp  = TYP_COUNT;
9638         callTyp = TYP_COUNT;
9639
9640         impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
9641         impCurOpcName = opcodeNames[opcode];
9642
9643         if (verbose && (opcode != CEE_PREFIX1))
9644         {
9645             printf("%s", impCurOpcName);
9646         }
9647
9648         /* Use assertImp() to display the opcode */
9649
9650         op1 = op2 = nullptr;
9651 #endif
9652
9653         /* See what kind of an opcode we have, then */
9654
9655         unsigned mflags   = 0;
9656         unsigned clsFlags = 0;
9657
9658         switch (opcode)
9659         {
9660             unsigned  lclNum;
9661             var_types type;
9662
9663             GenTreePtr op3;
9664             genTreeOps oper;
9665             unsigned   size;
9666
9667             int val;
9668
9669             CORINFO_SIG_INFO     sig;
9670             unsigned             flags;
9671             IL_OFFSET            jmpAddr;
9672             bool                 ovfl, unordered, callNode;
9673             bool                 ldstruct;
9674             CORINFO_CLASS_HANDLE tokenType;
9675
9676             union {
9677                 int     intVal;
9678                 float   fltVal;
9679                 __int64 lngVal;
9680                 double  dblVal;
9681             } cval;
9682
9683             case CEE_PREFIX1:
9684                 opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
9685                 codeAddr += sizeof(__int8);
9686                 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
9687                 goto DECODE_OPCODE;
9688
9689             SPILL_APPEND:
9690
9691                 // We need to call impSpillLclRefs() for a struct type lclVar.
9692                 // This is done for non-block assignments in the handling of stloc.
9693                 if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtOp.gtOp1) &&
9694                     (op1->gtOp.gtOp1->gtOper == GT_LCL_VAR))
9695                 {
9696                     impSpillLclRefs(op1->gtOp.gtOp1->AsLclVarCommon()->gtLclNum);
9697                 }
9698
9699                 /* Append 'op1' to the list of statements */
9700                 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
9701                 goto DONE_APPEND;
9702
9703             APPEND:
9704
9705                 /* Append 'op1' to the list of statements */
9706
9707                 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9708                 goto DONE_APPEND;
9709
9710             DONE_APPEND:
9711
9712 #ifdef DEBUG
9713                 // Remember at which BC offset the tree was finished
9714                 impNoteLastILoffs();
9715 #endif
9716                 break;
9717
9718             case CEE_LDNULL:
9719                 impPushNullObjRefOnStack();
9720                 break;
9721
9722             case CEE_LDC_I4_M1:
9723             case CEE_LDC_I4_0:
9724             case CEE_LDC_I4_1:
9725             case CEE_LDC_I4_2:
9726             case CEE_LDC_I4_3:
9727             case CEE_LDC_I4_4:
9728             case CEE_LDC_I4_5:
9729             case CEE_LDC_I4_6:
9730             case CEE_LDC_I4_7:
9731             case CEE_LDC_I4_8:
9732                 cval.intVal = (opcode - CEE_LDC_I4_0);
9733                 assert(-1 <= cval.intVal && cval.intVal <= 8);
9734                 goto PUSH_I4CON;
9735
9736             case CEE_LDC_I4_S:
9737                 cval.intVal = getI1LittleEndian(codeAddr);
9738                 goto PUSH_I4CON;
9739             case CEE_LDC_I4:
9740                 cval.intVal = getI4LittleEndian(codeAddr);
9741                 goto PUSH_I4CON;
9742             PUSH_I4CON:
9743                 JITDUMP(" %d", cval.intVal);
9744                 impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT));
9745                 break;
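                // For example, "ldc.i4.s -5" encodes its operand as a single signed byte,
                // which is why getI1LittleEndian is used for CEE_LDC_I4_S, while plain
                // "ldc.i4" carries a full 4-byte operand read with getI4LittleEndian.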
9746
9747             case CEE_LDC_I8:
9748                 cval.lngVal = getI8LittleEndian(codeAddr);
9749                 JITDUMP(" 0x%016llx", cval.lngVal);
9750                 impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG));
9751                 break;
9752
9753             case CEE_LDC_R8:
9754                 cval.dblVal = getR8LittleEndian(codeAddr);
9755                 JITDUMP(" %#.17g", cval.dblVal);
9756                 impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE));
9757                 break;
9758
9759             case CEE_LDC_R4:
9760                 cval.dblVal = getR4LittleEndian(codeAddr);
9761                 JITDUMP(" %#.17g", cval.dblVal);
9762                 {
9763                     GenTreePtr cnsOp = gtNewDconNode(cval.dblVal);
9764 #if !FEATURE_X87_DOUBLES
9765                     // The x87 FP stack doesn't differentiate between float and double,
9766                     // so on x87 an R4 constant stays R8; every other target narrows it to TYP_FLOAT.
9767                     cnsOp->gtType = TYP_FLOAT;
9768 #endif // FEATURE_X87_DOUBLES
9769                     impPushOnStack(cnsOp, typeInfo(TI_DOUBLE));
9770                 }
9771                 break;
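                // For example, "ldc.r4 1.5" still flows through gtNewDconNode (a double
                // constant node); only the node's type is narrowed to TYP_FLOAT on non-x87 targets.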
9772
9773             case CEE_LDSTR:
9774
9775                 if (compIsForInlining())
9776                 {
9777                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_NO_CALLEE_LDSTR)
9778                     {
9779                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_LDSTR_RESTRICTION);
9780                         return;
9781                     }
9782                 }
9783
9784                 val = getU4LittleEndian(codeAddr);
9785                 JITDUMP(" %08X", val);
9786                 if (tiVerificationNeeded)
9787                 {
9788                     Verify(info.compCompHnd->isValidStringRef(info.compScopeHnd, val), "bad string");
9789                     tiRetVal = typeInfo(TI_REF, impGetStringClass());
9790                 }
9791                 impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal);
9792
9793                 break;
9794
9795             case CEE_LDARG:
9796                 lclNum = getU2LittleEndian(codeAddr);
9797                 JITDUMP(" %u", lclNum);
9798                 impLoadArg(lclNum, opcodeOffs + sz + 1);
9799                 break;
9800
9801             case CEE_LDARG_S:
9802                 lclNum = getU1LittleEndian(codeAddr);
9803                 JITDUMP(" %u", lclNum);
9804                 impLoadArg(lclNum, opcodeOffs + sz + 1);
9805                 break;
9806
9807             case CEE_LDARG_0:
9808             case CEE_LDARG_1:
9809             case CEE_LDARG_2:
9810             case CEE_LDARG_3:
9811                 lclNum = (opcode - CEE_LDARG_0);
9812                 assert(lclNum >= 0 && lclNum < 4);
9813                 impLoadArg(lclNum, opcodeOffs + sz + 1);
9814                 break;
9815
9816             case CEE_LDLOC:
9817                 lclNum = getU2LittleEndian(codeAddr);
9818                 JITDUMP(" %u", lclNum);
9819                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9820                 break;
9821
9822             case CEE_LDLOC_S:
9823                 lclNum = getU1LittleEndian(codeAddr);
9824                 JITDUMP(" %u", lclNum);
9825                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9826                 break;
9827
9828             case CEE_LDLOC_0:
9829             case CEE_LDLOC_1:
9830             case CEE_LDLOC_2:
9831             case CEE_LDLOC_3:
9832                 lclNum = (opcode - CEE_LDLOC_0);
9833                 assert(lclNum >= 0 && lclNum < 4);
9834                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9835                 break;
9836
9837             case CEE_STARG:
9838                 lclNum = getU2LittleEndian(codeAddr);
9839                 goto STARG;
9840
9841             case CEE_STARG_S:
9842                 lclNum = getU1LittleEndian(codeAddr);
9843             STARG:
9844                 JITDUMP(" %u", lclNum);
9845
9846                 if (tiVerificationNeeded)
9847                 {
9848                     Verify(lclNum < info.compILargsCount, "bad arg num");
9849                 }
9850
9851                 if (compIsForInlining())
9852                 {
9853                     op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
9854                     noway_assert(op1->gtOper == GT_LCL_VAR);
9855                     lclNum = op1->AsLclVar()->gtLclNum;
9856
9857                     goto VAR_ST_VALID;
9858                 }
9859
9860                 lclNum = compMapILargNum(lclNum); // account for possible hidden param
9861                 assertImp(lclNum < numArgs);
9862
9863                 if (lclNum == info.compThisArg)
9864                 {
9865                     lclNum = lvaArg0Var;
9866                 }
9867                 lvaTable[lclNum].lvArgWrite = 1;
9868
9869                 if (tiVerificationNeeded)
9870                 {
9871                     typeInfo& tiLclVar = lvaTable[lclNum].lvVerTypeInfo;
9872                     Verify(tiCompatibleWith(impStackTop().seTypeInfo, NormaliseForStack(tiLclVar), true),
9873                            "type mismatch");
9874
9875                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
9876                     {
9877                         Verify(!tiLclVar.IsThisPtr(), "storing to uninit this ptr");
9878                     }
9879                 }
9880
9881                 goto VAR_ST;
9882
9883             case CEE_STLOC:
9884                 lclNum = getU2LittleEndian(codeAddr);
9885                 JITDUMP(" %u", lclNum);
9886                 goto LOC_ST;
9887
9888             case CEE_STLOC_S:
9889                 lclNum = getU1LittleEndian(codeAddr);
9890                 JITDUMP(" %u", lclNum);
9891                 goto LOC_ST;
9892
9893             case CEE_STLOC_0:
9894             case CEE_STLOC_1:
9895             case CEE_STLOC_2:
9896             case CEE_STLOC_3:
9897                 lclNum = (opcode - CEE_STLOC_0);
9898                 assert(lclNum >= 0 && lclNum < 4);
9899
9900             LOC_ST:
9901                 if (tiVerificationNeeded)
9902                 {
9903                     Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
9904                     Verify(tiCompatibleWith(impStackTop().seTypeInfo,
9905                                             NormaliseForStack(lvaTable[lclNum + numArgs].lvVerTypeInfo), true),
9906                            "type mismatch");
9907                 }
9908
9909                 if (compIsForInlining())
9910                 {
9911                     lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
9912
9913                     /* Have we allocated a temp for this local? */
9914
9915                     lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp"));
9916
9917                     goto _PopValue;
9918                 }
9919
9920                 lclNum += numArgs;
9921
9922             VAR_ST:
9923
9924                 if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var)
9925                 {
9926                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
9927                     BADCODE("Bad IL");
9928                 }
9929
9930             VAR_ST_VALID:
9931
9932                 /* if it is a struct assignment, make certain we don't overflow the buffer */
9933                 assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd));
9934
9935                 if (lvaTable[lclNum].lvNormalizeOnLoad())
9936                 {
9937                     lclTyp = lvaGetRealType(lclNum);
9938                 }
9939                 else
9940                 {
9941                     lclTyp = lvaGetActualType(lclNum);
9942                 }
9943
9944             _PopValue:
9945                 /* Pop the value being assigned */
9946
9947                 {
9948                     StackEntry se = impPopStack(clsHnd);
9949                     op1           = se.val;
9950                     tiRetVal      = se.seTypeInfo;
9951                 }
9952
9953 #ifdef FEATURE_SIMD
9954                 if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet()))
9955                 {
9956                     assert(op1->TypeGet() == TYP_STRUCT);
9957                     op1->gtType = lclTyp;
9958                 }
9959 #endif // FEATURE_SIMD
9960
9961                 op1 = impImplicitIorI4Cast(op1, lclTyp);
9962
9963 #ifdef _TARGET_64BIT_
9964                 // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
9965                 if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT))
9966                 {
9967                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
9968                     op1 = gtNewCastNode(TYP_INT, op1, TYP_INT);
9969                 }
9970 #endif // _TARGET_64BIT_
9971
9972                 // We had better assign it a value of the correct type
9973                 assertImp(
9974                     genActualType(lclTyp) == genActualType(op1->gtType) ||
9975                     genActualType(lclTyp) == TYP_I_IMPL && op1->IsVarAddr() ||
9976                     (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) ||
9977                     (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) ||
9978                     (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) ||
9979                     ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF));
9980
9981                 /* If op1 is "&var" then its type is the transient "*" and it can
9982                    be used either as TYP_BYREF or TYP_I_IMPL */
9983
9984                 if (op1->IsVarAddr())
9985                 {
9986                     assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF);
9987
9988                     /* When "&var" is created, we assume it is a byref. If it is
9989                        being assigned to a TYP_I_IMPL var, change the type to
9990                        prevent unnecessary GC info */
9991
9992                     if (genActualType(lclTyp) == TYP_I_IMPL)
9993                     {
9994                         op1->gtType = TYP_I_IMPL;
9995                     }
9996                 }
9997
9998                 /* Filter out simple assignments to itself */
9999
10000                 if (op1->gtOper == GT_LCL_VAR && lclNum == op1->gtLclVarCommon.gtLclNum)
10001                 {
10002                     if (insertLdloc)
10003                     {
10004                         // This is a sequence of (ldloc, dup, stloc).  Can simplify
10005                         // to (ldloc, stloc).  Use impLoadVar below to reconstruct the ldloc node.
10006                         CLANG_FORMAT_COMMENT_ANCHOR;
10007
10008 #ifdef DEBUG
10009                         if (tiVerificationNeeded)
10010                         {
10011                             assert(
10012                                 typeInfo::AreEquivalent(tiRetVal, NormaliseForStack(lvaTable[lclNum].lvVerTypeInfo)));
10013                         }
10014 #endif
10015
10016                         op1         = nullptr;
10017                         insertLdloc = false;
10018
10019                         impLoadVar(lclNum, opcodeOffs + sz + 1);
10020                         break;
10021                     }
10022                     else if (opts.compDbgCode)
10023                     {
10024                         op1 = gtNewNothingNode();
10025                         goto SPILL_APPEND;
10026                     }
10027                     else
10028                     {
10029                         break;
10030                     }
10031                 }
10032
10033                 /* Create the assignment node */
10034
10035                 op2 = gtNewLclvNode(lclNum, lclTyp, opcodeOffs + sz + 1);
10036
10037                 /* If the local is aliased, we need to spill calls and
10038                    indirections from the stack. */
10039
10040                 if ((lvaTable[lclNum].lvAddrExposed || lvaTable[lclNum].lvHasLdAddrOp) &&
10041                     verCurrentState.esStackDepth > 0)
10042                 {
10043                     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased"));
10044                 }
10045
10046                 /* Spill any refs to the local from the stack */
10047
10048                 impSpillLclRefs(lclNum);
10049
10050 #if !FEATURE_X87_DOUBLES
10051                 // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
10052                 // We insert a cast to the dest 'op2' type
10053                 //
10054                 if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
10055                     varTypeIsFloating(op2->gtType))
10056                 {
10057                     op1 = gtNewCastNode(op2->TypeGet(), op1, op2->TypeGet());
10058                 }
10059 #endif // !FEATURE_X87_DOUBLES
10060
10061                 if (varTypeIsStruct(lclTyp))
10062                 {
10063                     op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL);
10064                 }
10065                 else
10066                 {
10067                     // The code generator generates GC tracking information
10068                     // based on the RHS of the assignment.  Later the LHS (which
10069                     // is a BYREF) gets used and the emitter checks that that variable
10070                     // is being tracked.  It is not (since the RHS was an int and did
10071                     // not need tracking).  To keep this assert happy, we change the RHS
10072                     if (lclTyp == TYP_BYREF && !varTypeIsGC(op1->gtType))
10073                     {
10074                         op1->gtType = TYP_BYREF;
10075                     }
10076                     op1 = gtNewAssignNode(op2, op1);
10077                 }
10078
10079                 /* If insertLdloc is true, then we need to insert a ldloc following the
10080                    stloc.  This is done when converting a (dup, stloc) sequence into
10081                    a (stloc, ldloc) sequence. */
10082
10083                 if (insertLdloc)
10084                 {
10085                     // From SPILL_APPEND
10086                     impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
10087
10088 #ifdef DEBUG
10089                     // From DONE_APPEND
10090                     impNoteLastILoffs();
10091 #endif
10092                     op1         = nullptr;
10093                     insertLdloc = false;
10094
10095                     impLoadVar(lclNum, opcodeOffs + sz + 1, tiRetVal);
10096                     break;
10097                 }
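                // For example, the IL "dup; stloc.0" leaves the stored value on the stack;
                // rather than materializing the dup, the importer appends the store above
                // and then reloads local 0, which is exactly what insertLdloc requests.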
10098
10099                 goto SPILL_APPEND;
10100
10101             case CEE_LDLOCA:
10102                 lclNum = getU2LittleEndian(codeAddr);
10103                 goto LDLOCA;
10104
10105             case CEE_LDLOCA_S:
10106                 lclNum = getU1LittleEndian(codeAddr);
10107             LDLOCA:
10108                 JITDUMP(" %u", lclNum);
10109                 if (tiVerificationNeeded)
10110                 {
10111                     Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
10112                     Verify(info.compInitMem, "initLocals not set");
10113                 }
10114
10115                 if (compIsForInlining())
10116                 {
10117                     // Get the local type
10118                     lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
10119
10120                     /* Have we allocated a temp for this local? */
10121
10122                     lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp"));
10123
10124                     op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum));
10125
10126                     goto _PUSH_ADRVAR;
10127                 }
10128
10129                 lclNum += numArgs;
10130                 assertImp(lclNum < info.compLocalsCount);
10131                 goto ADRVAR;
10132
10133             case CEE_LDARGA:
10134                 lclNum = getU2LittleEndian(codeAddr);
10135                 goto LDARGA;
10136
10137             case CEE_LDARGA_S:
10138                 lclNum = getU1LittleEndian(codeAddr);
10139             LDARGA:
10140                 JITDUMP(" %u", lclNum);
10141                 Verify(lclNum < info.compILargsCount, "bad arg num");
10142
10143                 if (compIsForInlining())
10144                 {
10145                     // In IL, LDARGA(_S) is used to load the byref managed pointer of a struct argument,
10146                     // followed by a ldfld to load the field.
10147
10148                     op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
10149                     if (op1->gtOper != GT_LCL_VAR)
10150                     {
10151                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR);
10152                         return;
10153                     }
10154
10155                     assert(op1->gtOper == GT_LCL_VAR);
10156
10157                     goto _PUSH_ADRVAR;
10158                 }
10159
10160                 lclNum = compMapILargNum(lclNum); // account for possible hidden param
10161                 assertImp(lclNum < numArgs);
10162
10163                 if (lclNum == info.compThisArg)
10164                 {
10165                     lclNum = lvaArg0Var;
10166                 }
10167
10168                 goto ADRVAR;
10169
10170             ADRVAR:
10171
10172                 op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum), opcodeOffs + sz + 1);
10173
10174             _PUSH_ADRVAR:
10175                 assert(op1->gtOper == GT_LCL_VAR);
10176
10177                 /* Note that this is supposed to create the transient type "*"
10178                    which may be used as a TYP_I_IMPL. However we catch places
10179                    where it is used as a TYP_I_IMPL and change the node if needed.
10180                    Thus we are pessimistic and may report byrefs in the GC info
10181                    where they are not absolutely needed, but it is safer this way.
10182                  */
10183                 op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10184
10185                 // &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does
10186                 assert((op1->gtFlags & GTF_GLOB_REF) == 0);
10187
10188                 tiRetVal = lvaTable[lclNum].lvVerTypeInfo;
10189                 if (tiVerificationNeeded)
10190                 {
10191                     // Don't allow taking address of uninit this ptr.
10192                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
10193                     {
10194                         Verify(!tiRetVal.IsThisPtr(), "address of uninit this ptr");
10195                     }
10196
10197                     if (!tiRetVal.IsByRef())
10198                     {
10199                         tiRetVal.MakeByRef();
10200                     }
10201                     else
10202                     {
10203                         Verify(false, "byref to byref");
10204                     }
10205                 }
10206
10207                 impPushOnStack(op1, tiRetVal);
10208                 break;
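                // For example, "ldloca.s 0" ends up here: the GT_ADDR node above produces a
                // managed pointer (TYP_BYREF) to local 0, which may later be retyped to
                // TYP_I_IMPL if it is only ever used as a native int.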
10209
10210             case CEE_ARGLIST:
10211
10212                 if (!info.compIsVarArgs)
10213                 {
10214                     BADCODE("arglist in non-vararg method");
10215                 }
10216
10217                 if (tiVerificationNeeded)
10218                 {
10219                     tiRetVal = typeInfo(TI_STRUCT, impGetRuntimeArgumentHandle());
10220                 }
10221                 assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG);
10222
10223                 /* The ARGLIST cookie is a hidden 'last' parameter; we have already
10224                    adjusted the arg count because this is like fetching the last param */
10225                 assertImp(0 < numArgs);
10226                 assert(lvaTable[lvaVarargsHandleArg].lvAddrExposed);
10227                 lclNum = lvaVarargsHandleArg;
10228                 op1    = gtNewLclvNode(lclNum, TYP_I_IMPL, opcodeOffs + sz + 1);
10229                 op1    = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10230                 impPushOnStack(op1, tiRetVal);
10231                 break;
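                // The arglist cookie is thus exposed as the address of the hidden varargs
                // handle argument; a vararg callee can walk its incoming arguments from it
                // (e.g. via System.ArgIterator in managed code).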
10232
10233             case CEE_ENDFINALLY:
10234
10235                 if (compIsForInlining())
10236                 {
10237                     assert(!"Shouldn't have exception handlers in the inliner!");
10238                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY);
10239                     return;
10240                 }
10241
10242                 if (verCurrentState.esStackDepth > 0)
10243                 {
10244                     impEvalSideEffects();
10245                 }
10246
10247                 if (info.compXcptnsCount == 0)
10248                 {
10249                     BADCODE("endfinally outside finally");
10250                 }
10251
10252                 assert(verCurrentState.esStackDepth == 0);
10253
10254                 op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr);
10255                 goto APPEND;
10256
10257             case CEE_ENDFILTER:
10258
10259                 if (compIsForInlining())
10260                 {
10261                     assert(!"Shouldn't have exception handlers in the inliner!");
10262                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER);
10263                     return;
10264                 }
10265
10266                 block->bbSetRunRarely(); // filters are rare
10267
10268                 if (info.compXcptnsCount == 0)
10269                 {
10270                     BADCODE("endfilter outside filter");
10271                 }
10272
10273                 if (tiVerificationNeeded)
10274                 {
10275                     Verify(impStackTop().seTypeInfo.IsType(TI_INT), "bad endfilt arg");
10276                 }
10277
10278                 op1 = impPopStack().val;
10279                 assertImp(op1->gtType == TYP_INT);
10280                 if (!bbInFilterILRange(block))
10281                 {
10282                     BADCODE("EndFilter outside a filter handler");
10283                 }
10284
10285                 /* Mark current bb as end of filter */
10286
10287                 assert(compCurBB->bbFlags & BBF_DONT_REMOVE);
10288                 assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET);
10289
10290                 /* Mark catch handler as successor */
10291
10292                 op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1);
10293                 if (verCurrentState.esStackDepth != 0)
10294                 {
10295                     verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__)
10296                                                 DEBUGARG(__LINE__));
10297                 }
10298                 goto APPEND;
10299
10300             case CEE_RET:
10301                 prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it
10302             RET:
10303                 if (!impReturnInstruction(block, prefixFlags, opcode))
10304                 {
10305                     return; // abort
10306                 }
10307                 else
10308                 {
10309                     break;
10310                 }
10311
10312             case CEE_JMP:
10313
10314                 assert(!compIsForInlining());
10315
10316                 if (tiVerificationNeeded)
10317                 {
10318                     Verify(false, "Invalid opcode: CEE_JMP");
10319                 }
10320
10321                 if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex())
10322                 {
10323                     /* CEE_JMP does not make sense in some "protected" regions. */
10324
10325                     BADCODE("Jmp not allowed in protected region");
10326                 }
10327
10328                 if (verCurrentState.esStackDepth != 0)
10329                 {
10330                     BADCODE("Stack must be empty after CEE_JMPs");
10331                 }
10332
10333                 _impResolveToken(CORINFO_TOKENKIND_Method);
10334
10335                 JITDUMP(" %08X", resolvedToken.token);
10336
10337                 /* The signature of the target has to be identical to ours.
10338                    At least check that argCnt and returnType match */
10339
10340                 eeGetMethodSig(resolvedToken.hMethod, &sig);
10341                 if (sig.numArgs != info.compMethodInfo->args.numArgs ||
10342                     sig.retType != info.compMethodInfo->args.retType ||
10343                     sig.callConv != info.compMethodInfo->args.callConv)
10344                 {
10345                     BADCODE("Incompatible target for CEE_JMPs");
10346                 }
10347
10348 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARMARCH_)
10349
10350                 op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod);
10351
10352                 /* Mark the basic block as being a JUMP instead of RETURN */
10353
10354                 block->bbFlags |= BBF_HAS_JMP;
10355
10356                 /* Set this flag to make sure register arguments have a location assigned
10357                  * even if we don't use them inside the method */
10358
10359                 compJmpOpUsed = true;
10360
10361                 fgNoStructPromotion = true;
10362
10363                 goto APPEND;
10364
10365 #else // !_TARGET_XARCH_ && !_TARGET_ARMARCH_
10366
10367                 // Import this just like a series of LDARGs + tail. + call + ret
10368
10369                 if (info.compIsVarArgs)
10370                 {
10371                     // For now we don't implement true tail calls, so this breaks varargs.
10372                     // So warn the user instead of generating bad code.
10373                     // This is a semi-temporary workaround for DevDiv 173860, until we can properly
10374                     // implement true tail calls.
10375                     IMPL_LIMITATION("varargs + CEE_JMP doesn't work yet");
10376                 }
10377
10378                 // First load up the arguments (0 - N)
10379                 for (unsigned argNum = 0; argNum < info.compILargsCount; argNum++)
10380                 {
10381                     impLoadArg(argNum, opcodeOffs + sz + 1);
10382                 }
10383
10384                 // Now generate the tail call
10385                 noway_assert(prefixFlags == 0);
10386                 prefixFlags = PREFIX_TAILCALL_EXPLICIT;
10387                 opcode      = CEE_CALL;
10388
10389                 eeGetCallInfo(&resolvedToken, NULL,
10390                               combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS), &callInfo);
10391
10392                 // All calls and delegates need a security callout.
10393                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
10394
10395                 callTyp = impImportCall(CEE_CALL, &resolvedToken, NULL, NULL, PREFIX_TAILCALL_EXPLICIT, &callInfo,
10396                                         opcodeOffs);
10397
10398                 // And finish with the ret
10399                 goto RET;
10400
10401 #endif // _TARGET_XARCH_ || _TARGET_ARMARCH_
10402
10403             case CEE_LDELEMA:
10404                 assertImp(sz == sizeof(unsigned));
10405
10406                 _impResolveToken(CORINFO_TOKENKIND_Class);
10407
10408                 JITDUMP(" %08X", resolvedToken.token);
10409
10410                 ldelemClsHnd = resolvedToken.hClass;
10411
10412                 if (tiVerificationNeeded)
10413                 {
10414                     typeInfo tiArray = impStackTop(1).seTypeInfo;
10415                     typeInfo tiIndex = impStackTop().seTypeInfo;
10416
10417                     // As per ECMA, the 'index' specified can be either int32 or native int.
10418                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10419
10420                     typeInfo arrayElemType = verMakeTypeInfo(ldelemClsHnd);
10421                     Verify(tiArray.IsNullObjRef() ||
10422                                typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElemType),
10423                            "bad array");
10424
10425                     tiRetVal = arrayElemType;
10426                     tiRetVal.MakeByRef();
10427                     if (prefixFlags & PREFIX_READONLY)
10428                     {
10429                         tiRetVal.SetIsReadonlyByRef();
10430                     }
10431
10432                     // an array interior pointer is always in the heap
10433                     tiRetVal.SetIsPermanentHomeByRef();
10434                 }
10435
10436                 // If it's a value class array we just do a simple address-of
10437                 if (eeIsValueClass(ldelemClsHnd))
10438                 {
10439                     CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd);
10440                     if (cit == CORINFO_TYPE_UNDEF)
10441                     {
10442                         lclTyp = TYP_STRUCT;
10443                     }
10444                     else
10445                     {
10446                         lclTyp = JITtype2varType(cit);
10447                     }
10448                     goto ARR_LD_POST_VERIFY;
10449                 }
10450
10451                 // Similarly, if it's a readonly access, we can do a simple address-of
10452                 // without doing a runtime type-check
10453                 if (prefixFlags & PREFIX_READONLY)
10454                 {
10455                     lclTyp = TYP_REF;
10456                     goto ARR_LD_POST_VERIFY;
10457                 }
10458
10459                 // Otherwise we need the full helper function with run-time type check
10460                 op1 = impTokenToHandle(&resolvedToken);
10461                 if (op1 == nullptr)
10462                 { // compDonotInline()
10463                     return;
10464                 }
10465
10466                 args = gtNewArgList(op1);                      // Type
10467                 args = gtNewListNode(impPopStack().val, args); // index
10468                 args = gtNewListNode(impPopStack().val, args); // array
10469                 op1  = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, GTF_EXCEPT, args);
10470
10471                 impPushOnStack(op1, tiRetVal);
10472                 break;
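                // Without the readonly. prefix, "ldelema T" on an array of reference type
                // must type-check the array's element type at run time (arrays are covariant),
                // which is what the CORINFO_HELP_LDELEMA_REF call above performs.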
10473
10474             // ldelem for reference and value types
10475             case CEE_LDELEM:
10476                 assertImp(sz == sizeof(unsigned));
10477
10478                 _impResolveToken(CORINFO_TOKENKIND_Class);
10479
10480                 JITDUMP(" %08X", resolvedToken.token);
10481
10482                 ldelemClsHnd = resolvedToken.hClass;
10483
10484                 if (tiVerificationNeeded)
10485                 {
10486                     typeInfo tiArray = impStackTop(1).seTypeInfo;
10487                     typeInfo tiIndex = impStackTop().seTypeInfo;
10488
10489                     // As per ECMA, the 'index' specified can be either int32 or native int.
10490                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10491                     tiRetVal = verMakeTypeInfo(ldelemClsHnd);
10492
10493                     Verify(tiArray.IsNullObjRef() || tiCompatibleWith(verGetArrayElemType(tiArray), tiRetVal, false),
10494                            "type of array incompatible with type operand");
10495                     tiRetVal.NormaliseForStack();
10496                 }
10497
10498                 // If it's a reference type or generic variable type
10499                 // then just generate code as though it's a ldelem.ref instruction
10500                 if (!eeIsValueClass(ldelemClsHnd))
10501                 {
10502                     lclTyp = TYP_REF;
10503                     opcode = CEE_LDELEM_REF;
10504                 }
10505                 else
10506                 {
10507                     CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd);
10508                     lclTyp             = JITtype2varType(jitTyp);
10509                     tiRetVal           = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct
10510                     tiRetVal.NormaliseForStack();
10511                 }
10512                 goto ARR_LD_POST_VERIFY;
10513
10514             case CEE_LDELEM_I1:
10515                 lclTyp = TYP_BYTE;
10516                 goto ARR_LD;
10517             case CEE_LDELEM_I2:
10518                 lclTyp = TYP_SHORT;
10519                 goto ARR_LD;
10520             case CEE_LDELEM_I:
10521                 lclTyp = TYP_I_IMPL;
10522                 goto ARR_LD;
10523
10524             // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter
10525             // and treating it as TYP_INT avoids other asserts.
10526             case CEE_LDELEM_U4:
10527                 lclTyp = TYP_INT;
10528                 goto ARR_LD;
10529
10530             case CEE_LDELEM_I4:
10531                 lclTyp = TYP_INT;
10532                 goto ARR_LD;
10533             case CEE_LDELEM_I8:
10534                 lclTyp = TYP_LONG;
10535                 goto ARR_LD;
10536             case CEE_LDELEM_REF:
10537                 lclTyp = TYP_REF;
10538                 goto ARR_LD;
10539             case CEE_LDELEM_R4:
10540                 lclTyp = TYP_FLOAT;
10541                 goto ARR_LD;
10542             case CEE_LDELEM_R8:
10543                 lclTyp = TYP_DOUBLE;
10544                 goto ARR_LD;
10545             case CEE_LDELEM_U1:
10546                 lclTyp = TYP_UBYTE;
10547                 goto ARR_LD;
10548             case CEE_LDELEM_U2:
10549                 lclTyp = TYP_CHAR;
10550                 goto ARR_LD;
10551
10552             ARR_LD:
10553
10554                 if (tiVerificationNeeded)
10555                 {
10556                     typeInfo tiArray = impStackTop(1).seTypeInfo;
10557                     typeInfo tiIndex = impStackTop().seTypeInfo;
10558
10559                     // As per ECMA, the 'index' specified can be either int32 or native int.
10560                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10561                     if (tiArray.IsNullObjRef())
10562                     {
10563                         if (lclTyp == TYP_REF)
10564                         { // we will say a deref of a null array yields a null ref
10565                             tiRetVal = typeInfo(TI_NULL);
10566                         }
10567                         else
10568                         {
10569                             tiRetVal = typeInfo(lclTyp);
10570                         }
10571                     }
10572                     else
10573                     {
10574                         tiRetVal             = verGetArrayElemType(tiArray);
10575                         typeInfo arrayElemTi = typeInfo(lclTyp);
10576 #ifdef _TARGET_64BIT_
10577                         if (opcode == CEE_LDELEM_I)
10578                         {
10579                             arrayElemTi = typeInfo::nativeInt();
10580                         }
10581
10582                         if (lclTyp != TYP_REF && lclTyp != TYP_STRUCT)
10583                         {
10584                             Verify(typeInfo::AreEquivalent(tiRetVal, arrayElemTi), "bad array");
10585                         }
10586                         else
10587 #endif // _TARGET_64BIT_
10588                         {
10589                             Verify(tiRetVal.IsType(arrayElemTi.GetType()), "bad array");
10590                         }
10591                     }
10592                     tiRetVal.NormaliseForStack();
10593                 }
10594             ARR_LD_POST_VERIFY:
10595
10596                 /* Pull the index value and array address */
10597                 op2 = impPopStack().val;
10598                 op1 = impPopStack().val;
10599                 assertImp(op1->gtType == TYP_REF);
10600
10601                 /* Check for null pointer - in the inliner case we simply abort */
10602
10603                 if (compIsForInlining())
10604                 {
10605                     if (op1->gtOper == GT_CNS_INT)
10606                     {
10607                         compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM);
10608                         return;
10609                     }
10610                 }
10611
10612                 op1 = impCheckForNullPointer(op1);
10613
10614                 /* Mark the block as containing an index expression */
10615
10616                 if (op1->gtOper == GT_LCL_VAR)
10617                 {
10618                     if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD)
10619                     {
10620                         block->bbFlags |= BBF_HAS_IDX_LEN;
10621                         optMethodFlags |= OMF_HAS_ARRAYREF;
10622                     }
10623                 }
10624
10625                 /* Create the index node and push it on the stack */
10626
10627                 op1 = gtNewIndexRef(lclTyp, op1, op2);
10628
10629                 ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT);
10630
10631                 if ((opcode == CEE_LDELEMA) || ldstruct ||
10632                     (ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd)))
10633                 {
10634                     assert(ldelemClsHnd != DUMMY_INIT(NULL));
10635
10636                     // remember the element size
10637                     if (lclTyp == TYP_REF)
10638                     {
10639                         op1->gtIndex.gtIndElemSize = sizeof(void*);
10640                     }
10641                     else
10642                     {
10643                         // If ldelemClsHnd is precisely a primitive type, use that; otherwise, preserve the struct type.
10644                         if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF)
10645                         {
10646                             op1->gtIndex.gtStructElemClass = ldelemClsHnd;
10647                         }
10648                         assert(lclTyp != TYP_STRUCT || op1->gtIndex.gtStructElemClass != nullptr);
10649                         if (lclTyp == TYP_STRUCT)
10650                         {
10651                             size                       = info.compCompHnd->getClassSize(ldelemClsHnd);
10652                             op1->gtIndex.gtIndElemSize = size;
10653                             op1->gtType                = lclTyp;
10654                         }
10655                     }
10656
10657                     if ((opcode == CEE_LDELEMA) || ldstruct)
10658                     {
10659                         // wrap it in a &
10660                         lclTyp = TYP_BYREF;
10661
10662                         op1 = gtNewOperNode(GT_ADDR, lclTyp, op1);
10663                     }
10664                     else
10665                     {
10666                         assert(lclTyp != TYP_STRUCT);
10667                     }
10668                 }
10669
10670                 if (ldstruct)
10671                 {
10672                     // Create an OBJ for the result
10673                     op1 = gtNewObjNode(ldelemClsHnd, op1);
10674                     op1->gtFlags |= GTF_EXCEPT;
10675                 }
10676                 impPushOnStack(op1, tiRetVal);
10677                 break;
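                // For example, "ldelem.i4" on an int[] becomes a GT_INDEX node typed TYP_INT,
                // while "ldelem" of a struct type wraps the element address in an OBJ node so
                // the struct value can be loaded from it.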
10678
10679             // stelem for reference and value types
10680             case CEE_STELEM:
10681
10682                 assertImp(sz == sizeof(unsigned));
10683
10684                 _impResolveToken(CORINFO_TOKENKIND_Class);
10685
10686                 JITDUMP(" %08X", resolvedToken.token);
10687
10688                 stelemClsHnd = resolvedToken.hClass;
10689
10690                 if (tiVerificationNeeded)
10691                 {
10692                     typeInfo tiArray = impStackTop(2).seTypeInfo;
10693                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
10694                     typeInfo tiValue = impStackTop().seTypeInfo;
10695
10696                     // As per ECMA, the 'index' specified can be either int32 or native int.
10697                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10698                     typeInfo arrayElem = verMakeTypeInfo(stelemClsHnd);
10699
10700                     Verify(tiArray.IsNullObjRef() || tiCompatibleWith(arrayElem, verGetArrayElemType(tiArray), false),
10701                            "type operand incompatible with array element type");
10702                     arrayElem.NormaliseForStack();
10703                     Verify(tiCompatibleWith(tiValue, arrayElem, true), "value incompatible with type operand");
10704                 }
10705
10706                 // If it's a reference type just behave as though it's a stelem.ref instruction
10707                 if (!eeIsValueClass(stelemClsHnd))
10708                 {
10709                     goto STELEM_REF_POST_VERIFY;
10710                 }
10711
10712                 // Otherwise extract the type
10713                 {
10714                     CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd);
10715                     lclTyp             = JITtype2varType(jitTyp);
10716                     goto ARR_ST_POST_VERIFY;
10717                 }
10718
10719             case CEE_STELEM_REF:
10720
10721                 if (tiVerificationNeeded)
10722                 {
10723                     typeInfo tiArray = impStackTop(2).seTypeInfo;
10724                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
10725                     typeInfo tiValue = impStackTop().seTypeInfo;
10726
10727                     // As per ECMA, the 'index' specified can be either int32 or native int.
10728                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10729                     Verify(tiValue.IsObjRef(), "bad value");
10730
10731                     // We only check that it is an object reference; the helper does additional checks.
10732                     Verify(tiArray.IsNullObjRef() || verGetArrayElemType(tiArray).IsType(TI_REF), "bad array");
10733                 }
10734
10735                 arrayNodeTo      = impStackTop(2).val;
10736                 arrayNodeToIndex = impStackTop(1).val;
10737                 arrayNodeFrom    = impStackTop().val;
10738
10739                 //
10740                 // Note that it is not legal to optimize away CORINFO_HELP_ARRADDR_ST in a
10741                 // lot of cases because of covariance, i.e. foo[] can be cast to object[].
10742                 //
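                      // Illustrative example (not from this source): array covariance allows
                      //     object[] arr = new string[1];
                      //     arr[0] = new object();   // must throw ArrayTypeMismatchException
                      // so the helper has to perform the element-type check at run time.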
10743
10744                 // Check for assignment to the same array, i.e. arrLcl[i] = arrLcl[j]
10745                 // This does not need CORINFO_HELP_ARRADDR_ST
10746
10747                 if (arrayNodeFrom->OperGet() == GT_INDEX && arrayNodeFrom->gtOp.gtOp1->gtOper == GT_LCL_VAR &&
10748                     arrayNodeTo->gtOper == GT_LCL_VAR &&
10749                     arrayNodeTo->gtLclVarCommon.gtLclNum == arrayNodeFrom->gtOp.gtOp1->gtLclVarCommon.gtLclNum &&
10750                     !lvaTable[arrayNodeTo->gtLclVarCommon.gtLclNum].lvAddrExposed)
10751                 {
10752                     lclTyp = TYP_REF;
10753                     goto ARR_ST_POST_VERIFY;
10754                 }
10755
10756                 // Check for assignment of NULL. This does not need CORINFO_HELP_ARRADDR_ST
10757
10758                 if (arrayNodeFrom->OperGet() == GT_CNS_INT)
10759                 {
10760                     assert(arrayNodeFrom->gtType == TYP_REF && arrayNodeFrom->gtIntCon.gtIconVal == 0);
10761
10762                     lclTyp = TYP_REF;
10763                     goto ARR_ST_POST_VERIFY;
10764                 }
10765
10766             STELEM_REF_POST_VERIFY:
10767
10768                 /* Call a helper function to do the assignment */
10769                 op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, 0, impPopList(3, &flags, nullptr));
10770
10771                 goto SPILL_APPEND;
10772
10773             case CEE_STELEM_I1:
10774                 lclTyp = TYP_BYTE;
10775                 goto ARR_ST;
10776             case CEE_STELEM_I2:
10777                 lclTyp = TYP_SHORT;
10778                 goto ARR_ST;
10779             case CEE_STELEM_I:
10780                 lclTyp = TYP_I_IMPL;
10781                 goto ARR_ST;
10782             case CEE_STELEM_I4:
10783                 lclTyp = TYP_INT;
10784                 goto ARR_ST;
10785             case CEE_STELEM_I8:
10786                 lclTyp = TYP_LONG;
10787                 goto ARR_ST;
10788             case CEE_STELEM_R4:
10789                 lclTyp = TYP_FLOAT;
10790                 goto ARR_ST;
10791             case CEE_STELEM_R8:
10792                 lclTyp = TYP_DOUBLE;
10793                 goto ARR_ST;
10794
10795             ARR_ST:
10796
10797                 if (tiVerificationNeeded)
10798                 {
10799                     typeInfo tiArray = impStackTop(2).seTypeInfo;
10800                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
10801                     typeInfo tiValue = impStackTop().seTypeInfo;
10802
10803                     // As per ECMA, the 'index' specified can be either int32 or native int.
10804                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10805                     typeInfo arrayElem = typeInfo(lclTyp);
10806 #ifdef _TARGET_64BIT_
10807                     if (opcode == CEE_STELEM_I)
10808                     {
10809                         arrayElem = typeInfo::nativeInt();
10810                     }
10811 #endif // _TARGET_64BIT_
10812                     Verify(tiArray.IsNullObjRef() || typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElem),
10813                            "bad array");
10814
10815                     Verify(tiCompatibleWith(NormaliseForStack(tiValue), arrayElem.NormaliseForStack(), true),
10816                            "bad value");
10817                 }
10818
10819             ARR_ST_POST_VERIFY:
10820                 /* The strict order of evaluation is LHS-operands, RHS-operands,
10821                    range-check, and then assignment. However, codegen currently
10822                    does the range-check before evaluating the RHS-operands. So to
10823                    maintain strict ordering, we spill the stack. */
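                      // For example, if the value being stored is produced by a call that may throw,
                      // that call must execute (and fault) before the array range check; spilling
                      // moves such side effects into their own statements ahead of the indexed store.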
10824
10825                 if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT)
10826                 {
10827                     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
10828                                                    "Strict ordering of exceptions for Array store"));
10829                 }
10830
10831                 /* Pull the new value from the stack */
10832                 op2 = impPopStack().val;
10833
10834                 /* Pull the index value */
10835                 op1 = impPopStack().val;
10836
10837                 /* Pull the array address */
10838                 op3 = impPopStack().val;
10839
10840                 assertImp(op3->gtType == TYP_REF);
10841                 if (op2->IsVarAddr())
10842                 {
10843                     op2->gtType = TYP_I_IMPL;
10844                 }
10845
10846                 op3 = impCheckForNullPointer(op3);
10847
10848                 // Mark the block as containing an index expression
10849
10850                 if (op3->gtOper == GT_LCL_VAR)
10851                 {
10852                     if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD)
10853                     {
10854                         block->bbFlags |= BBF_HAS_IDX_LEN;
10855                         optMethodFlags |= OMF_HAS_ARRAYREF;
10856                     }
10857                 }
10858
10859                 /* Create the index node */
10860
10861                 op1 = gtNewIndexRef(lclTyp, op3, op1);
10862
10863                 /* Create the assignment node and append it */
10864
10865                 if (lclTyp == TYP_STRUCT)
10866                 {
10867                     assert(stelemClsHnd != DUMMY_INIT(NULL));
10868
10869                     op1->gtIndex.gtStructElemClass = stelemClsHnd;
10870                     op1->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(stelemClsHnd);
10871                 }
10872                 if (varTypeIsStruct(op1))
10873                 {
10874                     op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL);
10875                 }
10876                 else
10877                 {
10878                     op2 = impImplicitR4orR8Cast(op2, op1->TypeGet());
10879                     op1 = gtNewAssignNode(op1, op2);
10880                 }
10881
10882                 /* Mark the expression as containing an assignment */
10883
10884                 op1->gtFlags |= GTF_ASG;
10885
10886                 goto SPILL_APPEND;
10887
10888             case CEE_ADD:
10889                 oper = GT_ADD;
10890                 goto MATH_OP2;
10891
10892             case CEE_ADD_OVF:
10893                 uns = false;
10894                 goto ADD_OVF;
10895             case CEE_ADD_OVF_UN:
10896                 uns = true;
10897                 goto ADD_OVF;
10898
10899             ADD_OVF:
10900                 ovfl     = true;
10901                 callNode = false;
10902                 oper     = GT_ADD;
10903                 goto MATH_OP2_FLAGS;
10904
10905             case CEE_SUB:
10906                 oper = GT_SUB;
10907                 goto MATH_OP2;
10908
10909             case CEE_SUB_OVF:
10910                 uns = false;
10911                 goto SUB_OVF;
10912             case CEE_SUB_OVF_UN:
10913                 uns = true;
10914                 goto SUB_OVF;
10915
10916             SUB_OVF:
10917                 ovfl     = true;
10918                 callNode = false;
10919                 oper     = GT_SUB;
10920                 goto MATH_OP2_FLAGS;
10921
10922             case CEE_MUL:
10923                 oper = GT_MUL;
10924                 goto MATH_MAYBE_CALL_NO_OVF;
10925
10926             case CEE_MUL_OVF:
10927                 uns = false;
10928                 goto MUL_OVF;
10929             case CEE_MUL_OVF_UN:
10930                 uns = true;
10931                 goto MUL_OVF;
10932
10933             MUL_OVF:
10934                 ovfl = true;
10935                 oper = GT_MUL;
10936                 goto MATH_MAYBE_CALL_OVF;
10937
10938             // Other binary math operations
10939
10940             case CEE_DIV:
10941                 oper = GT_DIV;
10942                 goto MATH_MAYBE_CALL_NO_OVF;
10943
10944             case CEE_DIV_UN:
10945                 oper = GT_UDIV;
10946                 goto MATH_MAYBE_CALL_NO_OVF;
10947
10948             case CEE_REM:
10949                 oper = GT_MOD;
10950                 goto MATH_MAYBE_CALL_NO_OVF;
10951
10952             case CEE_REM_UN:
10953                 oper = GT_UMOD;
10954                 goto MATH_MAYBE_CALL_NO_OVF;
10955
10956             MATH_MAYBE_CALL_NO_OVF:
10957                 ovfl = false;
10958             MATH_MAYBE_CALL_OVF:
10959                 // Morpher has some complex logic about when to turn different
10960                 // typed nodes on different platforms into helper calls. We
10961                 // need to either duplicate that logic here, or just
10962                 // pessimistically make all the nodes large enough to become
10963                 // call nodes.  Since call nodes aren't that much larger and
10964                 // these opcodes are infrequent enough I chose the latter.
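                      // For example, a 64-bit divide or remainder on a 32-bit target, or a
                      // floating-point remainder, is later morphed into a JIT helper call, so the
                      // node created here may have to be rewritten as a GT_CALL.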
10965                 callNode = true;
10966                 goto MATH_OP2_FLAGS;
10967
10968             case CEE_AND:
10969                 oper = GT_AND;
10970                 goto MATH_OP2;
10971             case CEE_OR:
10972                 oper = GT_OR;
10973                 goto MATH_OP2;
10974             case CEE_XOR:
10975                 oper = GT_XOR;
10976                 goto MATH_OP2;
10977
10978             MATH_OP2: // For default values of 'ovfl' and 'callNode'
10979
10980                 ovfl     = false;
10981                 callNode = false;
10982
10983             MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set
10984
10985                 /* Pull two values and push back the result */
10986
10987                 if (tiVerificationNeeded)
10988                 {
10989                     const typeInfo& tiOp1 = impStackTop(1).seTypeInfo;
10990                     const typeInfo& tiOp2 = impStackTop().seTypeInfo;
10991
10992                     Verify(tiCompatibleWith(tiOp1, tiOp2, true), "different arg type");
10993                     if (oper == GT_ADD || oper == GT_DIV || oper == GT_SUB || oper == GT_MUL || oper == GT_MOD)
10994                     {
10995                         Verify(tiOp1.IsNumberType(), "not number");
10996                     }
10997                     else
10998                     {
10999                         Verify(tiOp1.IsIntegerType(), "not integer");
11000                     }
11001
11002                     Verify(!ovfl || tiOp1.IsIntegerType(), "not integer");
11003
11004                     tiRetVal = tiOp1;
11005
11006 #ifdef _TARGET_64BIT_
11007                     if (tiOp2.IsNativeIntType())
11008                     {
11009                         tiRetVal = tiOp2;
11010                     }
11011 #endif // _TARGET_64BIT_
11012                 }
11013
11014                 op2 = impPopStack().val;
11015                 op1 = impPopStack().val;
11016
11017 #if !CPU_HAS_FP_SUPPORT
11018                 if (varTypeIsFloating(op1->gtType))
11019                 {
11020                     callNode = true;
11021                 }
11022 #endif
11023                 /* Can't do arithmetic with references */
11024                 assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF);
11025
11026                 // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change it if it's a true byref, only
11027                 // if it is on the stack)
11028                 impBashVarAddrsToI(op1, op2);
11029
11030                 type = impGetByRefResultType(oper, uns, &op1, &op2);
11031
11032                 assert(!ovfl || !varTypeIsFloating(op1->gtType));
11033
11034                 /* Special case: "int+0", "int-0", "int*1", "int/1" */
11035
11036                 if (op2->gtOper == GT_CNS_INT)
11037                 {
11038                     if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) ||
11039                         (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV)))
11040
11041                     {
11042                         impPushOnStack(op1, tiRetVal);
11043                         break;
11044                     }
11045                 }
11046
11047 #if !FEATURE_X87_DOUBLES
11048                 // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand
11049                 //
11050                 if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))
11051                 {
11052                     if (op1->TypeGet() != type)
11053                     {
11054                         // We insert a cast of op1 to 'type'
11055                         op1 = gtNewCastNode(type, op1, type);
11056                     }
11057                     if (op2->TypeGet() != type)
11058                     {
11059                         // We insert a cast of op2 to 'type'
11060                         op2 = gtNewCastNode(type, op2, type);
11061                     }
11062                 }
11063 #endif // !FEATURE_X87_DOUBLES
11064
11065 #if SMALL_TREE_NODES
11066                 if (callNode)
11067                 {
11068                     /* These operators can later be transformed into 'GT_CALL' */
11069
11070                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]);
11071 #ifndef _TARGET_ARM_
11072                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]);
11073                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]);
11074                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]);
11075                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]);
11076 #endif
11077                     // It's tempting to use LargeOpOpcode() here, but this logic is *not* saying
11078                     // that we'll need to transform into a general large node, but rather specifically
11079                     // to a call: by doing it this way, things keep working if there are multiple sizes,
11080                     // and a CALL is no longer the largest.
11081                     // That said, as of now it *is* a large node, so we'll do this with an assert rather
11082                     // than an "if".
11083                     assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE);
11084                     op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true));
11085                 }
11086                 else
11087 #endif // SMALL_TREE_NODES
11088                 {
11089                     op1 = gtNewOperNode(oper, type, op1, op2);
11090                 }
11091
11092                 /* Special case: integer/long division may throw an exception */
11093
11094                 if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow())
11095                 {
11096                     op1->gtFlags |= GTF_EXCEPT;
11097                 }
11098
11099                 if (ovfl)
11100                 {
11101                     assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL);
11102                     if (ovflType != TYP_UNKNOWN)
11103                     {
11104                         op1->gtType = ovflType;
11105                     }
11106                     op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW);
11107                     if (uns)
11108                     {
11109                         op1->gtFlags |= GTF_UNSIGNED;
11110                     }
11111                 }
11112
11113                 impPushOnStack(op1, tiRetVal);
11114                 break;
11115
11116             case CEE_SHL:
11117                 oper = GT_LSH;
11118                 goto CEE_SH_OP2;
11119
11120             case CEE_SHR:
11121                 oper = GT_RSH;
11122                 goto CEE_SH_OP2;
11123             case CEE_SHR_UN:
11124                 oper = GT_RSZ;
11125                 goto CEE_SH_OP2;
11126
11127             CEE_SH_OP2:
11128                 if (tiVerificationNeeded)
11129                 {
11130                     const typeInfo& tiVal   = impStackTop(1).seTypeInfo;
11131                     const typeInfo& tiShift = impStackTop(0).seTypeInfo;
11132                     Verify(tiVal.IsIntegerType() && tiShift.IsType(TI_INT), "Bad shift args");
11133                     tiRetVal = tiVal;
11134                 }
11135                 op2 = impPopStack().val;
11136                 op1 = impPopStack().val; // operand to be shifted
11137                 impBashVarAddrsToI(op1, op2);
11138
11139                 type = genActualType(op1->TypeGet());
11140                 op1  = gtNewOperNode(oper, type, op1, op2);
11141
11142                 impPushOnStack(op1, tiRetVal);
11143                 break;
11144
11145             case CEE_NOT:
11146                 if (tiVerificationNeeded)
11147                 {
11148                     tiRetVal = impStackTop().seTypeInfo;
11149                     Verify(tiRetVal.IsIntegerType(), "bad int value");
11150                 }
11151
11152                 op1 = impPopStack().val;
11153                 impBashVarAddrsToI(op1, nullptr);
11154                 type = genActualType(op1->TypeGet());
11155                 impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal);
11156                 break;
11157
11158             case CEE_CKFINITE:
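                      // ckfinite leaves the value on the stack but throws ArithmeticException if it
                      // is NaN or an infinity, hence the GTF_EXCEPT flag on the GT_CKFINITE node.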
11159                 if (tiVerificationNeeded)
11160                 {
11161                     tiRetVal = impStackTop().seTypeInfo;
11162                     Verify(tiRetVal.IsType(TI_DOUBLE), "bad R value");
11163                 }
11164                 op1  = impPopStack().val;
11165                 type = op1->TypeGet();
11166                 op1  = gtNewOperNode(GT_CKFINITE, type, op1);
11167                 op1->gtFlags |= GTF_EXCEPT;
11168
11169                 impPushOnStack(op1, tiRetVal);
11170                 break;
11171
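                  // leave exits one or more protected regions; impImportLeave (below) arranges for
                  // any enclosing finally handlers to run before control reaches the target offset.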
11172             case CEE_LEAVE:
11173
11174                 val     = getI4LittleEndian(codeAddr); // jump distance
11175                 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val);
11176                 goto LEAVE;
11177
11178             case CEE_LEAVE_S:
11179                 val     = getI1LittleEndian(codeAddr); // jump distance
11180                 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val);
11181
11182             LEAVE:
11183
11184                 if (compIsForInlining())
11185                 {
11186                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE);
11187                     return;
11188                 }
11189
11190                 JITDUMP(" %04X", jmpAddr);
11191                 if (block->bbJumpKind != BBJ_LEAVE)
11192                 {
11193                     impResetLeaveBlock(block, jmpAddr);
11194                 }
11195
11196                 assert(jmpAddr == block->bbJumpDest->bbCodeOffs);
11197                 impImportLeave(block);
11198                 impNoteBranchOffs();
11199
11200                 break;
11201
11202             case CEE_BR:
11203             case CEE_BR_S:
11204                 jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
11205
11206                 if (compIsForInlining() && jmpDist == 0)
11207                 {
11208                     break; /* NOP */
11209                 }
11210
11211                 impNoteBranchOffs();
11212                 break;
11213
11214             case CEE_BRTRUE:
11215             case CEE_BRTRUE_S:
11216             case CEE_BRFALSE:
11217             case CEE_BRFALSE_S:
11218
11219                 /* Pop the comparand (now there's a neat term) from the stack */
11220                 if (tiVerificationNeeded)
11221                 {
11222                     typeInfo& tiVal = impStackTop().seTypeInfo;
11223                     Verify(tiVal.IsObjRef() || tiVal.IsByRef() || tiVal.IsIntegerType() || tiVal.IsMethod(),
11224                            "bad value");
11225                 }
11226
11227                 op1  = impPopStack().val;
11228                 type = op1->TypeGet();
11229
11230                 // brfalse and brtrue are only allowed on I4, refs, and byrefs.
11231                 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11232                 {
11233                     block->bbJumpKind = BBJ_NONE;
11234
11235                     if (op1->gtFlags & GTF_GLOB_EFFECT)
11236                     {
11237                         op1 = gtUnusedValNode(op1);
11238                         goto SPILL_APPEND;
11239                     }
11240                     else
11241                     {
11242                         break;
11243                     }
11244                 }
11245
11246                 if (op1->OperIsCompare())
11247                 {
11248                     if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S)
11249                     {
11250                         // Flip the sense of the compare
11251
11252                         op1 = gtReverseCond(op1);
11253                     }
11254                 }
11255                 else
11256                 {
11257                     /* We'll compare against an equally-sized integer 0 */
11258                     /* For small types, we always compare against int   */
11259                     op2 = gtNewZeroConNode(genActualType(op1->gtType));
11260
11261                     /* Create the comparison operator and try to fold it */
11262
11263                     oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? GT_NE : GT_EQ;
11264                     op1  = gtNewOperNode(oper, TYP_INT, op1, op2);
11265                 }
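                      // At this point op1 is a relational node: e.g. 'brfalse (a < b)' has been
                      // rewritten as the reversed compare '(a >= b)', and 'brtrue x' on a
                      // non-compare value as '(x != 0)'.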
11266
11267             // fall through
11268
11269             COND_JUMP:
11270
11271                 /* Fold comparison if we can */
11272
11273                 op1 = gtFoldExpr(op1);
11274
11275                 /* Try to fold the really simple cases like 'iconst *, ifne/ifeq' */
11276                 /* Don't make any blocks unreachable in import only mode */
11277
11278                 if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly())
11279                 {
11280                     /* gtFoldExpr() should prevent this as we don't want to make any blocks
11281                        unreachable under compDbgCode */
11282                     assert(!opts.compDbgCode);
11283
11284                     BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->gtIntCon.gtIconVal ? BBJ_ALWAYS : BBJ_NONE);
11285                     assertImp((block->bbJumpKind == BBJ_COND)            // normal case
11286                               || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the
11287                                                                          // block for the second time
11288
11289                     block->bbJumpKind = foldedJumpKind;
11290 #ifdef DEBUG
11291                     if (verbose)
11292                     {
11293                         if (op1->gtIntCon.gtIconVal)
11294                         {
11295                             printf("\nThe conditional jump becomes an unconditional jump to BB%02u\n",
11296                                    block->bbJumpDest->bbNum);
11297                         }
11298                         else
11299                         {
11300                             printf("\nThe block falls through into the next BB%02u\n", block->bbNext->bbNum);
11301                         }
11302                     }
11303 #endif
11304                     break;
11305                 }
11306
11307                 op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1);
11308
11309                 /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt'
11310                    in impImportBlock(block). For correct line numbers, spill stack. */
11311
11312                 if (opts.compDbgCode && impCurStmtOffs != BAD_IL_OFFSET)
11313                 {
11314                     impSpillStackEnsure(true);
11315                 }
11316
11317                 goto SPILL_APPEND;
11318
11319             case CEE_CEQ:
11320                 oper = GT_EQ;
11321                 uns  = false;
11322                 goto CMP_2_OPs;
11323             case CEE_CGT_UN:
11324                 oper = GT_GT;
11325                 uns  = true;
11326                 goto CMP_2_OPs;
11327             case CEE_CGT:
11328                 oper = GT_GT;
11329                 uns  = false;
11330                 goto CMP_2_OPs;
11331             case CEE_CLT_UN:
11332                 oper = GT_LT;
11333                 uns  = true;
11334                 goto CMP_2_OPs;
11335             case CEE_CLT:
11336                 oper = GT_LT;
11337                 uns  = false;
11338                 goto CMP_2_OPs;
11339
11340             CMP_2_OPs:
11341                 if (tiVerificationNeeded)
11342                 {
11343                     verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
11344                     tiRetVal = typeInfo(TI_INT);
11345                 }
11346
11347                 op2 = impPopStack().val;
11348                 op1 = impPopStack().val;
11349
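                      // If one operand is native-int sized and the other is a 32-bit int, widen the
                      // 32-bit operand to TYP_I_IMPL so both sides of the compare have the same
                      // actual type (treating it as unsigned for the *.un opcodes).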
11350 #ifdef _TARGET_64BIT_
11351                 if (varTypeIsI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_INT))
11352                 {
11353                     op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11354                 }
11355                 else if (varTypeIsI(op2->TypeGet()) && (genActualType(op1->TypeGet()) == TYP_INT))
11356                 {
11357                     op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11358                 }
11359 #endif // _TARGET_64BIT_
11360
11361                 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
11362                           varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
11363                           varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
11364
11365                 /* Create the comparison node */
11366
11367                 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
11368
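                      // The *.un compares are unordered/unsigned: for floating-point operands they
                      // yield true when either operand is NaN, and for integer operands they
                      // compare as unsigned values.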
11369                 /* TODO: setting both flags when only one is appropriate */
11370                 if (opcode == CEE_CGT_UN || opcode == CEE_CLT_UN)
11371                 {
11372                     op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED;
11373                 }
11374
11375                 impPushOnStack(op1, tiRetVal);
11376                 break;
11377
11378             case CEE_BEQ_S:
11379             case CEE_BEQ:
11380                 oper = GT_EQ;
11381                 goto CMP_2_OPs_AND_BR;
11382
11383             case CEE_BGE_S:
11384             case CEE_BGE:
11385                 oper = GT_GE;
11386                 goto CMP_2_OPs_AND_BR;
11387
11388             case CEE_BGE_UN_S:
11389             case CEE_BGE_UN:
11390                 oper = GT_GE;
11391                 goto CMP_2_OPs_AND_BR_UN;
11392
11393             case CEE_BGT_S:
11394             case CEE_BGT:
11395                 oper = GT_GT;
11396                 goto CMP_2_OPs_AND_BR;
11397
11398             case CEE_BGT_UN_S:
11399             case CEE_BGT_UN:
11400                 oper = GT_GT;
11401                 goto CMP_2_OPs_AND_BR_UN;
11402
11403             case CEE_BLE_S:
11404             case CEE_BLE:
11405                 oper = GT_LE;
11406                 goto CMP_2_OPs_AND_BR;
11407
11408             case CEE_BLE_UN_S:
11409             case CEE_BLE_UN:
11410                 oper = GT_LE;
11411                 goto CMP_2_OPs_AND_BR_UN;
11412
11413             case CEE_BLT_S:
11414             case CEE_BLT:
11415                 oper = GT_LT;
11416                 goto CMP_2_OPs_AND_BR;
11417
11418             case CEE_BLT_UN_S:
11419             case CEE_BLT_UN:
11420                 oper = GT_LT;
11421                 goto CMP_2_OPs_AND_BR_UN;
11422
11423             case CEE_BNE_UN_S:
11424             case CEE_BNE_UN:
11425                 oper = GT_NE;
11426                 goto CMP_2_OPs_AND_BR_UN;
11427
11428             CMP_2_OPs_AND_BR_UN:
11429                 uns       = true;
11430                 unordered = true;
11431                 goto CMP_2_OPs_AND_BR_ALL;
11432             CMP_2_OPs_AND_BR:
11433                 uns       = false;
11434                 unordered = false;
11435                 goto CMP_2_OPs_AND_BR_ALL;
11436             CMP_2_OPs_AND_BR_ALL:
11437
11438                 if (tiVerificationNeeded)
11439                 {
11440                     verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
11441                 }
11442
11443                 /* Pull two values */
11444                 op2 = impPopStack().val;
11445                 op1 = impPopStack().val;
11446
11447 #ifdef _TARGET_64BIT_
11448                 if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT))
11449                 {
11450                     op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11451                 }
11452                 else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT))
11453                 {
11454                     op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11455                 }
11456 #endif // _TARGET_64BIT_
11457
11458                 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
11459                           varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
11460                           varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
11461
11462                 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11463                 {
11464                     block->bbJumpKind = BBJ_NONE;
11465
11466                     if (op1->gtFlags & GTF_GLOB_EFFECT)
11467                     {
11468                         impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11469                                                        "Branch to next Optimization, op1 side effect"));
11470                         impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
11471                     }
11472                     if (op2->gtFlags & GTF_GLOB_EFFECT)
11473                     {
11474                         impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11475                                                        "Branch to next Optimization, op2 side effect"));
11476                         impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
11477                     }
11478
11479 #ifdef DEBUG
11480                     if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT)
11481                     {
11482                         impNoteLastILoffs();
11483                     }
11484 #endif
11485                     break;
11486                 }
11487 #if !FEATURE_X87_DOUBLES
11488                 // We can generate a compare of differently sized floating point op1 and op2;
11489                 // if so, we insert a cast
11490                 //
11491                 if (varTypeIsFloating(op1->TypeGet()))
11492                 {
11493                     if (op1->TypeGet() != op2->TypeGet())
11494                     {
11495                         assert(varTypeIsFloating(op2->TypeGet()));
11496
11497                         // say op1=double, op2=float. To avoid loss of precision
11498                         // while comparing, op2 is converted to double and double
11499                         // comparison is done.
11500                         if (op1->TypeGet() == TYP_DOUBLE)
11501                         {
11502                             // We insert a cast of op2 to TYP_DOUBLE
11503                             op2 = gtNewCastNode(TYP_DOUBLE, op2, TYP_DOUBLE);
11504                         }
11505                         else if (op2->TypeGet() == TYP_DOUBLE)
11506                         {
11507                             // We insert a cast of op1 to TYP_DOUBLE
11508                             op1 = gtNewCastNode(TYP_DOUBLE, op1, TYP_DOUBLE);
11509                         }
11510                     }
11511                 }
11512 #endif // !FEATURE_X87_DOUBLES
11513
11514                 /* Create and append the operator */
11515
11516                 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
11517
11518                 if (uns)
11519                 {
11520                     op1->gtFlags |= GTF_UNSIGNED;
11521                 }
11522
11523                 if (unordered)
11524                 {
11525                     op1->gtFlags |= GTF_RELOP_NAN_UN;
11526                 }
11527
11528                 goto COND_JUMP;
11529
11530             case CEE_SWITCH:
11531                 assert(!compIsForInlining());
11532
11533                 if (tiVerificationNeeded)
11534                 {
11535                     Verify(impStackTop().seTypeInfo.IsType(TI_INT), "Bad switch val");
11536                 }
11537                 /* Pop the switch value off the stack */
11538                 op1 = impPopStack().val;
11539                 assertImp(genActualTypeIsIntOrI(op1->TypeGet()));
11540
11541 #ifdef _TARGET_64BIT_
11542                 // Widen 'op1' on 64-bit targets
11543                 if (op1->TypeGet() != TYP_I_IMPL)
11544                 {
11545                     if (op1->OperGet() == GT_CNS_INT)
11546                     {
11547                         op1->gtType = TYP_I_IMPL;
11548                     }
11549                     else
11550                     {
11551                         op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
11552                     }
11553                 }
11554 #endif // _TARGET_64BIT_
11555                 assert(genActualType(op1->TypeGet()) == TYP_I_IMPL);
11556
11557                 /* We can create a switch node */
11558
11559                 op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1);
11560
11561                 val = (int)getU4LittleEndian(codeAddr);
11562                 codeAddr += 4 + val * 4; // skip over the switch-table
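                      // The switch opcode encodes a 4-byte case count followed by 'val' 4-byte
                      // relative jump offsets; the targets were already recorded on the block when
                      // basic blocks were formed, so the importer just skips past the table.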
11563
11564                 goto SPILL_APPEND;
11565
11566             /************************** Casting OPCODES ***************************/
11567
11568             case CEE_CONV_OVF_I1:
11569                 lclTyp = TYP_BYTE;
11570                 goto CONV_OVF;
11571             case CEE_CONV_OVF_I2:
11572                 lclTyp = TYP_SHORT;
11573                 goto CONV_OVF;
11574             case CEE_CONV_OVF_I:
11575                 lclTyp = TYP_I_IMPL;
11576                 goto CONV_OVF;
11577             case CEE_CONV_OVF_I4:
11578                 lclTyp = TYP_INT;
11579                 goto CONV_OVF;
11580             case CEE_CONV_OVF_I8:
11581                 lclTyp = TYP_LONG;
11582                 goto CONV_OVF;
11583
11584             case CEE_CONV_OVF_U1:
11585                 lclTyp = TYP_UBYTE;
11586                 goto CONV_OVF;
11587             case CEE_CONV_OVF_U2:
11588                 lclTyp = TYP_CHAR;
11589                 goto CONV_OVF;
11590             case CEE_CONV_OVF_U:
11591                 lclTyp = TYP_U_IMPL;
11592                 goto CONV_OVF;
11593             case CEE_CONV_OVF_U4:
11594                 lclTyp = TYP_UINT;
11595                 goto CONV_OVF;
11596             case CEE_CONV_OVF_U8:
11597                 lclTyp = TYP_ULONG;
11598                 goto CONV_OVF;
11599
11600             case CEE_CONV_OVF_I1_UN:
11601                 lclTyp = TYP_BYTE;
11602                 goto CONV_OVF_UN;
11603             case CEE_CONV_OVF_I2_UN:
11604                 lclTyp = TYP_SHORT;
11605                 goto CONV_OVF_UN;
11606             case CEE_CONV_OVF_I_UN:
11607                 lclTyp = TYP_I_IMPL;
11608                 goto CONV_OVF_UN;
11609             case CEE_CONV_OVF_I4_UN:
11610                 lclTyp = TYP_INT;
11611                 goto CONV_OVF_UN;
11612             case CEE_CONV_OVF_I8_UN:
11613                 lclTyp = TYP_LONG;
11614                 goto CONV_OVF_UN;
11615
11616             case CEE_CONV_OVF_U1_UN:
11617                 lclTyp = TYP_UBYTE;
11618                 goto CONV_OVF_UN;
11619             case CEE_CONV_OVF_U2_UN:
11620                 lclTyp = TYP_CHAR;
11621                 goto CONV_OVF_UN;
11622             case CEE_CONV_OVF_U_UN:
11623                 lclTyp = TYP_U_IMPL;
11624                 goto CONV_OVF_UN;
11625             case CEE_CONV_OVF_U4_UN:
11626                 lclTyp = TYP_UINT;
11627                 goto CONV_OVF_UN;
11628             case CEE_CONV_OVF_U8_UN:
11629                 lclTyp = TYP_ULONG;
11630                 goto CONV_OVF_UN;
11631
11632             CONV_OVF_UN:
11633                 uns = true;
11634                 goto CONV_OVF_COMMON;
11635             CONV_OVF:
11636                 uns = false;
11637                 goto CONV_OVF_COMMON;
11638
11639             CONV_OVF_COMMON:
11640                 ovfl = true;
11641                 goto _CONV;
11642
11643             case CEE_CONV_I1:
11644                 lclTyp = TYP_BYTE;
11645                 goto CONV;
11646             case CEE_CONV_I2:
11647                 lclTyp = TYP_SHORT;
11648                 goto CONV;
11649             case CEE_CONV_I:
11650                 lclTyp = TYP_I_IMPL;
11651                 goto CONV;
11652             case CEE_CONV_I4:
11653                 lclTyp = TYP_INT;
11654                 goto CONV;
11655             case CEE_CONV_I8:
11656                 lclTyp = TYP_LONG;
11657                 goto CONV;
11658
11659             case CEE_CONV_U1:
11660                 lclTyp = TYP_UBYTE;
11661                 goto CONV;
11662             case CEE_CONV_U2:
11663                 lclTyp = TYP_CHAR;
11664                 goto CONV;
11665 #if (REGSIZE_BYTES == 8)
11666             case CEE_CONV_U:
11667                 lclTyp = TYP_U_IMPL;
11668                 goto CONV_UN;
11669 #else
11670             case CEE_CONV_U:
11671                 lclTyp = TYP_U_IMPL;
11672                 goto CONV;
11673 #endif
11674             case CEE_CONV_U4:
11675                 lclTyp = TYP_UINT;
11676                 goto CONV;
11677             case CEE_CONV_U8:
11678                 lclTyp = TYP_ULONG;
11679                 goto CONV_UN;
11680
11681             case CEE_CONV_R4:
11682                 lclTyp = TYP_FLOAT;
11683                 goto CONV;
11684             case CEE_CONV_R8:
11685                 lclTyp = TYP_DOUBLE;
11686                 goto CONV;
11687
11688             case CEE_CONV_R_UN:
11689                 lclTyp = TYP_DOUBLE;
11690                 goto CONV_UN;
11691
11692             CONV_UN:
11693                 uns  = true;
11694                 ovfl = false;
11695                 goto _CONV;
11696
11697             CONV:
11698                 uns  = false;
11699                 ovfl = false;
11700                 goto _CONV;
11701
11702             _CONV:
11703                 // just check that we have a number on the stack
11704                 if (tiVerificationNeeded)
11705                 {
11706                     const typeInfo& tiVal = impStackTop().seTypeInfo;
11707                     Verify(tiVal.IsNumberType(), "bad arg");
11708
11709 #ifdef _TARGET_64BIT_
11710                     bool isNative = false;
11711
11712                     switch (opcode)
11713                     {
11714                         case CEE_CONV_OVF_I:
11715                         case CEE_CONV_OVF_I_UN:
11716                         case CEE_CONV_I:
11717                         case CEE_CONV_OVF_U:
11718                         case CEE_CONV_OVF_U_UN:
11719                         case CEE_CONV_U:
11720                             isNative = true;
11721                         default:
11722                             // leave 'isNative' = false;
11723                             break;
11724                     }
11725                     if (isNative)
11726                     {
11727                         tiRetVal = typeInfo::nativeInt();
11728                     }
11729                     else
11730 #endif // _TARGET_64BIT_
11731                     {
11732                         tiRetVal = typeInfo(lclTyp).NormaliseForStack();
11733                     }
11734                 }
11735
11736                 // Only conversions from FLOAT or DOUBLE to an integer type,
11737                 // and conversions from ULONG (or LONG on ARM) to DOUBLE, are morphed into calls
11738
11739                 if (varTypeIsFloating(lclTyp))
11740                 {
11741                     callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl
11742 #ifdef _TARGET_64BIT_
11743                                // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK?
11744                                // TYP_BYREF could be used as TYP_I_IMPL which is long.
11745                                // TODO-CQ: remove this when we lower casts long/ulong --> float/double
11746                                // and generate SSE2 code instead of going through helper calls.
11747                                || (impStackTop().val->TypeGet() == TYP_BYREF)
11748 #endif
11749                         ;
11750                 }
11751                 else
11752                 {
11753                     callNode = varTypeIsFloating(impStackTop().val->TypeGet());
11754                 }
11755
11756                 // At this point uns, ovfl, and callNode are all set
11757
11758                 op1 = impPopStack().val;
11759                 impBashVarAddrsToI(op1);
11760
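                      // Fold away redundant narrowing over an AND mask: e.g. 'conv.i1' applied to
                      // '(x & 0x7F)' is a no-op since the value already fits in a signed byte, and
                      // for '(x & 0xFF)' the mask itself is dropped since the cast subsumes it.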
11761                 if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND)
11762                 {
11763                     op2 = op1->gtOp.gtOp2;
11764
11765                     if (op2->gtOper == GT_CNS_INT)
11766                     {
11767                         ssize_t ival = op2->gtIntCon.gtIconVal;
11768                         ssize_t mask, umask;
11769
11770                         switch (lclTyp)
11771                         {
11772                             case TYP_BYTE:
11773                             case TYP_UBYTE:
11774                                 mask  = 0x00FF;
11775                                 umask = 0x007F;
11776                                 break;
11777                             case TYP_CHAR:
11778                             case TYP_SHORT:
11779                                 mask  = 0xFFFF;
11780                                 umask = 0x7FFF;
11781                                 break;
11782
11783                             default:
11784                                 assert(!"unexpected type");
11785                                 return;
11786                         }
11787
11788                         if (((ival & umask) == ival) || ((ival & mask) == ival && uns))
11789                         {
11790                             /* Toss the cast, it's a waste of time */
11791
11792                             impPushOnStack(op1, tiRetVal);
11793                             break;
11794                         }
11795                         else if (ival == mask)
11796                         {
11797                             /* Toss the masking, it's a waste of time, since
11798                                we sign-extend from the small value anyway */
11799
11800                             op1 = op1->gtOp.gtOp1;
11801                         }
11802                     }
11803                 }
11804
11805                 /*  The 'op2' sub-operand of a cast is the 'real' type number,
11806                     since the result of a cast to one of the 'small' integer
11807                     types is an integer.
11808                  */
11809
11810                 type = genActualType(lclTyp);
11811
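                      // If the cast may later be morphed into a helper call, allocate it as a large
                      // node (gtNewCastNodeL) so it has room to be rewritten as a GT_CALL.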
11812 #if SMALL_TREE_NODES
11813                 if (callNode)
11814                 {
11815                     op1 = gtNewCastNodeL(type, op1, lclTyp);
11816                 }
11817                 else
11818 #endif // SMALL_TREE_NODES
11819                 {
11820                     op1 = gtNewCastNode(type, op1, lclTyp);
11821                 }
11822
11823                 if (ovfl)
11824                 {
11825                     op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT);
11826                 }
11827                 if (uns)
11828                 {
11829                     op1->gtFlags |= GTF_UNSIGNED;
11830                 }
11831                 impPushOnStack(op1, tiRetVal);
11832                 break;
11833
11834             case CEE_NEG:
11835                 if (tiVerificationNeeded)
11836                 {
11837                     tiRetVal = impStackTop().seTypeInfo;
11838                     Verify(tiRetVal.IsNumberType(), "Bad arg");
11839                 }
11840
11841                 op1 = impPopStack().val;
11842                 impBashVarAddrsToI(op1, nullptr);
11843                 impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal);
11844                 break;
11845
11846             case CEE_POP:
11847                 if (tiVerificationNeeded)
11848                 {
11849                     impStackTop(0);
11850                 }
11851
11852                 /* Pull the top value from the stack */
11853
11854                 op1 = impPopStack(clsHnd).val;
11855
11856                 /* Get hold of the type of the value being duplicated */
11857
11858                 lclTyp = genActualType(op1->gtType);
11859
11860                 /* Does the value have any side effects? */
11861
11862                 if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode)
11863                 {
11864                     // Since we are throwing away the value, just normalize
11865                     // it to its address.  This is more efficient.
11866
11867                     if (varTypeIsStruct(op1))
11868                     {
11869 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
11870                         // Non-calls, such as obj or ret_expr, have to go through this.
11871                         // Calls with large struct return value have to go through this.
11872                         // Helper calls with small struct return value also have to go
11873                         // through this since they do not follow Unix calling convention.
11874                         if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd) ||
11875                             op1->AsCall()->gtCallType == CT_HELPER)
11876 #endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
11877                         {
11878                             op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false);
11879                         }
11880                     }
11881
11882                     // If op1 is a non-overflow cast, throw it away since it is useless.
11883                     // Another reason for throwing away the useless cast is in the context of
11884                     // implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)).
11885                     // The cast gets added as part of importing GT_CALL, which gets in the way
11886                     // of fgMorphCall() on the forms of tail call nodes that we assert.
11887                     if ((op1->gtOper == GT_CAST) && !op1->gtOverflow())
11888                     {
11889                         op1 = op1->gtOp.gtOp1;
11890                     }
11891
11892                     // If 'op1' is an expression, create an assignment node.
11893                     // This helps analyses (like CSE) work correctly.
11894
11895                     if (op1->gtOper != GT_CALL)
11896                     {
11897                         op1 = gtUnusedValNode(op1);
11898                     }
11899
11900                     /* Append the value to the tree list */
11901                     goto SPILL_APPEND;
11902                 }
11903
11904                 /* No side effects - just throw the <BEEP> thing away */
11905                 break;
11906
11907             case CEE_DUP:
11908
11909                 if (tiVerificationNeeded)
11910                 {
11911                     // Dup could start the beginning of a delegate creation sequence, so remember that
11912                     delegateCreateStart = codeAddr - 1;
11913                     impStackTop(0);
11914                 }
11915
11916                 // Convert a (dup, stloc) sequence into a (stloc, ldloc) sequence in the following cases:
11917                 // - If this is non-debug code - so that CSE will recognize the two as equal.
11918                 //   This helps eliminate a redundant bounds check in cases such as:
11919                 //       ariba[i+3] += some_value;
11920                 // - If the top of the stack is a non-leaf that may be expensive to clone.
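                      //   For instance, 'dup; stloc.0' becomes 'stloc.0; ldloc.0', so later uses
                      //   refer to the same local rather than to two clones of the original tree.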
11921
11922                 if (codeAddr < codeEndp)
11923                 {
11924                     OPCODE nextOpcode = (OPCODE)getU1LittleEndian(codeAddr);
11925                     if (impIsAnySTLOC(nextOpcode))
11926                     {
11927                         if (!opts.compDbgCode)
11928                         {
11929                             insertLdloc = true;
11930                             break;
11931                         }
11932                         GenTree* stackTop = impStackTop().val;
11933                         if (!stackTop->IsIntegralConst(0) && !stackTop->IsFPZero() && !stackTop->IsLocal())
11934                         {
11935                             insertLdloc = true;
11936                             break;
11937                         }
11938                     }
11939                 }
11940
11941                 /* Pull the top value from the stack */
11942                 op1 = impPopStack(tiRetVal);
11943
11944                 /* Clone the value */
11945                 op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL,
11946                                    nullptr DEBUGARG("DUP instruction"));
11947
11948                 /* Either the tree started with no global effects, or impCloneExpr
11949                    evaluated the tree to a temp and returned two copies of that
11950                    temp. Either way, neither op1 nor op2 should have side effects.
11951                 */
11952                 assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT));
11953
11954                 /* Push the tree/temp back on the stack */
11955                 impPushOnStack(op1, tiRetVal);
11956
11957                 /* Push the copy on the stack */
11958                 impPushOnStack(op2, tiRetVal);
11959
11960                 break;
11961
11962             case CEE_STIND_I1:
11963                 lclTyp = TYP_BYTE;
11964                 goto STIND;
11965             case CEE_STIND_I2:
11966                 lclTyp = TYP_SHORT;
11967                 goto STIND;
11968             case CEE_STIND_I4:
11969                 lclTyp = TYP_INT;
11970                 goto STIND;
11971             case CEE_STIND_I8:
11972                 lclTyp = TYP_LONG;
11973                 goto STIND;
11974             case CEE_STIND_I:
11975                 lclTyp = TYP_I_IMPL;
11976                 goto STIND;
11977             case CEE_STIND_REF:
11978                 lclTyp = TYP_REF;
11979                 goto STIND;
11980             case CEE_STIND_R4:
11981                 lclTyp = TYP_FLOAT;
11982                 goto STIND;
11983             case CEE_STIND_R8:
11984                 lclTyp = TYP_DOUBLE;
11985                 goto STIND;
11986             STIND:
11987
11988                 if (tiVerificationNeeded)
11989                 {
11990                     typeInfo instrType(lclTyp);
11991 #ifdef _TARGET_64BIT_
11992                     if (opcode == CEE_STIND_I)
11993                     {
11994                         instrType = typeInfo::nativeInt();
11995                     }
11996 #endif // _TARGET_64BIT_
11997                     verVerifySTIND(impStackTop(1).seTypeInfo, impStackTop(0).seTypeInfo, instrType);
11998                 }
11999                 else
12000                 {
12001                     compUnsafeCastUsed = true; // Have to go conservative
12002                 }
12003
12004             STIND_POST_VERIFY:
12005
12006                 op2 = impPopStack().val; // value to store
12007                 op1 = impPopStack().val; // address to store to
12008
12009                 // you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF
12010                 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12011
12012                 impBashVarAddrsToI(op1, op2);
12013
12014                 op2 = impImplicitR4orR8Cast(op2, lclTyp);
12015
12016 #ifdef _TARGET_64BIT_
12017                 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
12018                 if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
12019                 {
12020                     op2->gtType = TYP_I_IMPL;
12021                 }
12022                 else
12023                 {
12024                     // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
12025                     //
12026                     if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
12027                     {
12028                         assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12029                         op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
12030                     }
12031                     // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12032                     //
12033                     if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
12034                     {
12035                         assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12036                         op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
12037                     }
12038                 }
12039 #endif // _TARGET_64BIT_
12040
12041                 if (opcode == CEE_STIND_REF)
12042                 {
12043                     // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF
12044                     assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType));
12045                     lclTyp = genActualType(op2->TypeGet());
12046                 }
12047
12048 // Check target type.
12049 #ifdef DEBUG
12050                 if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF)
12051                 {
12052                     if (op2->gtType == TYP_BYREF)
12053                     {
12054                         assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL);
12055                     }
12056                     else if (lclTyp == TYP_BYREF)
12057                     {
12058                         assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType));
12059                     }
12060                 }
12061                 else
12062                 {
12063                     assertImp(genActualType(op2->gtType) == genActualType(lclTyp) ||
12064                               ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) ||
12065                               (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp)));
12066                 }
12067 #endif
12068
12069                 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12070
12071                 // stind could point anywhere, e.g. a boxed class static int
12072                 op1->gtFlags |= GTF_IND_TGTANYWHERE;
12073
12074                 if (prefixFlags & PREFIX_VOLATILE)
12075                 {
12076                     assert(op1->OperGet() == GT_IND);
12077                     op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
12078                     op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12079                     op1->gtFlags |= GTF_IND_VOLATILE;
12080                 }
12081
12082                 if (prefixFlags & PREFIX_UNALIGNED)
12083                 {
12084                     assert(op1->OperGet() == GT_IND);
12085                     op1->gtFlags |= GTF_IND_UNALIGNED;
12086                 }
12087
12088                 op1 = gtNewAssignNode(op1, op2);
12089                 op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
12090
12091                 // Spill side-effects AND global-data-accesses
12092                 if (verCurrentState.esStackDepth > 0)
12093                 {
12094                     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND"));
12095                 }
12096
12097                 goto APPEND;
12098
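                  // Each CEE_LDIND_* case below records the type being loaded in lclTyp and then
                  // jumps to the shared LDIND handler, which loads a value of that type from the
                  // address on top of the stack.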
12099             case CEE_LDIND_I1:
12100                 lclTyp = TYP_BYTE;
12101                 goto LDIND;
12102             case CEE_LDIND_I2:
12103                 lclTyp = TYP_SHORT;
12104                 goto LDIND;
12105             case CEE_LDIND_U4:
12106             case CEE_LDIND_I4:
12107                 lclTyp = TYP_INT;
12108                 goto LDIND;
12109             case CEE_LDIND_I8:
12110                 lclTyp = TYP_LONG;
12111                 goto LDIND;
12112             case CEE_LDIND_REF:
12113                 lclTyp = TYP_REF;
12114                 goto LDIND;
12115             case CEE_LDIND_I:
12116                 lclTyp = TYP_I_IMPL;
12117                 goto LDIND;
12118             case CEE_LDIND_R4:
12119                 lclTyp = TYP_FLOAT;
12120                 goto LDIND;
12121             case CEE_LDIND_R8:
12122                 lclTyp = TYP_DOUBLE;
12123                 goto LDIND;
12124             case CEE_LDIND_U1:
12125                 lclTyp = TYP_UBYTE;
12126                 goto LDIND;
12127             case CEE_LDIND_U2:
12128                 lclTyp = TYP_CHAR;
12129                 goto LDIND;
12130             LDIND:
12131
12132                 if (tiVerificationNeeded)
12133                 {
12134                     typeInfo lclTiType(lclTyp);
12135 #ifdef _TARGET_64BIT_
12136                     if (opcode == CEE_LDIND_I)
12137                     {
12138                         lclTiType = typeInfo::nativeInt();
12139                     }
12140 #endif // _TARGET_64BIT_
12141                     tiRetVal = verVerifyLDIND(impStackTop().seTypeInfo, lclTiType);
12142                     tiRetVal.NormaliseForStack();
12143                 }
12144                 else
12145                 {
12146                     compUnsafeCastUsed = true; // Have to go conservative
12147                 }
12148
12149             LDIND_POST_VERIFY:
12150
12151                 op1 = impPopStack().val; // address to load from
12152                 impBashVarAddrsToI(op1);
12153
12154 #ifdef _TARGET_64BIT_
12155                 // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12156                 //
12157                 if (genActualType(op1->gtType) == TYP_INT)
12158                 {
12159                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12160                     op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
12161                 }
12162 #endif
12163
12164                 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12165
12166                 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12167
12168                 // ldind could point anywhere, e.g. a boxed class static int
12169                 op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
12170
12171                 if (prefixFlags & PREFIX_VOLATILE)
12172                 {
12173                     assert(op1->OperGet() == GT_IND);
12174                     op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
12175                     op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12176                     op1->gtFlags |= GTF_IND_VOLATILE;
12177                 }
12178
12179                 if (prefixFlags & PREFIX_UNALIGNED)
12180                 {
12181                     assert(op1->OperGet() == GT_IND);
12182                     op1->gtFlags |= GTF_IND_UNALIGNED;
12183                 }
12184
12185                 impPushOnStack(op1, tiRetVal);
12186
12187                 break;
12188
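                  // The IL prefix opcodes (unaligned., volatile., constrained., readonly., tail.)
                  // each record their prefix in prefixFlags, validate the opcode that follows, and
                  // loop back through the PREFIX label to decode the instruction they modify.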
12189             case CEE_UNALIGNED:
12190
12191                 assert(sz == 1);
12192                 val = getU1LittleEndian(codeAddr);
12193                 ++codeAddr;
12194                 JITDUMP(" %u", val);
12195                 if ((val != 1) && (val != 2) && (val != 4))
12196                 {
12197                     BADCODE("Alignment unaligned. must be 1, 2, or 4");
12198                 }
12199
12200                 Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. prefixes");
12201                 prefixFlags |= PREFIX_UNALIGNED;
12202
12203                 impValidateMemoryAccessOpcode(codeAddr, codeEndp, false);
12204
12205             PREFIX:
12206                 opcode = (OPCODE)getU1LittleEndian(codeAddr);
12207                 codeAddr += sizeof(__int8);
12208                 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
12209                 goto DECODE_OPCODE;
12210
12211             case CEE_VOLATILE:
12212
12213                 Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes");
12214                 prefixFlags |= PREFIX_VOLATILE;
12215
12216                 impValidateMemoryAccessOpcode(codeAddr, codeEndp, true);
12217
12218                 assert(sz == 0);
12219                 goto PREFIX;
12220
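                  // ldftn: resolve the method token, run access and constraint checks, and push a
                  // pointer to the target method.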
12221             case CEE_LDFTN:
12222             {
12223                 // Need to do a lookup here so that we perform an access check
12224                 // and do a NOWAY if protections are violated
12225                 _impResolveToken(CORINFO_TOKENKIND_Method);
12226
12227                 JITDUMP(" %08X", resolvedToken.token);
12228
12229                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12230                               addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN)),
12231                               &callInfo);
12232
12233                 // This check really only applies to intrinsic Array.Address methods
12234                 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12235                 {
12236                     NO_WAY("Currently do not support LDFTN of Parameterized functions");
12237                 }
12238
12239                 // Do this before DO_LDFTN since CEE_LDVIRTFTN does it on its own.
12240                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12241
12242                 if (tiVerificationNeeded)
12243                 {
12244                     // LDFTN could be the start of a delegate creation sequence, remember that
12245                     delegateCreateStart = codeAddr - 2;
12246
12247                     // check any constraints on the callee's class and type parameters
12248                     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12249                                    "method has unsatisfied class constraints");
12250                     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12251                                                                                 resolvedToken.hMethod),
12252                                    "method has unsatisfied method constraints");
12253
12254                     mflags = callInfo.verMethodFlags;
12255                     Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDFTN on a constructor");
12256                 }
12257
12258             DO_LDFTN:
12259                 op1 = impMethodPointer(&resolvedToken, &callInfo);
12260                 if (compDonotInline())
12261                 {
12262                     return;
12263                 }
12264
12265                 impPushOnStack(op1, typeInfo(resolvedToken.hMethod));
12266
12267                 break;
12268             }
12269
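                  // ldvirtftn: pop an object reference and push a pointer to the method that a
                  // virtual call on that object would invoke. When the target can be resolved
                  // statically (final, static, or non-virtual) this reduces to the ldftn path above.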
12270             case CEE_LDVIRTFTN:
12271             {
12272                 /* Get the method token */
12273
12274                 _impResolveToken(CORINFO_TOKENKIND_Method);
12275
12276                 JITDUMP(" %08X", resolvedToken.token);
12277
12278                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */,
12279                               addVerifyFlag(combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN),
12280                                                     CORINFO_CALLINFO_CALLVIRT)),
12281                               &callInfo);
12282
12283                 // This check really only applies to intrinsic Array.Address methods
12284                 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12285                 {
12286                     NO_WAY("Currently do not support LDFTN of Parameterized functions");
12287                 }
12288
12289                 mflags = callInfo.methodFlags;
12290
12291                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12292
12293                 if (compIsForInlining())
12294                 {
12295                     if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12296                     {
12297                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL);
12298                         return;
12299                     }
12300                 }
12301
12302                 CORINFO_SIG_INFO& ftnSig = callInfo.sig;
12303
12304                 if (tiVerificationNeeded)
12305                 {
12306
12307                     Verify(ftnSig.hasThis(), "ldvirtftn on a static method");
12308                     Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDVIRTFTN on a constructor");
12309
12310                     // JIT32 verifier rejects verifiable ldvirtftn pattern
12311                     typeInfo declType =
12312                         verMakeTypeInfo(resolvedToken.hClass, true); // Change TI_STRUCT to TI_REF when necessary
12313
12314                     typeInfo arg = impStackTop().seTypeInfo;
12315                     Verify((arg.IsType(TI_REF) || arg.IsType(TI_NULL)) && tiCompatibleWith(arg, declType, true),
12316                            "bad ldvirtftn");
12317
12318                     CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
12319                     if (!(arg.IsType(TI_NULL) || (mflags & CORINFO_FLG_STATIC)))
12320                     {
12321                         instanceClassHnd = arg.GetClassHandleForObjRef();
12322                     }
12323
12324                     // check any constraints on the method's class and type parameters
12325                     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12326                                    "method has unsatisfied class constraints");
12327                     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12328                                                                                 resolvedToken.hMethod),
12329                                    "method has unsatisfied method constraints");
12330
12331                     if (mflags & CORINFO_FLG_PROTECTED)
12332                     {
12333                         Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
12334                                "Accessing protected method through wrong type.");
12335                     }
12336                 }
12337
12338                 /* Get the object-ref */
12339                 op1 = impPopStack().val;
12340                 assertImp(op1->gtType == TYP_REF);
12341
12342                 if (opts.IsReadyToRun())
12343                 {
12344                     if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN)
12345                     {
12346                         if (op1->gtFlags & GTF_SIDE_EFFECT)
12347                         {
12348                             op1 = gtUnusedValNode(op1);
12349                             impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12350                         }
12351                         goto DO_LDFTN;
12352                     }
12353                 }
12354                 else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12355                 {
12356                     if (op1->gtFlags & GTF_SIDE_EFFECT)
12357                     {
12358                         op1 = gtUnusedValNode(op1);
12359                         impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12360                     }
12361                     goto DO_LDFTN;
12362                 }
12363
12364                 GenTreePtr fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo);
12365                 if (compDonotInline())
12366                 {
12367                     return;
12368                 }
12369
12370                 impPushOnStack(fptr, typeInfo(resolvedToken.hMethod));
12371
12372                 break;
12373             }
12374
12375             case CEE_CONSTRAINED:
12376
12377                 assertImp(sz == sizeof(unsigned));
12378                 impResolveToken(codeAddr, &constrainedResolvedToken, CORINFO_TOKENKIND_Constrained);
12379                 codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually
12380                 JITDUMP(" (%08X) ", constrainedResolvedToken.token);
12381
12382                 Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes");
12383                 prefixFlags |= PREFIX_CONSTRAINED;
12384
12385                 {
12386                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12387                     if (actualOpcode != CEE_CALLVIRT)
12388                     {
12389                         BADCODE("constrained. has to be followed by callvirt");
12390                     }
12391                 }
12392
12393                 goto PREFIX;
12394
12395             case CEE_READONLY:
12396                 JITDUMP(" readonly.");
12397
12398                 Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes");
12399                 prefixFlags |= PREFIX_READONLY;
12400
12401                 {
12402                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12403                     if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode))
12404                     {
12405                         BADCODE("readonly. has to be followed by ldelema or call");
12406                     }
12407                 }
12408
12409                 assert(sz == 0);
12410                 goto PREFIX;
12411
12412             case CEE_TAILCALL:
12413                 JITDUMP(" tail.");
12414
12415                 Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes");
12416                 prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
12417
12418                 {
12419                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12420                     if (!impOpcodeIsCallOpcode(actualOpcode))
12421                     {
12422                         BADCODE("tailcall. has to be followed by call, callvirt or calli");
12423                     }
12424                 }
12425                 assert(sz == 0);
12426                 goto PREFIX;
12427
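                  // newobj: arrays are allocated entirely by a helper. Otherwise newObjThisPtr is
                  // set up (a null or omitted 'this' for variable-sized objects such as String, the
                  // address of a zeroed temp for value classes, or the freshly allocated object for
                  // ordinary classes) and control goes to the shared call import below.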
12428             case CEE_NEWOBJ:
12429
12430                 /* Since we will implicitly insert newObjThisPtr at the start of the
12431                    argument list, spill any GTF_ORDER_SIDEEFF */
12432                 impSpillSpecialSideEff();
12433
12434                 /* NEWOBJ does not respond to TAIL */
12435                 prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT;
12436
12437                 /* NEWOBJ does not respond to CONSTRAINED */
12438                 prefixFlags &= ~PREFIX_CONSTRAINED;
12439
12440 #if COR_JIT_EE_VERSION > 460
12441                 _impResolveToken(CORINFO_TOKENKIND_NewObj);
12442 #else
12443                 _impResolveToken(CORINFO_TOKENKIND_Method);
12444 #endif
12445
12446                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12447                               addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM)),
12448                               &callInfo);
12449
12450                 if (compIsForInlining())
12451                 {
12452                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
12453                     {
12454                         // Check to see if this call violates the boundary.
12455                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
12456                         return;
12457                     }
12458                 }
12459
12460                 mflags = callInfo.methodFlags;
12461
12462                 if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0)
12463                 {
12464                     BADCODE("newobj on static or abstract method");
12465                 }
12466
12467                 // Insert the security callout before any actual code is generated
12468                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12469
12470                 // There are three different cases for 'new'.
12471                 // The object size may be variable (it depends on the arguments):
12472                 //      1) the object is an array (arrays are treated specially by the EE), or
12473                 //      2) the object is some other variable-sized object (e.g. String);
12474                 // or 3) the class size can be determined beforehand (the normal case).
12475                 // In the first case we need to call a NEWOBJ helper (multinewarray),
12476                 // in the second case we call the constructor with a null 'this' pointer,
12477                 // and in the third case we allocate the memory and then call the constructor.
12478
12479                 clsFlags = callInfo.classFlags;
12480                 if (clsFlags & CORINFO_FLG_ARRAY)
12481                 {
12482                     if (tiVerificationNeeded)
12483                     {
12484                         CORINFO_CLASS_HANDLE elemTypeHnd;
12485                         INDEBUG(CorInfoType corType =)
12486                         info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
12487                         assert(!(elemTypeHnd == nullptr && corType == CORINFO_TYPE_VALUECLASS));
12488                         Verify(elemTypeHnd == nullptr ||
12489                                    !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
12490                                "newarr of byref-like objects");
12491                         verVerifyCall(opcode, &resolvedToken, nullptr, ((prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0),
12492                                       ((prefixFlags & PREFIX_READONLY) != 0), delegateCreateStart, codeAddr - 1,
12493                                       &callInfo DEBUGARG(info.compFullName));
12494                     }
12495                     // Arrays need to call the NEWOBJ helper.
12496                     assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE);
12497
12498                     impImportNewObjArray(&resolvedToken, &callInfo);
12499                     if (compDonotInline())
12500                     {
12501                         return;
12502                     }
12503
12504                     callTyp = TYP_REF;
12505                     break;
12506                 }
12507                 // At present this can only be String
12508                 else if (clsFlags & CORINFO_FLG_VAROBJSIZE)
12509                 {
12510                     if (IsTargetAbi(CORINFO_CORERT_ABI))
12511                     {
12512                         // The dummy argument does not exist in CoreRT
12513                         newObjThisPtr = nullptr;
12514                     }
12515                     else
12516                     {
12517                         // This is the case for variable-sized objects that are not
12518                         // arrays.  In this case, call the constructor with a null 'this'
12519                         // pointer
12520                         newObjThisPtr = gtNewIconNode(0, TYP_REF);
12521                     }
12522
12523                     /* Remember that this basic block contains 'new' of an object */
12524                     block->bbFlags |= BBF_HAS_NEWOBJ;
12525                     optMethodFlags |= OMF_HAS_NEWOBJ;
12526                 }
12527                 else
12528                 {
12529                     // This is the normal case where the size of the object is
12530                     // fixed.  Allocate the memory and call the constructor.
12531
12532                     // Note: We cannot add a peep to avoid use of temp here
12533                     // becase we don't have enough interference info to detect when
12534                     // because we don't have enough interference info to detect when
12535
12536                     // TODO: We need to find the correct place to introduce a general
12537                     // reverse copy prop for struct return values from newobj or
12538                     // any function returning structs.
12539
12540                     /* get a temporary for the new object */
12541                     lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp"));
12542
12543                     // In the value class case we only need clsHnd for size calcs.
12544                     //
12545                     // The lookup of the code pointer will be handled by CALL in this case
12546                     if (clsFlags & CORINFO_FLG_VALUECLASS)
12547                     {
12548                         if (compIsForInlining())
12549                         {
12550                             // If value class has GC fields, inform the inliner. It may choose to
12551                             // bail out on the inline.
12552                             DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
12553                             if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
12554                             {
12555                                 compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
12556                                 if (compInlineResult->IsFailure())
12557                                 {
12558                                     return;
12559                                 }
12560
12561                                 // Do further notification in the case where the call site is rare;
12562                                 // some policies do not track the relative hotness of call sites for
12563                                 // "always" inline cases.
12564                                 if (impInlineInfo->iciBlock->isRunRarely())
12565                                 {
12566                                     compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
12567                                     if (compInlineResult->IsFailure())
12568                                     {
12569                                         return;
12570                                     }
12571                                 }
12572                             }
12573                         }
12574
12575                         CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
12576                         unsigned    size   = info.compCompHnd->getClassSize(resolvedToken.hClass);
12577
12578                         if (impIsPrimitive(jitTyp))
12579                         {
12580                             lvaTable[lclNum].lvType = JITtype2varType(jitTyp);
12581                         }
12582                         else
12583                         {
12584                             // The local variable itself is the allocated space.
12585                             // Here we need the unsafe value cls check, since the address of the struct is taken for further use
12586                             // and is potentially exploitable.
12587                             lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */);
12588                         }
12589
12590                         // Append a tree to zero-out the temp
12591                         newObjThisPtr = gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet());
12592
12593                         newObjThisPtr = gtNewBlkOpNode(newObjThisPtr,    // Dest
12594                                                        gtNewIconNode(0), // Value
12595                                                        size,             // Size
12596                                                        false,            // isVolatile
12597                                                        false);           // not copyBlock
12598                         impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
12599
12600                         // Obtain the address of the temp
12601                         newObjThisPtr =
12602                             gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet()));
12603                     }
12604                     else
12605                     {
12606 #ifdef FEATURE_READYTORUN_COMPILER
12607                         if (opts.IsReadyToRun())
12608                         {
12609                             op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
12610                             usingReadyToRunHelper = (op1 != nullptr);
12611                         }
12612
12613                         if (!usingReadyToRunHelper)
12614 #endif
12615                         {
12616                             op1 = impParentClassTokenToHandle(&resolvedToken, nullptr, TRUE);
12617                             if (op1 == nullptr)
12618                             { // compDonotInline()
12619                                 return;
12620                             }
12621
12622                             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
12623                             // and the newfast call with a single call to a dynamic R2R cell that will:
12624                             //      1) Load the context
12625                             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate
12626                             //      stub
12627                             //      3) Allocate and return the new object
12628                             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
12629
12630                             op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd),
12631                                                     resolvedToken.hClass, TYP_REF, op1);
12632                         }
12633
12634                         // Remember that this basic block contains 'new' of an object
12635                         block->bbFlags |= BBF_HAS_NEWOBJ;
12636                         optMethodFlags |= OMF_HAS_NEWOBJ;
12637
12638                         // Append the assignment to the temp/local. Don't need to spill
12639                         // at all as we are just calling an EE-Jit helper which can only
12640                         // cause an (async) OutOfMemoryException.
12641
12642                         // We assign the newly allocated object (by a GT_ALLOCOBJ node)
12643                         // to a temp. Note that the pattern "temp = allocObj" is required
12644                         // by the ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes
12645                         // without an exhaustive walk over all expressions.
12646
12647                         impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE);
12648
12649                         newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF);
12650                     }
12651                 }
12652                 goto CALL;
12653
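                  // calli, call, and callvirt all funnel into the shared CALL label below. For call
                  // and callvirt the method token is resolved and getCallInfo is consulted here;
                  // calli carries a stand-alone signature token, so callInfo stays zeroed and only
                  // the raw token is read.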
12654             case CEE_CALLI:
12655
12656                 /* CALLI does not respond to CONSTRAINED */
12657                 prefixFlags &= ~PREFIX_CONSTRAINED;
12658
12659                 if (compIsForInlining())
12660                 {
12661                     // CALLI doesn't have a method handle, so assume the worst.
12662                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
12663                     {
12664                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_CALLI);
12665                         return;
12666                     }
12667                 }
12668
12669             // fall through
12670
12671             case CEE_CALLVIRT:
12672             case CEE_CALL:
12673
12674                 // We can't call getCallInfo on the token from a CALLI, but we need it in
12675                 // many other places.  We unfortunately embed that knowledge here.
12676                 if (opcode != CEE_CALLI)
12677                 {
12678                     _impResolveToken(CORINFO_TOKENKIND_Method);
12679
12680                     eeGetCallInfo(&resolvedToken,
12681                                   (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr,
12682                                   // this is how impImportCall invokes getCallInfo
12683                                   addVerifyFlag(
12684                                       combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS),
12685                                               (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT
12686                                                                        : CORINFO_CALLINFO_NONE)),
12687                                   &callInfo);
12688                 }
12689                 else
12690                 {
12691                     // Suppress uninitialized use warning.
12692                     memset(&resolvedToken, 0, sizeof(resolvedToken));
12693                     memset(&callInfo, 0, sizeof(callInfo));
12694
12695                     resolvedToken.token = getU4LittleEndian(codeAddr);
12696                 }
12697
12698             CALL: // memberRef should be set.
12699                 // newObjThisPtr should be set for CEE_NEWOBJ
12700
12701                 JITDUMP(" %08X", resolvedToken.token);
12702                 constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0;
12703
12704                 bool newBBcreatedForTailcallStress;
12705
12706                 newBBcreatedForTailcallStress = false;
12707
12708                 if (compIsForInlining())
12709                 {
12710                     if (compDonotInline())
12711                     {
12712                         return;
12713                     }
12714                     // We rule out inlinees with explicit tail calls in fgMakeBasicBlocks.
12715                     assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0);
12716                 }
12717                 else
12718                 {
12719                     if (compTailCallStress())
12720                     {
12721                         // Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()?
12722                         // Tail call stress only recognizes call+ret patterns and forces them to be
12723                         // explicit tail prefixed calls.  Also fgMakeBasicBlocks() under tail call stress
12724                         // doesn't import the 'ret' opcode following the call into the basic block containing
12725                         // the call; instead it imports it into a new basic block.  Note that fgMakeBasicBlocks()
12726                         // is already checking that there is an opcode following the call and hence it is
12727                         // safe here to read the next opcode without a bounds check.
12728                         newBBcreatedForTailcallStress =
12729                             impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL, (not a CEE_NEWOBJ). So, don't
12730                                                              // make it jump to RET.
12731                             (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET
12732
12733                         if (newBBcreatedForTailcallStress &&
12734                             !(prefixFlags & PREFIX_TAILCALL_EXPLICIT) && // User hasn't set "tail." prefix yet.
12735                             verCheckTailCallConstraint(opcode, &resolvedToken,
12736                                                        constraintCall ? &constrainedResolvedToken : nullptr,
12737                                                        true) // Is it legal to do a tailcall?
12738                             )
12739                         {
12740                             // Stress the tailcall.
12741                             JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)");
12742                             prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
12743                         }
12744                     }
12745
12746                     // Note that when running under tail call stress, a call will be marked as explicit tail prefixed
12747                     // and hence will not be considered for implicit tail calling.
12748                     bool isRecursive = (callInfo.hMethod == info.compMethodHnd);
12749                     if (impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive))
12750                     {
12751                         JITDUMP(" (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
12752                         prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
12753                     }
12754                 }
12755
12756                 // Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call).
12757                 explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
12758                 readonlyCall     = (prefixFlags & PREFIX_READONLY) != 0;
12759
12760                 if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ)
12761                 {
12762                     // All calls and delegates need a security callout.
12763                     // For delegates, this is the call to the delegate constructor, not the access check on the
12764                     // LD(virt)FTN.
12765                     impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12766
12767 #if 0 // DevDiv 410397 - This breaks too many obfuscated apps to do this in an in-place release
12768      
12769                 // DevDiv 291703 - we need to check for accessibility between the caller of InitializeArray
12770                 // and the field it is reading, thus it is now unverifiable to not immediately precede with
12771                 // ldtoken <field token>, and we now check accessibility
12772                 if ((callInfo.methodFlags & CORINFO_FLG_INTRINSIC) &&
12773                     (info.compCompHnd->getIntrinsicID(callInfo.hMethod) == CORINFO_INTRINSIC_InitializeArray))
12774                 {
12775                     if (prevOpcode != CEE_LDTOKEN)
12776                     {
12777                         Verify(prevOpcode == CEE_LDTOKEN, "Need ldtoken for InitializeArray");
12778                     }
12779                     else
12780                     {
12781                         assert(lastLoadToken != NULL);
12782                         // Now that we know we have a token, verify that it is accessible for loading
12783                         CORINFO_RESOLVED_TOKEN resolvedLoadField;
12784                         impResolveToken(lastLoadToken, &resolvedLoadField, CORINFO_TOKENKIND_Field);
12785                         eeGetFieldInfo(&resolvedLoadField, CORINFO_ACCESS_INIT_ARRAY, &fieldInfo);
12786                         impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
12787                     }
12788                 }
12789
12790 #endif // DevDiv 410397
12791                 }
12792
12793                 if (tiVerificationNeeded)
12794                 {
12795                     verVerifyCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
12796                                   explicitTailCall, readonlyCall, delegateCreateStart, codeAddr - 1,
12797                                   &callInfo DEBUGARG(info.compFullName));
12798                 }
12799
12800                 // Insert delegate callout here.
12801                 if (opcode == CEE_NEWOBJ && (mflags & CORINFO_FLG_CONSTRUCTOR) && (clsFlags & CORINFO_FLG_DELEGATE))
12802                 {
12803 #ifdef DEBUG
12804                     // We should do this only if verification is enabled
12805                     // If verification is disabled, delegateCreateStart will not be initialized correctly
12806                     if (tiVerificationNeeded)
12807                     {
12808                         mdMemberRef delegateMethodRef = mdMemberRefNil;
12809                         // We should get here only for well formed delegate creation.
12810                         assert(verCheckDelegateCreation(delegateCreateStart, codeAddr - 1, delegateMethodRef));
12811                     }
12812 #endif
12813
12814 #ifdef FEATURE_CORECLR
12815                     // In coreclr the delegate transparency rule needs to be enforced even if verification is disabled
12816                     typeInfo              tiActualFtn          = impStackTop(0).seTypeInfo;
12817                     CORINFO_METHOD_HANDLE delegateMethodHandle = tiActualFtn.GetMethod2();
12818
12819                     impInsertCalloutForDelegate(info.compMethodHnd, delegateMethodHandle, resolvedToken.hClass);
12820 #endif // FEATURE_CORECLR
12821                 }
12822
12823                 callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
12824                                         newObjThisPtr, prefixFlags, &callInfo, opcodeOffs);
12825                 if (compDonotInline())
12826                 {
12827                     return;
12828                 }
12829
12830                 if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we
12831                                                                        // have created a new BB after the "call"
12832                 // instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless.
12833                 {
12834                     assert(!compIsForInlining());
12835                     goto RET;
12836                 }
12837
12838                 break;
12839
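                  // ldfld/ldsfld/ldflda/ldsflda: load a field's value or its address. The EE's
                  // fieldAccessor value determines how the access tree is built below (direct field
                  // reference, helper call, TLS access, static address, etc.).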
12840             case CEE_LDFLD:
12841             case CEE_LDSFLD:
12842             case CEE_LDFLDA:
12843             case CEE_LDSFLDA:
12844             {
12845
12846                 BOOL isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA);
12847                 BOOL isLoadStatic  = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA);
12848
12849                 /* Get the CP_Fieldref index */
12850                 assertImp(sz == sizeof(unsigned));
12851
12852                 _impResolveToken(CORINFO_TOKENKIND_Field);
12853
12854                 JITDUMP(" %08X", resolvedToken.token);
12855
12856                 int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET;
12857
12858                 GenTreePtr           obj     = nullptr;
12859                 typeInfo*            tiObj   = nullptr;
12860                 CORINFO_CLASS_HANDLE objType = nullptr; // used for fields
12861
12862                 if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA)
12863                 {
12864                     tiObj = &impStackTop().seTypeInfo;
12865                     obj   = impPopStack(objType).val;
12866
12867                     if (impIsThis(obj))
12868                     {
12869                         aflags |= CORINFO_ACCESS_THIS;
12870
12871                         // An optimization for Contextful classes:
12872                         // we unwrap the proxy when we have a 'this reference'
12873
12874                         if (info.compUnwrapContextful)
12875                         {
12876                             aflags |= CORINFO_ACCESS_UNWRAP;
12877                         }
12878                     }
12879                 }
12880
12881                 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
12882
12883                 // Figure out the type of the member.  We always call canAccessField, so you always need this
12884                 // handle
12885                 CorInfoType ciType = fieldInfo.fieldType;
12886                 clsHnd             = fieldInfo.structType;
12887
12888                 lclTyp = JITtype2varType(ciType);
12889
12890 #ifdef _TARGET_AMD64_
12891                 noway_assert(varTypeIsIntegralOrI(lclTyp) || varTypeIsFloating(lclTyp) || lclTyp == TYP_STRUCT);
12892 #endif // _TARGET_AMD64_
12893
12894                 if (compIsForInlining())
12895                 {
12896                     switch (fieldInfo.fieldAccessor)
12897                     {
12898                         case CORINFO_FIELD_INSTANCE_HELPER:
12899                         case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
12900                         case CORINFO_FIELD_STATIC_ADDR_HELPER:
12901                         case CORINFO_FIELD_STATIC_TLS:
12902
12903                             compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER);
12904                             return;
12905
12906                         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
12907 #if COR_JIT_EE_VERSION > 460
12908                         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
12909 #endif
12910                             /* We may be able to inline the field accessors in specific instantiations of generic
12911                              * methods */
12912                             compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER);
12913                             return;
12914
12915                         default:
12916                             break;
12917                     }
12918
12919                     if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT &&
12920                         clsHnd)
12921                     {
12922                         if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) &&
12923                             !(info.compFlags & CORINFO_FLG_FORCEINLINE))
12924                         {
12925                             // Loading a static valuetype field usually will cause a JitHelper to be called
12926                             // for the static base. This will bloat the code.
12927                             compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS);
12928
12929                             if (compInlineResult->IsFailure())
12930                             {
12931                                 return;
12932                             }
12933                         }
12934                     }
12935                 }
12936
12937                 tiRetVal = verMakeTypeInfo(ciType, clsHnd);
12938                 if (isLoadAddress)
12939                 {
12940                     tiRetVal.MakeByRef();
12941                 }
12942                 else
12943                 {
12944                     tiRetVal.NormaliseForStack();
12945                 }
12946
12947                 // Perform this check always to ensure that we get field access exceptions even with
12948                 // SkipVerification.
12949                 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
12950
12951                 if (tiVerificationNeeded)
12952                 {
12953                     // You can also pass the unboxed struct to LDFLD
12954                     BOOL bAllowPlainValueTypeAsThis = FALSE;
12955                     if (opcode == CEE_LDFLD && impIsValueType(tiObj))
12956                     {
12957                         bAllowPlainValueTypeAsThis = TRUE;
12958                     }
12959
12960                     verVerifyField(&resolvedToken, fieldInfo, tiObj, isLoadAddress, bAllowPlainValueTypeAsThis);
12961
12962                     // If we're doing this on a heap object or from a 'safe' byref
12963                     // then the result is a safe byref too
12964                     if (isLoadAddress) // load address
12965                     {
12966                         if (fieldInfo.fieldFlags &
12967                             CORINFO_FLG_FIELD_STATIC) // statics marked as safe will have permanent home
12968                         {
12969                             if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_SAFESTATIC_BYREF_RETURN)
12970                             {
12971                                 tiRetVal.SetIsPermanentHomeByRef();
12972                             }
12973                         }
12974                         else if (tiObj->IsObjRef() || tiObj->IsPermanentHomeByRef())
12975                         {
12976                             // ldflda of byref is safe if done on a gc object or on a
12977                             // safe byref
12978                             tiRetVal.SetIsPermanentHomeByRef();
12979                         }
12980                     }
12981                 }
12982                 else
12983                 {
12984                     // tiVerificationNeeded is false.
12985                     // Raise InvalidProgramException if static load accesses non-static field
12986                     if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
12987                     {
12988                         BADCODE("static access on an instance field");
12989                     }
12990                 }
12991
12992                 // We are using ldfld/a on a static field. We allow it, but need to get side-effect from obj.
12993                 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
12994                 {
12995                     if (obj->gtFlags & GTF_SIDE_EFFECT)
12996                     {
12997                         obj = gtUnusedValNode(obj);
12998                         impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12999                     }
13000                     obj = nullptr;
13001                 }
13002
13003                 /* Preserve 'small' int types */
13004                 if (lclTyp > TYP_INT)
13005                 {
13006                     lclTyp = genActualType(lclTyp);
13007                 }
13008
13009                 bool usesHelper = false;
13010
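                      // Build the access tree according to the access path the EE reported:
                      // instance fields become GT_FIELD nodes, helper-based accesses become helper
                      // calls, and statics go through impImportStaticFieldAccess.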
13011                 switch (fieldInfo.fieldAccessor)
13012                 {
13013                     case CORINFO_FIELD_INSTANCE:
13014 #ifdef FEATURE_READYTORUN_COMPILER
13015                     case CORINFO_FIELD_INSTANCE_WITH_BASE:
13016 #endif
13017                     {
13018                         bool nullcheckNeeded = false;
13019
13020                         obj = impCheckForNullPointer(obj);
13021
13022                         if (isLoadAddress && (obj->gtType == TYP_BYREF) && fgAddrCouldBeNull(obj))
13023                         {
13024                             nullcheckNeeded = true;
13025                         }
13026
13027                         // If the object is a struct, what we really want is
13028                         // for the field to operate on the address of the struct.
13029                         if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj))
13030                         {
13031                             assert(opcode == CEE_LDFLD && objType != nullptr);
13032
13033                             obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true);
13034                         }
13035
13036                         /* Create the data member node */
13037                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset, nullcheckNeeded);
13038
13039 #ifdef FEATURE_READYTORUN_COMPILER
13040                         if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
13041                         {
13042                             op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
13043                         }
13044 #endif
13045
13046                         op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
13047
13048                         if (fgAddrCouldBeNull(obj))
13049                         {
13050                             op1->gtFlags |= GTF_EXCEPT;
13051                         }
13052
13053                         // If gtFldObj is a BYREF then our target is a value class and
13054                         // it could point anywhere, e.g. a boxed class static int
13055                         if (obj->gtType == TYP_BYREF)
13056                         {
13057                             op1->gtFlags |= GTF_IND_TGTANYWHERE;
13058                         }
13059
13060                         DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13061                         if (StructHasOverlappingFields(typeFlags))
13062                         {
13063                             op1->gtField.gtFldMayOverlap = true;
13064                         }
13065
13066                         // wrap it in an address-of operator if necessary
13067                         if (isLoadAddress)
13068                         {
13069                             op1 = gtNewOperNode(GT_ADDR,
13070                                                 (var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1);
13071                         }
13072                         else
13073                         {
13074                             if (compIsForInlining() &&
13075                                 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, obj,
13076                                                                                    impInlineInfo->inlArgInfo))
13077                             {
13078                                 impInlineInfo->thisDereferencedFirst = true;
13079                             }
13080                         }
13081                     }
13082                     break;
13083
13084                     case CORINFO_FIELD_STATIC_TLS:
13085 #ifdef _TARGET_X86_
13086                         // Legacy TLS access is implemented as an intrinsic on x86 only
13087
13088                         /* Create the data member node */
13089                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
13090                         op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13091
13092                         if (isLoadAddress)
13093                         {
13094                             op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1);
13095                         }
13096                         break;
13097 #else
13098                         fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13099
13100                         __fallthrough;
13101 #endif
13102
13103                     case CORINFO_FIELD_STATIC_ADDR_HELPER:
13104                     case CORINFO_FIELD_INSTANCE_HELPER:
13105                     case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13106                         op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13107                                                clsHnd, nullptr);
13108                         usesHelper = true;
13109                         break;
13110
13111                     case CORINFO_FIELD_STATIC_ADDRESS:
13112                         // Replace static read-only fields with a constant if possible
13113                         if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) &&
13114                             !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) &&
13115                             (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp)))
13116                         {
13117                             CorInfoInitClassResult initClassResult =
13118                                 info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd,
13119                                                             impTokenLookupContextHandle);
13120
13121                             if (initClassResult & CORINFO_INITCLASS_INITIALIZED)
13122                             {
13123                                 void** pFldAddr = nullptr;
13124                                 void*  fldAddr =
13125                                     info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr);
13126
13127                                 // We should always be able to access this static's address directly
13128                                 assert(pFldAddr == nullptr);
13129
13130                                 op1 = impImportStaticReadOnlyField(fldAddr, lclTyp);
13131                                 goto FIELD_DONE;
13132                             }
13133                         }
13134
13135                         __fallthrough;
13136
13137                     case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13138                     case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13139                     case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13140 #if COR_JIT_EE_VERSION > 460
13141                     case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13142 #endif
13143                         op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13144                                                          lclTyp);
13145                         break;
13146
13147                     case CORINFO_FIELD_INTRINSIC_ZERO:
13148                     {
13149                         assert(aflags & CORINFO_ACCESS_GET);
13150                         op1 = gtNewIconNode(0, lclTyp);
13151                         goto FIELD_DONE;
13152                     }
13153                     break;
13154
13155                     case CORINFO_FIELD_INTRINSIC_EMPTY_STRING:
13156                     {
13157                         assert(aflags & CORINFO_ACCESS_GET);
13158
13159                         LPVOID         pValue;
13160                         InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue);
13161                         op1                = gtNewStringLiteralNode(iat, pValue);
13162                         goto FIELD_DONE;
13163                     }
13164                     break;
13165
13166                     default:
13167                         assert(!"Unexpected fieldAccessor");
13168                 }
13169
13170                 if (!isLoadAddress)
13171                 {
13172
13173                     if (prefixFlags & PREFIX_VOLATILE)
13174                     {
13175                         op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
13176                         op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13177
13178                         if (!usesHelper)
13179                         {
13180                             assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13181                                    (op1->OperGet() == GT_OBJ));
13182                             op1->gtFlags |= GTF_IND_VOLATILE;
13183                         }
13184                     }
13185
13186                     if (prefixFlags & PREFIX_UNALIGNED)
13187                     {
13188                         if (!usesHelper)
13189                         {
13190                             assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13191                                    (op1->OperGet() == GT_OBJ));
13192                             op1->gtFlags |= GTF_IND_UNALIGNED;
13193                         }
13194                     }
13195                 }
13196
13197                 /* Check if the class needs explicit initialization */
13198
13199                 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13200                 {
13201                     GenTreePtr helperNode = impInitClass(&resolvedToken);
13202                     if (compDonotInline())
13203                     {
13204                         return;
13205                     }
13206                     if (helperNode != nullptr)
13207                     {
13208                         op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13209                     }
13210                 }
13211
13212             FIELD_DONE:
13213                 impPushOnStack(op1, tiRetVal);
13214             }
13215             break;
13216
13217             case CEE_STFLD:
13218             case CEE_STSFLD:
13219             {
13220
13221                 BOOL isStoreStatic = (opcode == CEE_STSFLD);
13222
13223                 CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type)
13224
13225                 /* Get the CP_Fieldref index */
13226
13227                 assertImp(sz == sizeof(unsigned));
13228
13229                 _impResolveToken(CORINFO_TOKENKIND_Field);
13230
13231                 JITDUMP(" %08X", resolvedToken.token);
13232
13233                 int        aflags = CORINFO_ACCESS_SET;
13234                 GenTreePtr obj    = nullptr;
13235                 typeInfo*  tiObj  = nullptr;
13236                 typeInfo   tiVal;
13237
13238                 /* Pull the value from the stack */
13239                 op2    = impPopStack(tiVal);
13240                 clsHnd = tiVal.GetClassHandle();
13241
13242                 if (opcode == CEE_STFLD)
13243                 {
13244                     tiObj = &impStackTop().seTypeInfo;
13245                     obj   = impPopStack().val;
13246
13247                     if (impIsThis(obj))
13248                     {
13249                         aflags |= CORINFO_ACCESS_THIS;
13250
13251                         // An optimization for Contextful classes:
13252                         // we unwrap the proxy when we have a 'this reference'
13253
13254                         if (info.compUnwrapContextful)
13255                         {
13256                             aflags |= CORINFO_ACCESS_UNWRAP;
13257                         }
13258                     }
13259                 }
13260
13261                 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
13262
13263                 // Figure out the type of the member.  We always call canAccessField, so you always need this
13264                 // handle
13265                 CorInfoType ciType = fieldInfo.fieldType;
13266                 fieldClsHnd        = fieldInfo.structType;
13267
13268                 lclTyp = JITtype2varType(ciType);
13269
13270                 if (compIsForInlining())
13271                 {
13272                     /* Is this a 'special' (COM) field, a TLS ref static field, a field stored in the GC heap, or
13273                      * a per-instantiation static? */
13274
13275                     switch (fieldInfo.fieldAccessor)
13276                     {
13277                         case CORINFO_FIELD_INSTANCE_HELPER:
13278                         case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13279                         case CORINFO_FIELD_STATIC_ADDR_HELPER:
13280                         case CORINFO_FIELD_STATIC_TLS:
13281
13282                             compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER);
13283                             return;
13284
13285                         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13286 #if COR_JIT_EE_VERSION > 460
13287                         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13288 #endif
13289
13290                             /* We may be able to inline the field accessors in specific instantiations of generic
13291                              * methods */
13292                             compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER);
13293                             return;
13294
13295                         default:
13296                             break;
13297                     }
13298                 }
13299
13300                 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13301
13302                 if (tiVerificationNeeded)
13303                 {
13304                     verVerifyField(&resolvedToken, fieldInfo, tiObj, TRUE);
13305                     typeInfo fieldType = verMakeTypeInfo(ciType, fieldClsHnd);
13306                     Verify(tiCompatibleWith(tiVal, fieldType.NormaliseForStack(), true), "type mismatch");
13307                 }
13308                 else
13309                 {
13310                     // tiVerificationNeeded is false.
13311                     // Raise InvalidProgramException if static store accesses non-static field
13312                     if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
13313                     {
13314                         BADCODE("static access on an instance field");
13315                     }
13316                 }
13317
13318                 // We are using stfld on a static field.
13319                 // We allow it, but need to eval any side-effects for obj
13320                 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
13321                 {
13322                     if (obj->gtFlags & GTF_SIDE_EFFECT)
13323                     {
13324                         obj = gtUnusedValNode(obj);
13325                         impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13326                     }
13327                     obj = nullptr;
13328                 }
13329
13330                 /* Preserve 'small' int types */
13331                 if (lclTyp > TYP_INT)
13332                 {
13333                     lclTyp = genActualType(lclTyp);
13334                 }
13335
13336                 switch (fieldInfo.fieldAccessor)
13337                 {
13338                     case CORINFO_FIELD_INSTANCE:
13339 #ifdef FEATURE_READYTORUN_COMPILER
13340                     case CORINFO_FIELD_INSTANCE_WITH_BASE:
13341 #endif
13342                     {
13343                         obj = impCheckForNullPointer(obj);
13344
13345                         /* Create the data member node */
13346                         op1             = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
13347                         DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13348                         if (StructHasOverlappingFields(typeFlags))
13349                         {
13350                             op1->gtField.gtFldMayOverlap = true;
13351                         }
13352
13353 #ifdef FEATURE_READYTORUN_COMPILER
13354                         if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
13355                         {
13356                             op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
13357                         }
13358 #endif
13359
13360                         op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
13361
13362                         if (fgAddrCouldBeNull(obj))
13363                         {
13364                             op1->gtFlags |= GTF_EXCEPT;
13365                         }
13366
13367                         // If gtFldObj is a BYREF then our target is a value class and
13368                         // it could point anywhere, for example a boxed class static int
13369                         if (obj->gtType == TYP_BYREF)
13370                         {
13371                             op1->gtFlags |= GTF_IND_TGTANYWHERE;
13372                         }
13373
13374                         if (compIsForInlining() &&
13375                             impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, obj, impInlineInfo->inlArgInfo))
13376                         {
13377                             impInlineInfo->thisDereferencedFirst = true;
13378                         }
13379                     }
13380                     break;
13381
13382                     case CORINFO_FIELD_STATIC_TLS:
13383 #ifdef _TARGET_X86_
13384                         // Legacy TLS access is implemented as intrinsic on x86 only
13385
13386                         /* Create the data member node */
13387                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
13388                         op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13389
13390                         break;
13391 #else
13392                         fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13393
13394                         __fallthrough;
13395 #endif
13396
13397                     case CORINFO_FIELD_STATIC_ADDR_HELPER:
13398                     case CORINFO_FIELD_INSTANCE_HELPER:
13399                     case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13400                         op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13401                                                clsHnd, op2);
13402                         goto SPILL_APPEND;
13403
13404                     case CORINFO_FIELD_STATIC_ADDRESS:
13405                     case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13406                     case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13407                     case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13408 #if COR_JIT_EE_VERSION > 460
13409                     case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13410 #endif
13411                         op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13412                                                          lclTyp);
13413                         break;
13414
13415                     default:
13416                         assert(!"Unexpected fieldAccessor");
13417                 }
13418
13419                 // Create the member assignment, unless we have a struct.
13420                 // TODO-1stClassStructs: This could be limited to TYP_STRUCT, to avoid extra copies.
13421                 bool deferStructAssign = varTypeIsStruct(lclTyp);
13422
13423                 if (!deferStructAssign)
13424                 {
13425                     if (prefixFlags & PREFIX_VOLATILE)
13426                     {
13427                         assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
13428                         op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
13429                         op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13430                         op1->gtFlags |= GTF_IND_VOLATILE;
13431                     }
13432                     if (prefixFlags & PREFIX_UNALIGNED)
13433                     {
13434                         assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
13435                         op1->gtFlags |= GTF_IND_UNALIGNED;
13436                     }
13437
13438                     /* V4.0 allows assignment of i4 constant values to i8 type vars when the IL verifier is
13439                        bypassed (full trust apps). The reason this works is that the JIT stores an i4 constant
13440                        in the GenTree union during importation and reads from the union as if it were a long
13441                        during code generation. Though this can potentially read garbage, one can get lucky and
13442                        have it work correctly.
13443
13444                        This code pattern is generated by the Dev10 MC++ compiler while storing to fields when
13445                        compiled with the /O2 switch (the default when compiling retail configs in Dev10), and a
13446                        customer app has taken a dependency on it. To be backward compatible, we explicitly add
13447                        an upward cast here so that it always works correctly.
13448
13449                        Note that this is limited to x86 alone as there is no back compat to be addressed for the
13450                        Arm JIT for V4.0.
13451                     */
13456                     CLANG_FORMAT_COMMENT_ANCHOR;
13457
13458 #ifdef _TARGET_X86_
13459                     if (op1->TypeGet() != op2->TypeGet() && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) &&
13460                         varTypeIsLong(op1->TypeGet()))
13461                     {
13462                         op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
13463                     }
13464 #endif
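                          // Illustrative sketch (added; not in the original source): the back-compat case handled
                          // above corresponds roughly to IL of the form (hypothetical field)
                          //     ldc.i4.0
                          //     stfld  int64 SomeClass::someInt64Field
                          // i.e. an i4 constant stored to an i8 field; the cast inserted above widens the constant
                          // so the store is always well-typed on x86.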
13465
13466 #ifdef _TARGET_64BIT_
13467                     // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
13468                     if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
13469                     {
13470                         op2->gtType = TYP_I_IMPL;
13471                     }
13472                     else
13473                     {
13474                         // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
13475                         //
13476                         if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
13477                         {
13478                             op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
13479                         }
13480                         // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
13481                         //
13482                         if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
13483                         {
13484                             op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
13485                         }
13486                     }
13487 #endif
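                          // Illustrative sketch (added; not in the original source): on 64-bit targets the block
                          // above normalizes int <-> native int mismatches. For example (hypothetical field)
                          //     ldc.i4.0
                          //     stfld  native int SomeClass::somePtrField
                          // retypes the constant to TYP_I_IMPL, while a native int value stored to an int32 field
                          // gets an explicit downcast for x86 JIT compatibility.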
13488
13489 #if !FEATURE_X87_DOUBLES
13490                     // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
13491                     // We insert a cast to the dest 'op1' type
13492                     //
13493                     if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
13494                         varTypeIsFloating(op2->gtType))
13495                     {
13496                         op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
13497                     }
13498 #endif // !FEATURE_X87_DOUBLES
13499
13500                     op1 = gtNewAssignNode(op1, op2);
13501
13502                     /* Mark the expression as containing an assignment */
13503
13504                     op1->gtFlags |= GTF_ASG;
13505                 }
13506
13507                 /* Check if the class needs explicit initialization */
13508
13509                 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13510                 {
13511                     GenTreePtr helperNode = impInitClass(&resolvedToken);
13512                     if (compDonotInline())
13513                     {
13514                         return;
13515                     }
13516                     if (helperNode != nullptr)
13517                     {
13518                         op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13519                     }
13520                 }
13521
13522                 /* stfld can interfere with value classes (consider the sequence
13523                    ldloc, ldloca, ..., stfld, stloc).  We will be conservative and
13524                    spill all value class references from the stack. */
13525
13526                 if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL)))
13527                 {
13528                     assert(tiObj);
13529
13530                     if (impIsValueType(tiObj))
13531                     {
13532                         impSpillEvalStack();
13533                     }
13534                     else
13535                     {
13536                         impSpillValueClasses();
13537                     }
13538                 }
13539
13540                 /* Spill any refs to the same member from the stack */
13541
13542                 impSpillLclRefs((ssize_t)resolvedToken.hField);
13543
13544                 /* stsfld also interferes with indirect accesses (for aliased
13545                    statics) and calls. But we don't need to spill other statics
13546                    as we have explicitly spilled this particular static field. */
13547
13548                 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD"));
13549
13550                 if (deferStructAssign)
13551                 {
13552                     op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL);
13553                 }
13554             }
13555                 goto APPEND;
13556
13557             case CEE_NEWARR:
13558             {
13559
13560                 /* Get the class type index operand */
13561
13562                 _impResolveToken(CORINFO_TOKENKIND_Newarr);
13563
13564                 JITDUMP(" %08X", resolvedToken.token);
13565
13566                 if (!opts.IsReadyToRun())
13567                 {
13568                     // Need to restore array classes before creating array objects on the heap
13569                     op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
13570                     if (op1 == nullptr)
13571                     { // compDonotInline()
13572                         return;
13573                     }
13574                 }
13575
13576                 if (tiVerificationNeeded)
13577                 {
13578                     // As per ECMA, 'numElems' can be either an int32 or a native int.
13579                     Verify(impStackTop().seTypeInfo.IsIntOrNativeIntType(), "bad bound");
13580
13581                     CORINFO_CLASS_HANDLE elemTypeHnd;
13582                     info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
13583                     Verify(elemTypeHnd == nullptr ||
13584                                !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
13585                            "array of byref-like type");
13586                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13587                 }
13588
13589                 accessAllowedResult =
13590                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13591                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13592
13593                 /* Form the arglist: array class handle, size */
13594                 op2 = impPopStack().val;
13595                 assertImp(genActualTypeIsIntOrI(op2->gtType));
13596
13597 #ifdef FEATURE_READYTORUN_COMPILER
13598                 if (opts.IsReadyToRun())
13599                 {
13600                     op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF,
13601                                                     gtNewArgList(op2));
13602                     usingReadyToRunHelper = (op1 != nullptr);
13603
13604                     if (!usingReadyToRunHelper)
13605                     {
13606                         // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13607                         // and the newarr call with a single call to a dynamic R2R cell that will:
13608                         //      1) Load the context
13609                         //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
13610                         //      3) Allocate the new array
13611                         // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13612
13613                         // Need to restore array classes before creating array objects on the heap
13614                         op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
13615                         if (op1 == nullptr)
13616                         { // compDonotInline()
13617                             return;
13618                         }
13619                     }
13620                 }
13621
13622                 if (!usingReadyToRunHelper)
13623 #endif
13624                 {
13625                     args = gtNewArgList(op1, op2);
13626
13627                     /* Create a call to 'new' */
13628
13629                     // Note that this only works for shared generic code because the same helper is used for all
13630                     // reference array types
13631                     op1 =
13632                         gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, 0, args);
13633                 }
13634
13635                 op1->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass;
13636
13637                 /* Remember that this basic block contains 'new' of a single-dimension array */
13638
13639                 block->bbFlags |= BBF_HAS_NEWARRAY;
13640                 optMethodFlags |= OMF_HAS_NEWARRAY;
13641
13642                 /* Push the result of the call on the stack */
13643
13644                 impPushOnStack(op1, tiRetVal);
13645
13646                 callTyp = TYP_REF;
13647             }
13648             break;
13649
13650             case CEE_LOCALLOC:
13651                 assert(!compIsForInlining());
13652
13653                 if (tiVerificationNeeded)
13654                 {
13655                     Verify(false, "bad opcode");
13656                 }
13657
13658                 // We don't allow locallocs inside handlers
13659                 if (block->hasHndIndex())
13660                 {
13661                     BADCODE("Localloc can't be inside handler");
13662                 }
13663
13664                 /* The FP register may not be back to the original value at the end
13665                    of the method, even if the frame size is 0, as localloc may
13666                    have modified it. So we will HAVE to reset it */
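                      // Explanatory note (added; not in the original source): 'localloc' is the IL instruction
                      // that C# 'stackalloc' compiles down to, e.g. (hypothetical)
                      //     ldc.i4 64
                      //     localloc
                      // allocates 64 bytes from the local frame and pushes the address; the importer models this
                      // as a GT_LCLHEAP node below.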
13667
13668                 compLocallocUsed = true;
13669                 setNeedsGSSecurityCookie();
13670
13671                 // Get the size to allocate
13672
13673                 op2 = impPopStack().val;
13674                 assertImp(genActualTypeIsIntOrI(op2->gtType));
13675
13676                 if (verCurrentState.esStackDepth != 0)
13677                 {
13678                     BADCODE("Localloc can only be used when the stack is empty");
13679                 }
13680
13681                 op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2);
13682
13683                 // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd.
13684
13685                 op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE);
13686
13687                 impPushOnStack(op1, tiRetVal);
13688                 break;
13689
13690             case CEE_ISINST:
13691
13692                 /* Get the type token */
13693                 assertImp(sz == sizeof(unsigned));
13694
13695                 _impResolveToken(CORINFO_TOKENKIND_Casting);
13696
13697                 JITDUMP(" %08X", resolvedToken.token);
13698
13699                 if (!opts.IsReadyToRun())
13700                 {
13701                     op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
13702                     if (op2 == nullptr)
13703                     { // compDonotInline()
13704                         return;
13705                     }
13706                 }
13707
13708                 if (tiVerificationNeeded)
13709                 {
13710                     Verify(impStackTop().seTypeInfo.IsObjRef(), "obj reference needed");
13711                     // Even if this is a value class, we know it is boxed.
13712                     tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
13713                 }
13714                 accessAllowedResult =
13715                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13716                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13717
13718                 op1 = impPopStack().val;
13719
13720 #ifdef FEATURE_READYTORUN_COMPILER
13721                 if (opts.IsReadyToRun())
13722                 {
13723                     GenTreePtr opLookup =
13724                         impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF,
13725                                                   gtNewArgList(op1));
13726                     usingReadyToRunHelper = (opLookup != nullptr);
13727                     op1                   = (usingReadyToRunHelper ? opLookup : op1);
13728
13729                     if (!usingReadyToRunHelper)
13730                     {
13731                         // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13732                         // and the isinstanceof_any call with a single call to a dynamic R2R cell that will:
13733                         //      1) Load the context
13734                         //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
13735                         //      3) Perform the 'is instance' check on the input object
13736                         // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13737
13738                         op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
13739                         if (op2 == nullptr)
13740                         { // compDonotInline()
13741                             return;
13742                         }
13743                     }
13744                 }
13745
13746                 if (!usingReadyToRunHelper)
13747 #endif
13748                 {
13749                     op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false);
13750                 }
13751                 if (compDonotInline())
13752                 {
13753                     return;
13754                 }
13755
13756                 impPushOnStack(op1, tiRetVal);
13757
13758                 break;
13759
13760             case CEE_REFANYVAL:
13761
13762                 // get the class handle and make an ICON node out of it
13763
13764                 _impResolveToken(CORINFO_TOKENKIND_Class);
13765
13766                 JITDUMP(" %08X", resolvedToken.token);
13767
13768                 op2 = impTokenToHandle(&resolvedToken);
13769                 if (op2 == nullptr)
13770                 { // compDonotInline()
13771                     return;
13772                 }
13773
13774                 if (tiVerificationNeeded)
13775                 {
13776                     Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
13777                            "need refany");
13778                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass).MakeByRef();
13779                 }
13780
13781                 op1 = impPopStack().val;
13782                 // make certain it is normalized;
13783                 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
13784
13785                 // Call helper GETREFANY(classHandle, op1);
13786                 args = gtNewArgList(op2, op1);
13787                 op1  = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, 0, args);
13788
13789                 impPushOnStack(op1, tiRetVal);
13790                 break;
13791
13792             case CEE_REFANYTYPE:
13793
13794                 if (tiVerificationNeeded)
13795                 {
13796                     Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
13797                            "need refany");
13798                 }
13799
13800                 op1 = impPopStack().val;
13801
13802                 // make certain it is normalized;
13803                 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
13804
13805                 if (op1->gtOper == GT_OBJ)
13806                 {
13807                     // Get the address of the refany
13808                     op1 = op1->gtOp.gtOp1;
13809
13810                     // Fetch the type from the correct slot
13811                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
13812                                         gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL));
13813                     op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1);
13814                 }
13815                 else
13816                 {
13817                     assertImp(op1->gtOper == GT_MKREFANY);
13818
13819                     // The pointer may have side-effects
13820                     if (op1->gtOp.gtOp1->gtFlags & GTF_SIDE_EFFECT)
13821                     {
13822                         impAppendTree(op1->gtOp.gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13823 #ifdef DEBUG
13824                         impNoteLastILoffs();
13825 #endif
13826                     }
13827
13828                     // We already have the class handle
13829                     op1 = op1->gtOp.gtOp2;
13830                 }
13831
13832                 // convert native TypeHandle to RuntimeTypeHandle
13833                 {
13834                     GenTreeArgList* helperArgs = gtNewArgList(op1);
13835
13836                     op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL, TYP_STRUCT, GTF_EXCEPT,
13837                                               helperArgs);
13838
13839                     // The handle struct is returned in register
13840                     op1->gtCall.gtReturnType = TYP_REF;
13841
13842                     tiRetVal = typeInfo(TI_STRUCT, impGetTypeHandleClass());
13843                 }
13844
13845                 impPushOnStack(op1, tiRetVal);
13846                 break;
13847
13848             case CEE_LDTOKEN:
13849             {
13850                 /* Get the Class index */
13851                 assertImp(sz == sizeof(unsigned));
13852                 lastLoadToken = codeAddr;
13853                 _impResolveToken(CORINFO_TOKENKIND_Ldtoken);
13854
13855                 tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken);
13856
13857                 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
13858                 if (op1 == nullptr)
13859                 { // compDonotInline()
13860                     return;
13861                 }
13862
13863                 helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE;
13864                 assert(resolvedToken.hClass != nullptr);
13865
13866                 if (resolvedToken.hMethod != nullptr)
13867                 {
13868                     helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD;
13869                 }
13870                 else if (resolvedToken.hField != nullptr)
13871                 {
13872                     helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD;
13873                 }
13874
13875                 GenTreeArgList* helperArgs = gtNewArgList(op1);
13876
13877                 op1 = gtNewHelperCallNode(helper, TYP_STRUCT, GTF_EXCEPT, helperArgs);
13878
13879                 // The handle struct is returned in register
13880                 op1->gtCall.gtReturnType = TYP_REF;
13881
13882                 tiRetVal = verMakeTypeInfo(tokenType);
13883                 impPushOnStack(op1, tiRetVal);
13884             }
13885             break;
13886
13887             case CEE_UNBOX:
13888             case CEE_UNBOX_ANY:
13889             {
13890                 /* Get the Class index */
13891                 assertImp(sz == sizeof(unsigned));
13892
13893                 _impResolveToken(CORINFO_TOKENKIND_Class);
13894
13895                 JITDUMP(" %08X", resolvedToken.token);
13896
13897                 BOOL runtimeLookup;
13898                 op2 = impTokenToHandle(&resolvedToken, &runtimeLookup);
13899                 if (op2 == nullptr)
13900                 { // compDonotInline()
13901                     return;
13902                 }
13903
13904                 // Run this always so we can get access exceptions even with SkipVerification.
13905                 accessAllowedResult =
13906                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13907                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13908
13909                 if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass))
13910                 {
13911                     if (tiVerificationNeeded)
13912                     {
13913                         typeInfo tiUnbox = impStackTop().seTypeInfo;
13914                         Verify(tiUnbox.IsObjRef(), "bad unbox.any arg");
13915                         tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13916                         tiRetVal.NormaliseForStack();
13917                     }
13918                     op1 = impPopStack().val;
13919                     goto CASTCLASS;
13920                 }
13921
13922                 /* Pop the object and create the unbox helper call */
13923                 /* You might think that for UNBOX_ANY we need to push a different */
13924                 /* (non-byref) type, but here we're making the tiRetVal that is used */
13925                 /* for the intermediate pointer which we then transfer onto the OBJ */
13926                 /* instruction.  OBJ then creates the appropriate tiRetVal. */
13927                 if (tiVerificationNeeded)
13928                 {
13929                     typeInfo tiUnbox = impStackTop().seTypeInfo;
13930                     Verify(tiUnbox.IsObjRef(), "Bad unbox arg");
13931
13932                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13933                     Verify(tiRetVal.IsValueClass(), "not value class");
13934                     tiRetVal.MakeByRef();
13935
13936                     // We always come from an objref, so this is a safe byref
13937                     tiRetVal.SetIsPermanentHomeByRef();
13938                     tiRetVal.SetIsReadonlyByRef();
13939                 }
13940
13941                 op1 = impPopStack().val;
13942                 assertImp(op1->gtType == TYP_REF);
13943
13944                 helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass);
13945                 assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE);
13946
13947                 // We only want to expand inline the normal UNBOX helper;
13948                 expandInline = (helper == CORINFO_HELP_UNBOX);
13949
13950                 if (expandInline)
13951                 {
13952                     if (compCurBB->isRunRarely())
13953                     {
13954                         expandInline = false; // not worth the code expansion
13955                     }
13956                 }
13957
13958                 if (expandInline)
13959                 {
13960                     // we are doing normal unboxing
13961                     // inline the common case of the unbox helper
13962                     // UNBOX(exp) morphs into
13963                     // clone = pop(exp);
13964                     // ((*clone == typeToken) ? nop : helper(clone, typeToken));
13965                     // push(clone + sizeof(void*))
13966                     //
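                          // Explanatory note (added; not in the original source): the GT_IND below loads the
                          // object's method table pointer and compares it against the class handle in 'op2'; if
                          // they differ, the UNBOX helper is called to perform the full (possibly throwing) unbox
                          // check, and the final GT_ADD of sizeof(void*) skips the method table slot so the value
                          // pushed is a byref to the boxed payload.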
13967                     GenTreePtr cloneOperand;
13968                     op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
13969                                        nullptr DEBUGARG("inline UNBOX clone1"));
13970                     op1 = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
13971
13972                     GenTreePtr condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2);
13973
13974                     op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
13975                                        nullptr DEBUGARG("inline UNBOX clone2"));
13976                     op2 = impTokenToHandle(&resolvedToken);
13977                     if (op2 == nullptr)
13978                     { // compDonotInline()
13979                         return;
13980                     }
13981                     args = gtNewArgList(op2, op1);
13982                     op1  = gtNewHelperCallNode(helper, TYP_VOID, 0, args);
13983
13984                     op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1);
13985                     op1 = gtNewQmarkNode(TYP_VOID, condBox, op1);
13986                     condBox->gtFlags |= GTF_RELOP_QMARK;
13987
13988                     // QMARK nodes cannot reside on the evaluation stack. Because there
13989                     // may be other trees on the evaluation stack that side-effect the
13990                     // sources of the UNBOX operation we must spill the stack.
13991
13992                     impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13993
13994                     // Create the address-expression to reference past the object header
13995                     // to the beginning of the value-type. Today this means adjusting
13996                     // past the base of the object's vtable field, which is pointer sized.
13997
13998                     op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
13999                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2);
14000                 }
14001                 else
14002                 {
14003                     unsigned callFlags = (helper == CORINFO_HELP_UNBOX) ? 0 : GTF_EXCEPT;
14004
14005                     // Don't optimize, just call the helper and be done with it
14006                     args = gtNewArgList(op2, op1);
14007                     op1  = gtNewHelperCallNode(helper,
14008                                               (var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT),
14009                                               callFlags, args);
14010                 }
14011
14012                 assert(((helper == CORINFO_HELP_UNBOX) && (op1->gtType == TYP_BYREF)) || // Unbox helper returns a byref.
14013                        ((helper == CORINFO_HELP_UNBOX_NULLABLE) &&
14014                         varTypeIsStruct(op1)) // UnboxNullable helper returns a struct.
14015                        );
14016
14017                 /*
14018                   ----------------------------------------------------------------------
14019                   | \ helper  |                         |                              |
14020                   |   \       |                         |                              |
14021                   |     \     | CORINFO_HELP_UNBOX      | CORINFO_HELP_UNBOX_NULLABLE  |
14022                   |       \   | (which returns a BYREF) | (which returns a STRUCT)     |
14023                   | opcode  \ |                         |                              |
14024                   |---------------------------------------------------------------------
14025                   | UNBOX     | push the BYREF          | spill the STRUCT to a local, |
14026                   |           |                         | push the BYREF to this local |
14027                   |---------------------------------------------------------------------
14028                   | UNBOX_ANY | push a GT_OBJ of        | push the STRUCT              |
14029                   |           | the BYREF               | For Linux when the           |
14030                   |           |                         |  struct is returned in two   |
14031                   |           |                         |  registers create a temp     |
14032                   |           |                         |  whose address is passed to  |
14033                   |           |                         |  the unbox_nullable helper.  |
14034                   |---------------------------------------------------------------------
14035                 */
14036
14037                 if (opcode == CEE_UNBOX)
14038                 {
14039                     if (helper == CORINFO_HELP_UNBOX_NULLABLE)
14040                     {
14041                         // Unbox nullable helper returns a struct type.
14042                         // We need to spill it to a temp so that we can take its address.
14043                         // Here we need the unsafe value cls check, since the address of the struct is taken to be
14044                         // used further along and could potentially be exploitable.
14045
14046                         unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable"));
14047                         lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14048
14049                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14050                         op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14051                         assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14052
14053                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14054                         op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14055                         op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14056                     }
14057
14058                     assert(op1->gtType == TYP_BYREF);
14059                     assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14060                 }
14061                 else
14062                 {
14063                     assert(opcode == CEE_UNBOX_ANY);
14064
14065                     if (helper == CORINFO_HELP_UNBOX)
14066                     {
14067                         // Normal unbox helper returns a TYP_BYREF.
14068                         impPushOnStack(op1, tiRetVal);
14069                         oper = GT_OBJ;
14070                         goto OBJ;
14071                     }
14072
14073                     assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!");
14074
14075 #if FEATURE_MULTIREG_RET
14076
14077                     if (varTypeIsStruct(op1) && IsMultiRegReturnedType(resolvedToken.hClass))
14078                     {
14079                         // Unbox nullable helper returns a TYP_STRUCT.
14080                         // For the multi-reg case we need to spill it to a temp so that
14081                         // we can pass the address to the unbox_nullable jit helper.
14082
14083                         unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable"));
14084                         lvaTable[tmp].lvIsMultiRegArg = true;
14085                         lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14086
14087                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14088                         op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14089                         assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14090
14091                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14092                         op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14093                         op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14094
14095                         // In this case the return value of the unbox helper is TYP_BYREF.
14096                         // Make sure the right type is placed on the operand type stack.
14097                         impPushOnStack(op1, tiRetVal);
14098
14099                         // Load the struct.
14100                         oper = GT_OBJ;
14101
14102                         assert(op1->gtType == TYP_BYREF);
14103                         assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14104
14105                         goto OBJ;
14106                     }
14107                     else
14108
14109 #endif // FEATURE_MULTIREG_RET
14110
14111                     {
14112                         // If the struct is not returned in registers, it is materialized in the RetBuf.
14113                         assert(op1->gtType == TYP_STRUCT);
14114                         tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14115                         assert(tiRetVal.IsValueClass());
14116                     }
14117                 }
14118
14119                 impPushOnStack(op1, tiRetVal);
14120             }
14121             break;
14122
14123             case CEE_BOX:
14124             {
14125                 /* Get the Class index */
14126                 assertImp(sz == sizeof(unsigned));
14127
14128                 _impResolveToken(CORINFO_TOKENKIND_Box);
14129
14130                 JITDUMP(" %08X", resolvedToken.token);
14131
14132                 if (tiVerificationNeeded)
14133                 {
14134                     typeInfo tiActual = impStackTop().seTypeInfo;
14135                     typeInfo tiBox    = verMakeTypeInfo(resolvedToken.hClass);
14136
14137                     Verify(verIsBoxable(tiBox), "boxable type expected");
14138
14139                     // check the class constraints of the boxed type in case we are boxing an uninitialized value
14140                     Verify(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
14141                            "boxed type has unsatisfied class constraints");
14142
14143                     Verify(tiCompatibleWith(tiActual, tiBox.NormaliseForStack(), true), "type mismatch");
14144
14145                     // Observation: the following code introduces a boxed value class on the stack, but,
14146                     // according to the ECMA spec, one would simply expect: tiRetVal =
14147                     // typeInfo(TI_REF,impGetObjectClass());
14148
14149                     // Push the result back on the stack:
14150                     // even if clsHnd is a value class we want the TI_REF;
14151                     // we call back to the EE to find out what type we should push (for Nullable<T> we push T)
14152                     tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(resolvedToken.hClass));
14153                 }
14154
14155                 accessAllowedResult =
14156                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14157                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14158
14159                 // Note BOX can be used on things that are not value classes, in which
14160                 // case we get a NOP.  However the verifier's view of the type on the
14161                 // stack changes (in generic code a 'T' becomes a 'boxed T')
14162                 if (!eeIsValueClass(resolvedToken.hClass))
14163                 {
14164                     verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal;
14165                     break;
14166                 }
14167
14168                 // Look ahead for unbox.any
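                      // Explanatory note (added; not in the original source): this peephole targets the common
                      // IL pair (for some value type T)
                      //     box       T
                      //     unbox.any T
                      // which is a no-op when both instructions name the exact same, non-shared class; in that
                      // case we skip importing the box and consume the following unbox.any below.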
14169                 if (codeAddr + (sz + 1 + sizeof(mdToken)) <= codeEndp && codeAddr[sz] == CEE_UNBOX_ANY)
14170                 {
14171                     DWORD classAttribs = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
14172                     if (!(classAttribs & CORINFO_FLG_SHAREDINST))
14173                     {
14174                         CORINFO_RESOLVED_TOKEN unboxResolvedToken;
14175
14176                         impResolveToken(codeAddr + (sz + 1), &unboxResolvedToken, CORINFO_TOKENKIND_Class);
14177
14178                         if (unboxResolvedToken.hClass == resolvedToken.hClass)
14179                         {
14180                             // Skip the next unbox.any instruction
14181                             sz += sizeof(mdToken) + 1;
14182                             break;
14183                         }
14184                     }
14185                 }
14186
14187                 impImportAndPushBox(&resolvedToken);
14188                 if (compDonotInline())
14189                 {
14190                     return;
14191                 }
14192             }
14193             break;
14194
14195             case CEE_SIZEOF:
14196
14197                 /* Get the Class index */
14198                 assertImp(sz == sizeof(unsigned));
14199
14200                 _impResolveToken(CORINFO_TOKENKIND_Class);
14201
14202                 JITDUMP(" %08X", resolvedToken.token);
14203
14204                 if (tiVerificationNeeded)
14205                 {
14206                     tiRetVal = typeInfo(TI_INT);
14207                 }
14208
14209                 op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass));
14210                 impPushOnStack(op1, tiRetVal);
14211                 break;
14212
14213             case CEE_CASTCLASS:
14214
14215                 /* Get the Class index */
14216
14217                 assertImp(sz == sizeof(unsigned));
14218
14219                 _impResolveToken(CORINFO_TOKENKIND_Casting);
14220
14221                 JITDUMP(" %08X", resolvedToken.token);
14222
14223                 if (!opts.IsReadyToRun())
14224                 {
14225                     op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14226                     if (op2 == nullptr)
14227                     { // compDonotInline()
14228                         return;
14229                     }
14230                 }
14231
14232                 if (tiVerificationNeeded)
14233                 {
14234                     Verify(impStackTop().seTypeInfo.IsObjRef(), "object ref expected");
14235                     // box it
14236                     tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
14237                 }
14238
14239                 accessAllowedResult =
14240                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14241                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14242
14243                 op1 = impPopStack().val;
14244
14245             /* Pop the address and create the 'checked cast' helper call */
14246
14247             // At this point we expect typeRef to contain the token, op1 to contain the value being cast,
14248             // and op2 to contain code that creates the type handle corresponding to typeRef
14249             CASTCLASS:
14250
14251 #ifdef FEATURE_READYTORUN_COMPILER
14252                 if (opts.IsReadyToRun())
14253                 {
14254                     GenTreePtr opLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST,
14255                                                                     TYP_REF, gtNewArgList(op1));
14256                     usingReadyToRunHelper = (opLookup != nullptr);
14257                     op1                   = (usingReadyToRunHelper ? opLookup : op1);
14258
14259                     if (!usingReadyToRunHelper)
14260                     {
14261                         // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14262                         // and the chkcastany call with a single call to a dynamic R2R cell that will:
14263                         //      1) Load the context
14264                         //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
14265                         //      3) Check the object on the stack for the type-cast
14266                         // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14267
14268                         op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14269                         if (op2 == nullptr)
14270                         { // compDonotInline()
14271                             return;
14272                         }
14273                     }
14274                 }
14275
14276                 if (!usingReadyToRunHelper)
14277 #endif
14278                 {
14279                     op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true);
14280                 }
14281                 if (compDonotInline())
14282                 {
14283                     return;
14284                 }
14285
14286                 /* Push the result back on the stack */
14287                 impPushOnStack(op1, tiRetVal);
14288                 break;
14289
14290             case CEE_THROW:
14291
14292                 if (compIsForInlining())
14293                 {
14294                     // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
14295                     // TODO: Will this be too strict, given that we will inline many basic blocks?
14296                     // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
14297
14298                     /* Do we have just the exception on the stack ?*/
14299
14300                     if (verCurrentState.esStackDepth != 1)
14301                     {
14302                         /* if not, just don't inline the method */
14303
14304                         compInlineResult->NoteFatal(InlineObservation::CALLEE_THROW_WITH_INVALID_STACK);
14305                         return;
14306                     }
14307                 }
14308
14309                 if (tiVerificationNeeded)
14310                 {
14311                     tiRetVal = impStackTop().seTypeInfo;
14312                     Verify(tiRetVal.IsObjRef(), "object ref expected");
14313                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
14314                     {
14315                         Verify(!tiRetVal.IsThisPtr(), "throw uninitialized this");
14316                     }
14317                 }
14318
14319                 block->bbSetRunRarely(); // any block with a throw is rare
14320                 /* Pop the exception object and create the 'throw' helper call */
14321
14322                 op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, GTF_EXCEPT, gtNewArgList(impPopStack().val));
14323
14324             EVAL_APPEND:
14325                 if (verCurrentState.esStackDepth > 0)
14326                 {
14327                     impEvalSideEffects();
14328                 }
14329
14330                 assert(verCurrentState.esStackDepth == 0);
14331
14332                 goto APPEND;
14333
14334             case CEE_RETHROW:
14335
14336                 assert(!compIsForInlining());
14337
14338                 if (info.compXcptnsCount == 0)
14339                 {
14340                     BADCODE("rethrow outside catch");
14341                 }
14342
14343                 if (tiVerificationNeeded)
14344                 {
14345                     Verify(block->hasHndIndex(), "rethrow outside catch");
14346                     if (block->hasHndIndex())
14347                     {
14348                         EHblkDsc* HBtab = ehGetDsc(block->getHndIndex());
14349                         Verify(!HBtab->HasFinallyOrFaultHandler(), "rethrow in finally or fault");
14350                         if (HBtab->HasFilter())
14351                         {
14352                             // we better be in the handler clause part, not the filter part
14353                             Verify(jitIsBetween(compCurBB->bbCodeOffs, HBtab->ebdHndBegOffs(), HBtab->ebdHndEndOffs()),
14354                                    "rethrow in filter");
14355                         }
14356                     }
14357                 }
14358
14359                 /* Create the 'rethrow' helper call */
14360
14361                 op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID, GTF_EXCEPT);
14362
14363                 goto EVAL_APPEND;
14364
14365             case CEE_INITOBJ:
14366
14367                 assertImp(sz == sizeof(unsigned));
14368
14369                 _impResolveToken(CORINFO_TOKENKIND_Class);
14370
14371                 JITDUMP(" %08X", resolvedToken.token);
14372
14373                 if (tiVerificationNeeded)
14374                 {
14375                     typeInfo tiTo    = impStackTop().seTypeInfo;
14376                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14377
14378                     Verify(tiTo.IsByRef(), "byref expected");
14379                     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
14380
14381                     Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
14382                            "type operand incompatible with type of address");
14383                 }
14384
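                // A hypothetical illustration (not from any particular IL stream): for
                // 'initobj SomeStruct' where SomeStruct is 16 bytes, the code below pops the
                // destination address and appends a 16-byte block store of zeros.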
14385                 size = info.compCompHnd->getClassSize(resolvedToken.hClass); // Size
14386                 op2  = gtNewIconNode(0);                                     // Value
14387                 op1  = impPopStack().val;                                    // Dest
14388                 op1  = gtNewBlockVal(op1, size);
14389                 op1  = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
14390                 goto SPILL_APPEND;
14391
14392             case CEE_INITBLK:
14393
14394                 if (tiVerificationNeeded)
14395                 {
14396                     Verify(false, "bad opcode");
14397                 }
14398
14399                 op3 = impPopStack().val; // Size
14400                 op2 = impPopStack().val; // Value
14401                 op1 = impPopStack().val; // Dest
14402
14403                 if (op3->IsCnsIntOrI())
14404                 {
14405                     size = (unsigned)op3->AsIntConCommon()->IconValue();
14406                     op1  = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
14407                 }
14408                 else
14409                 {
14410                     op1  = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
14411                     size = 0;
14412                 }
14413                 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
14414
14415                 goto SPILL_APPEND;
14416
14417             case CEE_CPBLK:
14418
14419                 if (tiVerificationNeeded)
14420                 {
14421                     Verify(false, "bad opcode");
14422                 }
14423                 op3 = impPopStack().val; // Size
14424                 op2 = impPopStack().val; // Src
14425                 op1 = impPopStack().val; // Dest
14426
14427                 if (op3->IsCnsIntOrI())
14428                 {
14429                     size = (unsigned)op3->AsIntConCommon()->IconValue();
14430                     op1  = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
14431                 }
14432                 else
14433                 {
14434                     op1  = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
14435                     size = 0;
14436                 }
14437                 if (op2->OperGet() == GT_ADDR)
14438                 {
14439                     op2 = op2->gtOp.gtOp1;
14440                 }
14441                 else
14442                 {
14443                     op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2);
14444                 }
14445
14446                 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, true);
14447                 goto SPILL_APPEND;
14448
14449             case CEE_CPOBJ:
14450
14451                 assertImp(sz == sizeof(unsigned));
14452
14453                 _impResolveToken(CORINFO_TOKENKIND_Class);
14454
14455                 JITDUMP(" %08X", resolvedToken.token);
14456
14457                 if (tiVerificationNeeded)
14458                 {
14459                     typeInfo tiFrom  = impStackTop().seTypeInfo;
14460                     typeInfo tiTo    = impStackTop(1).seTypeInfo;
14461                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14462
14463                     Verify(tiFrom.IsByRef(), "expected byref source");
14464                     Verify(tiTo.IsByRef(), "expected byref destination");
14465
14466                     Verify(tiCompatibleWith(tiFrom.DereferenceByRef(), tiInstr, false),
14467                            "type of source address incompatible with type operand");
14468                     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
14469                     Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
14470                            "type operand incompatible with type of destination address");
14471                 }
14472
14473                 if (!eeIsValueClass(resolvedToken.hClass))
14474                 {
14475                     op1 = impPopStack().val; // address to load from
14476
14477                     impBashVarAddrsToI(op1);
14478
14479                     assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
14480
14481                     op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
14482                     op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
14483
14484                     impPushOnStackNoType(op1);
14485                     opcode = CEE_STIND_REF;
14486                     lclTyp = TYP_REF;
14487                     goto STIND_POST_VERIFY;
14488                 }
14489
14490                 op2 = impPopStack().val; // Src
14491                 op1 = impPopStack().val; // Dest
14492                 op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0));
14493                 goto SPILL_APPEND;
14494
14495             case CEE_STOBJ:
14496             {
14497                 assertImp(sz == sizeof(unsigned));
14498
14499                 _impResolveToken(CORINFO_TOKENKIND_Class);
14500
14501                 JITDUMP(" %08X", resolvedToken.token);
14502
14503                 if (eeIsValueClass(resolvedToken.hClass))
14504                 {
14505                     lclTyp = TYP_STRUCT;
14506                 }
14507                 else
14508                 {
14509                     lclTyp = TYP_REF;
14510                 }
14511
14512                 if (tiVerificationNeeded)
14513                 {
14514
14515                     typeInfo tiPtr = impStackTop(1).seTypeInfo;
14516
14517                     // Make sure we have a good looking byref
14518                     Verify(tiPtr.IsByRef(), "pointer not byref");
14519                     Verify(!tiPtr.IsReadonlyByRef(), "write to readonly byref");
14520                     if (!tiPtr.IsByRef() || tiPtr.IsReadonlyByRef())
14521                     {
14522                         compUnsafeCastUsed = true;
14523                     }
14524
14525                     typeInfo ptrVal = DereferenceByRef(tiPtr);
14526                     typeInfo argVal = verMakeTypeInfo(resolvedToken.hClass);
14527
14528                     if (!tiCompatibleWith(impStackTop(0).seTypeInfo, NormaliseForStack(argVal), true))
14529                     {
14530                         Verify(false, "type of value incompatible with type operand");
14531                         compUnsafeCastUsed = true;
14532                     }
14533
14534                     if (!tiCompatibleWith(argVal, ptrVal, false))
14535                     {
14536                         Verify(false, "type operand incompatible with type of address");
14537                         compUnsafeCastUsed = true;
14538                     }
14539                 }
14540                 else
14541                 {
14542                     compUnsafeCastUsed = true;
14543                 }
14544
14545                 if (lclTyp == TYP_REF)
14546                 {
14547                     opcode = CEE_STIND_REF;
14548                     goto STIND_POST_VERIFY;
14549                 }
14550
14551                 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
14552                 if (impIsPrimitive(jitTyp))
14553                 {
14554                     lclTyp = JITtype2varType(jitTyp);
14555                     goto STIND_POST_VERIFY;
14556                 }
14557
14558                 op2 = impPopStack().val; // Value
14559                 op1 = impPopStack().val; // Ptr
14560
14561                 assertImp(varTypeIsStruct(op2));
14562
14563                 op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14564                 goto SPILL_APPEND;
14565             }
14566
14567             case CEE_MKREFANY:
14568
14569                 assert(!compIsForInlining());
14570
14571                 // Being lazy here. Refanys are tricky in terms of gc tracking.
14572                 // Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany.
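                // A hypothetical illustration: the C# expression '__makeref(someLocal)'
                // compiles to mkrefany, so a single use of it turns off struct promotion
                // for every struct local in the method.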
14573
14574                 JITDUMP("disabling struct promotion because of mkrefany\n");
14575                 fgNoStructPromotion = true;
14576
14577                 oper = GT_MKREFANY;
14578                 assertImp(sz == sizeof(unsigned));
14579
14580                 _impResolveToken(CORINFO_TOKENKIND_Class);
14581
14582                 JITDUMP(" %08X", resolvedToken.token);
14583
14584                 op2 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
14585                 if (op2 == nullptr)
14586                 { // compDonotInline()
14587                     return;
14588                 }
14589
14590                 if (tiVerificationNeeded)
14591                 {
14592                     typeInfo tiPtr   = impStackTop().seTypeInfo;
14593                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14594
14595                     Verify(!verIsByRefLike(tiInstr), "mkrefany of byref-like class");
14596                     Verify(!tiPtr.IsReadonlyByRef(), "readonly byref used with mkrefany");
14597                     Verify(typeInfo::AreEquivalent(tiPtr.DereferenceByRef(), tiInstr), "type mismatch");
14598                 }
14599
14600                 accessAllowedResult =
14601                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14602                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14603
14604                 op1 = impPopStack().val;
14605
14606                 // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec.
14607                 // But JIT32 allowed it, so we continue to allow it.
14608                 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT);
14609
14610                 // MKREFANY returns a struct.  op2 is the class token.
14611                 op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2);
14612
14613                 impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass()));
14614                 break;
14615
14616             case CEE_LDOBJ:
14617             {
14618                 oper = GT_OBJ;
14619                 assertImp(sz == sizeof(unsigned));
14620
14621                 _impResolveToken(CORINFO_TOKENKIND_Class);
14622
14623                 JITDUMP(" %08X", resolvedToken.token);
14624
14625             OBJ:
14626
14627                 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14628
14629                 if (tiVerificationNeeded)
14630                 {
14631                     typeInfo tiPtr = impStackTop().seTypeInfo;
14632
14633                     // Make sure we have a byref
14634                     if (!tiPtr.IsByRef())
14635                     {
14636                         Verify(false, "pointer not byref");
14637                         compUnsafeCastUsed = true;
14638                     }
14639                     typeInfo tiPtrVal = DereferenceByRef(tiPtr);
14640
14641                     if (!tiCompatibleWith(tiPtrVal, tiRetVal, false))
14642                     {
14643                         Verify(false, "type of address incompatible with type operand");
14644                         compUnsafeCastUsed = true;
14645                     }
14646                     tiRetVal.NormaliseForStack();
14647                 }
14648                 else
14649                 {
14650                     compUnsafeCastUsed = true;
14651                 }
14652
14653                 if (eeIsValueClass(resolvedToken.hClass))
14654                 {
14655                     lclTyp = TYP_STRUCT;
14656                 }
14657                 else
14658                 {
14659                     lclTyp = TYP_REF;
14660                     opcode = CEE_LDIND_REF;
14661                     goto LDIND_POST_VERIFY;
14662                 }
14663
14664                 op1 = impPopStack().val;
14665
14666                 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL);
14667
14668                 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
14669                 if (impIsPrimitive(jitTyp))
14670                 {
14671                     op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1);
14672
14673                     // Could point anywhere, e.g. a boxed class static int
14674                     op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF;
14675                     assertImp(varTypeIsArithmetic(op1->gtType));
14676                 }
14677                 else
14678                 {
14679                     // OBJ returns a struct and takes an inline argument
14680                     // that is the class token of the loaded obj
14681                     op1 = gtNewObjNode(resolvedToken.hClass, op1);
14682                 }
14683                 op1->gtFlags |= GTF_EXCEPT;
14684
14685                 impPushOnStack(op1, tiRetVal);
14686                 break;
14687             }
14688
14689             case CEE_LDLEN:
14690                 if (tiVerificationNeeded)
14691                 {
14692                     typeInfo tiArray = impStackTop().seTypeInfo;
14693                     Verify(verIsSDArray(tiArray), "bad array");
14694                     tiRetVal = typeInfo(TI_INT);
14695                 }
14696
14697                 op1 = impPopStack().val;
14698                 if (!opts.MinOpts() && !opts.compDbgCode)
14699                 {
14700                     /* Use GT_ARR_LENGTH operator so rng check opts see this */
14701                     GenTreeArrLen* arrLen =
14702                         new (this, GT_ARR_LENGTH) GenTreeArrLen(TYP_INT, op1, offsetof(CORINFO_Array, length));
14703
14704                     /* Mark the block as containing a length expression */
14705
14706                     if (op1->gtOper == GT_LCL_VAR)
14707                     {
14708                         block->bbFlags |= BBF_HAS_IDX_LEN;
14709                     }
14710
14711                     op1 = arrLen;
14712                 }
14713                 else
14714                 {
14715                     /* Create the expression "*(array_addr + ArrLenOffs)" */
14716                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
14717                                         gtNewIconNode(offsetof(CORINFO_Array, length), TYP_I_IMPL));
14718                     op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
14719                     op1->gtFlags |= GTF_IND_ARR_LEN;
14720                 }
14721
14722                 /* An indirection will cause a GPF if the address is null */
14723                 op1->gtFlags |= GTF_EXCEPT;
14724
14725                 /* Push the result back on the stack */
14726                 impPushOnStack(op1, tiRetVal);
14727                 break;
14728
14729             case CEE_BREAK:
14730                 op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID);
14731                 goto SPILL_APPEND;
14732
14733             case CEE_NOP:
14734                 if (opts.compDbgCode)
14735                 {
14736                     op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
14737                     goto SPILL_APPEND;
14738                 }
14739                 break;
14740
14741             /******************************** NYI *******************************/
14742
14743             case 0xCC:
14744                 OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n");
14745
14746             case CEE_ILLEGAL:
14747             case CEE_MACRO_END:
14748
14749             default:
14750                 BADCODE3("unknown opcode", ": %02X", (int)opcode);
14751         }
14752
14753         codeAddr += sz;
14754         prevOpcode = opcode;
14755
14756         prefixFlags = 0;
14757         assert(!insertLdloc || opcode == CEE_DUP);
14758     }
14759
14760     assert(!insertLdloc);
14761
14762     return;
14763 #undef _impResolveToken
14764 }
14765 #ifdef _PREFAST_
14766 #pragma warning(pop)
14767 #endif
14768
14769 // Push a local/argument tree on the operand stack
14770 void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal)
14771 {
14772     tiRetVal.NormaliseForStack();
14773
14774     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr())
14775     {
14776         tiRetVal.SetUninitialisedObjRef();
14777     }
14778
14779     impPushOnStack(op, tiRetVal);
14780 }
14781
14782 // Load a local/argument on the operand stack
14783 // lclNum is an index into lvaTable *NOT* the arg/lcl index in the IL
14784 void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, typeInfo tiRetVal)
14785 {
14786     var_types lclTyp;
14787
14788     if (lvaTable[lclNum].lvNormalizeOnLoad())
14789     {
14790         lclTyp = lvaGetRealType(lclNum);
14791     }
14792     else
14793     {
14794         lclTyp = lvaGetActualType(lclNum);
14795     }
14796
14797     impPushVar(gtNewLclvNode(lclNum, lclTyp, offset), tiRetVal);
14798 }
14799
14800 // Load an argument on the operand stack
14801 // Shared by the various CEE_LDARG opcodes
14802 // ilArgNum is the argument index as specified in IL.
14803 // It will be mapped to the correct lvaTable index
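// For example (an illustrative sketch, not tied to a particular target): when the
// method has a hidden parameter such as a return buffer, the lvaTable index of a
// user argument is larger than its IL index; compMapILargNum below performs that
// adjustment.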
14804 void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset)
14805 {
14806     Verify(ilArgNum < info.compILargsCount, "bad arg num");
14807
14808     if (compIsForInlining())
14809     {
14810         if (ilArgNum >= info.compArgsCount)
14811         {
14812             compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER);
14813             return;
14814         }
14815
14816         impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo),
14817                    impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo);
14818     }
14819     else
14820     {
14821         if (ilArgNum >= info.compArgsCount)
14822         {
14823             BADCODE("Bad IL");
14824         }
14825
14826         unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param
14827
14828         if (lclNum == info.compThisArg)
14829         {
14830             lclNum = lvaArg0Var;
14831         }
14832
14833         impLoadVar(lclNum, offset);
14834     }
14835 }
14836
14837 // Load a local on the operand stack
14838 // Shared by the various CEE_LDLOC opcodes
14839 // ilLclNum is the local index as specified in IL.
14840 // It will be mapped to the correct lvaTable index
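// For example (root-method case, per the code below): ldloc.0 maps to lvaTable
// index info.compArgsCount + 0, i.e. locals are numbered after all of the arguments.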
14841 void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset)
14842 {
14843     if (tiVerificationNeeded)
14844     {
14845         Verify(ilLclNum < info.compMethodInfo->locals.numArgs, "bad loc num");
14846         Verify(info.compInitMem, "initLocals not set");
14847     }
14848
14849     if (compIsForInlining())
14850     {
14851         if (ilLclNum >= info.compMethodInfo->locals.numArgs)
14852         {
14853             compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER);
14854             return;
14855         }
14856
14857         // Get the local type
14858         var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo;
14859
14860         typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo;
14861
14862         /* Have we allocated a temp for this local? */
14863
14864         unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp"));
14865
14866         // All vars of inlined methods should be !lvNormalizeOnLoad()
14867
14868         assert(!lvaTable[lclNum].lvNormalizeOnLoad());
14869         lclTyp = genActualType(lclTyp);
14870
14871         impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal);
14872     }
14873     else
14874     {
14875         if (ilLclNum >= info.compMethodInfo->locals.numArgs)
14876         {
14877             BADCODE("Bad IL");
14878         }
14879
14880         unsigned lclNum = info.compArgsCount + ilLclNum;
14881
14882         impLoadVar(lclNum, offset);
14883     }
14884 }
14885
14886 #ifdef _TARGET_ARM_
14887 /**************************************************************************************
14888  *
14889  *  When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the
14890  *  dst struct, because struct promotion will turn it into a float/double variable while
14891  *  the rhs will be an int/long variable. We don't generate code for assigning an int into
14892  *  a float, and nothing prevents such a tree from being created. The tree would then
14893  *  look like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int))
14894  *
14895  *  tmpNum - the lcl dst variable num that is a struct.
14896  *  src    - the src tree assigned to the dest, which is a struct/int (when it is a varargs call).
14897  *  hClass - the type handle for the struct variable.
14898  *
14899  *  TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play,
14900  *        however, we could do a codegen of transferring from int to float registers
14901  *        (transfer, not a cast.)
14902  *
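 *  A hypothetical illustration (names are made up): C# along the lines of
 *
 *      struct TwoFloats { public float a, b; }        // an HFA on ARM
 *      TwoFloats d = VarargCallee(__arglist(42));     // varargs call returning the HFA
 *
 *  returns the struct in integer registers, so promoting 'd' into float fields
 *  would produce exactly the (typ_float, typ_int) assignment described above.
 *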
14903  */
14904 void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTreePtr src, CORINFO_CLASS_HANDLE hClass)
14905 {
14906     if (src->gtOper == GT_CALL && src->gtCall.IsVarargs() && IsHfa(hClass))
14907     {
14908         int       hfaSlots = GetHfaCount(hClass);
14909         var_types hfaType  = GetHfaType(hClass);
14910
14911         // If we have varargs we morph the method's return type to be "int" irrespective of its original
14912         // type: struct/float at the importer, because the ABI specifies returning in integer registers.
14913         // We don't want struct promotion to turn an expression like this:
14914         //   lclFld_int = callvar_int()   into   lclFld_float = callvar_int();
14915         // This means an int is getting assigned to a float without a cast. Prevent the promotion.
14916         if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) ||
14917             (hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES))
14918         {
14919             // Make sure this struct type stays as struct so we can receive the call in a struct.
14920             lvaTable[tmpNum].lvIsMultiRegRet = true;
14921         }
14922     }
14923 }
14924 #endif // _TARGET_ARM_
14925
14926 #if FEATURE_MULTIREG_RET
14927 GenTreePtr Compiler::impAssignMultiRegTypeToVar(GenTreePtr op, CORINFO_CLASS_HANDLE hClass)
14928 {
14929     unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return."));
14930     impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_NONE);
14931     GenTreePtr ret = gtNewLclvNode(tmpNum, op->gtType);
14932
14933     // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
14934     ret->gtFlags |= GTF_DONT_CSE;
14935
14936     assert(IsMultiRegReturnedType(hClass));
14937
14938     // Mark the var so that fields are not promoted and stay together.
14939     lvaTable[tmpNum].lvIsMultiRegRet = true;
14940
14941     return ret;
14942 }
14943 #endif // FEATURE_MULTIREG_RET
14944
14945 // Do the import for a return instruction.
14946 // Returns false if inlining was aborted.
14947 // opcode can be CEE_RET, or a call opcode in the case of a tail.call
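// A hypothetical illustration: for the IL sequence 'tail. call void C::M()' followed
// by 'ret', this method is reached with 'opcode' still set to the call opcode and
// PREFIX_TAILCALL in prefixFlags; the tailcall handling near the end of this function
// then rewrites 'opcode' to CEE_RET.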
14948 bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE& opcode)
14949 {
14950     if (tiVerificationNeeded)
14951     {
14952         verVerifyThisPtrInitialised();
14953
14954         unsigned expectedStack = 0;
14955         if (info.compRetType != TYP_VOID)
14956         {
14957             typeInfo tiVal = impStackTop().seTypeInfo;
14958             typeInfo tiDeclared =
14959                 verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
14960
14961             Verify(!verIsByRefLike(tiDeclared) || verIsSafeToReturnByRef(tiVal), "byref return");
14962
14963             Verify(tiCompatibleWith(tiVal, tiDeclared.NormaliseForStack(), true), "type mismatch");
14964             expectedStack = 1;
14965         }
14966         Verify(verCurrentState.esStackDepth == expectedStack, "stack non-empty on return");
14967     }
14968
14969     GenTree*             op2       = nullptr;
14970     GenTree*             op1       = nullptr;
14971     CORINFO_CLASS_HANDLE retClsHnd = nullptr;
14972
14973     if (info.compRetType != TYP_VOID)
14974     {
14975         StackEntry se = impPopStack(retClsHnd);
14976         op2           = se.val;
14977
14978         if (!compIsForInlining())
14979         {
14980             impBashVarAddrsToI(op2);
14981             op2 = impImplicitIorI4Cast(op2, info.compRetType);
14982             op2 = impImplicitR4orR8Cast(op2, info.compRetType);
14983             assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) ||
14984                       ((op2->TypeGet() == TYP_I_IMPL) && (info.compRetType == TYP_BYREF)) ||
14985                       ((op2->TypeGet() == TYP_BYREF) && (info.compRetType == TYP_I_IMPL)) ||
14986                       (varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) ||
14987                       (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType)));
14988
14989 #ifdef DEBUG
14990             if (opts.compGcChecks && info.compRetType == TYP_REF)
14991             {
14992                 // DDB 3483  : JIT Stress: early termination of GC ref's life time in exception code path
14993                 // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with
14994                 // one-return BB.
14995
14996                 assert(op2->gtType == TYP_REF);
14997
14998                 // confirm that the argument is a GC pointer (for debugging (GC stress))
14999                 GenTreeArgList* args = gtNewArgList(op2);
15000                 op2                  = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, 0, args);
15001
15002                 if (verbose)
15003                 {
15004                     printf("\ncompGcChecks tree:\n");
15005                     gtDispTree(op2);
15006                 }
15007             }
15008 #endif
15009         }
15010         else
15011         {
15012             // inlinee's stack should be empty now.
15013             assert(verCurrentState.esStackDepth == 0);
15014
15015 #ifdef DEBUG
15016             if (verbose)
15017             {
15018                 printf("\n\n    Inlinee Return expression (before normalization)  =>\n");
15019                 gtDispTree(op2);
15020             }
15021 #endif
15022
15023             // Make sure the type matches the original call.
15024
15025             var_types returnType       = genActualType(op2->gtType);
15026             var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType;
15027             if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT))
15028             {
15029                 originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass);
15030             }
15031
15032             if (returnType != originalCallType)
15033             {
15034                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH);
15035                 return false;
15036             }
15037
15038             // Below, we are going to set impInlineInfo->retExpr to the tree with the return
15039             // expression. At this point, retExpr could already be set if there are multiple
15040             // return blocks (meaning lvaInlineeReturnSpillTemp != BAD_VAR_NUM) and one of
15041             // the other blocks already set it. If there is only a single return block,
15042             // retExpr shouldn't be set. However, this is not true if we reimport a block
15043             // with a return. In that case, retExpr will be set, then the block will be
15044             // reimported, but retExpr won't get cleared as part of setting the block to
15045             // be reimported. The reimported retExpr value should be the same, so even if
15046             // we don't unconditionally overwrite it, it shouldn't matter.
15047             if (info.compRetNativeType != TYP_STRUCT)
15048             {
15049                 // compRetNativeType is not TYP_STRUCT.
15050                 // This implies it could be either a scalar type or SIMD vector type or
15051                 // a struct type that can be normalized to a scalar type.
15052
15053                 if (varTypeIsStruct(info.compRetType))
15054                 {
15055                     noway_assert(info.compRetBuffArg == BAD_VAR_NUM);
15056                     // adjust the type away from struct to integral
15057                     // and no normalizing
15058                     op2 = impFixupStructReturnType(op2, retClsHnd);
15059                 }
15060                 else
15061                 {
15062                     // Do we have to normalize?
15063                     var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType);
15064                     if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) &&
15065                         fgCastNeeded(op2, fncRealRetType))
15066                     {
15067                         // Small-typed return values are normalized by the callee
15068                         op2 = gtNewCastNode(TYP_INT, op2, fncRealRetType);
15069                     }
15070                 }
15071
15072                 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15073                 {
15074                     assert(info.compRetNativeType != TYP_VOID &&
15075                            (fgMoreThanOneReturnBlock() || impInlineInfo->hasPinnedLocals));
15076
15077                     // This is a bit of a workaround...
15078                     // If we are inlining a call that returns a struct, where the actual "native" return type is
15079                     // not a struct (for example, the struct is composed of exactly one int, and the native
15080                     // return type is thus an int), and the inlinee has multiple return blocks (thus,
15081                     // lvaInlineeReturnSpillTemp is != BAD_VAR_NUM, and is the index of a local var that is set
15082                     // to the *native* return type), and at least one of the return blocks is the result of
15083                     // a call, then we have a problem. The situation is like this (from a failed test case):
15084                     //
15085                     // inliner:
15086                     //      // Note: valuetype plinq_devtests.LazyTests/LIX is a struct with only a single int
15087                     //      call !!0 [mscorlib]System.Threading.LazyInitializer::EnsureInitialized<valuetype
15088                     //      plinq_devtests.LazyTests/LIX>(!!0&, bool&, object&, class [mscorlib]System.Func`1<!!0>)
15089                     //
15090                     // inlinee:
15091                     //      ...
15092                     //      ldobj      !!T                 // this gets bashed to a GT_LCL_FLD, type TYP_INT
15093                     //      ret
15094                     //      ...
15095                     //      call       !!0 System.Threading.LazyInitializer::EnsureInitializedCore<!!0>(!!0&, bool&,
15096                     //      object&, class System.Func`1<!!0>)
15097                     //      ret
15098                     //
15099                     // In the code above, when we call impFixupStructReturnType(), we will change the op2 return type
15100                     // of the inlinee return node, but we don't do that for GT_CALL nodes, which we delay until
15101                     // morphing when we call fgFixupStructReturn(). We do this, apparently, to handle nested
15102                     // inlining properly by leaving the correct type on the GT_CALL node through importing.
15103                     //
15104                     // To fix this, for this case, we temporarily change the GT_CALL node type to the
15105                     // native return type, which is what it will be set to eventually. We generate the
15106                     // assignment to the return temp, using the correct type, and then restore the GT_CALL
15107                     // node type. During morphing, the GT_CALL will get the correct, final, native return type.
15108
15109                     bool restoreType = false;
15110                     if ((op2->OperGet() == GT_CALL) && (info.compRetType == TYP_STRUCT))
15111                     {
15112                         noway_assert(op2->TypeGet() == TYP_STRUCT);
15113                         op2->gtType = info.compRetNativeType;
15114                         restoreType = true;
15115                     }
15116
15117                     impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15118                                      (unsigned)CHECK_SPILL_ALL);
15119
15120                     GenTreePtr tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, op2->TypeGet());
15121
15122                     if (restoreType)
15123                     {
15124                         op2->gtType = TYP_STRUCT; // restore it to what it was
15125                     }
15126
15127                     op2 = tmpOp2;
15128
15129 #ifdef DEBUG
15130                     if (impInlineInfo->retExpr)
15131                     {
15132                         // Some other block(s) have seen the CEE_RET first.
15133                         // Better they spilled to the same temp.
15134                         assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR);
15135                         assert(impInlineInfo->retExpr->gtLclVarCommon.gtLclNum == op2->gtLclVarCommon.gtLclNum);
15136                     }
15137 #endif
15138                 }
15139
15140 #ifdef DEBUG
15141                 if (verbose)
15142                 {
15143                     printf("\n\n    Inlinee Return expression (after normalization) =>\n");
15144                     gtDispTree(op2);
15145                 }
15146 #endif
15147
15148                 // Report the return expression
15149                 impInlineInfo->retExpr = op2;
15150             }
15151             else
15152             {
15153                 // compRetNativeType is TYP_STRUCT.
15154                 // This implies that the struct is returned via a RetBuf arg or a multi-reg struct return.
15155
15156                 GenTreePtr iciCall = impInlineInfo->iciCall;
15157                 assert(iciCall->gtOper == GT_CALL);
15158
15159                 // Assign the inlinee return into a spill temp.
15160                 // spill temp only exists if there are multiple return points
15161                 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15162                 {
15163                     // in this case we have to insert multiple struct copies to the temp
15164                     // and the retexpr is just the temp.
15165                     assert(info.compRetNativeType != TYP_VOID);
15166                     assert(fgMoreThanOneReturnBlock() || impInlineInfo->hasPinnedLocals);
15167
15168                     impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15169                                      (unsigned)CHECK_SPILL_ALL);
15170                 }
15171
15172 #if defined(_TARGET_ARM_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15173 #if defined(_TARGET_ARM_)
15174                 // TODO-ARM64-NYI: HFA
15175                 // TODO-AMD64-Unix and TODO-ARM: once the ARM64 functionality is implemented, the
15176                 // next ifdefs could be refactored into a single method with the ifdef inside.
15177                 if (IsHfa(retClsHnd))
15178                 {
15179 // Same as !IsHfa but just don't bother with impAssignStructPtr.
15180 #else  // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15181                 ReturnTypeDesc retTypeDesc;
15182                 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15183                 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
15184
15185                 if (retRegCount != 0)
15186                 {
15187                     // If single eightbyte, the return type would have been normalized and there won't be a temp var.
15188                     // This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes -
15189                     // max allowed.)
15190                     assert(retRegCount == MAX_RET_REG_COUNT);
15191                     // Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr.
15192                     CLANG_FORMAT_COMMENT_ANCHOR;
15193 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15194
15195                     if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15196                     {
15197                         if (!impInlineInfo->retExpr)
15198                         {
15199 #if defined(_TARGET_ARM_)
15200                             impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType);
15201 #else  // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15202                             // The inlinee compiler has figured out the type of the temp already. Use it here.
15203                             impInlineInfo->retExpr =
15204                                 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
15205 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15206                         }
15207                     }
15208                     else
15209                     {
15210                         impInlineInfo->retExpr = op2;
15211                     }
15212                 }
15213                 else
15214 #elif defined(_TARGET_ARM64_)
15215                 ReturnTypeDesc retTypeDesc;
15216                 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15217                 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
15218
15219                 if (retRegCount != 0)
15220                 {
15221                     assert(!iciCall->AsCall()->HasRetBufArg());
15222                     assert(retRegCount >= 2);
15223                     if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15224                     {
15225                         if (!impInlineInfo->retExpr)
15226                         {
15227                             // The inlinee compiler has figured out the type of the temp already. Use it here.
15228                             impInlineInfo->retExpr =
15229                                 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
15230                         }
15231                     }
15232                     else
15233                     {
15234                         impInlineInfo->retExpr = op2;
15235                     }
15236                 }
15237                 else
15238 #endif // defined(_TARGET_ARM64_)
15239                 {
15240                     assert(iciCall->AsCall()->HasRetBufArg());
15241                     GenTreePtr dest = gtCloneExpr(iciCall->gtCall.gtCallArgs->gtOp.gtOp1);
15242                     // spill temp only exists if there are multiple return points
15243                     if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15244                     {
15245                         // if this is the first return we have seen set the retExpr
15246                         if (!impInlineInfo->retExpr)
15247                         {
15248                             impInlineInfo->retExpr =
15249                                 impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType),
15250                                                    retClsHnd, (unsigned)CHECK_SPILL_ALL);
15251                         }
15252                     }
15253                     else
15254                     {
15255                         impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
15256                     }
15257                 }
15258             }
15259         }
15260     }
15261
15262     if (compIsForInlining())
15263     {
15264         return true;
15265     }
15266
15267     if (info.compRetType == TYP_VOID)
15268     {
15269         // return void
15270         op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
15271     }
15272     else if (info.compRetBuffArg != BAD_VAR_NUM)
15273     {
15274         // Assign value to return buff (first param)
15275         GenTreePtr retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF, impCurStmtOffs);
15276
15277         op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
15278         impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
15279
15280         // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX).
15281         CLANG_FORMAT_COMMENT_ANCHOR;
15282
15283 #if defined(_TARGET_AMD64_)
15284
15285         // The x64 (System V and Win64) calling conventions require the implicit
15286         // return buffer to be returned explicitly (in RAX).
15287         // Change the return type to be BYREF.
15288         op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
15289 #else  // !defined(_TARGET_AMD64_)
15290         // On non-AMD64 targets the profiler hook requires the implicit RetBuf to be returned explicitly (in RAX).
15291         // In that case the return value of the function is changed to BYREF.
15292         // If the profiler hook is not needed, the return type of the function is TYP_VOID.
15293         if (compIsProfilerHookNeeded())
15294         {
15295             op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
15296         }
15297         else
15298         {
15299             // return void
15300             op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
15301         }
15302 #endif // !defined(_TARGET_AMD64_)
15303     }
15304     else if (varTypeIsStruct(info.compRetType))
15305     {
15306 #if !FEATURE_MULTIREG_RET
15307         // For both ARM architectures the HFA native types are maintained as structs.
15308         // Also on System V AMD64, multireg struct returns are left as structs.
15309         noway_assert(info.compRetNativeType != TYP_STRUCT);
15310 #endif
15311         op2 = impFixupStructReturnType(op2, retClsHnd);
15312         // return op2
15313         op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetNativeType), op2);
15314     }
15315     else
15316     {
15317         // return op2
15318         op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2);
15319     }
15320
15321     // We must have imported a tailcall and jumped to RET
15322     if (prefixFlags & PREFIX_TAILCALL)
15323     {
15324 #ifndef _TARGET_AMD64_
15325         // Jit64 compat:
15326         // This cannot be asserted on Amd64 since we permit the following IL pattern:
15327         //      tail.call
15328         //      pop
15329         //      ret
15330         assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode));
15331 #endif
15332
15333         opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES
15334
15335         // impImportCall() would have already appended TYP_VOID calls
15336         if (info.compRetType == TYP_VOID)
15337         {
15338             return true;
15339         }
15340     }
15341
15342     impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
15343 #ifdef DEBUG
15344     // Remember at which BC offset the tree was finished
15345     impNoteLastILoffs();
15346 #endif
15347     return true;
15348 }
15349
15350 /*****************************************************************************
15351  *  Mark the block as unimported.
15352  *  Note that the caller is responsible for calling impImportBlockPending(),
15353  *  with the appropriate stack-state
15354  */
15355
15356 inline void Compiler::impReimportMarkBlock(BasicBlock* block)
15357 {
15358 #ifdef DEBUG
15359     if (verbose && (block->bbFlags & BBF_IMPORTED))
15360     {
15361         printf("\nBB%02u will be reimported\n", block->bbNum);
15362     }
15363 #endif
15364
15365     block->bbFlags &= ~BBF_IMPORTED;
15366 }
15367
15368 /*****************************************************************************
15369  *  Mark the successors of the given block as unimported.
15370  *  Note that the caller is responsible for calling impImportBlockPending()
15371  *  for all the successors, with the appropriate stack-state.
15372  */
15373
15374 void Compiler::impReimportMarkSuccessors(BasicBlock* block)
15375 {
15376     for (unsigned i = 0; i < block->NumSucc(); i++)
15377     {
15378         impReimportMarkBlock(block->GetSucc(i));
15379     }
15380 }
15381
15382 /*****************************************************************************
15383  *
15384  *  Exception filter wrapper that handles only the verification exception
15385  *  code; all other exceptions continue the search.
15386  */
15387
15388 LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
15389 {
15390     if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION)
15391     {
15392         return EXCEPTION_EXECUTE_HANDLER;
15393     }
15394
15395     return EXCEPTION_CONTINUE_SEARCH;
15396 }
15397
15398 void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
15399 {
15400     assert(block->hasTryIndex());
15401     assert(!compIsForInlining());
15402
15403     unsigned  tryIndex = block->getTryIndex();
15404     EHblkDsc* HBtab    = ehGetDsc(tryIndex);
15405
15406     if (isTryStart)
15407     {
15408         assert(block->bbFlags & BBF_TRY_BEG);
15409
15410         // The Stack must be empty
15411         //
15412         if (block->bbStkDepth != 0)
15413         {
15414             BADCODE("Evaluation stack must be empty on entry into a try block");
15415         }
15416     }
15417
15418     // Save the stack contents; we'll need to restore them later
15419     //
15420     SavedStack blockState;
15421     impSaveStackState(&blockState, false);
15422
15423     while (HBtab != nullptr)
15424     {
15425         if (isTryStart)
15426         {
15427             // Are we verifying that an instance constructor properly initializes its 'this' pointer once?
15428             //  We do not allow the 'this' pointer to be uninitialized when entering most kinds of try regions
15429             //
15430             if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
15431             {
15432                 // We trigger an invalid program exception here unless we have a try/fault region.
15433                 //
15434                 if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter())
15435                 {
15436                     BADCODE(
15437                         "The 'this' pointer of an instance constructor is not initialized upon entry to a try region");
15438                 }
15439                 else
15440                 {
15441                     // Allow a try/fault region to proceed.
15442                     assert(HBtab->HasFaultHandler());
15443                 }
15444             }
15445
15446             /* Recursively process the handler block */
15447             BasicBlock* hndBegBB = HBtab->ebdHndBeg;
15448
15449             //  Construct the proper verification stack state
15450             //   either empty or one that contains just
15451             //   the Exception Object that we are dealing with
15452             //
15453             verCurrentState.esStackDepth = 0;
15454
15455             if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp))
15456             {
15457                 CORINFO_CLASS_HANDLE clsHnd;
15458
15459                 if (HBtab->HasFilter())
15460                 {
15461                     clsHnd = impGetObjectClass();
15462                 }
15463                 else
15464                 {
15465                     CORINFO_RESOLVED_TOKEN resolvedToken;
15466
15467                     resolvedToken.tokenContext = impTokenLookupContextHandle;
15468                     resolvedToken.tokenScope   = info.compScopeHnd;
15469                     resolvedToken.token        = HBtab->ebdTyp;
15470                     resolvedToken.tokenType    = CORINFO_TOKENKIND_Class;
15471                     info.compCompHnd->resolveToken(&resolvedToken);
15472
15473                     clsHnd = resolvedToken.hClass;
15474                 }
15475
15476                 // push catch arg on the stack, spill to a temp if necessary
15477                 // Note: can update HBtab->ebdHndBeg!
15478                 hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd);
15479             }
15480
15481             // Queue up the handler for importing
15482             //
15483             impImportBlockPending(hndBegBB);
15484
15485             if (HBtab->HasFilter())
15486             {
15487                 /* @VERIFICATION : Ideally the end of filter state should get
15488                    propagated to the catch handler, this is an incompleteness,
15489                    but is not a security/compliance issue, since the only
15490                    interesting state is the 'thisInit' state.
15491                    */
15492
15493                 verCurrentState.esStackDepth = 0;
15494
15495                 BasicBlock* filterBB = HBtab->ebdFilter;
15496
15497                 // push catch arg on the stack, spill to a temp if necessary
15498                 // Note: can update HBtab->ebdFilter!
15499                 filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass());
15500
15501                 impImportBlockPending(filterBB);
15502             }
15503         }
15504         else if (verTrackObjCtorInitState && HBtab->HasFaultHandler())
15505         {
15506             /* Recursively process the handler block */
15507
15508             verCurrentState.esStackDepth = 0;
15509
15510             // Queue up the fault handler for importing
15511             //
15512             impImportBlockPending(HBtab->ebdHndBeg);
15513         }
15514
15515         // Now process our enclosing try index (if any)
15516         //
15517         tryIndex = HBtab->ebdEnclosingTryIndex;
15518         if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
15519         {
15520             HBtab = nullptr;
15521         }
15522         else
15523         {
15524             HBtab = ehGetDsc(tryIndex);
15525         }
15526     }
15527
15528     // Restore the stack contents
15529     impRestoreStackState(&blockState);
15530 }
15531
15532 //***************************************************************
15533 // Import the instructions for the given basic block.  Perform
15534 // verification, throwing an exception on failure.  Push any successor blocks that are enabled for the first
15535 // time, or whose verification pre-state is changed.
15536
15537 #ifdef _PREFAST_
15538 #pragma warning(push)
15539 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
15540 #endif
15541 void Compiler::impImportBlock(BasicBlock* block)
15542 {
15543     // BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to
15544     // handle them specially. In particular, there is no IL to import for them, but we do need
15545     // to mark them as imported and put their successors on the pending import list.
15546     if (block->bbFlags & BBF_INTERNAL)
15547     {
15548         JITDUMP("Marking BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", block->bbNum);
15549         block->bbFlags |= BBF_IMPORTED;
15550
15551         for (unsigned i = 0; i < block->NumSucc(); i++)
15552         {
15553             impImportBlockPending(block->GetSucc(i));
15554         }
15555
15556         return;
15557     }
15558
15559     bool markImport;
15560
15561     assert(block);
15562
15563     /* Make the block globally available */
15564
15565     compCurBB = block;
15566
15567 #ifdef DEBUG
15568     /* Initialize the debug variables */
15569     impCurOpcName = "unknown";
15570     impCurOpcOffs = block->bbCodeOffs;
15571 #endif
15572
15573     /* Set the current stack state to the merged result */
15574     verResetCurrentState(block, &verCurrentState);
15575
15576     /* Now walk the code and import the IL into GenTrees */
15577
15578     struct FilterVerificationExceptionsParam
15579     {
15580         Compiler*   pThis;
15581         BasicBlock* block;
15582     };
15583     FilterVerificationExceptionsParam param;
15584
15585     param.pThis = this;
15586     param.block = block;
15587
15588     PAL_TRY(FilterVerificationExceptionsParam*, pParam, &param)
15589     {
15590         /* @VERIFICATION : For now, the only state propagation from try
15591            to its handler is "thisInit" state (stack is empty at start of try).
15592            In general, for state that we track in verification, we need to
15593            model the possibility that an exception might happen at any IL
15594            instruction, so we really need to merge all states that obtain
15595            between IL instructions in a try block into the start states of
15596            all handlers.
15597
15598            However, we do not allow the 'this' pointer to be uninitialized when
15599            entering most kinds of try regions (only try/fault are allowed to have
15600            an uninitialized this pointer on entry to the try)
15601
15602            Fortunately, the stack is thrown away when an exception
15603            leads to a handler, so we don't have to worry about that.
15604            We DO, however, have to worry about the "thisInit" state.
15605            But only for the try/fault case.
15606
15607            The only allowed transition is from TIS_Uninit to TIS_Init.
15608
15609            So for a try/fault region for the fault handler block
15610            we will merge the start state of the try begin
15611            and the post-state of each block that is part of this try region
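
           A hypothetical illustration: in an instance constructor whose body is

               .try   { call instance void Base::.ctor(...)  ...  leave DONE }
               fault  { ...  endfault }

           the try begins with thisInit == TIS_Uninit and becomes TIS_Init after the
           base constructor call, so the start state of the fault handler must account
           for both possibilities.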
15612         */
15613
15614         // merge the start state of the try begin
15615         //
15616         if (pParam->block->bbFlags & BBF_TRY_BEG)
15617         {
15618             pParam->pThis->impVerifyEHBlock(pParam->block, true);
15619         }
15620
15621         pParam->pThis->impImportBlockCode(pParam->block);
15622
15623         // As discussed above:
15624         // merge the post-state of each block that is part of this try region
15625         //
15626         if (pParam->block->hasTryIndex())
15627         {
15628             pParam->pThis->impVerifyEHBlock(pParam->block, false);
15629         }
15630     }
15631     PAL_EXCEPT_FILTER(FilterVerificationExceptions)
15632     {
15633         verHandleVerificationFailure(block DEBUGARG(false));
15634     }
15635     PAL_ENDTRY
15636
15637     if (compDonotInline())
15638     {
15639         return;
15640     }
15641
15642     assert(!compDonotInline());
15643
15644     markImport = false;
15645
15646 SPILLSTACK:
15647
15648     unsigned    baseTmp             = NO_BASE_TMP; // input temps assigned to successor blocks
15649     bool        reimportSpillClique = false;
15650     BasicBlock* tgtBlock            = nullptr;
15651
15652     /* If the stack is non-empty, we might have to spill its contents */
15653
15654     if (verCurrentState.esStackDepth != 0)
15655     {
15656         impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something
15657                                   // on the stack, its lifetime is hard to determine, simply
15658                                   // don't reuse such temps.
15659
15660         GenTreePtr addStmt = nullptr;
15661
15662         /* Do the successors of 'block' have any other predecessors ?
15663            We do not want to do some of the optimizations related to multiRef
15664            if we can reimport blocks */
15665
15666         unsigned multRef = impCanReimport ? unsigned(~0) : 0;
15667
15668         switch (block->bbJumpKind)
15669         {
15670             case BBJ_COND:
15671
15672                 /* Temporarily remove the 'jtrue' from the end of the tree list */
15673
15674                 assert(impTreeLast);
15675                 assert(impTreeLast->gtOper == GT_STMT);
15676                 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_JTRUE);
15677
15678                 addStmt     = impTreeLast;
15679                 impTreeLast = impTreeLast->gtPrev;
15680
15681                 /* Note if the next block has more than one ancestor */
15682
15683                 multRef |= block->bbNext->bbRefs;
15684
15685                 /* Does the next block have temps assigned? */
15686
15687                 baseTmp  = block->bbNext->bbStkTempsIn;
15688                 tgtBlock = block->bbNext;
15689
15690                 if (baseTmp != NO_BASE_TMP)
15691                 {
15692                     break;
15693                 }
15694
15695                 /* Try the target of the jump then */
15696
15697                 multRef |= block->bbJumpDest->bbRefs;
15698                 baseTmp  = block->bbJumpDest->bbStkTempsIn;
15699                 tgtBlock = block->bbJumpDest;
15700                 break;
15701
15702             case BBJ_ALWAYS:
15703                 multRef |= block->bbJumpDest->bbRefs;
15704                 baseTmp  = block->bbJumpDest->bbStkTempsIn;
15705                 tgtBlock = block->bbJumpDest;
15706                 break;
15707
15708             case BBJ_NONE:
15709                 multRef |= block->bbNext->bbRefs;
15710                 baseTmp  = block->bbNext->bbStkTempsIn;
15711                 tgtBlock = block->bbNext;
15712                 break;
15713
15714             case BBJ_SWITCH:
15715
15716                 BasicBlock** jmpTab;
15717                 unsigned     jmpCnt;
15718
15719                 /* Temporarily remove the GT_SWITCH from the end of the tree list */
15720
15721                 assert(impTreeLast);
15722                 assert(impTreeLast->gtOper == GT_STMT);
15723                 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_SWITCH);
15724
15725                 addStmt     = impTreeLast;
15726                 impTreeLast = impTreeLast->gtPrev;
15727
15728                 jmpCnt = block->bbJumpSwt->bbsCount;
15729                 jmpTab = block->bbJumpSwt->bbsDstTab;
15730
15731                 do
15732                 {
15733                     tgtBlock = (*jmpTab);
15734
15735                     multRef |= tgtBlock->bbRefs;
15736
15737                     // Thanks to spill cliques, we should have assigned all or none
15738                     assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn));
15739                     baseTmp = tgtBlock->bbStkTempsIn;
15740                     if (multRef > 1)
15741                     {
15742                         break;
15743                     }
15744                 } while (++jmpTab, --jmpCnt);
15745
15746                 break;
15747
15748             case BBJ_CALLFINALLY:
15749             case BBJ_EHCATCHRET:
15750             case BBJ_RETURN:
15751             case BBJ_EHFINALLYRET:
15752             case BBJ_EHFILTERRET:
15753             case BBJ_THROW:
15754                 NO_WAY("can't have 'unreached' end of BB with non-empty stack");
15755                 break;
15756
15757             default:
15758                 noway_assert(!"Unexpected bbJumpKind");
15759                 break;
15760         }
15761
15762         assert(multRef >= 1);
15763
15764         /* Do we have a base temp number? */
15765
15766         bool newTemps = (baseTmp == NO_BASE_TMP);
15767
15768         if (newTemps)
15769         {
15770             /* Grab enough temps for the whole stack */
15771             baseTmp = impGetSpillTmpBase(block);
15772         }
15773
15774         /* Spill all stack entries into temps */
15775         unsigned level, tempNum;
15776
15777         JITDUMP("\nSpilling stack entries into temps\n");
15778         for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++)
15779         {
15780             GenTreePtr tree = verCurrentState.esStack[level].val;
15781
15782             /* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from
15783                the other. This should merge to a byref in unverifiable code.
15784                However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the
15785                successor would be imported assuming there was a TYP_I_IMPL on
15786                the stack. Thus the value would not get GC-tracked. Hence,
15787                change the temp to TYP_BYREF and reimport the successors.
15788                Note: We should only allow this in unverifiable code.
15789             */
15790             if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL && !verNeedsVerification())
15791             {
15792                 lvaTable[tempNum].lvType = TYP_BYREF;
15793                 impReimportMarkSuccessors(block);
15794                 markImport = true;
15795             }
15796
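            // On 64-bit targets, a similar mismatch can arise between TYP_INT and TYP_I_IMPL.
            // As an illustrative (hypothetical) example: one predecessor could push "ldc.i4.0"
            // (an int32) while another pushes a native int (e.g. the result of "conv.i"); both
            // flow to the same successor, so below we either widen the shared spill temp to
            // TYP_I_IMPL and re-import, or insert a cast so this block matches the clique.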
15797 #ifdef _TARGET_64BIT_
15798             if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT)
15799             {
15800                 if (tiVerificationNeeded && tgtBlock->bbEntryState != nullptr &&
15801                     (tgtBlock->bbFlags & BBF_FAILED_VERIFICATION) == 0)
15802                 {
15803                     // Merge the current state into the entry state of block;
15804                     // the call to verMergeEntryStates must have changed
15805                     // the entry state of the block by merging the int local var
15806                     // and the native-int stack entry.
15807                     bool changed = false;
15808                     if (verMergeEntryStates(tgtBlock, &changed))
15809                     {
15810                         impRetypeEntryStateTemps(tgtBlock);
15811                         impReimportBlockPending(tgtBlock);
15812                         assert(changed);
15813                     }
15814                     else
15815                     {
15816                         tgtBlock->bbFlags |= BBF_FAILED_VERIFICATION;
15817                         break;
15818                     }
15819                 }
15820
15821                 // Some other block in the spill clique set this to "int", but now we have "native int".
15822                 // Change the type and go back to re-import any blocks that used the wrong type.
15823                 lvaTable[tempNum].lvType = TYP_I_IMPL;
15824                 reimportSpillClique      = true;
15825             }
15826             else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL)
15827             {
15828                 // Spill clique has decided this should be "native int", but this block only pushes an "int".
15829                 // Insert a sign-extension to "native int" so we match the clique.
15830                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
15831             }
15832
15833             // Consider the case where one branch leaves a 'byref' on the stack and the other leaves
15834             // an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same
15835             // size. JIT64 managed to make this work on 64-bit. For compatibility, we support the JIT64
15836             // behavior instead of asserting and then generating bad code (where we save/restore the
15837             // low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been
15838             // imported already, we need to change the type of the local and reimport the spill clique.
15839             // If the 'byref' side has been imported already, we insert a cast from int to 'native int'
15840             // to match the 'byref' size.
15841             if (!tiVerificationNeeded)
15842             {
15843                 if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT)
15844                 {
15845                     // Some other block in the spill clique set this to "int", but now we have "byref".
15846                     // Change the type and go back to re-import any blocks that used the wrong type.
15847                     lvaTable[tempNum].lvType = TYP_BYREF;
15848                     reimportSpillClique      = true;
15849                 }
15850                 else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF)
15851                 {
15852                     // Spill clique has decided this should be "byref", but this block only pushes an "int".
15853                     // Insert a sign-extension to "native int" so we match the clique size.
15854                     verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
15855                 }
15856             }
15857 #endif // _TARGET_64BIT_
15858
15859 #if FEATURE_X87_DOUBLES
15860             // The X87 stack doesn't differentiate between float/double,
15861             // so promoting is no big deal.
15862             // For everybody else, keep it as float until we have a collision and then promote,
15863             // just like for x64's TYP_INT<->TYP_I_IMPL.
15864
15865             if (multRef > 1 && tree->gtType == TYP_FLOAT)
15866             {
15867                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
15868             }
15869
15870 #else // !FEATURE_X87_DOUBLES
15871
15872             if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT)
15873             {
15874                 // Some other block in the spill clique set this to "float", but now we have "double".
15875                 // Change the type and go back to re-import any blocks that used the wrong type.
15876                 lvaTable[tempNum].lvType = TYP_DOUBLE;
15877                 reimportSpillClique      = true;
15878             }
15879             else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE)
15880             {
15881                 // Spill clique has decided this should be "double", but this block only pushes a "float".
15882                 // Insert a cast to "double" so we match the clique.
15883                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
15884             }
15885
15886 #endif // FEATURE_X87_DOUBLES
15887
15888             /* If addStmt has a reference to tempNum (can only happen if we
15889                are spilling to the temps already used by a previous block),
15890                we need to spill addStmt */
15891
15892             if (addStmt && !newTemps && gtHasRef(addStmt->gtStmt.gtStmtExpr, tempNum, false))
15893             {
15894                 GenTreePtr addTree = addStmt->gtStmt.gtStmtExpr;
15895
15896                 if (addTree->gtOper == GT_JTRUE)
15897                 {
15898                     GenTreePtr relOp = addTree->gtOp.gtOp1;
15899                     assert(relOp->OperIsCompare());
15900
15901                     var_types type = genActualType(relOp->gtOp.gtOp1->TypeGet());
15902
15903                     if (gtHasRef(relOp->gtOp.gtOp1, tempNum, false))
15904                     {
15905                         unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1"));
15906                         impAssignTempGen(temp, relOp->gtOp.gtOp1, level);
15907                         type              = genActualType(lvaTable[temp].TypeGet());
15908                         relOp->gtOp.gtOp1 = gtNewLclvNode(temp, type);
15909                     }
15910
15911                     if (gtHasRef(relOp->gtOp.gtOp2, tempNum, false))
15912                     {
15913                         unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2"));
15914                         impAssignTempGen(temp, relOp->gtOp.gtOp2, level);
15915                         type              = genActualType(lvaTable[temp].TypeGet());
15916                         relOp->gtOp.gtOp2 = gtNewLclvNode(temp, type);
15917                     }
15918                 }
15919                 else
15920                 {
15921                     assert(addTree->gtOper == GT_SWITCH && genActualType(addTree->gtOp.gtOp1->gtType) == TYP_I_IMPL);
15922
15923                     unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH"));
15924                     impAssignTempGen(temp, addTree->gtOp.gtOp1, level);
15925                     addTree->gtOp.gtOp1 = gtNewLclvNode(temp, TYP_I_IMPL);
15926                 }
15927             }
15928
15929             /* Spill the stack entry, and replace with the temp */
15930
15931             if (!impSpillStackEntry(level, tempNum
15932 #ifdef DEBUG
15933                                     ,
15934                                     true, "Spill Stack Entry"
15935 #endif
15936                                     ))
15937             {
15938                 if (markImport)
15939                 {
15940                     BADCODE("bad stack state");
15941                 }
15942
15943                 // Oops. Something went wrong when spilling. Bad code.
15944                 verHandleVerificationFailure(block DEBUGARG(true));
15945
15946                 goto SPILLSTACK;
15947             }
15948         }
15949
15950         /* Put back the 'jtrue'/'switch' if we removed it earlier */
15951
15952         if (addStmt)
15953         {
15954             impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE);
15955         }
15956     }
15957
15958     // Some of the append/spill logic works on compCurBB
15959
15960     assert(compCurBB == block);
15961
15962     /* Save the tree list in the block */
15963     impEndTreeList(block);
15964
15965     // impEndTreeList sets BBF_IMPORTED on the block
15966     // We do *NOT* want to set it later than this because
15967     // impReimportSpillClique might clear it if this block is both a
15968     // predecessor and successor in the current spill clique
15969     assert(block->bbFlags & BBF_IMPORTED);
15970
15971     // If we had an int/native int or float/double collision, we need to re-import
15972     if (reimportSpillClique)
15973     {
15974         // This will re-import all the successors of block (as well as each of their predecessors)
15975         impReimportSpillClique(block);
15976
15977         // For blocks that haven't been imported yet, we still need to mark them as pending import.
15978         for (unsigned i = 0; i < block->NumSucc(); i++)
15979         {
15980             BasicBlock* succ = block->GetSucc(i);
15981             if ((succ->bbFlags & BBF_IMPORTED) == 0)
15982             {
15983                 impImportBlockPending(succ);
15984             }
15985         }
15986     }
15987     else // the normal case
15988     {
15989         // otherwise just import the successors of block
15990
15991         /* Does this block jump to any other blocks? */
15992         for (unsigned i = 0; i < block->NumSucc(); i++)
15993         {
15994             impImportBlockPending(block->GetSucc(i));
15995         }
15996     }
15997 }
15998 #ifdef _PREFAST_
15999 #pragma warning(pop)
16000 #endif
16001
16002 /*****************************************************************************/
16003 //
16004 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16005 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
16006 // impPendingBlockMembers).  Merges the current verification state into the verification state of "block"
16007 // (its "pre-state").
16008
16009 void Compiler::impImportBlockPending(BasicBlock* block)
16010 {
16011 #ifdef DEBUG
16012     if (verbose)
16013     {
16014         printf("\nimpImportBlockPending for BB%02u\n", block->bbNum);
16015     }
16016 #endif
16017
16018     // We will add a block to the pending set if it has not already been imported (or needs to be re-imported),
16019     // or if it has, but merging in a predecessor's post-state changes the block's pre-state.
16020     // (When we're doing verification, we always attempt the merge to detect verification errors.)
16021
16022     // If the block has not been imported, add to pending set.
16023     bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0);
16024
16025     // Initialize bbEntryState just the first time we try to add this block to the pending list.
16026     // Just because bbEntryState is NULL doesn't mean the pre-state wasn't previously set;
16027     // we use NULL to represent the 'common' state to avoid a memory allocation.
16028     if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) &&
16029         (impGetPendingBlockMember(block) == 0))
16030     {
16031         verInitBBEntryState(block, &verCurrentState);
16032         assert(block->bbStkDepth == 0);
16033         block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth);
16034         assert(addToPending);
16035         assert(impGetPendingBlockMember(block) == 0);
16036     }
16037     else
16038     {
16039         // The stack should have the same height on entry to the block from all its predecessors.
16040         if (block->bbStkDepth != verCurrentState.esStackDepth)
16041         {
16042 #ifdef DEBUG
16043             char buffer[400];
16044             sprintf_s(buffer, sizeof(buffer),
16045                       "Block at offset %4.4x to %4.4x in %s entered with different stack depths.\n"
16046                       "Previous depth was %d, current depth is %d",
16047                       block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth,
16048                       verCurrentState.esStackDepth);
16049             buffer[400 - 1] = 0;
16050             NO_WAY(buffer);
16051 #else
16052             NO_WAY("Block entered with different stack depths");
16053 #endif
16054         }
16055
16056         // Additionally, if we need to verify, merge the verification state.
16057         if (tiVerificationNeeded)
16058         {
16059             // Merge the current state into the entry state of block; if this does not change the entry state
16060             // by merging, do not add the block to the pending-list.
16061             bool changed = false;
16062             if (!verMergeEntryStates(block, &changed))
16063             {
16064                 block->bbFlags |= BBF_FAILED_VERIFICATION;
16065                 addToPending = true; // We will pop it off, and check the flag set above.
16066             }
16067             else if (changed)
16068             {
16069                 addToPending = true;
16070
16071                 JITDUMP("Adding BB%02u to pending set due to new merge result\n", block->bbNum);
16072             }
16073         }
16074
16075         if (!addToPending)
16076         {
16077             return;
16078         }
16079
16080         if (block->bbStkDepth > 0)
16081         {
16082             // We need to fix the types of any spill temps that might have changed:
16083             //   int->native int, float->double, int->byref, etc.
16084             impRetypeEntryStateTemps(block);
16085         }
16086
16087         // OK, we must add to the pending list, if it's not already in it.
16088         if (impGetPendingBlockMember(block) != 0)
16089         {
16090             return;
16091         }
16092     }
16093
16094     // Get an entry to add to the pending list
16095
16096     PendingDsc* dsc;
16097
16098     if (impPendingFree)
16099     {
16100         // We can reuse one of the freed up dscs.
16101         dsc            = impPendingFree;
16102         impPendingFree = dsc->pdNext;
16103     }
16104     else
16105     {
16106         // We have to create a new dsc
16107         dsc = new (this, CMK_Unknown) PendingDsc;
16108     }
16109
16110     dsc->pdBB                 = block;
16111     dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth;
16112     dsc->pdThisPtrInit        = verCurrentState.thisInitialized;
16113
16114     // Save the stack trees for later
16115
16116     if (verCurrentState.esStackDepth)
16117     {
16118         impSaveStackState(&dsc->pdSavedStack, false);
16119     }
16120
16121     // Add the entry to the pending list
16122
16123     dsc->pdNext    = impPendingList;
16124     impPendingList = dsc;
16125     impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16126
16127     // Various assertions require us now to consider the block as not imported (at least for
16128     // the final time...)
16129     block->bbFlags &= ~BBF_IMPORTED;
16130
16131 #ifdef DEBUG
16132     if (verbose && 0)
16133     {
16134         printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16135     }
16136 #endif
16137 }
16138
16139 /*****************************************************************************/
16140 //
16141 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16142 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
16143 // impPendingBlockMembers).  Does *NOT* change the existing "pre-state" of the block.
16144
16145 void Compiler::impReimportBlockPending(BasicBlock* block)
16146 {
16147     JITDUMP("\nimpReimportBlockPending for BB%02u", block->bbNum);
16148
16149     assert(block->bbFlags & BBF_IMPORTED);
16150
16151     // OK, we must add to the pending list, if it's not already in it.
16152     if (impGetPendingBlockMember(block) != 0)
16153     {
16154         return;
16155     }
16156
16157     // Get an entry to add to the pending list
16158
16159     PendingDsc* dsc;
16160
16161     if (impPendingFree)
16162     {
16163         // We can reuse one of the freed up dscs.
16164         dsc            = impPendingFree;
16165         impPendingFree = dsc->pdNext;
16166     }
16167     else
16168     {
16169         // We have to create a new dsc
16170         dsc = new (this, CMK_ImpStack) PendingDsc;
16171     }
16172
16173     dsc->pdBB = block;
16174
16175     if (block->bbEntryState)
16176     {
16177         dsc->pdThisPtrInit        = block->bbEntryState->thisInitialized;
16178         dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth;
16179         dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack;
16180     }
16181     else
16182     {
16183         dsc->pdThisPtrInit        = TIS_Bottom;
16184         dsc->pdSavedStack.ssDepth = 0;
16185         dsc->pdSavedStack.ssTrees = nullptr;
16186     }
16187
16188     // Add the entry to the pending list
16189
16190     dsc->pdNext    = impPendingList;
16191     impPendingList = dsc;
16192     impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16193
16194     // Various assertions require us now to consider the block as not imported (at least for
16195     // the final time...)
16196     block->bbFlags &= ~BBF_IMPORTED;
16197
16198 #ifdef DEBUG
16199     if (verbose && 0)
16200     {
16201         printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16202     }
16203 #endif
16204 }
16205
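// BlockListNode::operator new: allocate a list node, reusing an entry from
// impBlockListNodeFreeList when one is available and falling back to compGetMem otherwise.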
16206 void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp)
16207 {
16208     if (comp->impBlockListNodeFreeList == nullptr)
16209     {
16210         return (BlockListNode*)comp->compGetMem(sizeof(BlockListNode), CMK_BasicBlock);
16211     }
16212     else
16213     {
16214         BlockListNode* res             = comp->impBlockListNodeFreeList;
16215         comp->impBlockListNodeFreeList = res->m_next;
16216         return res;
16217     }
16218 }
16219
16220 void Compiler::FreeBlockListNode(Compiler::BlockListNode* node)
16221 {
16222     node->m_next             = impBlockListNodeFreeList;
16223     impBlockListNodeFreeList = node;
16224 }
16225
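// impWalkSpillCliqueFromPred: compute the spill clique containing 'block' (viewed as a
// predecessor) by alternately adding successors of known predecessors and (cheap) predecessors
// of known successors until neither set grows. 'callback' is invoked once for each block per
// set (predecessor/successor) it is added to.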
16226 void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback)
16227 {
16228     bool toDo = true;
16229
16230     noway_assert(!fgComputePredsDone);
16231     if (!fgCheapPredsValid)
16232     {
16233         fgComputeCheapPreds();
16234     }
16235
16236     BlockListNode* succCliqueToDo = nullptr;
16237     BlockListNode* predCliqueToDo = new (this) BlockListNode(block);
16238     while (toDo)
16239     {
16240         toDo = false;
16241         // Look at the successors of every member of the predecessor to-do list.
16242         while (predCliqueToDo != nullptr)
16243         {
16244             BlockListNode* node = predCliqueToDo;
16245             predCliqueToDo      = node->m_next;
16246             BasicBlock* blk     = node->m_blk;
16247             FreeBlockListNode(node);
16248
16249             for (unsigned succNum = 0; succNum < blk->NumSucc(); succNum++)
16250             {
16251                 BasicBlock* succ = blk->GetSucc(succNum);
16252                 // If it's not already in the clique, add it, and also add it
16253                 // as a member of the successor "toDo" set.
16254                 if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0)
16255                 {
16256                     callback->Visit(SpillCliqueSucc, succ);
16257                     impSpillCliqueSetMember(SpillCliqueSucc, succ, 1);
16258                     succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo);
16259                     toDo           = true;
16260                 }
16261             }
16262         }
16263         // Look at the predecessors of every member of the successor to-do list.
16264         while (succCliqueToDo != nullptr)
16265         {
16266             BlockListNode* node = succCliqueToDo;
16267             succCliqueToDo      = node->m_next;
16268             BasicBlock* blk     = node->m_blk;
16269             FreeBlockListNode(node);
16270
16271             for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next)
16272             {
16273                 BasicBlock* predBlock = pred->block;
16274                 // If it's not already in the clique, add it, and also add it
16275                 // as a member of the predecessor "toDo" set.
16276                 if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0)
16277                 {
16278                     callback->Visit(SpillCliquePred, predBlock);
16279                     impSpillCliqueSetMember(SpillCliquePred, predBlock, 1);
16280                     predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo);
16281                     toDo           = true;
16282                 }
16283             }
16284         }
16285     }
16286
16287     // If this fails, it means we didn't walk the spill clique properly and somehow managed
16288     // to miss walking back to include the predecessor we started from.
16289     // The most likely cause: missing or out-of-date bbPreds.
16290     assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0);
16291 }
16292
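// SetSpillTempsBase::Visit: record the chosen base temp on each clique member, in bbStkTempsIn
// for members visited as successors and in bbStkTempsOut for members visited as predecessors.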
16293 void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
16294 {
16295     if (predOrSucc == SpillCliqueSucc)
16296     {
16297         assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor.
16298         blk->bbStkTempsIn = m_baseTmp;
16299     }
16300     else
16301     {
16302         assert(predOrSucc == SpillCliquePred);
16303         assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor.
16304         blk->bbStkTempsOut = m_baseTmp;
16305     }
16306 }
16307
16308 void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
16309 {
16310     // For Preds we could be a little smarter and just find the existing store
16311     // and re-type it/add a cast, but that is complicated and hopefully very rare, so
16312     // just re-import the whole block (just like we do for successors)
16313
16314     if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0))
16315     {
16316         // If we haven't imported this block and we're not going to (because it isn't on
16317         // the pending list) then just ignore it for now.
16318
16319         // This block has either never been imported (EntryState == NULL) or it failed
16320         // verification. Neither state requires us to force it to be imported now.
16321         assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION));
16322         return;
16323     }
16324
16325     // For successors we have a valid verCurrentState, so just mark them for reimport
16326     // the 'normal' way.
16327     // Unlike predecessors, we *DO* need to reimport the current block because the
16328     // initial import had the wrong entry state types.
16329     // Similarly, blocks that are currently on the pending list still need to call
16330     // impImportBlockPending to fix up their entry state.
16331     if (predOrSucc == SpillCliqueSucc)
16332     {
16333         m_pComp->impReimportMarkBlock(blk);
16334
16335         // Set the current stack state to that of the blk->bbEntryState
16336         m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState);
16337         assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry());
16338
16339         m_pComp->impImportBlockPending(blk);
16340     }
16341     else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0))
16342     {
16343         // As described above, we are only visiting predecessors so they can
16344         // add the appropriate casts. Since we have already done that for the current
16345         // block, it does not need to be reimported.
16346         // Nor do we need to reimport blocks that are still pending but not yet
16347         // imported.
16348         //
16349         // For predecessors, we have no state to seed the EntryState, so we just have
16350         // to assume the existing one is correct.
16351         // If the block is also a successor, it will get the EntryState properly
16352         // updated when it is visited as a successor in the above "if" block.
16353         assert(predOrSucc == SpillCliquePred);
16354         m_pComp->impReimportBlockPending(blk);
16355     }
16356 }
16357
16358 // Re-type the incoming lclVar nodes to match the varDsc.
16359 void Compiler::impRetypeEntryStateTemps(BasicBlock* blk)
16360 {
16361     if (blk->bbEntryState != nullptr)
16362     {
16363         EntryState* es = blk->bbEntryState;
16364         for (unsigned level = 0; level < es->esStackDepth; level++)
16365         {
16366             GenTreePtr tree = es->esStack[level].val;
16367             if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD))
16368             {
16369                 unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
16370                 noway_assert(lclNum < lvaCount);
16371                 LclVarDsc* varDsc              = lvaTable + lclNum;
16372                 es->esStack[level].val->gtType = varDsc->TypeGet();
16373             }
16374         }
16375     }
16376 }
16377
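// impGetSpillTmpBase: return the base temp number used to spill the stack on exit from 'block'.
// If the block's spill clique has not been assigned temps yet, grab enough temps for the whole
// stack and propagate the base to every member of the clique (via SetSpillTempsBase) so that
// all members agree on which temps carry the stack values.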
16378 unsigned Compiler::impGetSpillTmpBase(BasicBlock* block)
16379 {
16380     if (block->bbStkTempsOut != NO_BASE_TMP)
16381     {
16382         return block->bbStkTempsOut;
16383     }
16384
16385 #ifdef DEBUG
16386     if (verbose)
16387     {
16388         printf("\n*************** In impGetSpillTmpBase(BB%02u)\n", block->bbNum);
16389     }
16390 #endif // DEBUG
16391
16392     // Otherwise, choose one, and propagate to all members of the spill clique.
16393     // Grab enough temps for the whole stack.
16394     unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries"));
16395     SetSpillTempsBase callback(baseTmp);
16396
16397     // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor
16398     // to one spill clique, and similarly can only be the successor to one spill clique.
16399     impWalkSpillCliqueFromPred(block, &callback);
16400
16401     return baseTmp;
16402 }
16403
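// impReimportSpillClique: called when a spill temp shared by 'block's spill clique had to be
// re-typed (e.g. int -> native int); walks the clique again and queues its members for
// re-import so they pick up the new temp types.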
16404 void Compiler::impReimportSpillClique(BasicBlock* block)
16405 {
16406 #ifdef DEBUG
16407     if (verbose)
16408     {
16409         printf("\n*************** In impReimportSpillClique(BB%02u)\n", block->bbNum);
16410     }
16411 #endif // DEBUG
16412
16413     // If we get here, it is because this block is already part of a spill clique
16414     // and one predecessor had an outgoing live stack slot of type int, and this
16415     // block has an outgoing live stack slot of type native int.
16416     // We need to reset these before traversal because they have already been set
16417     // by the previous walk to determine all the members of the spill clique.
16418     impInlineRoot()->impSpillCliquePredMembers.Reset();
16419     impInlineRoot()->impSpillCliqueSuccMembers.Reset();
16420
16421     ReimportSpillClique callback(this);
16422
16423     impWalkSpillCliqueFromPred(block, &callback);
16424 }
16425
16426 // Set the pre-state of "block" (which should not have a pre-state allocated) to
16427 // a copy of "srcState", cloning tree pointers as required.
16428 void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState)
16429 {
16430     if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom)
16431     {
16432         block->bbEntryState = nullptr;
16433         return;
16434     }
16435
16436     block->bbEntryState = (EntryState*)compGetMemA(sizeof(EntryState));
16437
16438     // block->bbEntryState.esRefcount = 1;
16439
16440     block->bbEntryState->esStackDepth    = srcState->esStackDepth;
16441     block->bbEntryState->thisInitialized = TIS_Bottom;
16442
16443     if (srcState->esStackDepth > 0)
16444     {
16445         block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]);
16446         unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry);
16447
16448         memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize);
16449         for (unsigned level = 0; level < srcState->esStackDepth; level++)
16450         {
16451             GenTreePtr tree                         = srcState->esStack[level].val;
16452             block->bbEntryState->esStack[level].val = gtCloneExpr(tree);
16453         }
16454     }
16455
16456     if (verTrackObjCtorInitState)
16457     {
16458         verSetThisInit(block, srcState->thisInitialized);
16459     }
16460
16461     return;
16462 }
16463
16464 void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis)
16465 {
16466     assert(tis != TIS_Bottom); // Precondition.
16467     if (block->bbEntryState == nullptr)
16468     {
16469         block->bbEntryState = new (this, CMK_Unknown) EntryState();
16470     }
16471
16472     block->bbEntryState->thisInitialized = tis;
16473 }
16474
16475 /*
16476  * Resets the current state to the state at the start of the basic block
16477  */
16478 void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState)
16479 {
16480
16481     if (block->bbEntryState == nullptr)
16482     {
16483         destState->esStackDepth    = 0;
16484         destState->thisInitialized = TIS_Bottom;
16485         return;
16486     }
16487
16488     destState->esStackDepth = block->bbEntryState->esStackDepth;
16489
16490     if (destState->esStackDepth > 0)
16491     {
16492         unsigned stackSize = destState->esStackDepth * sizeof(StackEntry);
16493
16494         memcpy(destState->esStack, block->bbStackOnEntry(), stackSize);
16495     }
16496
16497     destState->thisInitialized = block->bbThisOnEntry();
16498
16499     return;
16500 }
16501
16502 ThisInitState BasicBlock::bbThisOnEntry()
16503 {
16504     return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom;
16505 }
16506
16507 unsigned BasicBlock::bbStackDepthOnEntry()
16508 {
16509     return (bbEntryState ? bbEntryState->esStackDepth : 0);
16510 }
16511
16512 void BasicBlock::bbSetStack(void* stackBuffer)
16513 {
16514     assert(bbEntryState);
16515     assert(stackBuffer);
16516     bbEntryState->esStack = (StackEntry*)stackBuffer;
16517 }
16518
16519 StackEntry* BasicBlock::bbStackOnEntry()
16520 {
16521     assert(bbEntryState);
16522     return bbEntryState->esStack;
16523 }
16524
16525 void Compiler::verInitCurrentState()
16526 {
16527     verTrackObjCtorInitState        = FALSE;
16528     verCurrentState.thisInitialized = TIS_Bottom;
16529
16530     if (tiVerificationNeeded)
16531     {
16532         // Track this ptr initialization
16533         if (!info.compIsStatic && (info.compFlags & CORINFO_FLG_CONSTRUCTOR) && lvaTable[0].lvVerTypeInfo.IsObjRef())
16534         {
16535             verTrackObjCtorInitState        = TRUE;
16536             verCurrentState.thisInitialized = TIS_Uninit;
16537         }
16538     }
16539
16540     // initialize stack info
16541
16542     verCurrentState.esStackDepth = 0;
16543     assert(verCurrentState.esStack != nullptr);
16544
16545     // copy current state to entry state of first BB
16546     verInitBBEntryState(fgFirstBB, &verCurrentState);
16547 }
16548
16549 Compiler* Compiler::impInlineRoot()
16550 {
16551     if (impInlineInfo == nullptr)
16552     {
16553         return this;
16554     }
16555     else
16556     {
16557         return impInlineInfo->InlineRoot;
16558     }
16559 }
16560
16561 BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk)
16562 {
16563     if (predOrSucc == SpillCliquePred)
16564     {
16565         return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd());
16566     }
16567     else
16568     {
16569         assert(predOrSucc == SpillCliqueSucc);
16570         return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd());
16571     }
16572 }
16573
16574 void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val)
16575 {
16576     if (predOrSucc == SpillCliquePred)
16577     {
16578         impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val);
16579     }
16580     else
16581     {
16582         assert(predOrSucc == SpillCliqueSucc);
16583         impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val);
16584     }
16585 }
16586
16587 /*****************************************************************************
16588  *
16589  *  Convert the instrs ("import") into our internal format (trees). The
16590  *  basic flowgraph has already been constructed and is passed in.
16591  */
16592
16593 void Compiler::impImport(BasicBlock* method)
16594 {
16595 #ifdef DEBUG
16596     if (verbose)
16597     {
16598         printf("*************** In impImport() for %s\n", info.compFullName);
16599     }
16600 #endif
16601
16602     /* Allocate the stack contents */
16603
16604     if (info.compMaxStack <= sizeof(impSmallStack) / sizeof(impSmallStack[0]))
16605     {
16606         /* Use local variable, don't waste time allocating on the heap */
16607
16608         impStkSize              = sizeof(impSmallStack) / sizeof(impSmallStack[0]);
16609         verCurrentState.esStack = impSmallStack;
16610     }
16611     else
16612     {
16613         impStkSize              = info.compMaxStack;
16614         verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
16615     }
16616
16617     // initialize the entry state at start of method
16618     verInitCurrentState();
16619
16620     // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase).
16621     Compiler* inlineRoot = impInlineRoot();
16622     if (this == inlineRoot) // These are only used on the root of the inlining tree.
16623     {
16624         // We have initialized these previously, but to size 0.  Make them larger.
16625         impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2);
16626         impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2);
16627         impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2);
16628     }
16629     inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2);
16630     inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2);
16631     inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2);
16632     impBlockListNodeFreeList = nullptr;
16633
16634 #ifdef DEBUG
16635     impLastILoffsStmt   = nullptr;
16636     impNestedStackSpill = false;
16637 #endif
16638     impBoxTemp = BAD_VAR_NUM;
16639
16640     impPendingList = impPendingFree = nullptr;
16641
16642     /* Add the entry-point to the worker-list */
16643
16644     // Skip leading internal blocks. There can be one as a leading scratch BB, and more
16645     // from EH normalization.
16646     // NOTE: It might be possible to always just put fgFirstBB on the pending list, and let everything else just fall
16647     // out.
16648     for (; method->bbFlags & BBF_INTERNAL; method = method->bbNext)
16649     {
16650         // Treat these as imported.
16651         assert(method->bbJumpKind == BBJ_NONE); // We assume all the leading ones are fallthrough.
16652         JITDUMP("Marking leading BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", method->bbNum);
16653         method->bbFlags |= BBF_IMPORTED;
16654     }
16655
16656     impImportBlockPending(method);
16657
16658     /* Import blocks in the worker-list until there are no more */
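    // Each pending entry records the block together with the stack contents and 'this'-init
    // state it should be entered with; that state is restored below before the block is imported.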
16659
16660     while (impPendingList)
16661     {
16662         /* Remove the entry at the front of the list */
16663
16664         PendingDsc* dsc = impPendingList;
16665         impPendingList  = impPendingList->pdNext;
16666         impSetPendingBlockMember(dsc->pdBB, 0);
16667
16668         /* Restore the stack state */
16669
16670         verCurrentState.thisInitialized = dsc->pdThisPtrInit;
16671         verCurrentState.esStackDepth    = dsc->pdSavedStack.ssDepth;
16672         if (verCurrentState.esStackDepth)
16673         {
16674             impRestoreStackState(&dsc->pdSavedStack);
16675         }
16676
16677         /* Add the entry to the free list for reuse */
16678
16679         dsc->pdNext    = impPendingFree;
16680         impPendingFree = dsc;
16681
16682         /* Now import the block */
16683
16684         if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION)
16685         {
16686
16687 #ifdef _TARGET_64BIT_
16688             // On AMD64, during verification we have to match JIT64 behavior since the VM is very tightly
16689             // coupled with the JIT64 IL verification logic.  Look inside the verHandleVerificationFailure
16690             // method for further explanation of why we raise this exception instead of making the jitted
16691             // code throw the verification exception during execution.
16692             if (tiVerificationNeeded && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
16693             {
16694                 BADCODE("Basic block marked as not verifiable");
16695             }
16696             else
16697 #endif // _TARGET_64BIT_
16698             {
16699                 verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true));
16700                 impEndTreeList(dsc->pdBB);
16701             }
16702         }
16703         else
16704         {
16705             impImportBlock(dsc->pdBB);
16706
16707             if (compDonotInline())
16708             {
16709                 return;
16710             }
16711             if (compIsForImportOnly() && !tiVerificationNeeded)
16712             {
16713                 return;
16714             }
16715         }
16716     }
16717
16718 #ifdef DEBUG
16719     if (verbose && info.compXcptnsCount)
16720     {
16721         printf("\nAfter impImport() added block for try,catch,finally");
16722         fgDispBasicBlocks();
16723         printf("\n");
16724     }
16725
16726     // Used in impImportBlockPending() for STRESS_CHK_REIMPORT
16727     for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
16728     {
16729         block->bbFlags &= ~BBF_VISITED;
16730     }
16731 #endif
16732
16733     assert(!compIsForInlining() || !tiVerificationNeeded);
16734 }
16735
16736 // Checks if a typeinfo (usually stored in the type stack) is a struct.
16737 // The invariant here is that if it's not a ref or a method and has a class handle,
16738 // it's a valuetype.
16739 bool Compiler::impIsValueType(typeInfo* pTypeInfo)
16740 {
16741     if (pTypeInfo && pTypeInfo->IsValueClassWithClsHnd())
16742     {
16743         return true;
16744     }
16745     else
16746     {
16747         return false;
16748     }
16749 }
16750
16751 /*****************************************************************************
16752  *  Check to see if the tree is the address of a local or
16753  *  the address of a field in a local.
16754  *
16755  *  *lclVarTreeOut will contain the GT_LCL_VAR tree when it returns TRUE.
16756  *
16757  */
16758
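// Returns TRUE for the address of a local (ADDR(LCL_VAR)) or the address of a (possibly
// nested) field of a local. A static field along the chain (null gtFldObj) makes it return FALSE.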
16759 BOOL Compiler::impIsAddressInLocal(GenTreePtr tree, GenTreePtr* lclVarTreeOut)
16760 {
16761     if (tree->gtOper != GT_ADDR)
16762     {
16763         return FALSE;
16764     }
16765
16766     GenTreePtr op = tree->gtOp.gtOp1;
16767     while (op->gtOper == GT_FIELD)
16768     {
16769         op = op->gtField.gtFldObj;
16770         if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL.
16771         {
16772             op = op->gtOp.gtOp1;
16773         }
16774         else
16775         {
16776             return false;
16777         }
16778     }
16779
16780     if (op->gtOper == GT_LCL_VAR)
16781     {
16782         *lclVarTreeOut = op;
16783         return TRUE;
16784     }
16785     else
16786     {
16787         return FALSE;
16788     }
16789 }
16790
16791 //------------------------------------------------------------------------
16792 // impMakeDiscretionaryInlineObservations: make observations that help
16793 // determine the profitability of a discretionary inline
16794 //
16795 // Arguments:
16796 //    pInlineInfo -- InlineInfo for the inline, or null for the prejit root
16797 //    inlineResult -- InlineResult accumulating information about this inline
16798 //
16799 // Notes:
16800 //    If inlining or prejitting the root, this method also makes
16801 //    various observations about the method that factor into inline
16802 //    decisions. It sets `compNativeSizeEstimate` as a side effect.
16803
16804 void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult)
16805 {
16806     assert(pInlineInfo != nullptr && compIsForInlining() || // Perform the actual inlining.
16807            pInlineInfo == nullptr && !compIsForInlining()   // Calculate the static inlining hint for ngen.
16808            );
16809
16810     // If we're really inlining, we should just have one result in play.
16811     assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult));
16812
16813     // If this is a "forceinline" method, the JIT probably shouldn't have gone
16814     // to the trouble of estimating the native code size. Even if it did, it
16815     // shouldn't be relying on the result of this method.
16816     assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE);
16817
16818     // Note if the caller contains NEWOBJ or NEWARR.
16819     Compiler* rootCompiler = impInlineRoot();
16820
16821     if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0)
16822     {
16823         inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY);
16824     }
16825
16826     if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0)
16827     {
16828         inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ);
16829     }
16830
16831     bool calleeIsStatic  = (info.compFlags & CORINFO_FLG_STATIC) != 0;
16832     bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0;
16833
16834     if (isSpecialMethod)
16835     {
16836         if (calleeIsStatic)
16837         {
16838             inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR);
16839         }
16840         else
16841         {
16842             inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR);
16843         }
16844     }
16845     else if (!calleeIsStatic)
16846     {
16847         // Callee is an instance method.
16848         //
16849         // Check if the callee has the same 'this' as the root.
16850         if (pInlineInfo != nullptr)
16851         {
16852             GenTreePtr thisArg = pInlineInfo->iciCall->gtCall.gtCallObjp;
16853             assert(thisArg);
16854             bool isSameThis = impIsThis(thisArg);
16855             inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis);
16856         }
16857     }
16858
16859     // Note if the callee's class is a promotable struct
16860     if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0)
16861     {
16862         lvaStructPromotionInfo structPromotionInfo;
16863         lvaCanPromoteStructType(info.compClassHnd, &structPromotionInfo, false);
16864         if (structPromotionInfo.canPromote)
16865         {
16866             inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE);
16867         }
16868     }
16869
16870 #ifdef FEATURE_SIMD
16871
16872     // Note if this method has SIMD args, locals, or a SIMD return value
16873     if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn)
16874     {
16875         inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD);
16876     }
16877
16878 #endif // FEATURE_SIMD
16879
16880     // Roughly classify callsite frequency.
16881     InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED;
16882
16883     // If this is a prejit root, or a maximally hot block...
16884     if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->bbWeight >= BB_MAX_WEIGHT))
16885     {
16886         frequency = InlineCallsiteFrequency::HOT;
16887     }
16888     // No training data.  Look for loop-like things.
16889     // We consider a recursive call loop-like.  Do not give the inlining boost to the method itself.
16890     // However, give it to things nearby.
16891     else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) &&
16892              (pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle))
16893     {
16894         frequency = InlineCallsiteFrequency::LOOP;
16895     }
16896     else if ((pInlineInfo->iciBlock->bbFlags & BBF_PROF_WEIGHT) && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT))
16897     {
16898         frequency = InlineCallsiteFrequency::WARM;
16899     }
16900     // Now modify the multiplier based on where we're called from.
16901     else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR))
16902     {
16903         frequency = InlineCallsiteFrequency::RARE;
16904     }
16905     else
16906     {
16907         frequency = InlineCallsiteFrequency::BORING;
16908     }
16909
16910     // Also capture the block weight of the call site.  In the prejit
16911     // root case, assume there's some hot call site for this method.
16912     unsigned weight = 0;
16913
16914     if (pInlineInfo != nullptr)
16915     {
16916         weight = pInlineInfo->iciBlock->bbWeight;
16917     }
16918     else
16919     {
16920         weight = BB_MAX_WEIGHT;
16921     }
16922
16923     inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency));
16924     inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, static_cast<int>(weight));
16925 }
16926
16927 /*****************************************************************************
16928  This method makes STATIC inlining decision based on the IL code.
16929  It should not make any inlining decision based on the context.
16930  If forceInline is true, then the inlining decision should not depend on
16931  performance heuristics (code size, etc.).
16932  */
16933
16934 void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
16935                               CORINFO_METHOD_INFO*  methInfo,
16936                               bool                  forceInline,
16937                               InlineResult*         inlineResult)
16938 {
16939     unsigned codeSize = methInfo->ILCodeSize;
16940
16941     // We shouldn't have made up our minds yet...
16942     assert(!inlineResult->IsDecided());
16943
16944     if (methInfo->EHcount)
16945     {
16946         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH);
16947         return;
16948     }
16949
16950     if ((methInfo->ILCode == nullptr) || (codeSize == 0))
16951     {
16952         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY);
16953         return;
16954     }
16955
16956     // For now we don't inline varargs (import code can't handle it)
16957
16958     if (methInfo->args.isVarArg())
16959     {
16960         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
16961         return;
16962     }
16963
16964     // Reject if it has too many locals.
16965     // This is currently an implementation limit due to fixed-size arrays in the
16966     // inline info, rather than a performance heuristic.
16967
16968     inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs);
16969
16970     if (methInfo->locals.numArgs > MAX_INL_LCLS)
16971     {
16972         inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS);
16973         return;
16974     }
16975
16976     // Make sure there aren't too many arguments.
16977     // This is currently an implementation limit due to fixed-size arrays in the
16978     // inline info, rather than a performance heuristic.
16979
16980     inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs);
16981
16982     if (methInfo->args.numArgs > MAX_INL_ARGS)
16983     {
16984         inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS);
16985         return;
16986     }
16987
16988     // Note force inline state
16989
16990     inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline);
16991
16992     // Note IL code size
16993
16994     inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize);
16995
16996     if (inlineResult->IsFailure())
16997     {
16998         return;
16999     }
17000
17001     // Make sure maxstack is not too big
17002
17003     inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack);
17004
17005     if (inlineResult->IsFailure())
17006     {
17007         return;
17008     }
17009 }
17010
17011 /*****************************************************************************
17012  */
17013
17014 void Compiler::impCheckCanInline(GenTreePtr             call,
17015                                  CORINFO_METHOD_HANDLE  fncHandle,
17016                                  unsigned               methAttr,
17017                                  CORINFO_CONTEXT_HANDLE exactContextHnd,
17018                                  InlineCandidateInfo**  ppInlineCandidateInfo,
17019                                  InlineResult*          inlineResult)
17020 {
17021     // Either EE or JIT might throw exceptions below.
17022     // If that happens, just don't inline the method.
17023
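    // The work below runs inside eeRunWithErrorTrap, so it is written as a captureless lambda;
    // everything it needs is passed explicitly through this Param block.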
17024     struct Param
17025     {
17026         Compiler*              pThis;
17027         GenTreePtr             call;
17028         CORINFO_METHOD_HANDLE  fncHandle;
17029         unsigned               methAttr;
17030         CORINFO_CONTEXT_HANDLE exactContextHnd;
17031         InlineResult*          result;
17032         InlineCandidateInfo**  ppInlineCandidateInfo;
17033     } param = {nullptr};
17034
17035     param.pThis                 = this;
17036     param.call                  = call;
17037     param.fncHandle             = fncHandle;
17038     param.methAttr              = methAttr;
17039     param.exactContextHnd       = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle);
17040     param.result                = inlineResult;
17041     param.ppInlineCandidateInfo = ppInlineCandidateInfo;
17042
17043     bool success = eeRunWithErrorTrap<Param>(
17044         [](Param* pParam) {
17045             DWORD                  dwRestrictions = 0;
17046             CorInfoInitClassResult initClassResult;
17047
17048 #ifdef DEBUG
17049             const char* methodName;
17050             const char* className;
17051             methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className);
17052
17053             if (JitConfig.JitNoInline())
17054             {
17055                 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE);
17056                 goto _exit;
17057             }
17058 #endif
17059
17060             /* Try to get the code address/size for the method */
17061
17062             CORINFO_METHOD_INFO methInfo;
17063             if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo))
17064             {
17065                 pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO);
17066                 goto _exit;
17067             }
17068
17069             bool forceInline;
17070             forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE);
17071
17072             pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result);
17073
17074             if (pParam->result->IsFailure())
17075             {
17076                 assert(pParam->result->IsNever());
17077                 goto _exit;
17078             }
17079
17080             // Speculatively check if initClass() can be done.
17081             // If it can be done, we will try to inline the method. If inlining
17082             // succeeds, then we will do the non-speculative initClass() and commit it.
17083             // If this speculative call to initClass() fails, there is no point
17084             // trying to inline this method.
17085             initClassResult =
17086                 pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */,
17087                                                            pParam->exactContextHnd /* context */,
17088                                                            TRUE /* speculative */);
17089
17090             if (initClassResult & CORINFO_INITCLASS_DONT_INLINE)
17091             {
17092                 pParam->result->NoteFatal(InlineObservation::CALLSITE_CLASS_INIT_FAILURE_SPEC);
17093                 goto _exit;
17094             }
17095
17096             // Give the EE the final say in whether to inline or not.
17097             // This check should be done last since, for verifiable code, it can be expensive.
17098
17099             /* VM Inline check also ensures that the method is verifiable if needed */
17100             CorInfoInline vmResult;
17101             vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle,
17102                                                                   &dwRestrictions);
17103
17104             if (vmResult == INLINE_FAIL)
17105             {
17106                 pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE);
17107             }
17108             else if (vmResult == INLINE_NEVER)
17109             {
17110                 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE);
17111             }
17112
17113             if (pParam->result->IsFailure())
17114             {
17115                 // Make sure not to report this one.  It was already reported by the VM.
17116                 pParam->result->SetReported();
17117                 goto _exit;
17118             }
17119
17120             // check for unsupported inlining restrictions
17121             assert((dwRestrictions & ~(INLINE_RESPECT_BOUNDARY | INLINE_NO_CALLEE_LDSTR | INLINE_SAME_THIS)) == 0);
17122
17123             if (dwRestrictions & INLINE_SAME_THIS)
17124             {
17125                 GenTreePtr thisArg = pParam->call->gtCall.gtCallObjp;
17126                 assert(thisArg);
17127
17128                 if (!pParam->pThis->impIsThis(thisArg))
17129                 {
17130                     pParam->result->NoteFatal(InlineObservation::CALLSITE_REQUIRES_SAME_THIS);
17131                     goto _exit;
17132                 }
17133             }
17134
17135             /* Get the method properties */
17136
17137             CORINFO_CLASS_HANDLE clsHandle;
17138             clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle);
17139             unsigned clsAttr;
17140             clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle);
17141
17142             /* Get the return type */
17143
17144             var_types fncRetType;
17145             fncRetType = pParam->call->TypeGet();
17146
17147 #ifdef DEBUG
17148             var_types fncRealRetType;
17149             fncRealRetType = JITtype2varType(methInfo.args.retType);
17150
17151             assert((genActualType(fncRealRetType) == genActualType(fncRetType)) ||
17152                    // <BUGNUM> VSW 288602 </BUGNUM>
17153                    // In case of IJW, we allow assigning a native pointer to a BYREF.
17154                    (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) ||
17155                    (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT)));
17156 #endif
17157
17158             //
17159             // Allocate an InlineCandidateInfo structure
17160             //
17161             InlineCandidateInfo* pInfo;
17162             pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo;
17163
17164             pInfo->dwRestrictions  = dwRestrictions;
17165             pInfo->methInfo        = methInfo;
17166             pInfo->methAttr        = pParam->methAttr;
17167             pInfo->clsHandle       = clsHandle;
17168             pInfo->clsAttr         = clsAttr;
17169             pInfo->fncRetType      = fncRetType;
17170             pInfo->exactContextHnd = pParam->exactContextHnd;
17171             pInfo->ilCallerHandle  = pParam->pThis->info.compMethodHnd;
17172             pInfo->initClassResult = initClassResult;
17173
17174             *(pParam->ppInlineCandidateInfo) = pInfo;
17175
17176         _exit:;
17177         },
17178         &param);
17179     if (!success)
17180     {
17181         param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR);
17182     }
17183 }
17184
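//-----------------------------------------------------------------------------
// impInlineRecordArgInfo: record information about one argument of an inline
// candidate in pInlineInfo->inlArgInfo[argNum]: whether it is invariant, a
// local var, a byref to a struct local, or carries global refs, side effects,
// or ldarga exposure. Notes a fatal observation on inlineResult for mkrefany
// arguments and for a constant null 'this'.
//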
17185 void Compiler::impInlineRecordArgInfo(InlineInfo*   pInlineInfo,
17186                                       GenTreePtr    curArgVal,
17187                                       unsigned      argNum,
17188                                       InlineResult* inlineResult)
17189 {
17190     InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum];
17191
17192     if (curArgVal->gtOper == GT_MKREFANY)
17193     {
17194         inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY);
17195         return;
17196     }
17197
17198     inlCurArgInfo->argNode = curArgVal;
17199
17200     GenTreePtr lclVarTree;
17201     if (impIsAddressInLocal(curArgVal, &lclVarTree) && varTypeIsStruct(lclVarTree))
17202     {
17203         inlCurArgInfo->argIsByRefToStructLocal = true;
17204 #ifdef FEATURE_SIMD
17205         if (lvaTable[lclVarTree->AsLclVarCommon()->gtLclNum].lvSIMDType)
17206         {
17207             pInlineInfo->hasSIMDTypeArgLocalOrReturn = true;
17208         }
17209 #endif // FEATURE_SIMD
17210     }
17211
17212     if (curArgVal->gtFlags & GTF_ALL_EFFECT)
17213     {
17214         inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0;
17215         inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0;
17216     }
17217
17218     if (curArgVal->gtOper == GT_LCL_VAR)
17219     {
17220         inlCurArgInfo->argIsLclVar = true;
17221
17222         /* Remember the "original" argument number */
17223         curArgVal->gtLclVar.gtLclILoffs = argNum;
17224     }
17225
17226     if ((curArgVal->OperKind() & GTK_CONST) ||
17227         ((curArgVal->gtOper == GT_ADDR) && (curArgVal->gtOp.gtOp1->gtOper == GT_LCL_VAR)))
17228     {
17229         inlCurArgInfo->argIsInvariant = true;
17230         if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->gtIntCon.gtIconVal == 0))
17231         {
17232             /* Abort, but do not mark as not inlinable */
17233             inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS);
17234             return;
17235         }
17236     }
17237
17238     if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal))
17239     {
17240         inlCurArgInfo->argHasLdargaOp = true;
17241     }
17242
17243 #ifdef DEBUG
17244     if (verbose)
17245     {
17246         if (inlCurArgInfo->argIsThis)
17247         {
17248             printf("thisArg:");
17249         }
17250         else
17251         {
17252             printf("\nArgument #%u:", argNum);
17253         }
17254         if (inlCurArgInfo->argIsLclVar)
17255         {
17256             printf(" is a local var");
17257         }
17258         if (inlCurArgInfo->argIsInvariant)
17259         {
17260             printf(" is a constant");
17261         }
17262         if (inlCurArgInfo->argHasGlobRef)
17263         {
17264             printf(" has global refs");
17265         }
17266         if (inlCurArgInfo->argHasSideEff)
17267         {
17268             printf(" has side effects");
17269         }
17270         if (inlCurArgInfo->argHasLdargaOp)
17271         {
17272             printf(" has ldarga effect");
17273         }
17274         if (inlCurArgInfo->argHasStargOp)
17275         {
17276             printf(" has starg effect");
17277         }
17278         if (inlCurArgInfo->argIsByRefToStructLocal)
17279         {
17280             printf(" is byref to a struct local");
17281         }
17282
17283         printf("\n");
17284         gtDispTree(curArgVal);
17285         printf("\n");
17286     }
17287 #endif
17288 }
17289
17290 /*****************************************************************************
17291  *  impInlineInitVars: initialize the inline candidate's argument and local
17292  *  variable tables (inlArgInfo/lclVarInfo) from the call site trees and the callee signature. */
17293
17294 void Compiler::impInlineInitVars(InlineInfo* pInlineInfo)
17295 {
17296     assert(!compIsForInlining());
17297
17298     GenTreePtr           call         = pInlineInfo->iciCall;
17299     CORINFO_METHOD_INFO* methInfo     = &pInlineInfo->inlineCandidateInfo->methInfo;
17300     unsigned             clsAttr      = pInlineInfo->inlineCandidateInfo->clsAttr;
17301     InlArgInfo*          inlArgInfo   = pInlineInfo->inlArgInfo;
17302     InlLclVarInfo*       lclVarInfo   = pInlineInfo->lclVarInfo;
17303     InlineResult*        inlineResult = pInlineInfo->inlineResult;
17304
17305     const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo);
17306
17307     /* Init the argument struct */
17308
17309     memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0]));
17310
17311     /* Get hold of the 'this' pointer and the argument list proper */
17312
17313     GenTreePtr thisArg = call->gtCall.gtCallObjp;
17314     GenTreePtr argList = call->gtCall.gtCallArgs;
17315     unsigned   argCnt  = 0; // Count of the arguments
17316
17317     assert((methInfo->args.hasThis()) == (thisArg != nullptr));
17318
17319     if (thisArg)
17320     {
17321         inlArgInfo[0].argIsThis = true;
17322
17323         impInlineRecordArgInfo(pInlineInfo, thisArg, argCnt, inlineResult);
17324
17325         if (inlineResult->IsFailure())
17326         {
17327             return;
17328         }
17329
17330         /* Increment the argument count */
17331         argCnt++;
17332     }
17333
17334     /* Record some information about each of the arguments */
17335     bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0;
17336
17337 #if USER_ARGS_COME_LAST
17338     unsigned typeCtxtArg = thisArg ? 1 : 0;
17339 #else  // USER_ARGS_COME_LAST
17340     unsigned typeCtxtArg = methInfo->args.totalILArgs();
17341 #endif // USER_ARGS_COME_LAST
17342
17343     for (GenTreePtr argTmp = argList; argTmp; argTmp = argTmp->gtOp.gtOp2)
17344     {
17345         if (argTmp == argList && hasRetBuffArg)
17346         {
17347             continue;
17348         }
17349
17350         // Ignore the type context argument
17351         if (hasTypeCtxtArg && (argCnt == typeCtxtArg))
17352         {
17353             typeCtxtArg = 0xFFFFFFFF;
17354             continue;
17355         }
17356
17357         assert(argTmp->gtOper == GT_LIST);
17358         GenTreePtr argVal = argTmp->gtOp.gtOp1;
17359
17360         impInlineRecordArgInfo(pInlineInfo, argVal, argCnt, inlineResult);
17361
17362         if (inlineResult->IsFailure())
17363         {
17364             return;
17365         }
17366
17367         /* Increment the argument count */
17368         argCnt++;
17369     }
17370
17371     /* Make sure we got the arg number right */
17372     assert(argCnt == methInfo->args.totalILArgs());
17373
17374 #ifdef FEATURE_SIMD
17375     bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn;
17376 #endif // FEATURE_SIMD
17377
17378     /* We have typeless opcodes, get type information from the signature */
17379
17380     if (thisArg)
17381     {
17382         var_types sigType;
17383
17384         if (clsAttr & CORINFO_FLG_VALUECLASS)
17385         {
17386             sigType = TYP_BYREF;
17387         }
17388         else
17389         {
17390             sigType = TYP_REF;
17391         }
17392
17393         lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle);
17394         lclVarInfo[0].lclHasLdlocaOp = false;
17395
17396 #ifdef FEATURE_SIMD
17397         // We always want to check isSIMDClass, since we want to set foundSIMDType (to increase
17398         // the inlining multiplier) for anything in that assembly.
17399         // But we only need to normalize it if it is a TYP_STRUCT
17400         // (which we need to do even if we have already set foundSIMDType).
17401         if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
17402         {
17403             if (sigType == TYP_STRUCT)
17404             {
17405                 sigType = impNormStructType(lclVarInfo[0].lclVerTypeInfo.GetClassHandle());
17406             }
17407             foundSIMDType = true;
17408         }
17409 #endif // FEATURE_SIMD
17410         lclVarInfo[0].lclTypeInfo = sigType;
17411
17412         assert(varTypeIsGC(thisArg->gtType) ||   // "this" is managed
17413                (thisArg->gtType == TYP_I_IMPL && // "this" is unmanaged but the method's class doesn't care
17414                 (clsAttr & CORINFO_FLG_VALUECLASS)));
17415
17416         if (genActualType(thisArg->gtType) != genActualType(sigType))
17417         {
17418             if (sigType == TYP_REF)
17419             {
17420                 /* The argument cannot be bashed into a ref (see bug 750871) */
17421                 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF);
17422                 return;
17423             }
17424
17425             /* This can only happen with byrefs <-> ints/shorts */
17426
17427             assert(genActualType(sigType) == TYP_I_IMPL || sigType == TYP_BYREF);
17428             assert(genActualType(thisArg->gtType) == TYP_I_IMPL || thisArg->gtType == TYP_BYREF);
17429
17430             if (sigType == TYP_BYREF)
17431             {
17432                 lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17433             }
17434             else if (thisArg->gtType == TYP_BYREF)
17435             {
17436                 assert(sigType == TYP_I_IMPL);
17437
17438                 /* If possible change the BYREF to an int */
17439                 if (thisArg->IsVarAddr())
17440                 {
17441                     thisArg->gtType              = TYP_I_IMPL;
17442                     lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17443                 }
17444                 else
17445                 {
17446                     /* Arguments 'int <- byref' cannot be bashed */
17447                     inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
17448                     return;
17449                 }
17450             }
17451         }
17452     }
17453
17454     /* Init the types of the arguments and make sure the types
17455      * from the trees match the types in the signature */
17456
17457     CORINFO_ARG_LIST_HANDLE argLst;
17458     argLst = methInfo->args.args;
17459
17460     unsigned i;
17461     for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst))
17462     {
17463         var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args);
17464
17465         lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst);
17466
17467 #ifdef FEATURE_SIMD
17468         if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
17469         {
17470             // If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've
17471             // found a SIMD type, even if this may not be a type we recognize (the assumption is that
17472             // it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier).
17473             foundSIMDType = true;
17474             if (sigType == TYP_STRUCT)
17475             {
17476                 var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle());
17477                 sigType              = structType;
17478             }
17479         }
17480 #endif // FEATURE_SIMD
17481
17482         lclVarInfo[i].lclTypeInfo    = sigType;
17483         lclVarInfo[i].lclHasLdlocaOp = false;
17484
17485         /* Does the tree type match the signature type? */
17486
17487         GenTreePtr inlArgNode = inlArgInfo[i].argNode;
17488
17489         if (sigType != inlArgNode->gtType)
17490         {
17491             /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints,
17492                but in bad IL cases with caller-callee signature mismatches we can see other types.
17493                Intentionally reject mismatched cases (rather than asserting) so the jit remains robust when
17494                encountering bad IL. */
17495
17496             bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) ||
17497                                         (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) ||
17498                                         (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType));
17499
17500             if (!isPlausibleTypeMatch)
17501             {
17502                 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE);
17503                 return;
17504             }
17505
17506             /* Is it a narrowing or widening cast?
17507              * Widening casts are ok since the value computed is already
17508              * normalized to an int (on the IL stack) */
17509
17510             if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType))
17511             {
17512                 if (sigType == TYP_BYREF)
17513                 {
17514                     lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17515                 }
17516                 else if (inlArgNode->gtType == TYP_BYREF)
17517                 {
17518                     assert(varTypeIsIntOrI(sigType));
17519
17520                     /* If possible bash the BYREF to an int */
17521                     if (inlArgNode->IsVarAddr())
17522                     {
17523                         inlArgNode->gtType           = TYP_I_IMPL;
17524                         lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17525                     }
17526                     else
17527                     {
17528                         /* Arguments 'int <- byref' cannot be changed */
17529                         inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
17530                         return;
17531                     }
17532                 }
17533                 else if (genTypeSize(sigType) < EA_PTRSIZE)
17534                 {
17535                     /* Narrowing cast */
17536
17537                     if (inlArgNode->gtOper == GT_LCL_VAR &&
17538                         !lvaTable[inlArgNode->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad() &&
17539                         sigType == lvaGetRealType(inlArgNode->gtLclVarCommon.gtLclNum))
17540                     {
17541                         /* We don't need to insert a cast here as the variable
17542                            was assigned a normalized value of the right type */
17543
17544                         continue;
17545                     }
17546
17547                     inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(TYP_INT, inlArgNode, sigType);
17548
17549                     inlArgInfo[i].argIsLclVar = false;
17550
17551                     /* Try to fold the node in case we have constant arguments */
17552
17553                     if (inlArgInfo[i].argIsInvariant)
17554                     {
17555                         inlArgNode            = gtFoldExprConst(inlArgNode);
17556                         inlArgInfo[i].argNode = inlArgNode;
17557                         assert(inlArgNode->OperIsConst());
17558                     }
17559                 }
17560 #ifdef _TARGET_64BIT_
17561                 else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType))
17562                 {
17563                     // This should only happen for int -> native int widening
17564                     inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(genActualType(sigType), inlArgNode, sigType);
17565
17566                     inlArgInfo[i].argIsLclVar = false;
17567
17568                     /* Try to fold the node in case we have constant arguments */
17569
17570                     if (inlArgInfo[i].argIsInvariant)
17571                     {
17572                         inlArgNode            = gtFoldExprConst(inlArgNode);
17573                         inlArgInfo[i].argNode = inlArgNode;
17574                         assert(inlArgNode->OperIsConst());
17575                     }
17576                 }
17577 #endif // _TARGET_64BIT_
17578             }
17579         }
17580     }
17581
17582     /* Init the types of the local variables */
17583
17584     CORINFO_ARG_LIST_HANDLE localsSig;
17585     localsSig = methInfo->locals.args;
17586
17587     for (i = 0; i < methInfo->locals.numArgs; i++)
17588     {
17589         bool      isPinned;
17590         var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned);
17591
17592         lclVarInfo[i + argCnt].lclHasLdlocaOp = false;
17593         lclVarInfo[i + argCnt].lclIsPinned    = isPinned;
17594         lclVarInfo[i + argCnt].lclTypeInfo    = type;
17595
17596         if (isPinned)
17597         {
17598             // Pinned locals may cause inlines to fail.
17599             inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS);
17600             if (inlineResult->IsFailure())
17601             {
17602                 return;
17603             }
17604         }
17605
17606         lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig);
17607
17608         // If this local is a struct type with GC fields, inform the inliner. It may choose to bail
17609         // out on the inline.
17610         if (type == TYP_STRUCT)
17611         {
17612             CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle();
17613             DWORD                typeFlags = info.compCompHnd->getClassAttribs(lclHandle);
17614             if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
17615             {
17616                 inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
17617                 if (inlineResult->IsFailure())
17618                 {
17619                     return;
17620                 }
17621
17622                 // Do further notification in the case where the call site is rare; some policies do
17623                 // not track the relative hotness of call sites for "always" inline cases.
17624                 if (pInlineInfo->iciBlock->isRunRarely())
17625                 {
17626                     inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
17627                     if (inlineResult->IsFailure())
17628                     {
17629
17630                         return;
17631                     }
17632                 }
17633             }
17634         }
17635
17636         localsSig = info.compCompHnd->getArgNext(localsSig);
17637
17638 #ifdef FEATURE_SIMD
17639         if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo)))
17640         {
17641             foundSIMDType = true;
17642             if (featureSIMD && type == TYP_STRUCT)
17643             {
17644                 var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle());
17645                 lclVarInfo[i + argCnt].lclTypeInfo = structType;
17646             }
17647         }
17648 #endif // FEATURE_SIMD
17649     }
17650
17651 #ifdef FEATURE_SIMD
17652     if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDClass(call->AsCall()->gtRetClsHnd))
17653     {
17654         foundSIMDType = true;
17655     }
17656     pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType;
17657 #endif // FEATURE_SIMD
17658 }
17659
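//-----------------------------------------------------------------------------
// impInlineFetchLocal: return the caller-side temp lclvar number used for the
// given inlinee local, allocating it on first use and copying over its type,
// ldloca/pinned flags, and struct handle from the inline local-var info.
//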
17660 unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason))
17661 {
17662     assert(compIsForInlining());
17663
17664     unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum];
17665
17666     if (tmpNum == BAD_VAR_NUM)
17667     {
17668         var_types lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
17669
17670         // The lifetime of this local might span multiple BBs,
17671         // so it is a long-lifetime local.
17672         impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason));
17673
17674         lvaTable[tmpNum].lvType = lclTyp;
17675         if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclHasLdlocaOp)
17676         {
17677             lvaTable[tmpNum].lvHasLdAddrOp = 1;
17678         }
17679
17680         if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclIsPinned)
17681         {
17682             lvaTable[tmpNum].lvPinned = 1;
17683
17684             if (!impInlineInfo->hasPinnedLocals)
17685             {
17686                 // If the inlinee returns a value, use a spill temp
17687                 // for the return value to ensure that even in case
17688                 // where the return expression refers to one of the
17689                 // pinned locals, we can unpin the local right after
17690                 // the inlined method body.
17691                 if ((info.compRetNativeType != TYP_VOID) && (lvaInlineeReturnSpillTemp == BAD_VAR_NUM))
17692                 {
17693                     lvaInlineeReturnSpillTemp =
17694                         lvaGrabTemp(false DEBUGARG("Inline candidate pinned local return spill temp"));
17695                     lvaTable[lvaInlineeReturnSpillTemp].lvType = info.compRetNativeType;
17696                 }
17697             }
17698
17699             impInlineInfo->hasPinnedLocals = true;
17700         }
17701
17702         if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo.IsStruct())
17703         {
17704             if (varTypeIsStruct(lclTyp))
17705             {
17706                 lvaSetStruct(tmpNum,
17707                              impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo.GetClassHandle(),
17708                              true /* unsafe value cls check */);
17709             }
17710             else
17711             {
17712                 // This is a wrapped primitive.  Make sure the verstate knows that
17713                 lvaTable[tmpNum].lvVerTypeInfo =
17714                     impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo;
17715             }
17716         }
17717     }
17718
17719     return tmpNum;
17720 }
17721
17722 // Returns the GenTree (usually a GT_LCL_VAR) representing the given argument of the inlined method.
17723 // Only use this method for the arguments of the inlinee method.
17724 // !!! Do not use it for the locals of the inlinee method. !!!
17725
17726 GenTreePtr Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo)
17727 {
17728     /* Get the argument type */
17729     var_types lclTyp = lclVarInfo[lclNum].lclTypeInfo;
17730
17731     GenTreePtr op1 = nullptr;
17732
17733     // constant or address of local
17734     if (inlArgInfo[lclNum].argIsInvariant && !inlArgInfo[lclNum].argHasLdargaOp && !inlArgInfo[lclNum].argHasStargOp)
17735     {
17736         /* Clone the constant. Note that we cannot directly use argNode
17737         in the trees even if inlArgInfo[lclNum].argIsUsed==false as this
17738         would introduce aliasing between inlArgInfo[].argNode and
17739         impInlineExpr. Then gtFoldExpr() could change it, causing further
17740         references to the argument working off of the bashed copy. */
17741
17742         op1 = gtCloneExpr(inlArgInfo[lclNum].argNode);
17743         PREFIX_ASSUME(op1 != nullptr);
17744         inlArgInfo[lclNum].argTmpNum = (unsigned)-1; // illegal temp
17745     }
17746     else if (inlArgInfo[lclNum].argIsLclVar && !inlArgInfo[lclNum].argHasLdargaOp && !inlArgInfo[lclNum].argHasStargOp)
17747     {
17748         /* Argument is a local variable (of the caller)
17749          * Can we re-use the passed argument node? */
17750
17751         op1                          = inlArgInfo[lclNum].argNode;
17752         inlArgInfo[lclNum].argTmpNum = op1->gtLclVarCommon.gtLclNum;
17753
17754         if (inlArgInfo[lclNum].argIsUsed)
17755         {
17756             assert(op1->gtOper == GT_LCL_VAR);
17757             assert(lclNum == op1->gtLclVar.gtLclILoffs);
17758
17759             if (!lvaTable[op1->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad())
17760             {
17761                 lclTyp = genActualType(lclTyp);
17762             }
17763
17764             /* Create a new lcl var node - remember the argument lclNum */
17765             op1 = gtNewLclvNode(op1->gtLclVarCommon.gtLclNum, lclTyp, op1->gtLclVar.gtLclILoffs);
17766         }
17767     }
17768     else if (inlArgInfo[lclNum].argIsByRefToStructLocal && !inlArgInfo[lclNum].argHasStargOp)
17769     {
17770         /* Argument is a by-ref address to a struct, a normed struct, or its field.
17771            In these cases, don't spill the byref to a local; simply clone the tree and use it.
17772            This way we will increase the chance for this byref to be optimized away by
17773            a subsequent "dereference" operation.
17774
17775            From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree
17776            (in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal.
17777            For example, if the caller is:
17778                 ldloca.s   V_1  // V_1 is a local struct
17779                 call       void Test.ILPart::RunLdargaOnPointerArg(int32*)
17780            and the callee being inlined has:
17781                 .method public static void  RunLdargaOnPointerArg(int32* ptrToInts) cil managed
17782                     ldarga.s   ptrToInts
17783                     call       void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**)
17784            then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll
17785            soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR.
17786         */
17787         assert(inlArgInfo[lclNum].argNode->TypeGet() == TYP_BYREF ||
17788                inlArgInfo[lclNum].argNode->TypeGet() == TYP_I_IMPL);
17789         op1 = gtCloneExpr(inlArgInfo[lclNum].argNode);
17790     }
17791     else
17792     {
17793         /* Argument is a complex expression - it must be evaluated into a temp */
17794
17795         if (inlArgInfo[lclNum].argHasTmp)
17796         {
17797             assert(inlArgInfo[lclNum].argIsUsed);
17798             assert(inlArgInfo[lclNum].argTmpNum < lvaCount);
17799
17800             /* Create a new lcl var node - remember the argument lclNum */
17801             op1 = gtNewLclvNode(inlArgInfo[lclNum].argTmpNum, genActualType(lclTyp));
17802
17803             /* This is the second or later use of this argument,
17804             so we have to use the temp (instead of the actual arg) */
17805             inlArgInfo[lclNum].argBashTmpNode = nullptr;
17806         }
17807         else
17808         {
17809             /* First time use */
17810             assert(inlArgInfo[lclNum].argIsUsed == false);
17811
17812             /* Reserve a temp for the expression.
17813             * Use a large size node as we may change it later */
17814
17815             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg"));
17816
17817             lvaTable[tmpNum].lvType = lclTyp;
17818             assert(lvaTable[tmpNum].lvAddrExposed == 0);
17819             if (inlArgInfo[lclNum].argHasLdargaOp)
17820             {
17821                 lvaTable[tmpNum].lvHasLdAddrOp = 1;
17822             }
17823
17824             if (lclVarInfo[lclNum].lclVerTypeInfo.IsStruct())
17825             {
17826                 if (varTypeIsStruct(lclTyp))
17827                 {
17828                     lvaSetStruct(tmpNum, impInlineInfo->lclVarInfo[lclNum].lclVerTypeInfo.GetClassHandle(),
17829                                  true /* unsafe value cls check */);
17830                 }
17831                 else
17832                 {
17833                     // This is a wrapped primitive.  Make sure the verstate knows that
17834                     lvaTable[tmpNum].lvVerTypeInfo = impInlineInfo->lclVarInfo[lclNum].lclVerTypeInfo;
17835                 }
17836             }
17837
17838             inlArgInfo[lclNum].argHasTmp = true;
17839             inlArgInfo[lclNum].argTmpNum = tmpNum;
17840
17841             // If we require strict exception order, then arguments must
17842             // be evaluated in sequence before the body of the inlined method.
17843             // So we need to evaluate them to a temp.
17844             // Also, if arguments have global references, we need to
17845             // evaluate them to a temp before the inlined body as the
17846             // inlined body may be modifying the global ref.
17847             // TODO-1stClassStructs: We currently do not reuse an existing lclVar
17848             // if it is a struct, because it requires some additional handling.
17849
17850             if (!varTypeIsStruct(lclTyp) && (!inlArgInfo[lclNum].argHasSideEff) && (!inlArgInfo[lclNum].argHasGlobRef))
17851             {
17852                 /* Get a *LARGE* LCL_VAR node */
17853                 op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp), lclNum);
17854
17855                 /* Record op1 as the very first use of this argument.
17856                 If there are no further uses of the arg, we may be
17857                 able to use the actual arg node instead of the temp.
17858                 If we do see any further uses, we will clear this. */
17859                 inlArgInfo[lclNum].argBashTmpNode = op1;
17860             }
17861             else
17862             {
17863                 /* Get a small LCL_VAR node */
17864                 op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp));
17865                 /* No bashing of this argument */
17866                 inlArgInfo[lclNum].argBashTmpNode = nullptr;
17867             }
17868         }
17869     }
17870
17871     /* Mark the argument as used */
17872
17873     inlArgInfo[lclNum].argIsUsed = true;
17874
17875     return op1;
17876 }
17877
17878 /******************************************************************************
17879  Is this the original "this" argument to the call being inlined?
17880
17881  Note that we do not inline methods with "starg 0", and so we do not need to
17882  worry about it.
17883 */
17884
17885 BOOL Compiler::impInlineIsThis(GenTreePtr tree, InlArgInfo* inlArgInfo)
17886 {
17887     assert(compIsForInlining());
17888     return (tree->gtOper == GT_LCL_VAR && tree->gtLclVarCommon.gtLclNum == inlArgInfo[0].argTmpNum);
17889 }
17890
17891 //-----------------------------------------------------------------------------
17892 // This function checks if a dereference in the inlinee can guarantee that
17893 // the "this" is non-NULL.
17894 // If we haven't hit a branch or a side effect, and we are dereferencing
17895 // from 'this' to access a field or make a GTF_CALL_NULLCHECK call,
17896 // then we can avoid a separate null pointer check.
17897 //
17898 // "additionalTreesToBeEvaluatedBefore"
17899 // is the set of pending trees that have not yet been added to the statement list,
17900 // and which have been removed from verCurrentState.esStack[]
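//
// For example, when the inlinee is a trivial getter whose first action is to
// load a field from 'this', that field access itself will fault on a null
// 'this' before any other observable side effect, so the explicit null check
// that would otherwise guard the inlined call can be omitted.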
17901
17902 BOOL Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTreePtr  additionalTreesToBeEvaluatedBefore,
17903                                                                   GenTreePtr  variableBeingDereferenced,
17904                                                                   InlArgInfo* inlArgInfo)
17905 {
17906     assert(compIsForInlining());
17907     assert(opts.OptEnabled(CLFLG_INLINING));
17908
17909     BasicBlock* block = compCurBB;
17910
17911     GenTreePtr stmt;
17912     GenTreePtr expr;
17913
17914     if (block != fgFirstBB)
17915     {
17916         return FALSE;
17917     }
17918
17919     if (!impInlineIsThis(variableBeingDereferenced, inlArgInfo))
17920     {
17921         return FALSE;
17922     }
17923
17924     if (additionalTreesToBeEvaluatedBefore &&
17925         GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTreesToBeEvaluatedBefore->gtFlags))
17926     {
17927         return FALSE;
17928     }
17929
17930     for (stmt = impTreeList->gtNext; stmt; stmt = stmt->gtNext)
17931     {
17932         expr = stmt->gtStmt.gtStmtExpr;
17933
17934         if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags))
17935         {
17936             return FALSE;
17937         }
17938     }
17939
17940     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
17941     {
17942         unsigned stackTreeFlags = verCurrentState.esStack[level].val->gtFlags;
17943         if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags))
17944         {
17945             return FALSE;
17946         }
17947     }
17948
17949     return TRUE;
17950 }
17951
17952 /******************************************************************************/
17953 // Check the inlining eligibility of this GT_CALL node.
17954 // Mark GTF_CALL_INLINE_CANDIDATE on the GT_CALL node
17955
17956 // Todo: find a way to record the failure reasons in the IR (or
17957 // otherwise build tree context) so when we do the inlining pass we
17958 // can capture these reasons
17959
17960 void Compiler::impMarkInlineCandidate(GenTreePtr             callNode,
17961                                       CORINFO_CONTEXT_HANDLE exactContextHnd,
17962                                       CORINFO_CALL_INFO*     callInfo)
17963 {
17964     // Let the strategy know there's another call
17965     impInlineRoot()->m_inlineStrategy->NoteCall();
17966
17967     if (!opts.OptEnabled(CLFLG_INLINING))
17968     {
17969         /* XXX Mon 8/18/2008
17970          * This assert is misleading.  The caller does not ensure that we have CLFLG_INLINING set before
17971          * calling impMarkInlineCandidate.  However, if this assert trips it means that we're an inlinee and
17972          * CLFLG_MINOPT is set.  That doesn't make a lot of sense.  If you hit this assert, work back and
17973          * figure out why we did not set MAXOPT for this compile.
17974          */
17975         assert(!compIsForInlining());
17976         return;
17977     }
17978
17979     if (compIsForImportOnly())
17980     {
17981         // Don't bother creating the inline candidate during verification.
17982         // Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification
17983         // that leads to the creation of multiple instances of Compiler.
17984         return;
17985     }
17986
17987     GenTreeCall* call = callNode->AsCall();
17988     InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate");
17989
17990     // Don't inline if not optimizing root method
17991     if (opts.compDbgCode)
17992     {
17993         inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN);
17994         return;
17995     }
17996
17997     // Don't inline if inlining into root method is disabled.
17998     if (InlineStrategy::IsNoInline(info.compCompHnd, info.compMethodHnd))
17999     {
18000         inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE);
18001         return;
18002     }
18003
18004     // Inlining candidate determination needs to honor only the IL tail prefix.
18005     // Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive).
18006     if (call->IsTailPrefixedCall())
18007     {
18008         inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX);
18009         return;
18010     }
18011
18012     // Tail recursion elimination takes precedence over inlining.
18013     // TODO: We may want to do some of the additional checks from fgMorphCall
18014     // here to reduce the chance we don't inline a call that won't be optimized
18015     // as a fast tail call or turned into a loop.
18016     if (gtIsRecursiveCall(call) && call->IsImplicitTailCall())
18017     {
18018         inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL);
18019         return;
18020     }
18021
18022     if ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT)
18023     {
18024         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT);
18025         return;
18026     }
18027
18028     /* Ignore helper calls */
18029
18030     if (call->gtCallType == CT_HELPER)
18031     {
18032         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER);
18033         return;
18034     }
18035
18036     /* Ignore indirect calls */
18037     if (call->gtCallType == CT_INDIRECT)
18038     {
18039         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED);
18040         return;
18041     }
18042
18043     /* I removed the check for BBJ_THROW.  BBJ_THROW is usually marked as rarely run.  This more or less
18044      * restricts the inliner to non-expanding inlines.  I removed the check to allow for non-expanding
18045      * inlining in throw blocks.  I should consider the same thing for catch and filter regions. */
18046
18047     CORINFO_METHOD_HANDLE fncHandle = call->gtCallMethHnd;
18048     unsigned              methAttr;
18049
18050     // Reuse method flags from the original callInfo if possible
18051     if (fncHandle == callInfo->hMethod)
18052     {
18053         methAttr = callInfo->methodFlags;
18054     }
18055     else
18056     {
18057         methAttr = info.compCompHnd->getMethodAttribs(fncHandle);
18058     }
18059
18060 #ifdef DEBUG
18061     if (compStressCompile(STRESS_FORCE_INLINE, 0))
18062     {
18063         methAttr |= CORINFO_FLG_FORCEINLINE;
18064     }
18065 #endif
18066
18067     // Check for COMPlus_AggressiveInlining
18068     if (compDoAggressiveInlining)
18069     {
18070         methAttr |= CORINFO_FLG_FORCEINLINE;
18071     }
18072
18073     if (!(methAttr & CORINFO_FLG_FORCEINLINE))
18074     {
18075         /* Don't bother inline blocks that are in the filter region */
18076         if (bbInCatchHandlerILRange(compCurBB))
18077         {
18078 #ifdef DEBUG
18079             if (verbose)
18080             {
18081                 printf("\nWill not inline blocks that are in the catch handler region\n");
18082             }
18083
18084 #endif
18085
18086             inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH);
18087             return;
18088         }
18089
18090         if (bbInFilterILRange(compCurBB))
18091         {
18092 #ifdef DEBUG
18093             if (verbose)
18094             {
18095                 printf("\nWill not inline blocks that are in the filter region\n");
18096             }
18097 #endif
18098
18099             inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER);
18100             return;
18101         }
18102     }
18103
18104     /* If the caller's stack frame is marked, then we can't do any inlining. Period. */
18105
18106     if (opts.compNeedSecurityCheck)
18107     {
18108         inlineResult.NoteFatal(InlineObservation::CALLER_NEEDS_SECURITY_CHECK);
18109         return;
18110     }
18111
18112     /* Check if we tried to inline this method before */
18113
18114     if (methAttr & CORINFO_FLG_DONT_INLINE)
18115     {
18116         inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE);
18117         return;
18118     }
18119
18120     /* Cannot inline synchronized methods */
18121
18122     if (methAttr & CORINFO_FLG_SYNCH)
18123     {
18124         inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED);
18125         return;
18126     }
18127
18128     /* Do not inline if callee needs security checks (since they would then mark the wrong frame) */
18129
18130     if (methAttr & CORINFO_FLG_SECURITYCHECK)
18131     {
18132         inlineResult.NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
18133         return;
18134     }
18135
18136     InlineCandidateInfo* inlineCandidateInfo = nullptr;
18137     impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult);
18138
18139     if (inlineResult.IsFailure())
18140     {
18141         return;
18142     }
18143
18144     // The old value should be NULL
18145     assert(call->gtInlineCandidateInfo == nullptr);
18146
18147     call->gtInlineCandidateInfo = inlineCandidateInfo;
18148
18149     // Mark the call node as inline candidate.
18150     call->gtFlags |= GTF_CALL_INLINE_CANDIDATE;
18151
18152     // Let the strategy know there's another candidate.
18153     impInlineRoot()->m_inlineStrategy->NoteCandidate();
18154
18155     // Since we're not actually inlining yet, and this call site is
18156     // still just an inline candidate, there's nothing to report.
18157     inlineResult.SetReported();
18158 }
18159
18160 /******************************************************************************/
18161 // Returns true if the given intrinsic will be implemented by target-specific
18162 // instructions.
18163
18164 bool Compiler::IsTargetIntrinsic(CorInfoIntrinsics intrinsicId)
18165 {
18166 #if defined(_TARGET_AMD64_) || (defined(_TARGET_X86_) && !defined(LEGACY_BACKEND))
18167     switch (intrinsicId)
18168     {
18169         // Amd64 only has SSE2 instructions to directly compute sqrt/abs.
18170         //
18171         // TODO: Because the x86 backend only targets SSE for floating-point code,
18172         //       it does not treat Sine, Cosine, or Round as intrinsics (JIT32
18173         //       implemented those intrinsics as x87 instructions). If this poses
18174         //       a CQ problem, it may be necessary to change the implementation of
18175         //       the helper calls to decrease call overhead or switch back to the
18176         //       x87 instructions. This is tracked by #7097.
18177         case CORINFO_INTRINSIC_Sqrt:
18178         case CORINFO_INTRINSIC_Abs:
18179             return true;
18180
18181         default:
18182             return false;
18183     }
18184 #elif defined(_TARGET_ARM64_)
18185     switch (intrinsicId)
18186     {
18187         case CORINFO_INTRINSIC_Sqrt:
18188         case CORINFO_INTRINSIC_Abs:
18189         case CORINFO_INTRINSIC_Round:
18190             return true;
18191
18192         default:
18193             return false;
18194     }
18195 #elif defined(_TARGET_ARM_)
18196     switch (intrinsicId)
18197     {
18198         case CORINFO_INTRINSIC_Sqrt:
18199         case CORINFO_INTRINSIC_Abs:
18200         case CORINFO_INTRINSIC_Round:
18201             return true;
18202
18203         default:
18204             return false;
18205     }
18206 #elif defined(_TARGET_X86_)
18207     switch (intrinsicId)
18208     {
18209         case CORINFO_INTRINSIC_Sin:
18210         case CORINFO_INTRINSIC_Cos:
18211         case CORINFO_INTRINSIC_Sqrt:
18212         case CORINFO_INTRINSIC_Abs:
18213         case CORINFO_INTRINSIC_Round:
18214             return true;
18215
18216         default:
18217             return false;
18218     }
18219 #else
18220     // TODO: This portion of the logic is not implemented for other architectures.
18221     // The reason for returning true is that on all other architectures the only
18222     // intrinsics enabled are target intrinsics.
18223     return true;
18224 #endif //_TARGET_AMD64_
18225 }
18226
18227 /******************************************************************************/
18228 // Returns true if the given intrinsic will be implemented by calling System.Math
18229 // methods.
18230
18231 bool Compiler::IsIntrinsicImplementedByUserCall(CorInfoIntrinsics intrinsicId)
18232 {
18233     // Currently, if a math intrinsic is not implemented by target-specific
18234     // instructions, it will be implemented by a System.Math call. In the
18235     // future, if we turn to implementing some of them with helper calls,
18236     // this predicate needs to be revisited.
18237     return !IsTargetIntrinsic(intrinsicId);
18238 }
18239
18240 bool Compiler::IsMathIntrinsic(CorInfoIntrinsics intrinsicId)
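/******************************************************************************/
// Returns true if the given intrinsic id is one of the System.Math intrinsics
// recognized by the JIT, whether implemented by target-specific instructions
// or by a call to the System.Math method.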
18241 {
18242     switch (intrinsicId)
18243     {
18244         case CORINFO_INTRINSIC_Sin:
18245         case CORINFO_INTRINSIC_Sqrt:
18246         case CORINFO_INTRINSIC_Abs:
18247         case CORINFO_INTRINSIC_Cos:
18248         case CORINFO_INTRINSIC_Round:
18249         case CORINFO_INTRINSIC_Cosh:
18250         case CORINFO_INTRINSIC_Sinh:
18251         case CORINFO_INTRINSIC_Tan:
18252         case CORINFO_INTRINSIC_Tanh:
18253         case CORINFO_INTRINSIC_Asin:
18254         case CORINFO_INTRINSIC_Acos:
18255         case CORINFO_INTRINSIC_Atan:
18256         case CORINFO_INTRINSIC_Atan2:
18257         case CORINFO_INTRINSIC_Log10:
18258         case CORINFO_INTRINSIC_Pow:
18259         case CORINFO_INTRINSIC_Exp:
18260         case CORINFO_INTRINSIC_Ceiling:
18261         case CORINFO_INTRINSIC_Floor:
18262             return true;
18263         default:
18264             return false;
18265     }
18266 }
18267
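// Returns true if the given tree is a GT_INTRINSIC node for one of the math
// intrinsics listed above.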
18268 bool Compiler::IsMathIntrinsic(GenTreePtr tree)
18269 {
18270     return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->gtIntrinsic.gtIntrinsicId);
18271 }
18272 /*****************************************************************************/