1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
4
5 /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
6 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
7 XX                                                                           XX
8 XX                           Importer                                        XX
9 XX                                                                           XX
10 XX   Imports the given method and converts it to semantic trees              XX
11 XX                                                                           XX
12 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
13 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
14 */
15
16 #include "jitpch.h"
17 #ifdef _MSC_VER
18 #pragma hdrstop
19 #endif
20
21 #include "corexcep.h"
22
23 #define Verify(cond, msg)                                                                                              \
24     do                                                                                                                 \
25     {                                                                                                                  \
26         if (!(cond))                                                                                                   \
27         {                                                                                                              \
28             verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
29         }                                                                                                              \
30     } while (0)
31
32 #define VerifyOrReturn(cond, msg)                                                                                      \
33     do                                                                                                                 \
34     {                                                                                                                  \
35         if (!(cond))                                                                                                   \
36         {                                                                                                              \
37             verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
38             return;                                                                                                    \
39         }                                                                                                              \
40     } while (0)
41
42 #define VerifyOrReturnSpeculative(cond, msg, speculative)                                                              \
43     do                                                                                                                 \
44     {                                                                                                                  \
45         if (speculative)                                                                                               \
46         {                                                                                                              \
47             if (!(cond))                                                                                               \
48             {                                                                                                          \
49                 return false;                                                                                          \
50             }                                                                                                          \
51         }                                                                                                              \
52         else                                                                                                           \
53         {                                                                                                              \
54             if (!(cond))                                                                                               \
55             {                                                                                                          \
56                 verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                   \
57                 return false;                                                                                          \
58             }                                                                                                          \
59         }                                                                                                              \
60     } while (0)
61
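// Illustrative sketch (not in the original source): verification helpers in this file
// typically wrap their checks in the macros above, along these lines (the operands
// shown are only examples):
//
//     Verify(tiCompatibleWith(actualType, expectedType, true), "type mismatch");      // report, keep going
//     VerifyOrReturn(obj.IsObjRef(), "object ref expected");                          // report, bail out of a void helper
//     VerifyOrReturnSpeculative(obj.IsObjRef(), "object ref expected", speculative);  // quiet 'false' when probing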
62 /*****************************************************************************/
63
64 void Compiler::impInit()
65 {
66
67 #ifdef DEBUG
68     impTreeList        = nullptr;
69     impTreeLast        = nullptr;
70     impInlinedCodeSize = 0;
71 #endif
72 }
73
74 /*****************************************************************************
75  *
76  *  Pushes the given tree on the stack.
77  */
78
79 void Compiler::impPushOnStack(GenTreePtr tree, typeInfo ti)
80 {
81     /* Check for overflow. If inlining, we may be using a bigger stack */
82
83     if ((verCurrentState.esStackDepth >= info.compMaxStack) &&
84         (verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0)))
85     {
86         BADCODE("stack overflow");
87     }
88
89 #ifdef DEBUG
90     // If we are pushing a struct, make certain we know the precise type!
91     if (tree->TypeGet() == TYP_STRUCT)
92     {
93         assert(ti.IsType(TI_STRUCT));
94         CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle();
95         assert(clsHnd != NO_CLASS_HANDLE);
96     }
97
98     if (tiVerificationNeeded && !ti.IsDead())
99     {
100         assert(typeInfo::AreEquivalent(NormaliseForStack(ti), ti)); // types are normalized
101
102         // The ti type is consistent with the tree type.
103         //
104
105         // On 64-bit systems, nodes whose "proper" type is "native int" get labeled TYP_LONG.
106         // In the verification type system, we always transform "native int" to "TI_INT".
107         // Ideally, we would keep track of which nodes labeled "TYP_LONG" are really "native int", but
108         // attempts to do that have proved too difficult.  Instead, we'll assume that in checks like this,
109         // when there's a mismatch, it's because of this reason -- the typeInfo::AreEquivalentModuloNativeInt
110         // method used in the last disjunct allows exactly this mismatch.
111         assert(ti.IsDead() || ti.IsByRef() && (tree->TypeGet() == TYP_I_IMPL || tree->TypeGet() == TYP_BYREF) ||
112                ti.IsUnboxedGenericTypeVar() && tree->TypeGet() == TYP_REF ||
113                ti.IsObjRef() && tree->TypeGet() == TYP_REF || ti.IsMethod() && tree->TypeGet() == TYP_I_IMPL ||
114                ti.IsType(TI_STRUCT) && tree->TypeGet() != TYP_REF ||
115                typeInfo::AreEquivalentModuloNativeInt(NormaliseForStack(ti),
116                                                       NormaliseForStack(typeInfo(tree->TypeGet()))));
117
118         // If it is a struct type, make certain we normalized the primitive types
119         assert(!ti.IsType(TI_STRUCT) ||
120                info.compCompHnd->getTypeForPrimitiveValueClass(ti.GetClassHandle()) == CORINFO_TYPE_UNDEF);
121     }
122
123 #if VERBOSE_VERIFY
124     if (VERBOSE && tiVerificationNeeded)
125     {
126         printf("\n");
127         printf(TI_DUMP_PADDING);
128         printf("About to push to stack: ");
129         ti.Dump();
130     }
131 #endif // VERBOSE_VERIFY
132
133 #endif // DEBUG
134
135     verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti;
136     verCurrentState.esStack[verCurrentState.esStackDepth++].val      = tree;
137
138     if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
139     {
140         compLongUsed = true;
141     }
142     else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
143     {
144         compFloatingPointUsed = true;
145     }
146 }
147
148 /******************************************************************************/
149 // Used in the inliner, where we can assume type-safe code. Please don't use in the importer!
150 inline void Compiler::impPushOnStackNoType(GenTreePtr tree)
151 {
152     assert(verCurrentState.esStackDepth < impStkSize);
153     INDEBUG(verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = typeInfo());
154     verCurrentState.esStack[verCurrentState.esStackDepth++].val              = tree;
155
156     if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
157     {
158         compLongUsed = true;
159     }
160     else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
161     {
162         compFloatingPointUsed = true;
163     }
164 }
165
166 inline void Compiler::impPushNullObjRefOnStack()
167 {
168     impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL));
169 }
170
171 // This method gets called when we run into unverifiable code
172 // (and we are verifying the method)
173
174 inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file)
175                                                           DEBUGARG(unsigned line))
176 {
177     // Remember that the code is not verifiable
178     // Note that the method may yet pass canSkipMethodVerification(),
179     // and so the presence of unverifiable code may not be an issue.
180     tiIsVerifiableCode = FALSE;
181
182 #ifdef DEBUG
183     const char* tail = strrchr(file, '\\');
184     if (tail)
185     {
186         file = tail + 1;
187     }
188
189     if (JitConfig.JitBreakOnUnsafeCode())
190     {
191         assert(!"Unsafe code detected");
192     }
193 #endif
194
195     JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
196             msg, info.compFullName, impCurOpcName, impCurOpcOffs));
197
198     if (verNeedsVerification() || compIsForImportOnly())
199     {
200         JITLOG((LL_ERROR, "Verification failure:  %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
201                 msg, info.compFullName, impCurOpcName, impCurOpcOffs));
202         verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line));
203     }
204 }
205
206 inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file)
207                                                                     DEBUGARG(unsigned line))
208 {
209     JITLOG((LL_ERROR, "Verification failure:  %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
210             msg, info.compFullName, impCurOpcName, impCurOpcOffs));
211
212 #ifdef DEBUG
213     //    BreakIfDebuggerPresent();
214     if (getBreakOnBadCode())
215     {
216         assert(!"Typechecking error");
217     }
218 #endif
219
220     RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr);
221     UNREACHABLE();
222 }
223
224 // Helper function that tells us whether the IL instruction at the given code address
225 // consumes an address from the top of the stack. We use it to avoid marking locals
226 // as address-taken (lvAddrTaken) unnecessarily.
227 bool Compiler::impILConsumesAddr(const BYTE* codeAddr, CORINFO_METHOD_HANDLE fncHandle, CORINFO_MODULE_HANDLE scpHandle)
228 {
229     assert(!compIsForInlining());
230
231     OPCODE opcode;
232
233     opcode = (OPCODE)getU1LittleEndian(codeAddr);
234
235     switch (opcode)
236     {
237         // case CEE_LDFLDA: We're taking this one out as if you have a sequence
238         // like
239         //
240         //          ldloca.0
241         //          ldflda whatever
242         //
243         // of a primitive-like struct, you end up after morphing with the address of a local
244         // that's not marked as address-taken, which is wrong. Also, ldflda is usually used
245         // for structs that contain other structs, which isn't a case we handle very
246         // well now for other reasons.
247
248         case CEE_LDFLD:
249         {
250             // We won't collapse small fields. This is probably not the right place to have this
251             // check, but we're only using the function for this purpose, and it is easy to factor
252             // out if we need to do so.
253
254             CORINFO_RESOLVED_TOKEN resolvedToken;
255             impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field);
256
257             CORINFO_CLASS_HANDLE clsHnd;
258             var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField, &clsHnd));
259
260             // Preserve 'small' int types
261             if (lclTyp > TYP_INT)
262             {
263                 lclTyp = genActualType(lclTyp);
264             }
265
266             if (varTypeIsSmall(lclTyp))
267             {
268                 return false;
269             }
270
271             return true;
272         }
273         default:
274             break;
275     }
276
277     return false;
278 }
279
280 void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind)
281 {
282     pResolvedToken->tokenContext = impTokenLookupContextHandle;
283     pResolvedToken->tokenScope   = info.compScopeHnd;
284     pResolvedToken->token        = getU4LittleEndian(addr);
285     pResolvedToken->tokenType    = kind;
286
287     if (!tiVerificationNeeded)
288     {
289         info.compCompHnd->resolveToken(pResolvedToken);
290     }
291     else
292     {
293         Verify(eeTryResolveToken(pResolvedToken), "Token resolution failed");
294     }
295 }
296
297 /*****************************************************************************
298  *
299  *  Pop one tree from the stack.
300  */
301
302 StackEntry Compiler::impPopStack()
303 {
304     if (verCurrentState.esStackDepth == 0)
305     {
306         BADCODE("stack underflow");
307     }
308
309 #ifdef DEBUG
310 #if VERBOSE_VERIFY
311     if (VERBOSE && tiVerificationNeeded)
312     {
313         JITDUMP("\n");
314         printf(TI_DUMP_PADDING);
315         printf("About to pop from the stack: ");
316         const typeInfo& ti = verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo;
317         ti.Dump();
318     }
319 #endif // VERBOSE_VERIFY
320 #endif // DEBUG
321
322     return verCurrentState.esStack[--verCurrentState.esStackDepth];
323 }
324
325 StackEntry Compiler::impPopStack(CORINFO_CLASS_HANDLE& structType)
326 {
327     StackEntry ret = impPopStack();
328     structType     = verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo.GetClassHandle();
329     return (ret);
330 }
331
332 GenTreePtr Compiler::impPopStack(typeInfo& ti)
333 {
334     StackEntry ret = impPopStack();
335     ti             = ret.seTypeInfo;
336     return (ret.val);
337 }
338
339 /*****************************************************************************
340  *
341  *  Peek at the n'th (0-based) tree from the top of the stack.
342  */
343
344 StackEntry& Compiler::impStackTop(unsigned n)
345 {
346     if (verCurrentState.esStackDepth <= n)
347     {
348         BADCODE("stack underflow");
349     }
350
351     return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1];
352 }
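// Illustrative note (not in the original source): impStackTop(0) is the entry on top
// of the stack and impStackTop(1) is the entry just below it; both raise
// BADCODE("stack underflow") if the stack is not that deep.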
353 /*****************************************************************************
354  *  Some of the trees are spilled specially. While unspilling them, or while
355  *  making a copy, they need special handling. This function enumerates
356  *  the operators that are possible after spilling.
357  */
358
359 #ifdef DEBUG // only used in asserts
360 static bool impValidSpilledStackEntry(GenTreePtr tree)
361 {
362     if (tree->gtOper == GT_LCL_VAR)
363     {
364         return true;
365     }
366
367     if (tree->OperIsConst())
368     {
369         return true;
370     }
371
372     return false;
373 }
374 #endif
375
376 /*****************************************************************************
377  *
378  *  The following logic is used to save/restore stack contents.
379  *  If 'copy' is true, then we make a copy of the trees on the stack. These
380  *  have to all be cloneable/spilled values.
381  */
382
383 void Compiler::impSaveStackState(SavedStack* savePtr, bool copy)
384 {
385     savePtr->ssDepth = verCurrentState.esStackDepth;
386
387     if (verCurrentState.esStackDepth)
388     {
389         savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth];
390         size_t saveSize  = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees);
391
392         if (copy)
393         {
394             StackEntry* table = savePtr->ssTrees;
395
396             /* Make a fresh copy of all the stack entries */
397
398             for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++)
399             {
400                 table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo;
401                 GenTreePtr tree   = verCurrentState.esStack[level].val;
402
403                 assert(impValidSpilledStackEntry(tree));
404
405                 switch (tree->gtOper)
406                 {
407                     case GT_CNS_INT:
408                     case GT_CNS_LNG:
409                     case GT_CNS_DBL:
410                     case GT_CNS_STR:
411                     case GT_LCL_VAR:
412                         table->val = gtCloneExpr(tree);
413                         break;
414
415                     default:
416                         assert(!"Bad oper - Not covered by impValidSpilledStackEntry()");
417                         break;
418                 }
419             }
420         }
421         else
422         {
423             memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize);
424         }
425     }
426 }
427
428 void Compiler::impRestoreStackState(SavedStack* savePtr)
429 {
430     verCurrentState.esStackDepth = savePtr->ssDepth;
431
432     if (verCurrentState.esStackDepth)
433     {
434         memcpy(verCurrentState.esStack, savePtr->ssTrees,
435                verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack));
436     }
437 }
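// A sketch of paired usage (illustrative, not from the original source):
//
//     SavedStack blockState;
//     impSaveStackState(&blockState, true);  // clone the cloneable/spilled entries
//     // ... import something that may disturb the evaluation stack ...
//     impRestoreStackState(&blockState);     // put the saved depth and entries back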
438
439 /*****************************************************************************
440  *
441  *  Get the tree list started for a new basic block.
442  */
443 inline void Compiler::impBeginTreeList()
444 {
445     assert(impTreeList == nullptr && impTreeLast == nullptr);
446
447     impTreeList = impTreeLast = new (this, GT_BEG_STMTS) GenTree(GT_BEG_STMTS, TYP_VOID);
448 }
449
450 /*****************************************************************************
451  *
452  *  Store the given start and end stmt in the given basic block. This is
453  *  mostly called by impEndTreeList(BasicBlock *block). It is called
454  *  directly only for handling CEE_LEAVEs out of finally-protected try's.
455  */
456
457 inline void Compiler::impEndTreeList(BasicBlock* block, GenTreePtr firstStmt, GenTreePtr lastStmt)
458 {
459     assert(firstStmt->gtOper == GT_STMT);
460     assert(lastStmt->gtOper == GT_STMT);
461
462     /* Make the list circular, so that we can easily walk it backwards */
463
464     firstStmt->gtPrev = lastStmt;
465
466     /* Store the tree list in the basic block */
467
468     block->bbTreeList = firstStmt;
469
470     /* The block should not already be marked as imported */
471     assert((block->bbFlags & BBF_IMPORTED) == 0);
472
473     block->bbFlags |= BBF_IMPORTED;
474 }
475
476 /*****************************************************************************
477  *
478  *  Store the current tree list in the given basic block.
479  */
480
481 inline void Compiler::impEndTreeList(BasicBlock* block)
482 {
483     assert(impTreeList->gtOper == GT_BEG_STMTS);
484
485     GenTreePtr firstTree = impTreeList->gtNext;
486
487     if (!firstTree)
488     {
489         /* The block should not already be marked as imported */
490         assert((block->bbFlags & BBF_IMPORTED) == 0);
491
492         // Empty block. Just mark it as imported
493         block->bbFlags |= BBF_IMPORTED;
494     }
495     else
496     {
497         // Ignore the GT_BEG_STMTS
498         assert(firstTree->gtPrev == impTreeList);
499
500         impEndTreeList(block, firstTree, impTreeLast);
501     }
502
503 #ifdef DEBUG
504     if (impLastILoffsStmt != nullptr)
505     {
506         impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
507         impLastILoffsStmt                          = nullptr;
508     }
509
510     impTreeList = impTreeLast = nullptr;
511 #endif
512 }
513
514 /*****************************************************************************
515  *
516  *  Check that storing the given tree doesn't mess up the semantic order. Note
517  *  that this has only limited value as we can only check [0..chkLevel).
518  */
519
520 inline void Compiler::impAppendStmtCheck(GenTreePtr stmt, unsigned chkLevel)
521 {
522 #ifndef DEBUG
523     return;
524 #else
525     assert(stmt->gtOper == GT_STMT);
526
527     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
528     {
529         chkLevel = verCurrentState.esStackDepth;
530     }
531
532     if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE)
533     {
534         return;
535     }
536
537     GenTreePtr tree = stmt->gtStmt.gtStmtExpr;
538
539     // Calls can only be appended if there are no GTF_GLOB_EFFECT nodes on the stack
540
541     if (tree->gtFlags & GTF_CALL)
542     {
543         for (unsigned level = 0; level < chkLevel; level++)
544         {
545             assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0);
546         }
547     }
548
549     if (tree->gtOper == GT_ASG)
550     {
551         // For an assignment to a local variable, all references to that
552         // variable have to be spilled. If it is aliased, all calls and
553         // indirect accesses have to be spilled
554
555         if (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR)
556         {
557             unsigned lclNum = tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
558             for (unsigned level = 0; level < chkLevel; level++)
559             {
560                 assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum, false));
561                 assert(!lvaTable[lclNum].lvAddrExposed ||
562                        (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0);
563             }
564         }
565
566         // If the access may be to global memory, all side effects have to be spilled.
567
568         else if (tree->gtOp.gtOp1->gtFlags & GTF_GLOB_REF)
569         {
570             for (unsigned level = 0; level < chkLevel; level++)
571             {
572                 assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0);
573             }
574         }
575     }
576 #endif
577 }
578
579 /*****************************************************************************
580  *
581  *  Append the given GT_STMT node to the current block's tree list.
582  *  [0..chkLevel) is the portion of the stack which we will check for
583  *    interference with stmt and spill if needed.
584  */
585
586 inline void Compiler::impAppendStmt(GenTreePtr stmt, unsigned chkLevel)
587 {
588     assert(stmt->gtOper == GT_STMT);
589     noway_assert(impTreeLast != nullptr);
590
591     /* If the statement being appended has any side-effects, check the stack
592        to see if anything needs to be spilled to preserve correct ordering. */
593
594     GenTreePtr expr  = stmt->gtStmt.gtStmtExpr;
595     unsigned   flags = expr->gtFlags & GTF_GLOB_EFFECT;
596
597     // Assignments to (unaliased) locals don't count as a side-effect, as
598     // we handle them specially using impSpillLclRefs(). Temp locals should
599     // be fine too.
600
601     if ((expr->gtOper == GT_ASG) && (expr->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
602         !(expr->gtOp.gtOp1->gtFlags & GTF_GLOB_REF) && !gtHasLocalsWithAddrOp(expr->gtOp.gtOp2))
603     {
604         unsigned op2Flags = expr->gtOp.gtOp2->gtFlags & GTF_GLOB_EFFECT;
605         assert(flags == (op2Flags | GTF_ASG));
606         flags = op2Flags;
607     }
608
609     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
610     {
611         chkLevel = verCurrentState.esStackDepth;
612     }
613
614     if (chkLevel && chkLevel != (unsigned)CHECK_SPILL_NONE)
615     {
616         assert(chkLevel <= verCurrentState.esStackDepth);
617
618         if (flags)
619         {
620             // If there is a call, we have to spill global refs
621             bool spillGlobEffects = (flags & GTF_CALL) ? true : false;
622
623             if (expr->gtOper == GT_ASG)
624             {
625                 GenTree* lhs = expr->gtGetOp1();
626                 // If we are assigning to a global ref, we have to spill global refs on stack.
627                 // TODO-1stClassStructs: Previously, spillGlobEffects was set to true for
628                 // GT_INITBLK and GT_COPYBLK, but this is overly conservative, and should be
629                 // revisited. (Note that it was NOT set to true for GT_COPYOBJ.)
630                 if (!expr->OperIsBlkOp())
631                 {
632                     // If we are assigning to a global ref, we have to spill global refs on stack
633                     if ((lhs->gtFlags & GTF_GLOB_REF) != 0)
634                     {
635                         spillGlobEffects = true;
636                     }
637                 }
638                 else if ((lhs->OperIsBlk() && !lhs->AsBlk()->HasGCPtr()) ||
639                          ((lhs->OperGet() == GT_LCL_VAR) &&
640                           (lvaTable[lhs->AsLclVarCommon()->gtLclNum].lvStructGcCount == 0)))
641                 {
642                     spillGlobEffects = true;
643                 }
644             }
645
646             impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt"));
647         }
648         else
649         {
650             impSpillSpecialSideEff();
651         }
652     }
653
654     impAppendStmtCheck(stmt, chkLevel);
655
656     /* Point 'prev' at the previous node, so that we can walk backwards */
657
658     stmt->gtPrev = impTreeLast;
659
660     /* Append the expression statement to the list */
661
662     impTreeLast->gtNext = stmt;
663     impTreeLast         = stmt;
664
665 #ifdef FEATURE_SIMD
666     impMarkContiguousSIMDFieldAssignments(stmt);
667 #endif
668
669     /* Once we set impCurStmtOffs in an appended tree, we are ready to
670        report the following offsets. So reset impCurStmtOffs */
671
672     if (impTreeLast->gtStmt.gtStmtILoffsx == impCurStmtOffs)
673     {
674         impCurStmtOffsSet(BAD_IL_OFFSET);
675     }
676
677 #ifdef DEBUG
678     if (impLastILoffsStmt == nullptr)
679     {
680         impLastILoffsStmt = stmt;
681     }
682
683     if (verbose)
684     {
685         printf("\n\n");
686         gtDispTree(stmt);
687     }
688 #endif
689 }
690
691 /*****************************************************************************
692  *
693  *  Insert the given GT_STMT "stmt" before GT_STMT "stmtBefore"
694  */
695
696 inline void Compiler::impInsertStmtBefore(GenTreePtr stmt, GenTreePtr stmtBefore)
697 {
698     assert(stmt->gtOper == GT_STMT);
699     assert(stmtBefore->gtOper == GT_STMT);
700
701     GenTreePtr stmtPrev = stmtBefore->gtPrev;
702     stmt->gtPrev        = stmtPrev;
703     stmt->gtNext        = stmtBefore;
704     stmtPrev->gtNext    = stmt;
705     stmtBefore->gtPrev  = stmt;
706 }
707
708 /*****************************************************************************
709  *
710  *  Append the given expression tree to the current block's tree list.
711  *  Return the newly created statement.
712  */
713
714 GenTreePtr Compiler::impAppendTree(GenTreePtr tree, unsigned chkLevel, IL_OFFSETX offset)
715 {
716     assert(tree);
717
718     /* Allocate an 'expression statement' node */
719
720     GenTreePtr expr = gtNewStmt(tree, offset);
721
722     /* Append the statement to the current block's stmt list */
723
724     impAppendStmt(expr, chkLevel);
725
726     return expr;
727 }
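// Note on chkLevel (an illustrative summary, not in the original source): passing
// (unsigned)CHECK_SPILL_ALL makes impAppendStmt consider the entire evaluation stack
// when deciding what to spill, while (unsigned)CHECK_SPILL_NONE appends the statement
// without any interference checks. A typical call site looks roughly like:
//
//     impAppendTree(asg, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);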
728
729 /*****************************************************************************
730  *
731  *  Insert the given expression tree before GT_STMT "stmtBefore"
732  */
733
734 void Compiler::impInsertTreeBefore(GenTreePtr tree, IL_OFFSETX offset, GenTreePtr stmtBefore)
735 {
736     assert(stmtBefore->gtOper == GT_STMT);
737
738     /* Allocate an 'expression statement' node */
739
740     GenTreePtr expr = gtNewStmt(tree, offset);
741
742     /* Insert the statement before 'stmtBefore' in the current block's stmt list */
743
744     impInsertStmtBefore(expr, stmtBefore);
745 }
746
747 /*****************************************************************************
748  *
749  *  Append an assignment of the given value to a temp to the current tree list.
750  *  curLevel is the stack level for which the spill to the temp is being done.
751  */
752
753 void Compiler::impAssignTempGen(unsigned    tmp,
754                                 GenTreePtr  val,
755                                 unsigned    curLevel,
756                                 GenTreePtr* pAfterStmt, /* = NULL */
757                                 IL_OFFSETX  ilOffset,   /* = BAD_IL_OFFSET */
758                                 BasicBlock* block       /* = NULL */
759                                 )
760 {
761     GenTreePtr asg = gtNewTempAssign(tmp, val);
762
763     if (!asg->IsNothingNode())
764     {
765         if (pAfterStmt)
766         {
767             GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
768             *pAfterStmt        = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
769         }
770         else
771         {
772             impAppendTree(asg, curLevel, impCurStmtOffs);
773         }
774     }
775 }
776
777 /*****************************************************************************
778  * same as above, but handle the valueclass case too
779  */
780
781 void Compiler::impAssignTempGen(unsigned             tmpNum,
782                                 GenTreePtr           val,
783                                 CORINFO_CLASS_HANDLE structType,
784                                 unsigned             curLevel,
785                                 GenTreePtr*          pAfterStmt, /* = NULL */
786                                 IL_OFFSETX           ilOffset,   /* = BAD_IL_OFFSET */
787                                 BasicBlock*          block       /* = NULL */
788                                 )
789 {
790     GenTreePtr asg;
791
792     if (varTypeIsStruct(val))
793     {
794         assert(tmpNum < lvaCount);
795         assert(structType != NO_CLASS_HANDLE);
796
797         // If the method is non-verifiable, the assert may not hold, so at least
798         // ignore it in the case when verification is turned on, since any block
799         // that tries to use the temp would have failed verification.
800         var_types varType = lvaTable[tmpNum].lvType;
801         assert(tiVerificationNeeded || varType == TYP_UNDEF || varTypeIsStruct(varType));
802         lvaSetStruct(tmpNum, structType, false);
803
804         // Now, set the type of the struct value. Note that lvaSetStruct may modify the type
805         // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType)
806         // that has been passed in for the value being assigned to the temp, in which case we
807         // need to set 'val' to that same type.
808         // Note also that if we always normalized the types of any node that might be a struct
809         // type, this would not be necessary - but that requires additional JIT/EE interface
810         // calls that may not actually be required - e.g. if we only access a field of a struct.
811
812         val->gtType = lvaTable[tmpNum].lvType;
813
814         GenTreePtr dst = gtNewLclvNode(tmpNum, val->gtType);
815         asg            = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, block);
816     }
817     else
818     {
819         asg = gtNewTempAssign(tmpNum, val);
820     }
821
822     if (!asg->IsNothingNode())
823     {
824         if (pAfterStmt)
825         {
826             GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
827             *pAfterStmt        = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
828         }
829         else
830         {
831             impAppendTree(asg, curLevel, impCurStmtOffs);
832         }
833     }
834 }
835
836 /*****************************************************************************
837  *
838  *  Pop the given number of values from the stack and return a list node with
839  *  their values.
840  *  The 'prefixTree' argument may optionally contain an argument
841  *  list that is prepended to the list returned from this function.
842  *
843  *  The notion of prepended is a bit misleading in that the list is backwards
844  *  from the way I would expect: The first element popped is at the end of
845  *  the returned list, and prefixTree is 'before' that, meaning closer to
846  *  the end of the list.  To get to prefixTree, you have to walk to the
847  *  end of the list.
848  *
849  *  For ARG_ORDER_R2L prefixTree is only used to insert extra arguments, as
850  *  such we reverse its meaning such that returnValue has a reversed
851  *  prefixTree at the head of the list.
852  */
853
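// A concrete sketch of the resulting ordering (illustrative, not from the original
// source): with arguments pushed in IL order a, b, c (c on top) and count == 3, each
// pop prepends to the list, so the returned list reads a -> b -> c. For ARG_ORDER_L2R
// any prefixTree follows c at the tail; for ARG_ORDER_R2L the prefixTree is reversed
// in place and ends up at the head of the returned list.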
854 GenTreeArgList* Compiler::impPopList(unsigned          count,
855                                      unsigned*         flagsPtr,
856                                      CORINFO_SIG_INFO* sig,
857                                      GenTreeArgList*   prefixTree)
858 {
859     assert(sig == nullptr || count == sig->numArgs);
860
861     unsigned             flags = 0;
862     CORINFO_CLASS_HANDLE structType;
863     GenTreeArgList*      treeList;
864
865     if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
866     {
867         treeList = nullptr;
868     }
869     else
870     { // ARG_ORDER_L2R
871         treeList = prefixTree;
872     }
873
874     while (count--)
875     {
876         StackEntry se   = impPopStack();
877         typeInfo   ti   = se.seTypeInfo;
878         GenTreePtr temp = se.val;
879
880         if (varTypeIsStruct(temp))
881         {
882             // Morph trees that aren't already OBJs or MKREFANY to be OBJs
883             assert(ti.IsType(TI_STRUCT));
884             structType = ti.GetClassHandleForValueClass();
885             temp       = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL);
886         }
887
888         /* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */
889         flags |= temp->gtFlags;
890         treeList = gtNewListNode(temp, treeList);
891     }
892
893     *flagsPtr = flags;
894
895     if (sig != nullptr)
896     {
897         if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
898             sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR)
899         {
900             // Make sure that all valuetypes (including enums) that we push are loaded.
901             // This is to guarantee that if a GC is triggered from the prestub of this method,
902             // all valuetypes in the method signature are already loaded.
903             // We need to be able to find the size of the valuetypes, but we cannot
904             // do a class-load from within GC.
905             info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass);
906         }
907
908         CORINFO_ARG_LIST_HANDLE argLst = sig->args;
909         CORINFO_CLASS_HANDLE    argClass;
910         CORINFO_CLASS_HANDLE    argRealClass;
911         GenTreeArgList*         args;
912         unsigned                sigSize;
913
914         for (args = treeList, count = sig->numArgs; count > 0; args = args->Rest(), count--)
915         {
916             PREFIX_ASSUME(args != nullptr);
917
918             CorInfoType corType = strip(info.compCompHnd->getArgType(sig, argLst, &argClass));
919
920             // insert implied casts (from float to double or double to float)
921
922             if (corType == CORINFO_TYPE_DOUBLE && args->Current()->TypeGet() == TYP_FLOAT)
923             {
924                 args->Current() = gtNewCastNode(TYP_DOUBLE, args->Current(), TYP_DOUBLE);
925             }
926             else if (corType == CORINFO_TYPE_FLOAT && args->Current()->TypeGet() == TYP_DOUBLE)
927             {
928                 args->Current() = gtNewCastNode(TYP_FLOAT, args->Current(), TYP_FLOAT);
929             }
930
931             // insert any widening or narrowing casts for backwards compatibility
932
933             args->Current() = impImplicitIorI4Cast(args->Current(), JITtype2varType(corType));
934
935             if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR &&
936                 corType != CORINFO_TYPE_VAR && (argRealClass = info.compCompHnd->getArgClass(sig, argLst)) != nullptr)
937             {
938                 // Everett MC++ could generate IL with mismatched valuetypes. It used to work with the Everett JIT,
939                 // but it stopped working in Whidbey when we started passing simple valuetypes as underlying
940                 // primitive types.
941                 // We will try to adjust for this case here to avoid breaking customers code (see VSW 485789 for
942                 // details).
943                 if (corType == CORINFO_TYPE_VALUECLASS && !varTypeIsStruct(args->Current()))
944                 {
945                     args->Current() = impNormStructVal(args->Current(), argRealClass, (unsigned)CHECK_SPILL_ALL, true);
946                 }
947
948                 // Make sure that all valuetypes (including enums) that we push are loaded.
949                 // This is to guarantee that if a GC is triggered from the prestub of this method,
950                 // all valuetypes in the method signature are already loaded.
951                 // We need to be able to find the size of the valuetypes, but we cannot
952                 // do a class-load from within GC.
953                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass);
954             }
955
956             argLst = info.compCompHnd->getArgNext(argLst);
957         }
958     }
959
960     if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
961     {
962         // Prepend the prefixTree
963
964         // Simple in-place reversal to place treeList
965         // at the end of a reversed prefixTree
966         while (prefixTree != nullptr)
967         {
968             GenTreeArgList* next = prefixTree->Rest();
969             prefixTree->Rest()   = treeList;
970             treeList             = prefixTree;
971             prefixTree           = next;
972         }
973     }
974     return treeList;
975 }
976
977 /*****************************************************************************
978  *
979  *  Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.)
980  *  The first "skipReverseCount" items are not reversed.
981  */
982
983 GenTreeArgList* Compiler::impPopRevList(unsigned          count,
984                                         unsigned*         flagsPtr,
985                                         CORINFO_SIG_INFO* sig,
986                                         unsigned          skipReverseCount)
987
988 {
989     assert(skipReverseCount <= count);
990
991     GenTreeArgList* list = impPopList(count, flagsPtr, sig);
992
993     // reverse the list
994     if (list == nullptr || skipReverseCount == count)
995     {
996         return list;
997     }
998
999     GenTreeArgList* ptr          = nullptr; // Initialized to the first node that needs to be reversed
1000     GenTreeArgList* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed
1001
1002     if (skipReverseCount == 0)
1003     {
1004         ptr = list;
1005     }
1006     else
1007     {
1008         lastSkipNode = list;
1009         // Get to the first node that needs to be reversed
1010         for (unsigned i = 0; i < skipReverseCount - 1; i++)
1011         {
1012             lastSkipNode = lastSkipNode->Rest();
1013         }
1014
1015         PREFIX_ASSUME(lastSkipNode != nullptr);
1016         ptr = lastSkipNode->Rest();
1017     }
1018
1019     GenTreeArgList* reversedList = nullptr;
1020
1021     do
1022     {
1023         GenTreeArgList* tmp = ptr->Rest();
1024         ptr->Rest()         = reversedList;
1025         reversedList        = ptr;
1026         ptr                 = tmp;
1027     } while (ptr != nullptr);
1028
1029     if (skipReverseCount)
1030     {
1031         lastSkipNode->Rest() = reversedList;
1032         return list;
1033     }
1034     else
1035     {
1036         return reversedList;
1037     }
1038 }
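// Worked example (illustrative, not from the original source): if impPopList returns
// a -> b -> c -> d, then skipReverseCount == 0 yields d -> c -> b -> a, while
// skipReverseCount == 1 leaves the first node in place and reverses the rest,
// yielding a -> d -> c -> b.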
1039
1040 /*****************************************************************************
1041    Assign (copy) the structure from 'src' to 'dest'.  The structure is a value
1042    class of type 'structHnd'.  It returns the tree that should be appended to the
1043    statement list that represents the assignment.
1044    Temp assignments may be appended to impTreeList if spilling is necessary.
1045    curLevel is the stack level for which a spill may be done.
1046  */
1047
1048 GenTreePtr Compiler::impAssignStruct(GenTreePtr           dest,
1049                                      GenTreePtr           src,
1050                                      CORINFO_CLASS_HANDLE structHnd,
1051                                      unsigned             curLevel,
1052                                      GenTreePtr*          pAfterStmt, /* = NULL */
1053                                      BasicBlock*          block       /* = NULL */
1054                                      )
1055 {
1056     assert(varTypeIsStruct(dest));
1057
1058     while (dest->gtOper == GT_COMMA)
1059     {
1060         assert(varTypeIsStruct(dest->gtOp.gtOp2)); // Second thing is the struct
1061
1062         // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree.
1063         if (pAfterStmt)
1064         {
1065             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(dest->gtOp.gtOp1, impCurStmtOffs));
1066         }
1067         else
1068         {
1069             impAppendTree(dest->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1070         }
1071
1072         // set dest to the second thing
1073         dest = dest->gtOp.gtOp2;
1074     }
1075
1076     assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD ||
1077            dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX);
1078
1079     if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR &&
1080         src->gtLclVarCommon.gtLclNum == dest->gtLclVarCommon.gtLclNum)
1081     {
1082         // Make this a NOP
1083         return gtNewNothingNode();
1084     }
1085
1086     // TODO-1stClassStructs: Avoid creating an address if it is not needed,
1087     // or re-creating a Blk node if it is.
1088     GenTreePtr destAddr;
1089
1090     if (dest->gtOper == GT_IND || dest->OperIsBlk())
1091     {
1092         destAddr = dest->gtOp.gtOp1;
1093     }
1094     else
1095     {
1096         destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
1097     }
1098
1099     return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, block));
1100 }
1101
1102 /*****************************************************************************/
1103
1104 GenTreePtr Compiler::impAssignStructPtr(GenTreePtr           destAddr,
1105                                         GenTreePtr           src,
1106                                         CORINFO_CLASS_HANDLE structHnd,
1107                                         unsigned             curLevel,
1108                                         GenTreePtr*          pAfterStmt, /* = NULL */
1109                                         BasicBlock*          block       /* = NULL */
1110                                         )
1111 {
1112     var_types  destType;
1113     GenTreePtr dest      = nullptr;
1114     unsigned   destFlags = 0;
1115
1116 #if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1117     assert(varTypeIsStruct(src) || (src->gtOper == GT_ADDR && src->TypeGet() == TYP_BYREF));
1118     // TODO-ARM-BUG: Does ARM need this?
1119     // TODO-ARM64-BUG: Does ARM64 need this?
1120     assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1121            src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1122            src->gtOper == GT_COMMA || src->gtOper == GT_ADDR ||
1123            (src->TypeGet() != TYP_STRUCT && (GenTree::OperIsSIMD(src->gtOper) || src->gtOper == GT_LCL_FLD)));
1124 #else  // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1125     assert(varTypeIsStruct(src));
1126
1127     assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1128            src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1129            src->gtOper == GT_COMMA ||
1130            (src->TypeGet() != TYP_STRUCT && (GenTree::OperIsSIMD(src->gtOper) || src->gtOper == GT_LCL_FLD)));
1131 #endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1132     if (destAddr->OperGet() == GT_ADDR)
1133     {
1134         GenTree* destNode = destAddr->gtGetOp1();
1135         // If the actual destination is a local (for non-LEGACY_BACKEND), or already a block node, or is a node that
1136         // will be morphed, don't insert an OBJ(ADDR).
1137         if (destNode->gtOper == GT_INDEX || destNode->OperIsBlk()
1138 #ifndef LEGACY_BACKEND
1139             || ((destNode->OperGet() == GT_LCL_VAR) && (destNode->TypeGet() == src->TypeGet()))
1140 #endif // !LEGACY_BACKEND
1141                 )
1142         {
1143             dest = destNode;
1144         }
1145         destType = destNode->TypeGet();
1146     }
1147     else
1148     {
1149         destType = src->TypeGet();
1150     }
1151
1152     var_types asgType = src->TypeGet();
1153
1154     if (src->gtOper == GT_CALL)
1155     {
1156         if (src->AsCall()->TreatAsHasRetBufArg(this))
1157         {
1158             // Case of call returning a struct via hidden retbuf arg
1159
1160             // insert the return value buffer into the argument list as first byref parameter
1161             src->gtCall.gtCallArgs = gtNewListNode(destAddr, src->gtCall.gtCallArgs);
1162
1163             // now returns void, not a struct
1164             src->gtType = TYP_VOID;
1165
1166             // return the morphed call node
1167             return src;
1168         }
1169         else
1170         {
1171             // Case of call returning a struct in one or more registers.
1172
1173             var_types returnType = (var_types)src->gtCall.gtReturnType;
1174
1175             // We won't use a return buffer, so change the type of src->gtType to 'returnType'
1176             src->gtType = genActualType(returnType);
1177
1178             // First we try to change this to "LclVar/LclFld = call"
1179             //
1180             if ((destAddr->gtOper == GT_ADDR) && (destAddr->gtOp.gtOp1->gtOper == GT_LCL_VAR))
1181             {
1182                 // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD.
1183                 // That is, the IR will be of the form lclVar = call for multi-reg return
1184                 //
1185                 GenTreePtr lcl = destAddr->gtOp.gtOp1;
1186                 if (src->AsCall()->HasMultiRegRetVal())
1187                 {
1188                     // Mark the struct LclVar as used in a MultiReg return context
1189                     //  which currently makes it non promotable.
1190                     // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1191                     // handle multireg returns.
1192                     lcl->gtFlags |= GTF_DONT_CSE;
1193                     lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1194                 }
1195                 else // The call result is not a multireg return
1196                 {
1197                     // We change this to a GT_LCL_FLD (from a GT_ADDR of a GT_LCL_VAR)
1198                     lcl->ChangeOper(GT_LCL_FLD);
1199                     fgLclFldAssign(lcl->gtLclVarCommon.gtLclNum);
1200                 }
1201
1202                 lcl->gtType = src->gtType;
1203                 asgType     = src->gtType;
1204                 dest        = lcl;
1205
1206 #if defined(_TARGET_ARM_)
1207                 // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case,
1208                 // but that method has not been updated to include ARM.
1209                 impMarkLclDstNotPromotable(lcl->gtLclVarCommon.gtLclNum, src, structHnd);
1210                 lcl->gtFlags |= GTF_DONT_CSE;
1211 #elif defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1212                 // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
1213                 assert(!src->gtCall.IsVarargs() && "varargs not allowed for System V OSs.");
1214
1215                 // Make the struct non promotable. The eightbytes could contain multiple fields.
1216                 // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1217                 // handle multireg returns.
1218                 // TODO-Cleanup: Why is this needed here? It seems that this will be set even for
1219                 // non-multireg returns.
1220                 lcl->gtFlags |= GTF_DONT_CSE;
1221                 lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1222 #endif
1223             }
1224             else // we don't have a GT_ADDR of a GT_LCL_VAR
1225             {
1226                 // !!! The destination could be on stack. !!!
1227                 // This flag will let us choose the correct write barrier.
1228                 asgType   = returnType;
1229                 destFlags = GTF_IND_TGTANYWHERE;
1230             }
1231         }
1232     }
1233     else if (src->gtOper == GT_RET_EXPR)
1234     {
1235         GenTreePtr call = src->gtRetExpr.gtInlineCandidate;
1236         noway_assert(call->gtOper == GT_CALL);
1237
1238         if (call->AsCall()->HasRetBufArg())
1239         {
1240             // insert the return value buffer into the argument list as first byref parameter
1241             call->gtCall.gtCallArgs = gtNewListNode(destAddr, call->gtCall.gtCallArgs);
1242
1243             // now returns void, not a struct
1244             src->gtType  = TYP_VOID;
1245             call->gtType = TYP_VOID;
1246
1247             // We have already appended the write to 'dest' to the GT_CALL's args,
1248             // so now we just return an empty node (pruning the GT_RET_EXPR)
1249             return src;
1250         }
1251         else
1252         {
1253             // Case of inline method returning a struct in one or more registers.
1254             //
1255             var_types returnType = (var_types)call->gtCall.gtReturnType;
1256
1257             // We won't need a return buffer
1258             asgType      = returnType;
1259             src->gtType  = genActualType(returnType);
1260             call->gtType = src->gtType;
1261
1262             // If we've changed the type, and it no longer matches a local destination,
1263             // we must use an indirection.
1264             if ((dest != nullptr) && (dest->OperGet() == GT_LCL_VAR) && (dest->TypeGet() != asgType))
1265             {
1266                 dest = nullptr;
1267             }
1268
1269             // !!! The destination could be on stack. !!!
1270             // This flag will let us choose the correct write barrier.
1271             destFlags = GTF_IND_TGTANYWHERE;
1272         }
1273     }
1274     else if (src->OperIsBlk())
1275     {
1276         asgType = impNormStructType(structHnd);
1277         if (src->gtOper == GT_OBJ)
1278         {
1279             assert(src->gtObj.gtClass == structHnd);
1280         }
1281     }
1282     else if (src->gtOper == GT_INDEX)
1283     {
1284         asgType = impNormStructType(structHnd);
1285         assert(src->gtIndex.gtStructElemClass == structHnd);
1286     }
1287     else if (src->gtOper == GT_MKREFANY)
1288     {
1289         // Since we are assigning the result of a GT_MKREFANY,
1290         // "destAddr" must point to a refany.
1291
1292         GenTreePtr destAddrClone;
1293         destAddr =
1294             impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment"));
1295
1296         assert(offsetof(CORINFO_RefAny, dataPtr) == 0);
1297         assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF);
1298         GetZeroOffsetFieldMap()->Set(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));
1299         GenTreePtr     ptrSlot         = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr);
1300         GenTreeIntCon* typeFieldOffset = gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL);
1301         typeFieldOffset->gtFieldSeq    = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField());
1302         GenTreePtr typeSlot =
1303             gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset));
1304
1305         // append the assign of the pointer value
1306         GenTreePtr asg = gtNewAssignNode(ptrSlot, src->gtOp.gtOp1);
1307         if (pAfterStmt)
1308         {
1309             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(asg, impCurStmtOffs));
1310         }
1311         else
1312         {
1313             impAppendTree(asg, curLevel, impCurStmtOffs);
1314         }
1315
1316         // return the assign of the type value, to be appended
1317         return gtNewAssignNode(typeSlot, src->gtOp.gtOp2);
1318     }
1319     else if (src->gtOper == GT_COMMA)
1320     {
1321         // The second thing is the struct or its address.
1322         assert(varTypeIsStruct(src->gtOp.gtOp2) || src->gtOp.gtOp2->gtType == TYP_BYREF);
1323         if (pAfterStmt)
1324         {
1325             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(src->gtOp.gtOp1, impCurStmtOffs));
1326         }
1327         else
1328         {
1329             impAppendTree(src->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1330         }
1331
1332         // Evaluate the second thing using recursion.
1333         return impAssignStructPtr(destAddr, src->gtOp.gtOp2, structHnd, curLevel, pAfterStmt, block);
1334     }
1335     else if (src->IsLocal())
1336     {
1337         asgType = src->TypeGet();
1338     }
1339     else if (asgType == TYP_STRUCT)
1340     {
1341         asgType     = impNormStructType(structHnd);
1342         src->gtType = asgType;
1343 #ifdef LEGACY_BACKEND
1344         if (asgType == TYP_STRUCT)
1345         {
1346             GenTree* srcAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, src);
1347             src              = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
1348         }
1349 #endif
1350     }
1351     if (dest == nullptr)
1352     {
1353         // TODO-1stClassStructs: We shouldn't really need a block node as the destination
1354         // if this is a known struct type.
1355         if (asgType == TYP_STRUCT)
1356         {
1357             dest = gtNewObjNode(structHnd, destAddr);
1358             gtSetObjGcInfo(dest->AsObj());
1359             // Although an obj as a call argument was always assumed to be a globRef
1360             // (which is itself overly conservative), that is not true of the operands
1361             // of a block assignment.
1362             dest->gtFlags &= ~GTF_GLOB_REF;
1363             dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF);
1364         }
1365         else if (varTypeIsStruct(asgType))
1366         {
1367             dest = new (this, GT_BLK) GenTreeBlk(GT_BLK, asgType, destAddr, genTypeSize(asgType));
1368         }
1369         else
1370         {
1371             dest = gtNewOperNode(GT_IND, asgType, destAddr);
1372         }
1373     }
1374     else
1375     {
1376         dest->gtType = asgType;
1377     }
1378
1379     dest->gtFlags |= destFlags;
1380     destFlags = dest->gtFlags;
1381
1382     // return an assignment node, to be appended
1383     GenTree* asgNode = gtNewAssignNode(dest, src);
1384     gtBlockOpInit(asgNode, dest, src, false);
1385
1386     // TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs
1387     // of assignments.
1388     if ((destFlags & GTF_DONT_CSE) == 0)
1389     {
1390         dest->gtFlags &= ~(GTF_DONT_CSE);
1391     }
1392     return asgNode;
1393 }
1394
1395 /*****************************************************************************
1396    Given a struct value, and the class handle for that structure, return
1397    the expression for the address for that structure value.
1398
1399    willDeref - true if the caller guarantees that it will dereference the pointer.
1400 */
1401
1402 GenTreePtr Compiler::impGetStructAddr(GenTreePtr           structVal,
1403                                       CORINFO_CLASS_HANDLE structHnd,
1404                                       unsigned             curLevel,
1405                                       bool                 willDeref)
1406 {
1407     assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd));
1408
1409     var_types type = structVal->TypeGet();
1410
1411     genTreeOps oper = structVal->gtOper;
1412
1413     if (oper == GT_OBJ && willDeref)
1414     {
1415         assert(structVal->gtObj.gtClass == structHnd);
1416         return (structVal->gtObj.Addr());
1417     }
1418     else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY)
1419     {
1420         unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1421
1422         impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1423
1424         // The 'return value' is now the temp itself
1425
1426         type            = genActualType(lvaTable[tmpNum].TypeGet());
1427         GenTreePtr temp = gtNewLclvNode(tmpNum, type);
1428         temp            = gtNewOperNode(GT_ADDR, TYP_BYREF, temp);
1429         return temp;
1430     }
1431     else if (oper == GT_COMMA)
1432     {
1433         assert(structVal->gtOp.gtOp2->gtType == type); // Second thing is the struct
1434
1435         GenTreePtr oldTreeLast = impTreeLast;
1436         structVal->gtOp.gtOp2  = impGetStructAddr(structVal->gtOp.gtOp2, structHnd, curLevel, willDeref);
1437         structVal->gtType      = TYP_BYREF;
1438
1439         if (oldTreeLast != impTreeLast)
1440         {
1441             // Some temp assignment statement was placed on the statement list
1442             // for Op2, but that would be out of order with op1, so we need to
1443             // spill op1 onto the statement list after whatever was last
1444             // before we recursed on Op2 (i.e. before whatever Op2 appended).
1445             impInsertTreeBefore(structVal->gtOp.gtOp1, impCurStmtOffs, oldTreeLast->gtNext);
1446             structVal->gtOp.gtOp1 = gtNewNothingNode();
1447         }
1448
1449         return (structVal);
1450     }
1451
1452     return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1453 }
1454
1455 //------------------------------------------------------------------------
1456 // impNormStructType: Given a (known to be) struct class handle structHnd, normalize its type,
1457 //                    and optionally determine the GC layout of the struct.
1458 //
1459 // Arguments:
1460 //    structHnd       - The class handle for the struct type of interest.
1461 //    gcLayout        - (optional, default nullptr) - a BYTE pointer, allocated by the caller,
1462 //                      into which the gcLayout will be written.
1463 //    pNumGCVars      - (optional, default nullptr) - if non-null, a pointer to an unsigned,
1464 //                      which will be set to the number of GC fields in the struct.
1465 //    pSimdBaseType   - (optional, default nullptr) - if non-null, and the struct is a SIMD
1466 //                      type, set to the SIMD base type
1467 //
1468 // Return Value:
1469 //    The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*).
1470 //    The gcLayout will be returned using the pointers provided by the caller, if non-null.
1471 //    It may also modify the compFloatingPointUsed flag if the type is a SIMD type.
1472 //
1473 // Assumptions:
1474 //    The caller must set gcLayout to nullptr OR ensure that it is large enough
1475 //    (see ICorStaticInfo::getClassGClayout in corinfo.h).
1476 //
1477 // Notes:
1478 //    Normalizing the type involves examining the struct type to determine if it should
1479 //    be modified to one that is handled specially by the JIT, possibly being a candidate
1480 //    for full enregistration, e.g. TYP_SIMD16.
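//
//    For illustration, a minimal call that only needs the normalized type is:
//
//        var_types structType = impNormStructType(structHnd);
//
//    Callers that also need the GC layout pass a caller-allocated BYTE buffer
//    (see ICorStaticInfo::getClassGClayout) and, optionally, pointers to receive
//    the GC field count and the SIMD base type.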
1481
1482 var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
1483                                       BYTE*                gcLayout,
1484                                       unsigned*            pNumGCVars,
1485                                       var_types*           pSimdBaseType)
1486 {
1487     assert(structHnd != NO_CLASS_HANDLE);
1488
1489     const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd);
1490     var_types   structType  = TYP_STRUCT;
1491
1492 #ifdef FEATURE_CORECLR
1493     const bool hasGCPtrs = (structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0;
1494 #else
1495     // Desktop CLR won't report FLG_CONTAINS_GC_PTR for RefAnyClass - need to check explicitly.
1496     const bool        isRefAny    = (structHnd == impGetRefAnyClass());
1497     const bool        hasGCPtrs   = isRefAny || ((structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0);
1498 #endif
1499
1500 #ifdef FEATURE_SIMD
1501     // Check to see if this is a SIMD type.
1502     if (featureSIMD && !hasGCPtrs)
1503     {
1504         unsigned originalSize = info.compCompHnd->getClassSize(structHnd);
1505
1506         if ((originalSize >= minSIMDStructBytes()) && (originalSize <= maxSIMDStructBytes()))
1507         {
1508             unsigned int sizeBytes;
1509             var_types    simdBaseType = getBaseTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
1510             if (simdBaseType != TYP_UNKNOWN)
1511             {
1512                 assert(sizeBytes == originalSize);
1513                 structType = getSIMDTypeForSize(sizeBytes);
1514                 if (pSimdBaseType != nullptr)
1515                 {
1516                     *pSimdBaseType = simdBaseType;
1517                 }
1518 #ifdef _TARGET_AMD64_
1519                 // Amd64: also indicate that we use floating point registers
1520                 compFloatingPointUsed = true;
1521 #endif
1522             }
1523         }
1524     }
1525 #endif // FEATURE_SIMD
1526
1527     // Fetch GC layout info if requested
1528     if (gcLayout != nullptr)
1529     {
1530         unsigned numGCVars = info.compCompHnd->getClassGClayout(structHnd, gcLayout);
1531
1532         // Verify that the quick test up above via the class attributes gave a
1533         // safe view of the type's GCness.
1534         //
1535         // Note there are cases where hasGCPtrs is true but getClassGClayout
1536         // does not report any gc fields.
1537         assert(hasGCPtrs || (numGCVars == 0));
1538
1539         if (pNumGCVars != nullptr)
1540         {
1541             *pNumGCVars = numGCVars;
1542         }
1543     }
1544     else
1545     {
1546         // Can't safely ask for number of GC pointers without also
1547         // asking for layout.
1548         assert(pNumGCVars == nullptr);
1549     }
1550
1551     return structType;
1552 }
1553
1554 //****************************************************************************
1555 //  Given TYP_STRUCT value 'structVal', make sure it is 'canonical', that is,
1556 //  it is either an OBJ or a MKREFANY node, or a node (e.g. GT_INDEX) that will be morphed.
1557 //
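// For example (illustrative): a struct-typed GT_LCL_VAR is wrapped as
// OBJ(ADDR(LCL_VAR)), while a struct-returning GT_CALL is first spilled to a temp
// and the result is an OBJ of that temp's address.
//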
1558 GenTreePtr Compiler::impNormStructVal(GenTreePtr           structVal,
1559                                       CORINFO_CLASS_HANDLE structHnd,
1560                                       unsigned             curLevel,
1561                                       bool                 forceNormalization /*=false*/)
1562 {
1563     assert(forceNormalization || varTypeIsStruct(structVal));
1564     assert(structHnd != NO_CLASS_HANDLE);
1565     var_types structType = structVal->TypeGet();
1566     bool      makeTemp   = false;
1567     if (structType == TYP_STRUCT)
1568     {
1569         structType = impNormStructType(structHnd);
1570     }
1571     bool                 alreadyNormalized = false;
1572     GenTreeLclVarCommon* structLcl         = nullptr;
1573
1574     genTreeOps oper = structVal->OperGet();
1575     switch (oper)
1576     {
1577         // GT_RETURN and GT_MKREFANY don't capture the handle.
1578         case GT_RETURN:
1579             break;
1580         case GT_MKREFANY:
1581             alreadyNormalized = true;
1582             break;
1583
1584         case GT_CALL:
1585             structVal->gtCall.gtRetClsHnd = structHnd;
1586             makeTemp                      = true;
1587             break;
1588
1589         case GT_RET_EXPR:
1590             structVal->gtRetExpr.gtRetClsHnd = structHnd;
1591             makeTemp                         = true;
1592             break;
1593
1594         case GT_ARGPLACE:
1595             structVal->gtArgPlace.gtArgPlaceClsHnd = structHnd;
1596             break;
1597
1598         case GT_INDEX:
1599             // This will be transformed to an OBJ later.
1600             alreadyNormalized                    = true;
1601             structVal->gtIndex.gtStructElemClass = structHnd;
1602             structVal->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(structHnd);
1603             break;
1604
1605         case GT_FIELD:
1606             // Wrap it in a GT_OBJ.
1607             structVal->gtType = structType;
1608             structVal         = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1609             break;
1610
1611         case GT_LCL_VAR:
1612         case GT_LCL_FLD:
1613             structLcl = structVal->AsLclVarCommon();
1614             // Wrap it in a GT_OBJ.
1615             structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1616             __fallthrough;
1617
1618         case GT_OBJ:
1619         case GT_BLK:
1620         case GT_DYN_BLK:
1621         case GT_ASG:
1622             // These should already have the appropriate type.
1623             assert(structVal->gtType == structType);
1624             alreadyNormalized = true;
1625             break;
1626
1627         case GT_IND:
1628             assert(structVal->gtType == structType);
1629             structVal         = gtNewObjNode(structHnd, structVal->gtGetOp1());
1630             alreadyNormalized = true;
1631             break;
1632
1633 #ifdef FEATURE_SIMD
1634         case GT_SIMD:
1635             assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
1636             break;
1637 #endif // FEATURE_SIMD
1638
1639         case GT_COMMA:
1640         {
1641             // The second thing could either be a block node or a GT_SIMD or a GT_COMMA node.
1642             GenTree* blockNode = structVal->gtOp.gtOp2;
1643             assert(blockNode->gtType == structType);
1644
1645             // Is this GT_COMMA(op1, GT_COMMA())?
1646             GenTree* parent = structVal;
1647             if (blockNode->OperGet() == GT_COMMA)
1648             {
1649                 // Find the last node in the comma chain.
1650                 do
1651                 {
1652                     assert(blockNode->gtType == structType);
1653                     parent    = blockNode;
1654                     blockNode = blockNode->gtOp.gtOp2;
1655                 } while (blockNode->OperGet() == GT_COMMA);
1656             }
1657
1658 #ifdef FEATURE_SIMD
1659             if (blockNode->OperGet() == GT_SIMD)
1660             {
1661                 parent->gtOp.gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization);
1662                 alreadyNormalized  = true;
1663             }
1664             else
1665 #endif
1666             {
1667                 assert(blockNode->OperIsBlk());
1668
1669                 // Sink the GT_COMMA below the blockNode addr.
1670                 // That is, GT_COMMA(op1, op2=blockNode) is transformed into
1671                 // blockNode(GT_COMMA(TYP_BYREF, op1, op2's op1)).
1672                 //
1673                 // In case of a chained GT_COMMA case, we sink the last
1674                 // GT_COMMA below the blockNode addr.
1675                 GenTree* blockNodeAddr = blockNode->gtOp.gtOp1;
1676                 assert(blockNodeAddr->gtType == TYP_BYREF);
1677                 GenTree* commaNode    = parent;
1678                 commaNode->gtType     = TYP_BYREF;
1679                 commaNode->gtOp.gtOp2 = blockNodeAddr;
1680                 blockNode->gtOp.gtOp1 = commaNode;
1681                 if (parent == structVal)
1682                 {
1683                     structVal = blockNode;
1684                 }
1685                 alreadyNormalized = true;
1686             }
1687         }
1688         break;
1689
1690         default:
1691             assert(!"Unexpected node in impNormStructVal()");
1692             break;
1693     }
1694     structVal->gtType  = structType;
1695     GenTree* structObj = structVal;
1696
1697     if (!alreadyNormalized || forceNormalization)
1698     {
1699         if (makeTemp)
1700         {
1701             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1702
1703             impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1704
1705             // The structVal is now the temp itself
1706
1707             structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon();
1708             // TODO-1stClassStructs: Avoid always wrapping in GT_OBJ.
1709             structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structLcl));
1710         }
1711         else if (varTypeIsStruct(structType) && !structVal->OperIsBlk())
1712         {
1713             // Wrap it in a GT_OBJ
1714             structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1715         }
1716     }
1717
1718     if (structLcl != nullptr)
1719     {
1720         // An OBJ on an ADDR(LCL_VAR) can never raise an exception
1721         // so we don't set GTF_EXCEPT here.
1722         if (!lvaIsImplicitByRefLocal(structLcl->gtLclNum))
1723         {
1724             structObj->gtFlags &= ~GTF_GLOB_REF;
1725         }
1726     }
1727     else
1728     {
1729         // In general an OBJ is an indirection and could raise an exception.
1730         structObj->gtFlags |= GTF_EXCEPT;
1731     }
1732     return (structObj);
1733 }
1734
1735 /******************************************************************************/
1736 // Given a type token, generate code that will evaluate to the correct
1737 // handle representation of that token (type handle, field handle, or method handle)
1738 //
1739 // For most cases, the handle is determined at compile-time, and the code
1740 // generated is simply an embedded handle.
1741 //
1742 // Run-time lookup is required if the enclosing method is shared between instantiations
1743 // and the token refers to formal type parameters whose instantiation is not known
1744 // at compile-time.
1745 //
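// For example (illustrative): a token naming a concrete type such as System.String
// resolves to a compile-time handle and is embedded directly, whereas a token that
// refers to a formal type parameter of code shared between instantiations (e.g.
// typeof(T) in a method shared over reference-type instantiations) requires a
// runtime dictionary lookup.
//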
1746 GenTreePtr Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1747                                       BOOL*                   pRuntimeLookup /* = NULL */,
1748                                       BOOL                    mustRestoreHandle /* = FALSE */,
1749                                       BOOL                    importParent /* = FALSE */)
1750 {
1751     assert(!fgGlobalMorph);
1752
1753     CORINFO_GENERICHANDLE_RESULT embedInfo;
1754     info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo);
1755
1756     if (pRuntimeLookup)
1757     {
1758         *pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup;
1759     }
1760
1761     if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup)
1762     {
1763         switch (embedInfo.handleType)
1764         {
1765             case CORINFO_HANDLETYPE_CLASS:
1766                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle);
1767                 break;
1768
1769             case CORINFO_HANDLETYPE_METHOD:
1770                 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle);
1771                 break;
1772
1773             case CORINFO_HANDLETYPE_FIELD:
1774                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
1775                     info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle));
1776                 break;
1777
1778             default:
1779                 break;
1780         }
1781     }
1782
1783     return impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
1784                            embedInfo.compileTimeHandle);
1785 }
1786
1787 GenTreePtr Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1788                                      CORINFO_LOOKUP*         pLookup,
1789                                      unsigned                handleFlags,
1790                                      void*                   compileTimeHandle)
1791 {
1792     if (!pLookup->lookupKind.needsRuntimeLookup)
1793     {
1794         // No runtime lookup is required.
1795         // Access is direct or memory-indirect (of a fixed address) reference
1796
1797         CORINFO_GENERIC_HANDLE handle       = nullptr;
1798         void*                  pIndirection = nullptr;
1799         assert(pLookup->constLookup.accessType != IAT_PPVALUE);
1800
1801         if (pLookup->constLookup.accessType == IAT_VALUE)
1802         {
1803             handle = pLookup->constLookup.handle;
1804         }
1805         else if (pLookup->constLookup.accessType == IAT_PVALUE)
1806         {
1807             pIndirection = pLookup->constLookup.addr;
1808         }
1809         return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, 0, nullptr, compileTimeHandle);
1810     }
1811     else if (compIsForInlining())
1812     {
1813         // Don't import runtime lookups when inlining
1814         // Inlining has to be aborted in such a case
1815         compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1816         return nullptr;
1817     }
1818     else
1819     {
1820         // Need to use dictionary-based access which depends on the typeContext
1821         // which is only available at runtime, not at compile-time.
1822
1823         return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle);
1824     }
1825 }
1826
1827 #ifdef FEATURE_READYTORUN_COMPILER
1828 GenTreePtr Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup,
1829                                                unsigned              handleFlags,
1830                                                void*                 compileTimeHandle)
1831 {
1832     CORINFO_GENERIC_HANDLE handle       = nullptr;
1833     void*                  pIndirection = nullptr;
1834     assert(pLookup->accessType != IAT_PPVALUE);
1835
1836     if (pLookup->accessType == IAT_VALUE)
1837     {
1838         handle = pLookup->handle;
1839     }
1840     else if (pLookup->accessType == IAT_PVALUE)
1841     {
1842         pIndirection = pLookup->addr;
1843     }
1844     return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, 0, nullptr, compileTimeHandle);
1845 }
1846
1847 GenTreePtr Compiler::impReadyToRunHelperToTree(
1848     CORINFO_RESOLVED_TOKEN* pResolvedToken,
1849     CorInfoHelpFunc         helper,
1850     var_types               type,
1851     GenTreeArgList*         args /* =NULL*/,
1852     CORINFO_LOOKUP_KIND*    pGenericLookupKind /* =NULL. Only used with generics */)
1853 {
1854     CORINFO_CONST_LOOKUP lookup;
1855 #if COR_JIT_EE_VERSION > 460
1856     if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup))
1857     {
1858         return nullptr;
1859     }
1860 #else
1861     info.compCompHnd->getReadyToRunHelper(pResolvedToken, helper, &lookup);
1862 #endif
1863
1864     GenTreePtr op1 = gtNewHelperCallNode(helper, type, GTF_EXCEPT, args);
1865
1866     op1->gtCall.setEntryPoint(lookup);
1867
1868     return op1;
1869 }
1870 #endif
1871
1872 GenTreePtr Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
1873 {
1874     GenTreePtr op1 = nullptr;
1875
1876     switch (pCallInfo->kind)
1877     {
1878         case CORINFO_CALL:
1879             op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod);
1880
1881 #ifdef FEATURE_READYTORUN_COMPILER
1882             if (opts.IsReadyToRun())
1883             {
1884                 op1->gtFptrVal.gtEntryPoint          = pCallInfo->codePointerLookup.constLookup;
1885                 op1->gtFptrVal.gtLdftnResolvedToken  = new (this, CMK_Unknown) CORINFO_RESOLVED_TOKEN;
1886                 *op1->gtFptrVal.gtLdftnResolvedToken = *pResolvedToken;
1887             }
1888             else
1889             {
1890                 op1->gtFptrVal.gtEntryPoint.addr = nullptr;
1891             }
1892 #endif
1893             break;
1894
1895         case CORINFO_CALL_CODE_POINTER:
1896             if (compIsForInlining())
1897             {
1898                 // Don't import runtime lookups when inlining
1899                 // Inlining has to be aborted in such a case
1900                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1901                 return nullptr;
1902             }
1903
1904             op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod);
1905             break;
1906
1907         default:
1908             noway_assert(!"unknown call kind");
1909             break;
1910     }
1911
1912     return op1;
1913 }
1914
1915 //------------------------------------------------------------------------
1916 // getRuntimeContextTree: find pointer to context for runtime lookup.
1917 //
1918 // Arguments:
1919 //    kind - lookup kind.
1920 //
1921 // Return Value:
1922 //    A GenTree pointer to the shared generic context.
1923 //
1924 // Notes:
1925 //    Reports that the generic context is used (sets lvaGenericsContextUsed).
1926
1927 GenTreePtr Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind)
1928 {
1929     GenTreePtr ctxTree = nullptr;
1930
1931     // Collectible types require that, for shared generic code, we report any use of the generic context
1932     // parameter. (This is a conservative approach; in some cases, particularly when the context parameter
1933     // is 'this', we could avoid the eager reporting logic.)
1934     lvaGenericsContextUsed = true;
1935
1936     if (kind == CORINFO_LOOKUP_THISOBJ)
1937     {
1938         // this Object
1939         ctxTree = gtNewLclvNode(info.compThisArg, TYP_REF);
1940
1941         // Vtable pointer of this object
1942         ctxTree = gtNewOperNode(GT_IND, TYP_I_IMPL, ctxTree);
1943         ctxTree->gtFlags |= GTF_EXCEPT; // Null-pointer exception
1944         ctxTree->gtFlags |= GTF_IND_INVARIANT;
1945     }
1946     else
1947     {
1948         assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM);
1949
1950         ctxTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); // Exact method descriptor as passed in as last arg
1951     }
1952     return ctxTree;
1953 }
1954
1955 /*****************************************************************************/
1956 /* Import a dictionary lookup to access a handle in code shared between
1957    generic instantiations.
1958    The lookup depends on the typeContext which is only available at
1959    runtime, and not at compile-time.
1960    pLookup->token1 and pLookup->token2 specify the handle that is needed.
1961    The cases are:
1962
1963    1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the
1964       instantiation-specific handle, and the tokens to lookup the handle.
1965    2. pLookup->indirections != CORINFO_USEHELPER :
1966       2a. pLookup->testForNull == false : Dereference the instantiation-specific handle
1967           to get the handle.
1968       2b. pLookup->testForNull == true : Dereference the instantiation-specific handle.
1969           If it is non-NULL, it is the handle required. Else, call a helper
1970           to lookup the handle.
1971  */
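//
// For illustration (pseudocode only), case 2b amounts to:
//
//     handle = *slotPtr;                                     // dereference the instantiation-specific slot
//     result = (handle != null) ? handle                     // fast path: the slot already holds the handle
//                               : helper(genericContext, signature); // slow path: call the lookup helper
//
// The code below builds this shape with a GT_QMARK/GT_COLON that is spilled to a temp.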
1972
1973 GenTreePtr Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1974                                             CORINFO_LOOKUP*         pLookup,
1975                                             void*                   compileTimeHandle)
1976 {
1977
1978     // This method can only be called from the importer instance of the Compiler.
1979     // In other words, it cannot be called by the instance of the Compiler for the inlinee.
1980     assert(!compIsForInlining());
1981
1982     GenTreePtr ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);
1983
1984 #ifdef FEATURE_READYTORUN_COMPILER
1985     if (opts.IsReadyToRun())
1986     {
1987         return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
1988                                          gtNewArgList(ctxTree), &pLookup->lookupKind);
1989     }
1990 #endif
1991
1992     CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;
1993     // It's available only via the run-time helper function
1994     if (pRuntimeLookup->indirections == CORINFO_USEHELPER)
1995     {
1996         GenTreeArgList* helperArgs =
1997             gtNewArgList(ctxTree, gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, 0,
1998                                                       nullptr, compileTimeHandle));
1999
2000         return gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, GTF_EXCEPT, helperArgs);
2001     }
2002
2003     // Slot pointer
2004     GenTreePtr slotPtrTree = ctxTree;
2005
2006     if (pRuntimeLookup->testForNull)
2007     {
2008         slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2009                                    nullptr DEBUGARG("impRuntimeLookup slot"));
2010     }
2011
2012     // Apply repeated indirections
2013     for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
2014     {
2015         if (i != 0)
2016         {
2017             slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2018             slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2019             slotPtrTree->gtFlags |= GTF_IND_INVARIANT;
2020         }
2021         if (pRuntimeLookup->offsets[i] != 0)
2022         {
2023             slotPtrTree =
2024                 gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
2025         }
2026     }
2027
2028     // No null test required
2029     if (!pRuntimeLookup->testForNull)
2030     {
2031         if (pRuntimeLookup->indirections == 0)
2032         {
2033             return slotPtrTree;
2034         }
2035
2036         slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2037         slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2038
2039         if (!pRuntimeLookup->testForFixup)
2040         {
2041             return slotPtrTree;
2042         }
2043
2044         impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0"));
2045
2046         GenTreePtr op1 = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2047                                       nullptr DEBUGARG("impRuntimeLookup test"));
2048         op1 = impImplicitIorI4Cast(op1, TYP_INT); // downcast the pointer to a TYP_INT on 64-bit targets
2049
2050         // Use a GT_AND to check for the lowest bit and indirect if it is set
2051         GenTreePtr testTree = gtNewOperNode(GT_AND, TYP_INT, op1, gtNewIconNode(1));
2052         GenTreePtr relop    = gtNewOperNode(GT_EQ, TYP_INT, testTree, gtNewIconNode(0));
2053         relop->gtFlags |= GTF_RELOP_QMARK;
2054
2055         op1 = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2056                            nullptr DEBUGARG("impRuntimeLookup indir"));
2057         op1 = gtNewOperNode(GT_ADD, TYP_I_IMPL, op1, gtNewIconNode(-1, TYP_I_IMPL)); // subtract 1 from the pointer
2058         GenTreePtr indirTree = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
2059         GenTreePtr colon     = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL, slotPtrTree, indirTree);
2060
2061         GenTreePtr qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2062
2063         unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark0"));
2064         impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2065         return gtNewLclvNode(tmp, TYP_I_IMPL);
2066     }
2067
2068     assert(pRuntimeLookup->indirections != 0);
2069
2070     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1"));
2071
2072     // Extract the handle
2073     GenTreePtr handle = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2074     handle->gtFlags |= GTF_IND_NONFAULTING;
2075
2076     GenTreePtr handleCopy = impCloneExpr(handle, &handle, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2077                                          nullptr DEBUGARG("impRuntimeLookup typehandle"));
2078
2079     // Call to helper
2080     GenTreeArgList* helperArgs =
2081         gtNewArgList(ctxTree, gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, 0, nullptr,
2082                                                   compileTimeHandle));
2083     GenTreePtr helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, GTF_EXCEPT, helperArgs);
2084
2085     // Check for null and possibly call helper
2086     GenTreePtr relop = gtNewOperNode(GT_NE, TYP_INT, handle, gtNewIconNode(0, TYP_I_IMPL));
2087     relop->gtFlags |= GTF_RELOP_QMARK;
2088
2089     GenTreePtr colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL,
2090                                                          gtNewNothingNode(), // do nothing if nonnull
2091                                                          helperCall);
2092
2093     GenTreePtr qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2094
2095     unsigned tmp;
2096     if (handleCopy->IsLocal())
2097     {
2098         tmp = handleCopy->gtLclVarCommon.gtLclNum;
2099     }
2100     else
2101     {
2102         tmp = lvaGrabTemp(true DEBUGARG("spilling QMark1"));
2103     }
2104
2105     impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2106     return gtNewLclvNode(tmp, TYP_I_IMPL);
2107 }
2108
2109 /******************************************************************************
2110  *  Spills the stack at verCurrentState.esStack[level] and replaces it with a temp.
2111  *  If tnum!=BAD_VAR_NUM, the temp var used to replace the tree is tnum,
2112  *     else, grab a new temp.
2113  *  For structs (which can be pushed on the stack using obj, etc),
2114  *  special handling is needed
2115  */
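//
// For illustration, the common call spills stack entry 'level' into a fresh temp:
//
//     impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("reason"));
//
// afterwards verCurrentState.esStack[level].val is a GT_LCL_VAR use of that temp.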
2116
2117 struct RecursiveGuard
2118 {
2119 public:
2120     RecursiveGuard()
2121     {
2122         m_pAddress = nullptr;
2123     }
2124
2125     ~RecursiveGuard()
2126     {
2127         if (m_pAddress)
2128         {
2129             *m_pAddress = false;
2130         }
2131     }
2132
2133     void Init(bool* pAddress, bool bInitialize)
2134     {
2135         assert(pAddress && *pAddress == false && "Recursive guard violation");
2136         m_pAddress = pAddress;
2137
2138         if (bInitialize)
2139         {
2140             *m_pAddress = true;
2141         }
2142     }
2143
2144 protected:
2145     bool* m_pAddress;
2146 };
2147
2148 bool Compiler::impSpillStackEntry(unsigned level,
2149                                   unsigned tnum
2150 #ifdef DEBUG
2151                                   ,
2152                                   bool        bAssertOnRecursion,
2153                                   const char* reason
2154 #endif
2155                                   )
2156 {
2157
2158 #ifdef DEBUG
2159     RecursiveGuard guard;
2160     guard.Init(&impNestedStackSpill, bAssertOnRecursion);
2161 #endif
2162
2163     GenTreePtr tree = verCurrentState.esStack[level].val;
2164
2165     /* Allocate a temp if we haven't been asked to use a particular one */
2166
2167     if (tiVerificationNeeded)
2168     {
2169         // Ignore bad temp requests (they will happen with bad code and will be
2170         // caught when importing the destblock)
2171         if ((tnum != BAD_VAR_NUM && tnum >= lvaCount) && verNeedsVerification())
2172         {
2173             return false;
2174         }
2175     }
2176     else
2177     {
2178         if (tnum != BAD_VAR_NUM && (tnum >= lvaCount))
2179         {
2180             return false;
2181         }
2182     }
2183
2184     if (tnum == BAD_VAR_NUM)
2185     {
2186         tnum = lvaGrabTemp(true DEBUGARG(reason));
2187     }
2188     else if (tiVerificationNeeded && lvaTable[tnum].TypeGet() != TYP_UNDEF)
2189     {
2190         // if verification is needed and tnum's type is incompatible with
2191         // type on that stack, we grab a new temp. This is safe since
2192         // we will throw a verification exception in the dest block.
2193
2194         var_types valTyp = tree->TypeGet();
2195         var_types dstTyp = lvaTable[tnum].TypeGet();
2196
2197         // if the two types are different, we return. This will only happen with bad code and will
2198         // be caught when importing the destblock. We still allow int/byref and float/double differences.
2199         if ((genActualType(valTyp) != genActualType(dstTyp)) &&
2200             !(
2201 #ifndef _TARGET_64BIT_
2202                 (valTyp == TYP_I_IMPL && dstTyp == TYP_BYREF) || (valTyp == TYP_BYREF && dstTyp == TYP_I_IMPL) ||
2203 #endif // !_TARGET_64BIT_
2204                 (varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp))))
2205         {
2206             if (verNeedsVerification())
2207             {
2208                 return false;
2209             }
2210         }
2211     }
2212
2213     /* Assign the spilled entry to the temp */
2214     impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level);
2215
2216     // The tree type may be modified by impAssignTempGen, so use the type of the lclVar.
2217     var_types  type                    = genActualType(lvaTable[tnum].TypeGet());
2218     GenTreePtr temp                    = gtNewLclvNode(tnum, type);
2219     verCurrentState.esStack[level].val = temp;
2220
2221     return true;
2222 }
2223
2224 /*****************************************************************************
2225  *
2226  *  Ensure that the stack has only spilled values
2227  */
2228
2229 void Compiler::impSpillStackEnsure(bool spillLeaves)
2230 {
2231     assert(!spillLeaves || opts.compDbgCode);
2232
2233     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2234     {
2235         GenTreePtr tree = verCurrentState.esStack[level].val;
2236
2237         if (!spillLeaves && tree->OperIsLeaf())
2238         {
2239             continue;
2240         }
2241
2242         // Temps introduced by the importer itself don't need to be spilled
2243
2244         bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) && (tree->gtLclVarCommon.gtLclNum >= info.compLocalsCount);
2245
2246         if (isTempLcl)
2247         {
2248             continue;
2249         }
2250
2251         impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure"));
2252     }
2253 }
2254
2255 void Compiler::impSpillEvalStack()
2256 {
2257     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2258     {
2259         impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack"));
2260     }
2261 }
2262
2263 /*****************************************************************************
2264  *
2265  *  If the stack contains any trees with side effects in them, assign those
2266  *  trees to temps and append the assignments to the statement list.
2267  *  On return the stack is guaranteed to be empty.
2268  */
2269
2270 inline void Compiler::impEvalSideEffects()
2271 {
2272     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects"));
2273     verCurrentState.esStackDepth = 0;
2274 }
2275
2276 /*****************************************************************************
2277  *
2278  *  If the stack contains any trees with side effects in them, assign those
2279  *  trees to temps and replace them on the stack with refs to their temps.
2280  *  [0..chkLevel) is the portion of the stack which will be checked and spilled.
2281  */
2282
2283 inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason))
2284 {
2285     assert(chkLevel != (unsigned)CHECK_SPILL_NONE);
2286
2287     /* Before we make any appends to the tree list we must spill the
2288      * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */
2289
2290     impSpillSpecialSideEff();
2291
2292     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
2293     {
2294         chkLevel = verCurrentState.esStackDepth;
2295     }
2296
2297     assert(chkLevel <= verCurrentState.esStackDepth);
2298
2299     unsigned spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT;
2300
2301     for (unsigned i = 0; i < chkLevel; i++)
2302     {
2303         GenTreePtr tree = verCurrentState.esStack[i].val;
2304
2305         GenTreePtr lclVarTree;
2306
2307         if ((tree->gtFlags & spillFlags) != 0 ||
2308             (spillGlobEffects &&                        // Only consider the following when spillGlobEffects == TRUE
2309              !impIsAddressInLocal(tree, &lclVarTree) && // No need to spill the GT_ADDR node on a local.
2310              gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or
2311                                            // lvAddrTaken flag.
2312         {
2313             impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason));
2314         }
2315     }
2316 }
2317
2318 /*****************************************************************************
2319  *
2320  *  If the stack contains any trees with special side effects in them, assign
2321  *  those trees to temps and replace them on the stack with refs to their temps.
2322  */
2323
2324 inline void Compiler::impSpillSpecialSideEff()
2325 {
2326     // Only exception objects need to be carefully handled
2327
2328     if (!compCurBB->bbCatchTyp)
2329     {
2330         return;
2331     }
2332
2333     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2334     {
2335         GenTreePtr tree = verCurrentState.esStack[level].val;
2336         // Make sure if we have an exception object in the sub tree we spill ourselves.
2337         if (gtHasCatchArg(tree))
2338         {
2339             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff"));
2340         }
2341     }
2342 }
2343
2344 /*****************************************************************************
2345  *
2346  *  Spill all stack references to value classes (TYP_STRUCT nodes)
2347  */
2348
2349 void Compiler::impSpillValueClasses()
2350 {
2351     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2352     {
2353         GenTreePtr tree = verCurrentState.esStack[level].val;
2354
2355         if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT)
2356         {
2357             // Tree walk was aborted, which means that we found a
2358             // value class on the stack.  Need to spill that
2359             // stack entry.
2360
2361             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses"));
2362         }
2363     }
2364 }
2365
2366 /*****************************************************************************
2367  *
2368  *  Callback that checks if a tree node is TYP_STRUCT
2369  */
2370
2371 Compiler::fgWalkResult Compiler::impFindValueClasses(GenTreePtr* pTree, fgWalkData* data)
2372 {
2373     fgWalkResult walkResult = WALK_CONTINUE;
2374
2375     if ((*pTree)->gtType == TYP_STRUCT)
2376     {
2377         // Abort the walk and indicate that we found a value class
2378
2379         walkResult = WALK_ABORT;
2380     }
2381
2382     return walkResult;
2383 }
2384
2385 /*****************************************************************************
2386  *
2387  *  If the stack contains any trees with references to local #lclNum, assign
2388  *  those trees to temps and replace their place on the stack with refs to
2389  *  their temps.
2390  */
2391
2392 void Compiler::impSpillLclRefs(ssize_t lclNum)
2393 {
2394     /* Before we make any appends to the tree list we must spill the
2395      * "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */
2396
2397     impSpillSpecialSideEff();
2398
2399     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2400     {
2401         GenTreePtr tree = verCurrentState.esStack[level].val;
2402
2403         /* If the tree may throw an exception, and the block has a handler,
2404            then we need to spill assignments to the local if the local is
2405            live on entry to the handler.
2406            Just spill 'em all without considering the liveness */
2407
2408         bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT));
2409
2410         /* Skip the tree if it doesn't have an affected reference,
2411            unless xcptnCaught */
2412
2413         if (xcptnCaught || gtHasRef(tree, lclNum, false))
2414         {
2415             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs"));
2416         }
2417     }
2418 }
2419
2420 /*****************************************************************************
2421  *
2422  *  Push catch arg onto the stack.
2423  *  If there are jumps to the beginning of the handler, insert basic block
2424  *  and spill catch arg to a temp. Update the handler block if necessary.
2425  *
2426  *  Returns the basic block of the actual handler.
2427  */
2428
2429 BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd)
2430 {
2431     // Do not inject the basic block twice on reimport. This should be
2432     // hit only under JIT stress. See if the block is the one we injected.
2433     // Note that EH canonicalization can inject internal blocks here. We might
2434     // be able to re-use such a block (but we don't, right now).
2435     if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET)) ==
2436         (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET))
2437     {
2438         GenTreePtr tree = hndBlk->bbTreeList;
2439
2440         if (tree != nullptr && tree->gtOper == GT_STMT)
2441         {
2442             tree = tree->gtStmt.gtStmtExpr;
2443             assert(tree != nullptr);
2444
2445             if ((tree->gtOper == GT_ASG) && (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
2446                 (tree->gtOp.gtOp2->gtOper == GT_CATCH_ARG))
2447             {
2448                 tree = gtNewLclvNode(tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum, TYP_REF);
2449
2450                 impPushOnStack(tree, typeInfo(TI_REF, clsHnd));
2451
2452                 return hndBlk->bbNext;
2453             }
2454         }
2455
2456         // If we get here, it must have been some other kind of internal block. It's possible that
2457         // someone prepended something to our injected block, but that's unlikely.
2458     }
2459
2460     /* Push the exception address value on the stack */
2461     GenTreePtr arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF);
2462
2463     /* Mark the node as having a side-effect - i.e. cannot be
2464      * moved around since it is tied to a fixed location (EAX) */
2465     arg->gtFlags |= GTF_ORDER_SIDEEFF;
2466
2467     /* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */
2468     if (hndBlk->bbRefs > 1 || compStressCompile(STRESS_CATCH_ARG, 5))
2469     {
2470         if (hndBlk->bbRefs == 1)
2471         {
2472             hndBlk->bbRefs++;
2473         }
2474
2475         /* Create extra basic block for the spill */
2476         BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true);
2477         newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET;
2478         newBlk->setBBWeight(hndBlk->bbWeight);
2479         newBlk->bbCodeOffs = hndBlk->bbCodeOffs;
2480
2481         /* Account for the new link we are about to create */
2482         hndBlk->bbRefs++;
2483
2484         /* Spill into a temp */
2485         unsigned tempNum         = lvaGrabTemp(false DEBUGARG("SpillCatchArg"));
2486         lvaTable[tempNum].lvType = TYP_REF;
2487         arg                      = gtNewTempAssign(tempNum, arg);
2488
2489         hndBlk->bbStkTempsIn = tempNum;
2490
2491         /* Report the debug info. impImportBlockCode won't treat
2492          * the actual handler as an exception block and thus won't do it for us. */
2493         if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
2494         {
2495             impCurStmtOffs = newBlk->bbCodeOffs | IL_OFFSETX_STKBIT;
2496             arg            = gtNewStmt(arg, impCurStmtOffs);
2497         }
2498
2499         fgInsertStmtAtEnd(newBlk, arg);
2500
2501         arg = gtNewLclvNode(tempNum, TYP_REF);
2502     }
2503
2504     impPushOnStack(arg, typeInfo(TI_REF, clsHnd));
2505
2506     return hndBlk;
2507 }
2508
2509 /*****************************************************************************
2510  *
2511  *  Given a tree, clone it. *pClone is set to the cloned tree.
2512  *  Returns the original tree if the cloning was easy,
2513  *   else returns the temp to which the tree had to be spilled.
2514  *  If the tree has side-effects, it will be spilled to a temp.
2515  */
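//
// A sketch of the contract (illustrative; the names below are only examples):
//
//     GenTreePtr op1Use = impCloneExpr(op1, &op1Copy, structHnd, curLevel,
//                                      nullptr DEBUGARG("example clone"));
//
// If 'op1' has no global side effects and gtClone succeeds, op1Use is op1 itself and
// op1Copy is the clone; otherwise op1 is spilled and both are LCL_VAR uses of a new temp.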
2516
2517 GenTreePtr Compiler::impCloneExpr(GenTreePtr           tree,
2518                                   GenTreePtr*          pClone,
2519                                   CORINFO_CLASS_HANDLE structHnd,
2520                                   unsigned             curLevel,
2521                                   GenTreePtr* pAfterStmt DEBUGARG(const char* reason))
2522 {
2523     if (!(tree->gtFlags & GTF_GLOB_EFFECT))
2524     {
2525         GenTreePtr clone = gtClone(tree, true);
2526
2527         if (clone)
2528         {
2529             *pClone = clone;
2530             return tree;
2531         }
2532     }
2533
2534     /* Store the operand in a temp and return the temp */
2535
2536     unsigned temp = lvaGrabTemp(true DEBUGARG(reason));
2537
2538     // impAssignTempGen() may change tree->gtType to TYP_VOID for calls which
2539     // return a struct type. It also may modify the struct type to a more
2540     // specialized type (e.g. a SIMD type).  So we will get the type from
2541     // the lclVar AFTER calling impAssignTempGen().
2542
2543     impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtOffs);
2544     var_types type = genActualType(lvaTable[temp].TypeGet());
2545
2546     *pClone = gtNewLclvNode(temp, type);
2547     return gtNewLclvNode(temp, type);
2548 }
2549
2550 /*****************************************************************************
2551  * Remember the IL offset (including stack-empty info) for the trees we will
2552  * generate now.
2553  */
2554
2555 inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs)
2556 {
2557     if (compIsForInlining())
2558     {
2559         GenTreePtr callStmt = impInlineInfo->iciStmt;
2560         assert(callStmt->gtOper == GT_STMT);
2561         impCurStmtOffs = callStmt->gtStmt.gtStmtILoffsx;
2562     }
2563     else
2564     {
2565         assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2566         IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2567         impCurStmtOffs    = offs | stkBit;
2568     }
2569 }
2570
2571 /*****************************************************************************
2572  * Returns current IL offset with stack-empty and call-instruction info incorporated
2573  */
2574 inline IL_OFFSETX Compiler::impCurILOffset(IL_OFFSET offs, bool callInstruction)
2575 {
2576     if (compIsForInlining())
2577     {
2578         return BAD_IL_OFFSET;
2579     }
2580     else
2581     {
2582         assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2583         IL_OFFSETX stkBit             = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2584         IL_OFFSETX callInstructionBit = callInstruction ? IL_OFFSETX_CALLINSTRUCTIONBIT : 0;
2585         return offs | stkBit | callInstructionBit;
2586     }
2587 }
2588
2589 /*****************************************************************************
2590  *
2591  *  Remember the instr offset for the statements
2592  *
2593  *  When we do impAppendTree(tree), we can't set tree->gtStmtLastILoffs to
2594  *  impCurOpcOffs, if the append was done because of a partial stack spill,
2595  *  as some of the trees corresponding to code up to impCurOpcOffs might
2596  *  still be sitting on the stack.
2597  *  So we delay marking of gtStmtLastILoffs until impNoteLastILoffs().
2598  *  This should be called when an opcode finally/explicitly causes
2599  *  impAppendTree(tree) to be called (as opposed to being called because of
2600  *  a spill caused by the opcode)
2601  */
2602
2603 #ifdef DEBUG
2604
2605 void Compiler::impNoteLastILoffs()
2606 {
2607     if (impLastILoffsStmt == nullptr)
2608     {
2609         // We should have added a statement for the current basic block
2610         // Is this assert correct?
2611
2612         assert(impTreeLast);
2613         assert(impTreeLast->gtOper == GT_STMT);
2614
2615         impTreeLast->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2616     }
2617     else
2618     {
2619         impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2620         impLastILoffsStmt                          = nullptr;
2621     }
2622 }
2623
2624 #endif // DEBUG
2625
2626 /*****************************************************************************
2627  * We don't create any GenTree (excluding spills) for a branch.
2628  * For debugging info, we need a placeholder so that we can note
2629  * the IL offset in gtStmt.gtStmtOffs. So append an empty statement.
2630  */
2631
2632 void Compiler::impNoteBranchOffs()
2633 {
2634     if (opts.compDbgCode)
2635     {
2636         impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
2637     }
2638 }
2639
2640 /*****************************************************************************
2641  * Locate the next stmt boundary for which we need to record info.
2642  * We will have to spill the stack at such boundaries if it is not
2643  * already empty.
2644  * Returns the next stmt boundary (after the start of the block)
2645  */
2646
2647 unsigned Compiler::impInitBlockLineInfo()
2648 {
2649     /* Assume the block does not correspond with any IL offset. This prevents
2650        us from reporting extra offsets. Extra mappings can cause confusing
2651        stepping, especially if the extra mapping is a jump-target, and the
2652        debugger does not ignore extra mappings, but instead rewinds to the
2653        nearest known offset */
2654
2655     impCurStmtOffsSet(BAD_IL_OFFSET);
2656
2657     if (compIsForInlining())
2658     {
2659         return ~0;
2660     }
2661
2662     IL_OFFSET blockOffs = compCurBB->bbCodeOffs;
2663
2664     if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES))
2665     {
2666         impCurStmtOffsSet(blockOffs);
2667     }
2668
2669     if (false && (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES))
2670     {
2671         impCurStmtOffsSet(blockOffs);
2672     }
2673
2674     /* Always report IL offset 0 or some tests get confused.
2675        Probably a good idea anyway */
2676
2677     if (blockOffs == 0)
2678     {
2679         impCurStmtOffsSet(blockOffs);
2680     }
2681
2682     if (!info.compStmtOffsetsCount)
2683     {
2684         return ~0;
2685     }
2686
2687     /* Find the lowest explicit stmt boundary within the block */
2688
2689     /* Start looking at an entry that is based on our instr offset */
2690
2691     unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize;
2692
2693     if (index >= info.compStmtOffsetsCount)
2694     {
2695         index = info.compStmtOffsetsCount - 1;
2696     }
2697
2698     /* If we've guessed too far, back up */
2699
2700     while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs)
2701     {
2702         index--;
2703     }
2704
2705     /* If we guessed short, advance ahead */
2706
2707     while (info.compStmtOffsets[index] < blockOffs)
2708     {
2709         index++;
2710
2711         if (index == info.compStmtOffsetsCount)
2712         {
2713             return info.compStmtOffsetsCount;
2714         }
2715     }
2716
2717     assert(index < info.compStmtOffsetsCount);
2718
2719     if (info.compStmtOffsets[index] == blockOffs)
2720     {
2721         /* There is an explicit boundary for the start of this basic block.
2722            So we will start with bbCodeOffs. Else we will wait until we
2723            get to the next explicit boundary */
2724
2725         impCurStmtOffsSet(blockOffs);
2726
2727         index++;
2728     }
2729
2730     return index;
2731 }
2732
2733 /*****************************************************************************/
2734
2735 static inline bool impOpcodeIsCallOpcode(OPCODE opcode)
2736 {
2737     switch (opcode)
2738     {
2739         case CEE_CALL:
2740         case CEE_CALLI:
2741         case CEE_CALLVIRT:
2742             return true;
2743
2744         default:
2745             return false;
2746     }
2747 }
2748
2749 /*****************************************************************************/
2750
2751 static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode)
2752 {
2753     switch (opcode)
2754     {
2755         case CEE_CALL:
2756         case CEE_CALLI:
2757         case CEE_CALLVIRT:
2758         case CEE_JMP:
2759         case CEE_NEWOBJ:
2760         case CEE_NEWARR:
2761             return true;
2762
2763         default:
2764             return false;
2765     }
2766 }
2767
2768 /*****************************************************************************/
2769
2770 // One might think it is worth caching these values, but results indicate
2771 // that it isn't.
2772 // In addition, caching them causes SuperPMI to be unable to completely
2773 // encapsulate an individual method context.
2774 CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass()
2775 {
2776     CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
2777     assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr);
2778     return refAnyClass;
2779 }
2780
2781 CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass()
2782 {
2783     CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE);
2784     assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr);
2785     return typeHandleClass;
2786 }
2787
2788 CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle()
2789 {
2790     CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE);
2791     assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr);
2792     return argIteratorClass;
2793 }
2794
2795 CORINFO_CLASS_HANDLE Compiler::impGetStringClass()
2796 {
2797     CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING);
2798     assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr);
2799     return stringClass;
2800 }
2801
2802 CORINFO_CLASS_HANDLE Compiler::impGetObjectClass()
2803 {
2804     CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT);
2805     assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr);
2806     return objectClass;
2807 }
2808
2809 /*****************************************************************************
2810  *  "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we
2811  *  set its type to TYP_BYREF when we create it. We only know whether it can be
2812  *  changed to TYP_I_IMPL at the point where we use it
2813  */
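//
// A rough usage sketch (illustrative only, not taken from a specific call site):
// a caller that has just popped two operands and is about to build an arithmetic
// node might do
//
//     impBashVarAddrsToI(op1, op2);  // "&local" operands become TYP_I_IMPL
//     op1 = gtNewOperNode(GT_ADD, TYP_I_IMPL, op1, op2);
//
// so that local-address values used purely as native ints don't keep TYP_BYREF.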
2814
2815 /* static */
2816 void Compiler::impBashVarAddrsToI(GenTreePtr tree1, GenTreePtr tree2)
2817 {
2818     if (tree1->IsVarAddr())
2819     {
2820         tree1->gtType = TYP_I_IMPL;
2821     }
2822
2823     if (tree2 && tree2->IsVarAddr())
2824     {
2825         tree2->gtType = TYP_I_IMPL;
2826     }
2827 }
2828
2829 /*****************************************************************************
2830  *  TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want
2831  *  to make that an explicit cast in our trees, so any implicit casts that
2832  *  exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are
2833  *  turned into explicit casts here.
2834  *  We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0)
2835  */
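//
// A rough sketch (illustrative only; 64-bit target assumed): if the IL leaves a
// TYP_INT value on the stack where a native int is expected, the importer can do
//
//     op1 = impImplicitIorI4Cast(op1, TYP_I_IMPL); // wraps op1 in GT_CAST(TYP_I_IMPL, op1)
//
// while an integer constant (including the 0 produced by ldnull, which is TYP_REF)
// is simply retyped to TYP_I_IMPL without inserting a cast node.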
2836
2837 GenTreePtr Compiler::impImplicitIorI4Cast(GenTreePtr tree, var_types dstTyp)
2838 {
2839     var_types currType   = genActualType(tree->gtType);
2840     var_types wantedType = genActualType(dstTyp);
2841
2842     if (wantedType != currType)
2843     {
2844         // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
2845         if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp))
2846         {
2847             if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->gtIntCon.gtIconVal == 0)))
2848             {
2849                 tree->gtType = TYP_I_IMPL;
2850             }
2851         }
2852 #ifdef _TARGET_64BIT_
2853         else if (varTypeIsI(wantedType) && (currType == TYP_INT))
2854         {
2855             // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF
2856             tree = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
2857         }
2858         else if ((wantedType == TYP_INT) && varTypeIsI(currType))
2859         {
2860             // Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT
2861             tree = gtNewCastNode(TYP_INT, tree, TYP_INT);
2862         }
2863 #endif // _TARGET_64BIT_
2864     }
2865
2866     return tree;
2867 }
2868
2869 /*****************************************************************************
2870  *  TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases,
2871  *  but we want to make that an explicit cast in our trees, so any implicit casts
2872  *  that exist in the IL are turned into explicit casts here.
2873  */
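//
// For example (illustrative only): storing a TYP_FLOAT value where a TYP_DOUBLE
// is expected would go through
//
//     op1 = impImplicitR4orR8Cast(op1, TYP_DOUBLE); // yields GT_CAST(TYP_DOUBLE, op1)
//
// whereas matching types (or the legacy backend) leave the tree untouched.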
2874
2875 GenTreePtr Compiler::impImplicitR4orR8Cast(GenTreePtr tree, var_types dstTyp)
2876 {
2877 #ifndef LEGACY_BACKEND
2878     if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType))
2879     {
2880         tree = gtNewCastNode(dstTyp, tree, dstTyp);
2881     }
2882 #endif // !LEGACY_BACKEND
2883
2884     return tree;
2885 }
2886
2887 //------------------------------------------------------------------------
2888 // impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray
2889 //    with a GT_COPYBLK node.
2890 //
2891 // Arguments:
2892 //    sig - The InitializeArray signature.
2893 //
2894 // Return Value:
2895 //    A pointer to the newly created GT_COPYBLK node if the replacement succeeds or
2896 //    nullptr otherwise.
2897 //
2898 // Notes:
2899 //    The function recognizes the following IL pattern:
2900 //      ldc <length> or a list of ldc <lower bound>/<length>
2901 //      newarr or newobj
2902 //      dup
2903 //      ldtoken <field handle>
2904 //      call InitializeArray
2905 //    The lower bounds need not be constant except when the array rank is 1.
2906 //    The function recognizes all kinds of arrays thus enabling a small runtime
2907 //    such as CoreRT to skip providing an implementation for InitializeArray.
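//
//    As a concrete illustration (assuming typical C# compiler codegen), a field
//    initializer such as
//        static readonly int[] Primes = { 2, 3, 5, 7 };
//    produces IL along the lines of
//        ldc.i4.4
//        newarr   [mscorlib]System.Int32
//        dup
//        ldtoken  <PrivateImplementationDetails> ... <some RVA field>
//        call     void System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(...)
//    which matches the pattern above and is rewritten here into a single block
//    copy from the field's static data into the freshly allocated array.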
2908
2909 GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
2910 {
2911     assert(sig->numArgs == 2);
2912
2913     GenTreePtr fieldTokenNode = impStackTop(0).val;
2914     GenTreePtr arrayLocalNode = impStackTop(1).val;
2915
2916     //
2917     // Verify that the field token is known and valid.  Note that it's also
2918     // possible for the token to come from reflection, in which case we cannot do
2919     // the optimization and must therefore revert to calling the helper.  You can
2920     // see an example of this in bvt\DynIL\initarray2.exe (in Main).
2921     //
2922
2923     // Check whether what we see here is the ldtoken helper call.
2924     if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->gtCall.gtCallType != CT_HELPER) ||
2925         (fieldTokenNode->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
2926     {
2927         return nullptr;
2928     }
2929
2930     // Strip helper call away
2931     fieldTokenNode = fieldTokenNode->gtCall.gtCallArgs->Current();
2932
2933     if (fieldTokenNode->gtOper == GT_IND)
2934     {
2935         fieldTokenNode = fieldTokenNode->gtOp.gtOp1;
2936     }
2937
2938     // Check for constant
2939     if (fieldTokenNode->gtOper != GT_CNS_INT)
2940     {
2941         return nullptr;
2942     }
2943
2944     CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->gtIntCon.gtCompileTimeHandle;
2945     if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
2946     {
2947         return nullptr;
2948     }
2949
2950     //
2951     // We need to get the number of elements in the array and the size of each element.
2952     // We verify that the newarr statement is exactly what we expect it to be.
2953     // If it's not, then we just return nullptr and don't optimize this call
2954     //
2955
2956     //
2957     // It is possible that we don't have any statements in the block yet
2958     //
2959     if (impTreeLast->gtOper != GT_STMT)
2960     {
2961         assert(impTreeLast->gtOper == GT_BEG_STMTS);
2962         return nullptr;
2963     }
2964
2965     //
2966     // We start by looking at the last statement, making sure it's an assignment, and
2967     // that the target of the assignment is the array passed to InitializeArray.
2968     //
2969     GenTreePtr arrayAssignment = impTreeLast->gtStmt.gtStmtExpr;
2970     if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->gtOp.gtOp1->gtOper != GT_LCL_VAR) ||
2971         (arrayLocalNode->gtOper != GT_LCL_VAR) ||
2972         (arrayAssignment->gtOp.gtOp1->gtLclVarCommon.gtLclNum != arrayLocalNode->gtLclVarCommon.gtLclNum))
2973     {
2974         return nullptr;
2975     }
2976
2977     //
2978     // Make sure that the object being assigned is a helper call.
2979     //
2980
2981     GenTreePtr newArrayCall = arrayAssignment->gtOp.gtOp2;
2982     if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->gtCall.gtCallType != CT_HELPER))
2983     {
2984         return nullptr;
2985     }
2986
2987     //
2988     // Verify that it is one of the new array helpers.
2989     //
2990
2991     bool isMDArray = false;
2992
2993     if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) &&
2994         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) &&
2995         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) &&
2996         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8)
2997 #ifdef FEATURE_READYTORUN_COMPILER
2998         && newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)
2999 #endif
3000             )
3001     {
3002 #if COR_JIT_EE_VERSION > 460
3003         if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR_NONVARARG))
3004         {
3005             return nullptr;
3006         }
3007
3008         isMDArray = true;
3009 #endif
3010     }
3011
3012     CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->gtCall.compileTimeHelperArgumentHandle;
3013
3014     //
3015     // Make sure we found a compile time handle to the array
3016     //
3017
3018     if (!arrayClsHnd)
3019     {
3020         return nullptr;
3021     }
3022
3023     unsigned rank = 0;
3024     S_UINT32 numElements;
3025
3026     if (isMDArray)
3027     {
3028         rank = info.compCompHnd->getArrayRank(arrayClsHnd);
3029
3030         if (rank == 0)
3031         {
3032             return nullptr;
3033         }
3034
3035         GenTreeArgList* tokenArg = newArrayCall->gtCall.gtCallArgs;
3036         assert(tokenArg != nullptr);
3037         GenTreeArgList* numArgsArg = tokenArg->Rest();
3038         assert(numArgsArg != nullptr);
3039         GenTreeArgList* argsArg = numArgsArg->Rest();
3040         assert(argsArg != nullptr);
3041
3042         //
3043         // The number of arguments should be a constant between 1 and 64. The rank can't be 0
3044         // so at least one length must be present and the rank can't exceed 32 so there can
3045         // be at most 64 arguments - 32 lengths and 32 lower bounds.
3046         //
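        // For instance (a rough illustration): a rank-2 creation with no explicit
        // lower bounds passes numArgs == 2 (just the two lengths), while the same
        // array created with explicit lower bounds passes numArgs == 4 (a lower
        // bound / length pair per dimension).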
3047
3048         if ((!numArgsArg->Current()->IsCnsIntOrI()) || (numArgsArg->Current()->AsIntCon()->IconValue() < 1) ||
3049             (numArgsArg->Current()->AsIntCon()->IconValue() > 64))
3050         {
3051             return nullptr;
3052         }
3053
3054         unsigned numArgs = static_cast<unsigned>(numArgsArg->Current()->AsIntCon()->IconValue());
3055         bool     lowerBoundsSpecified;
3056
3057         if (numArgs == rank * 2)
3058         {
3059             lowerBoundsSpecified = true;
3060         }
3061         else if (numArgs == rank)
3062         {
3063             lowerBoundsSpecified = false;
3064
3065             //
3066             // If the rank is 1 and a lower bound isn't specified, then the runtime creates
3067             // an SDArray. Note that even if a lower bound is specified it can be 0, in which
3068             // case we get an SDArray as well; see the for loop below.
3069             //
3070
3071             if (rank == 1)
3072             {
3073                 isMDArray = false;
3074             }
3075         }
3076         else
3077         {
3078             return nullptr;
3079         }
3080
3081         //
3082         // The rank is known to be at least 1 so we can start with numElements being 1
3083         // to avoid the need to special case the first dimension.
3084         //
3085
3086         numElements = S_UINT32(1);
3087
3088         struct Match
3089         {
3090             static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3091             {
3092                 return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) &&
3093                        IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3094             }
3095
3096             static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3097             {
3098                 return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) &&
3099                        (tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) &&
3100                        IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3101             }
3102
3103             static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs)
3104             {
3105                 return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) &&
3106                        (tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs);
3107             }
3108
3109             static bool IsComma(GenTree* tree)
3110             {
3111                 return (tree != nullptr) && (tree->OperGet() == GT_COMMA);
3112             }
3113         };
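
        // Rough shape of the argument tree walked below (illustrative only): one
        // COMMA per newobj argument, each storing into the lvaNewObjArrayArgs temp
        // at a 4-byte offset, with the address of that temp as the final operand.
        //
        //     COMMA
        //       ASG(IND(ADD(ADDR(LCL_VAR lvaNewObjArrayArgs), 4 * argIndex)), <bound or length>)
        //       COMMA
        //         ...
        //           ADDR(LCL_VAR lvaNewObjArrayArgs)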
3114
3115         unsigned argIndex = 0;
3116         GenTree* comma;
3117
3118         for (comma = argsArg->Current(); Match::IsComma(comma); comma = comma->gtGetOp2())
3119         {
3120             if (lowerBoundsSpecified)
3121             {
3122                 //
3123                 // In general lower bounds can be ignored because they're not needed to
3124                 // calculate the total number of elements. But for single dimensional arrays
3125                 // we need to know if the lower bound is 0 because in this case the runtime
3126                 // creates a SDArray and this affects the way the array data offset is calculated.
3127                 //
3128
3129                 if (rank == 1)
3130                 {
3131                     GenTree* lowerBoundAssign = comma->gtGetOp1();
3132                     assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs));
3133                     GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2();
3134
3135                     if (lowerBoundNode->IsIntegralConst(0))
3136                     {
3137                         isMDArray = false;
3138                     }
3139                 }
3140
3141                 comma = comma->gtGetOp2();
3142                 argIndex++;
3143             }
3144
3145             GenTree* lengthNodeAssign = comma->gtGetOp1();
3146             assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs));
3147             GenTree* lengthNode = lengthNodeAssign->gtGetOp2();
3148
3149             if (!lengthNode->IsCnsIntOrI())
3150             {
3151                 return nullptr;
3152             }
3153
3154             numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue());
3155             argIndex++;
3156         }
3157
3158         assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs));
3159
3160         if (argIndex != numArgs)
3161         {
3162             return nullptr;
3163         }
3164     }
3165     else
3166     {
3167         //
3168         // Make sure there are exactly two arguments:  the array class and
3169         // the number of elements.
3170         //
3171
3172         GenTreePtr arrayLengthNode;
3173
3174         GenTreeArgList* args = newArrayCall->gtCall.gtCallArgs;
3175 #ifdef FEATURE_READYTORUN_COMPILER
3176         if (newArrayCall->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1))
3177         {
3178             // Array length is 1st argument for readytorun helper
3179             arrayLengthNode = args->Current();
3180         }
3181         else
3182 #endif
3183         {
3184             // Array length is 2nd argument for regular helper
3185             arrayLengthNode = args->Rest()->Current();
3186         }
3187
3188         //
3189         // Make sure that the number of elements looks valid.
3190         //
3191         if (arrayLengthNode->gtOper != GT_CNS_INT)
3192         {
3193             return nullptr;
3194         }
3195
3196         numElements = S_SIZE_T(arrayLengthNode->gtIntCon.gtIconVal);
3197
3198         if (!info.compCompHnd->isSDArray(arrayClsHnd))
3199         {
3200             return nullptr;
3201         }
3202     }
3203
3204     CORINFO_CLASS_HANDLE elemClsHnd;
3205     var_types            elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd));
3206
3207     //
3208     // Note that genTypeSize will return zero for non-primitive types, which is exactly
3209     // what we want (size will then be 0, and we will catch this in the conditional below).
3210     // Note that we don't expect this to fail for valid binaries, so we assert in the
3211     // non-verification case (the verification case should not assert but rather correctly
3212     // handle bad binaries).  This assert is not guarding any specific invariant, but rather
3213     // saying that we don't expect this to happen, and if it is hit, we need to investigate
3214     // why.
3215     //
3216
3217     S_UINT32 elemSize(genTypeSize(elementType));
3218     S_UINT32 size = elemSize * S_UINT32(numElements);
3219
3220     if (size.IsOverflow())
3221     {
3222         return nullptr;
3223     }
3224
3225     if ((size.Value() == 0) || (varTypeIsGC(elementType)))
3226     {
3227         assert(verNeedsVerification());
3228         return nullptr;
3229     }
3230
3231     void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value());
3232     if (!initData)
3233     {
3234         return nullptr;
3235     }
3236
3237     //
3238     // At this point we are ready to commit to implementing the InitializeArray
3239     // intrinsic using a struct assignment.  Pop the arguments from the stack and
3240     // return the struct assignment node.
3241     //
3242
3243     impPopStack();
3244     impPopStack();
3245
3246     const unsigned blkSize = size.Value();
3247     GenTreePtr     dst;
3248
3249     if (isMDArray)
3250     {
3251         unsigned dataOffset = eeGetMDArrayDataOffset(elementType, rank);
3252
3253         dst = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
3254     }
3255     else
3256     {
3257         dst = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewIndexRef(elementType, arrayLocalNode, gtNewIconNode(0)));
3258     }
3259     GenTreePtr blk     = gtNewBlockVal(dst, blkSize);
3260     GenTreePtr srcAddr = gtNewIconHandleNode((size_t)initData, GTF_ICON_STATIC_HDL);
3261     GenTreePtr src     = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
3262
3263     return gtNewBlkOpNode(blk,     // dst
3264                           src,     // src
3265                           blkSize, // size
3266                           false,   // volatile
3267                           true);   // copyBlock
3268 }
3269
3270 /*****************************************************************************/
3271 // Returns the GenTree that should be used to do the intrinsic instead of the call.
3272 // Returns NULL if an intrinsic cannot be used
3273
3274 GenTreePtr Compiler::impIntrinsic(CORINFO_CLASS_HANDLE  clsHnd,
3275                                   CORINFO_METHOD_HANDLE method,
3276                                   CORINFO_SIG_INFO*     sig,
3277                                   int                   memberRef,
3278                                   bool                  readonlyCall,
3279                                   bool                  tailCall,
3280                                   CorInfoIntrinsics*    pIntrinsicID)
3281 {
3282     bool mustExpand = false;
3283 #if COR_JIT_EE_VERSION > 460
3284     CorInfoIntrinsics intrinsicID = info.compCompHnd->getIntrinsicID(method, &mustExpand);
3285 #else
3286     CorInfoIntrinsics intrinsicID = info.compCompHnd->getIntrinsicID(method);
3287 #endif
3288     *pIntrinsicID = intrinsicID;
3289
3290 #ifndef _TARGET_ARM_
3291     genTreeOps interlockedOperator;
3292 #endif
3293
3294     if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContext)
3295     {
3296         // must be done regardless of DbgCode and MinOpts
3297         return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL);
3298     }
3299 #ifdef _TARGET_64BIT_
3300     if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr)
3301     {
3302         // must be done regardless of DbgCode and MinOpts
3303         return gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL));
3304     }
3305 #else
3306     assert(intrinsicID != CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr);
3307 #endif
3308
3309     GenTreePtr retNode = nullptr;
3310
3311     //
3312     // We disable the inlining of intrinsics for MinOpts.
3313     //
3314     if (!mustExpand && (opts.compDbgCode || opts.MinOpts()))
3315     {
3316         *pIntrinsicID = CORINFO_INTRINSIC_Illegal;
3317         return retNode;
3318     }
3319
3320     // Currently we don't have CORINFO_INTRINSIC_Exp because it does not
3321     // seem to work properly for Infinity values, and we don't do
3322     // CORINFO_INTRINSIC_Pow because it needs a helper which we currently don't have.
3323
3324     var_types callType = JITtype2varType(sig->retType);
3325
3326     /* First do the intrinsics which are always smaller than a call */
3327
3328     switch (intrinsicID)
3329     {
3330         GenTreePtr op1, op2;
3331
3332         case CORINFO_INTRINSIC_Sin:
3333         case CORINFO_INTRINSIC_Sqrt:
3334         case CORINFO_INTRINSIC_Abs:
3335         case CORINFO_INTRINSIC_Cos:
3336         case CORINFO_INTRINSIC_Round:
3337         case CORINFO_INTRINSIC_Cosh:
3338         case CORINFO_INTRINSIC_Sinh:
3339         case CORINFO_INTRINSIC_Tan:
3340         case CORINFO_INTRINSIC_Tanh:
3341         case CORINFO_INTRINSIC_Asin:
3342         case CORINFO_INTRINSIC_Acos:
3343         case CORINFO_INTRINSIC_Atan:
3344         case CORINFO_INTRINSIC_Atan2:
3345         case CORINFO_INTRINSIC_Log10:
3346         case CORINFO_INTRINSIC_Pow:
3347         case CORINFO_INTRINSIC_Exp:
3348         case CORINFO_INTRINSIC_Ceiling:
3349         case CORINFO_INTRINSIC_Floor:
3350
3351             // These are math intrinsics
3352
3353             assert(callType != TYP_STRUCT);
3354
3355             op1 = nullptr;
3356
3357 #if defined(LEGACY_BACKEND)
3358             if (IsTargetIntrinsic(intrinsicID))
3359 #elif !defined(_TARGET_X86_)
3360             // Intrinsics that are not implemented directly by target instructions will
3361             // be re-materialized as user calls in rationalizer. For prefixed tail calls,
3362             // don't do this optimization, because:
3363             //  a) back compatibility with desktop .NET 4.6 / 4.6.1, and
3364             //  b) it would be a non-trivial task, or too late, to re-materialize a surviving
3365             //     tail prefixed GT_INTRINSIC as a tail call in rationalizer.
3366             if (!IsIntrinsicImplementedByUserCall(intrinsicID) || !tailCall)
3367 #else
3368             // On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation
3369             // of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad
3370             // code generation for certain EH constructs.
3371             if (!IsIntrinsicImplementedByUserCall(intrinsicID))
3372 #endif
3373             {
3374                 switch (sig->numArgs)
3375                 {
3376                     case 1:
3377                         op1 = impPopStack().val;
3378
3379 #if FEATURE_X87_DOUBLES
3380
3381                         // X87 stack doesn't differentiate between float/double
3382                         // so it doesn't need a cast, but everybody else does
3383                         // Just double check it is at least a FP type
3384                         noway_assert(varTypeIsFloating(op1));
3385
3386 #else // FEATURE_X87_DOUBLES
3387
3388                         if (op1->TypeGet() != callType)
3389                         {
3390                             op1 = gtNewCastNode(callType, op1, callType);
3391                         }
3392
3393 #endif // FEATURE_X87_DOUBLES
3394
3395                         op1 = new (this, GT_INTRINSIC)
3396                             GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3397                         break;
3398
3399                     case 2:
3400                         op2 = impPopStack().val;
3401                         op1 = impPopStack().val;
3402
3403 #if FEATURE_X87_DOUBLES
3404
3405                         // X87 stack doesn't differentiate between float/double
3406                         // so it doesn't need a cast, but everybody else does
3407                         // Just double check it is at least a FP type
3408                         noway_assert(varTypeIsFloating(op2));
3409                         noway_assert(varTypeIsFloating(op1));
3410
3411 #else // FEATURE_X87_DOUBLES
3412
3413                         if (op2->TypeGet() != callType)
3414                         {
3415                             op2 = gtNewCastNode(callType, op2, callType);
3416                         }
3417                         if (op1->TypeGet() != callType)
3418                         {
3419                             op1 = gtNewCastNode(callType, op1, callType);
3420                         }
3421
3422 #endif // FEATURE_X87_DOUBLES
3423
3424                         op1 = new (this, GT_INTRINSIC)
3425                             GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicID, method);
3426                         break;
3427
3428                     default:
3429                         NO_WAY("Unsupported number of args for Math Intrinsic");
3430                 }
3431
3432 #ifndef LEGACY_BACKEND
3433                 if (IsIntrinsicImplementedByUserCall(intrinsicID))
3434                 {
3435                     op1->gtFlags |= GTF_CALL;
3436                 }
3437 #endif
3438             }
3439
3440             retNode = op1;
3441             break;
3442
3443 #ifdef _TARGET_XARCH_
3444         // TODO-ARM-CQ: reenable treating Interlocked operation as intrinsic
3445         case CORINFO_INTRINSIC_InterlockedAdd32:
3446             interlockedOperator = GT_LOCKADD;
3447             goto InterlockedBinOpCommon;
3448         case CORINFO_INTRINSIC_InterlockedXAdd32:
3449             interlockedOperator = GT_XADD;
3450             goto InterlockedBinOpCommon;
3451         case CORINFO_INTRINSIC_InterlockedXchg32:
3452             interlockedOperator = GT_XCHG;
3453             goto InterlockedBinOpCommon;
3454
3455 #ifdef _TARGET_AMD64_
3456         case CORINFO_INTRINSIC_InterlockedAdd64:
3457             interlockedOperator = GT_LOCKADD;
3458             goto InterlockedBinOpCommon;
3459         case CORINFO_INTRINSIC_InterlockedXAdd64:
3460             interlockedOperator = GT_XADD;
3461             goto InterlockedBinOpCommon;
3462         case CORINFO_INTRINSIC_InterlockedXchg64:
3463             interlockedOperator = GT_XCHG;
3464             goto InterlockedBinOpCommon;
3465 #endif // _TARGET_AMD64_
3466
3467         InterlockedBinOpCommon:
3468             assert(callType != TYP_STRUCT);
3469             assert(sig->numArgs == 2);
3470
3471             op2 = impPopStack().val;
3472             op1 = impPopStack().val;
3473
3474             // This creates:
3475             //   val
3476             // XAdd
3477             //   addr
3478             //     field (for example)
3479             //
3480             // In the case where the first argument is the address of a local, we might
3481             // want to make this *not* make the var address-taken -- but atomic instructions
3482             // on a local are probably pretty useless anyway, so we probably don't care.
3483
3484             op1 = gtNewOperNode(interlockedOperator, genActualType(callType), op1, op2);
3485             op1->gtFlags |= GTF_GLOB_EFFECT;
3486             retNode = op1;
3487             break;
3488 #endif // _TARGET_XARCH_
3489
3490         case CORINFO_INTRINSIC_MemoryBarrier:
3491
3492             assert(sig->numArgs == 0);
3493
3494             op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
3495             op1->gtFlags |= GTF_GLOB_EFFECT;
3496             retNode = op1;
3497             break;
3498
3499 #ifdef _TARGET_XARCH_
3500         // TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic
3501         case CORINFO_INTRINSIC_InterlockedCmpXchg32:
3502 #ifdef _TARGET_AMD64_
3503         case CORINFO_INTRINSIC_InterlockedCmpXchg64:
3504 #endif
3505         {
3506             assert(callType != TYP_STRUCT);
3507             assert(sig->numArgs == 3);
3508             GenTreePtr op3;
3509
3510             op3 = impPopStack().val; // comparand
3511             op2 = impPopStack().val; // value
3512             op1 = impPopStack().val; // location
3513
3514             GenTreePtr node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3);
3515
3516             node->gtCmpXchg.gtOpLocation->gtFlags |= GTF_DONT_CSE;
3517             retNode = node;
3518             break;
3519         }
3520 #endif
3521
3522         case CORINFO_INTRINSIC_StringLength:
3523             op1 = impPopStack().val;
3524             if (!opts.MinOpts() && !opts.compDbgCode)
3525             {
3526                 GenTreeArrLen* arrLen =
3527                     new (this, GT_ARR_LENGTH) GenTreeArrLen(TYP_INT, op1, offsetof(CORINFO_String, stringLen));
3528                 op1 = arrLen;
3529             }
3530             else
3531             {
3532                 /* Create the expression "*(str_addr + stringLengthOffset)" */
3533                 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
3534                                     gtNewIconNode(offsetof(CORINFO_String, stringLen), TYP_I_IMPL));
3535                 op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
3536             }
3537             retNode = op1;
3538             break;
3539
3540         case CORINFO_INTRINSIC_StringGetChar:
3541             op2 = impPopStack().val;
3542             op1 = impPopStack().val;
3543             op1 = gtNewIndexRef(TYP_CHAR, op1, op2);
3544             op1->gtFlags |= GTF_INX_STRING_LAYOUT;
3545             retNode = op1;
3546             break;
3547
3548         case CORINFO_INTRINSIC_InitializeArray:
3549             retNode = impInitializeArrayIntrinsic(sig);
3550             break;
3551
3552         case CORINFO_INTRINSIC_Array_Address:
3553         case CORINFO_INTRINSIC_Array_Get:
3554         case CORINFO_INTRINSIC_Array_Set:
3555             retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, intrinsicID);
3556             break;
3557
3558         case CORINFO_INTRINSIC_GetTypeFromHandle:
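        // Note: C# 'typeof(T)' typically compiles to 'ldtoken T' followed by a call to
        // Type.GetTypeFromHandle. By this point the ldtoken result appears on the stack
        // as a type-handle-to-RuntimeType helper call, so when that shape is seen below
        // the helper call is retyped to return the RuntimeType directly and the
        // GetTypeFromHandle call itself is elided.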
3559             op1 = impStackTop(0).val;
3560             if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3561                 gtIsTypeHandleToRuntimeTypeHelper(op1))
3562             {
3563                 op1 = impPopStack().val;
3564                 // Change call to return RuntimeType directly.
3565                 op1->gtType = TYP_REF;
3566                 retNode     = op1;
3567             }
3568             // Call the regular function.
3569             break;
3570
3571         case CORINFO_INTRINSIC_RTH_GetValueInternal:
3572             op1 = impStackTop(0).val;
3573             if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3574                 gtIsTypeHandleToRuntimeTypeHelper(op1))
3575             {
3576                 // Old tree
3577                 // Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle
3578                 //
3579                 // New tree
3580                 // TreeToGetNativeTypeHandle
3581
3582                 // Remove call to helper and return the native TypeHandle pointer that was the parameter
3583                 // to that helper.
3584
3585                 op1 = impPopStack().val;
3586
3587                 // Get native TypeHandle argument to old helper
3588                 op1 = op1->gtCall.gtCallArgs;
3589                 assert(op1->OperIsList());
3590                 assert(op1->gtOp.gtOp2 == nullptr);
3591                 op1     = op1->gtOp.gtOp1;
3592                 retNode = op1;
3593             }
3594             // Call the regular function.
3595             break;
3596
3597 #ifndef LEGACY_BACKEND
3598         case CORINFO_INTRINSIC_Object_GetType:
3599
3600             op1 = impPopStack().val;
3601             op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3602
3603             // Set the CALL flag to indicate that the operator is implemented by a call.
3604             // Set also the EXCEPTION flag because the native implementation of
3605             // CORINFO_INTRINSIC_Object_GetType intrinsic can throw NullReferenceException.
3606             op1->gtFlags |= (GTF_CALL | GTF_EXCEPT);
3607             retNode = op1;
3608             break;
3609 #endif
3610
3611         default:
3612             /* Unknown intrinsic */
3613             break;
3614     }
3615
3616     if (mustExpand)
3617     {
3618         if (retNode == nullptr)
3619         {
3620             NO_WAY("JIT must expand the intrinsic!");
3621         }
3622     }
3623
3624     return retNode;
3625 }
3626
3627 /*****************************************************************************/
3628
3629 GenTreePtr Compiler::impArrayAccessIntrinsic(
3630     CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, CorInfoIntrinsics intrinsicID)
3631 {
3632     /* If we are generating SMALL_CODE, we don't want to use intrinsics for
3633        the following, as they generate fatter code.
3634     */
3635
3636     if (compCodeOpt() == SMALL_CODE)
3637     {
3638         return nullptr;
3639     }
3640
3641     /* These intrinsics generate fatter (but faster) code and are only
3642        done if we don't need SMALL_CODE */
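    // As an example (illustrative only): for a C# access such as 'x = a[i, j]' on a
    // 'double[,]', the Array_Get intrinsic recognized here is expanded into a
    // GT_ARR_ELEM node (the address of the element) with a GT_IND on top, instead
    // of leaving a real call to the runtime's Get method.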
3643
3644     unsigned rank = (intrinsicID == CORINFO_INTRINSIC_Array_Set) ? (sig->numArgs - 1) : sig->numArgs;
3645
3646     // The rank 1 case is special because it has to handle two array formats;
3647     // we will simply not do that case.
3648     if (rank > GT_ARR_MAX_RANK || rank <= 1)
3649     {
3650         return nullptr;
3651     }
3652
3653     CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr;
3654     var_types            elemType      = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd));
3655
3656     // For the ref case, we will only be able to inline if the types match
3657     // (the verifier checks for this; we don't care for the non-verified case) and
3658     // the type is final (so we don't need to do the cast).
3659     if ((intrinsicID != CORINFO_INTRINSIC_Array_Get) && !readonlyCall && varTypeIsGC(elemType))
3660     {
3661         // Get the call site signature
3662         CORINFO_SIG_INFO LocalSig;
3663         eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig);
3664         assert(LocalSig.hasThis());
3665
3666         CORINFO_CLASS_HANDLE actualElemClsHnd;
3667
3668         if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3669         {
3670             // Fetch the last argument, the one that indicates the type we are setting.
3671             CORINFO_ARG_LIST_HANDLE argType = LocalSig.args;
3672             for (unsigned r = 0; r < rank; r++)
3673             {
3674                 argType = info.compCompHnd->getArgNext(argType);
3675             }
3676
3677             typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType);
3678             actualElemClsHnd = argInfo.GetClassHandle();
3679         }
3680         else
3681         {
3682             assert(intrinsicID == CORINFO_INTRINSIC_Array_Address);
3683
3684             // Fetch the return type
3685             typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass);
3686             assert(retInfo.IsByRef());
3687             actualElemClsHnd = retInfo.GetClassHandle();
3688         }
3689
3690         // if it's not final, we can't do the optimization
3691         if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL))
3692         {
3693             return nullptr;
3694         }
3695     }
3696
3697     unsigned arrayElemSize;
3698     if (elemType == TYP_STRUCT)
3699     {
3700         assert(arrElemClsHnd);
3701
3702         arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd);
3703     }
3704     else
3705     {
3706         arrayElemSize = genTypeSize(elemType);
3707     }
3708
3709     if ((unsigned char)arrayElemSize != arrayElemSize)
3710     {
3711         // arrayElemSize would be truncated as an unsigned char.
3712         // This means the array element is too large. Don't do the optimization.
3713         return nullptr;
3714     }
3715
3716     GenTreePtr val = nullptr;
3717
3718     if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3719     {
3720         // Assignment of a struct is more work, and there are more gets than sets.
3721         if (elemType == TYP_STRUCT)
3722         {
3723             return nullptr;
3724         }
3725
3726         val = impPopStack().val;
3727         assert(genActualType(elemType) == genActualType(val->gtType) ||
3728                (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) ||
3729                (elemType == TYP_INT && val->gtType == TYP_BYREF) ||
3730                (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT));
3731     }
3732
3733     noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK);
3734
3735     GenTreePtr inds[GT_ARR_MAX_RANK];
3736     for (unsigned k = rank; k > 0; k--)
3737     {
3738         inds[k - 1] = impPopStack().val;
3739     }
3740
3741     GenTreePtr arr = impPopStack().val;
3742     assert(arr->gtType == TYP_REF);
3743
3744     GenTreePtr arrElem =
3745         new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank),
3746                                                static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]);
3747
3748     if (intrinsicID != CORINFO_INTRINSIC_Array_Address)
3749     {
3750         arrElem = gtNewOperNode(GT_IND, elemType, arrElem);
3751     }
3752
3753     if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3754     {
3755         assert(val != nullptr);
3756         return gtNewAssignNode(arrElem, val);
3757     }
3758     else
3759     {
3760         return arrElem;
3761     }
3762 }
3763
3764 BOOL Compiler::verMergeEntryStates(BasicBlock* block, bool* changed)
3765 {
3766     unsigned i;
3767
3768     // do some basic checks first
3769     if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth)
3770     {
3771         return FALSE;
3772     }
3773
3774     if (verCurrentState.esStackDepth > 0)
3775     {
3776         // merge stack types
3777         StackEntry* parentStack = block->bbStackOnEntry();
3778         StackEntry* childStack  = verCurrentState.esStack;
3779
3780         for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++)
3781         {
3782             if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == FALSE)
3783             {
3784                 return FALSE;
3785             }
3786         }
3787     }
3788
3789     // merge initialization status of this ptr
3790
3791     if (verTrackObjCtorInitState)
3792     {
3793         // If we're tracking the CtorInitState, then it must not be unknown in the current state.
3794         assert(verCurrentState.thisInitialized != TIS_Bottom);
3795
3796         // If the successor block's thisInit state is unknown, copy it from the current state.
3797         if (block->bbThisOnEntry() == TIS_Bottom)
3798         {
3799             *changed = true;
3800             verSetThisInit(block, verCurrentState.thisInitialized);
3801         }
3802         else if (verCurrentState.thisInitialized != block->bbThisOnEntry())
3803         {
3804             if (block->bbThisOnEntry() != TIS_Top)
3805             {
3806                 *changed = true;
3807                 verSetThisInit(block, TIS_Top);
3808
3809                 if (block->bbFlags & BBF_FAILED_VERIFICATION)
3810                 {
3811                     // The block is bad. Control can flow through the block to any handler that catches the
3812                     // verification exception, but the importer ignores bad blocks and therefore won't model
3813                     // this flow in the normal way. To complete the merge into the bad block, the new state
3814                     // needs to be manually pushed to the handlers that may be reached after the verification
3815                     // exception occurs.
3816                     //
3817                     // Usually, the new state was already propagated to the relevant handlers while processing
3818                     // the predecessors of the bad block. The exception is when the bad block is at the start
3819                     // of a try region, meaning it is protected by additional handlers that do not protect its
3820                     // predecessors.
3821                     //
3822                     if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0))
3823                     {
3824                         // Push TIS_Top to the handlers that protect the bad block. Note that this can cause
3825                         // recursive calls back into this code path (if successors of the current bad block are
3826                         // also bad blocks).
3827                         //
3828                         ThisInitState origTIS           = verCurrentState.thisInitialized;
3829                         verCurrentState.thisInitialized = TIS_Top;
3830                         impVerifyEHBlock(block, true);
3831                         verCurrentState.thisInitialized = origTIS;
3832                     }
3833                 }
3834             }
3835         }
3836     }
3837     else
3838     {
3839         assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom);
3840     }
3841
3842     return TRUE;
3843 }
3844
3845 /*****************************************************************************
3846  * 'logMsg' is true if a log message needs to be logged, false if the caller has
3847  *   already logged it (presumably in a more detailed fashion than done here)
3848  * 'bVerificationException' is true for a verification exception, false for a
3849  *   "call unauthorized by host" exception.
3850  */
3851
3852 void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg))
3853 {
3854     block->bbJumpKind = BBJ_THROW;
3855     block->bbFlags |= BBF_FAILED_VERIFICATION;
3856
3857     impCurStmtOffsSet(block->bbCodeOffs);
3858
3859 #ifdef DEBUG
3860     // we need this since BeginTreeList asserts otherwise
3861     impTreeList = impTreeLast = nullptr;
3862     block->bbFlags &= ~BBF_IMPORTED;
3863
3864     if (logMsg)
3865     {
3866         JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName,
3867                 block->bbCodeOffs, block->bbCodeOffsEnd));
3868         if (verbose)
3869         {
3870             printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs);
3871         }
3872     }
3873
3874     if (JitConfig.DebugBreakOnVerificationFailure())
3875     {
3876         DebugBreak();
3877     }
3878 #endif
3879
3880     impBeginTreeList();
3881
3882     // if the stack is non-empty evaluate all the side-effects
3883     if (verCurrentState.esStackDepth > 0)
3884     {
3885         impEvalSideEffects();
3886     }
3887     assert(verCurrentState.esStackDepth == 0);
3888
3889     GenTreePtr op1 = gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, GTF_EXCEPT,
3890                                          gtNewArgList(gtNewIconNode(block->bbCodeOffs)));
3891     // verCurrentState.esStackDepth = 0;
3892     impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
3893
3894     // The inliner is not able to handle methods that require a throw block, so
3895     // make sure this method never gets inlined.
3896     info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE);
3897 }
3898
3899 /*****************************************************************************
3900  *
3901  */
3902 void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg))
3903
3904 {
3905     // In AMD64, for historical reasons involving design limitations of JIT64, the VM has a
3906     // slightly different mechanism in which it calls the JIT to perform IL verification:
3907     // in the case of transparent methods the VM calls for a predicate IsVerifiable()
3908     // that consists of calling the JIT with the IMPORT_ONLY flag and with the IL verify flag on.
3909     // If the JIT determines the method is not verifiable, it should raise the exception to the VM and let
3910     // it bubble up until reported by the runtime.  Currently in RyuJIT, this method doesn't bubble
3911     // up the exception; instead it embeds a throw inside the offending basic block and lets this
3912     // fail at run time of the jitted method.
3913     //
3914     // For AMD64 we don't want this behavior when the JIT has been called only for verification (i.e.
3915     // with the IMPORT_ONLY and IL Verification flag set) because this won't actually generate code,
3916     // just try to find out whether to fail this method before even actually jitting it.  So, in case
3917     // we detect these two conditions, instead of generating a throw statement inside the offending
3918     // basic block, we immediately fail to JIT and notify the VM to make the IsVerifiable() predicate
3919     // to return false and make RyuJIT behave the same way JIT64 does.
3920     //
3921     // The rationale behind this workaround is to avoid modifying the VM and maintain compatibility between JIT64 and
3922     // RyuJIT for the time being until we completely replace JIT64.
3923     // TODO-ARM64-Cleanup:  We probably want to actually modify the VM in the future to avoid the unnecessary two passes.
3924
3925     // In AMD64 we must make sure we're behaving the same way as JIT64, meaning we should only raise the verification
3926     // exception if we are only importing and verifying.  The method verNeedsVerification() can also modify the
3927     // tiVerificationNeeded flag in the case it determines it can 'skip verification' during importation and defer it
3928     // to a runtime check. That's why we must assert one or the other (since the flag tiVerificationNeeded can
3929     // be turned off during importation).
3930     CLANG_FORMAT_COMMENT_ANCHOR;
3931
3932 #ifdef _TARGET_64BIT_
3933
3934 #ifdef DEBUG
3935     bool canSkipVerificationResult =
3936         info.compCompHnd->canSkipMethodVerification(info.compMethodHnd) != CORINFO_VERIFICATION_CANNOT_SKIP;
3937     assert(tiVerificationNeeded || canSkipVerificationResult);
3938 #endif // DEBUG
3939
3940     // Add the non verifiable flag to the compiler
3941     if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
3942     {
3943         tiIsVerifiableCode = FALSE;
3944     }
3945 #endif //_TARGET_64BIT_
3946     verResetCurrentState(block, &verCurrentState);
3947     verConvertBBToThrowVerificationException(block DEBUGARG(logMsg));
3948
3949 #ifdef DEBUG
3950     impNoteLastILoffs(); // Remember at which BC offset the tree was finished
3951 #endif                   // DEBUG
3952 }
3953
3954 /******************************************************************************/
3955 typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd)
3956 {
3957     assert(ciType < CORINFO_TYPE_COUNT);
3958
3959     typeInfo tiResult;
3960     switch (ciType)
3961     {
3962         case CORINFO_TYPE_STRING:
3963         case CORINFO_TYPE_CLASS:
3964             tiResult = verMakeTypeInfo(clsHnd);
3965             if (!tiResult.IsType(TI_REF))
3966             { // type must be consistent with element type
3967                 return typeInfo();
3968             }
3969             break;
3970
3971 #ifdef _TARGET_64BIT_
3972         case CORINFO_TYPE_NATIVEINT:
3973         case CORINFO_TYPE_NATIVEUINT:
3974             if (clsHnd)
3975             {
3976                 // If we have more precise information, use it
3977                 return verMakeTypeInfo(clsHnd);
3978             }
3979             else
3980             {
3981                 return typeInfo::nativeInt();
3982             }
3983             break;
3984 #endif // _TARGET_64BIT_
3985
3986         case CORINFO_TYPE_VALUECLASS:
3987         case CORINFO_TYPE_REFANY:
3988             tiResult = verMakeTypeInfo(clsHnd);
3989             // type must be consistent with element type
3990             if (!tiResult.IsValueClass())
3991             {
3992                 return typeInfo();
3993             }
3994             break;
3995         case CORINFO_TYPE_VAR:
3996             return verMakeTypeInfo(clsHnd);
3997
3998         case CORINFO_TYPE_PTR: // for now, pointers are treated as an error
3999         case CORINFO_TYPE_VOID:
4000             return typeInfo();
4001             break;
4002
4003         case CORINFO_TYPE_BYREF:
4004         {
4005             CORINFO_CLASS_HANDLE childClassHandle;
4006             CorInfoType          childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle);
4007             return ByRef(verMakeTypeInfo(childType, childClassHandle));
4008         }
4009         break;
4010
4011         default:
4012             if (clsHnd)
4013             { // If we have more precise information, use it
4014                 return typeInfo(TI_STRUCT, clsHnd);
4015             }
4016             else
4017             {
4018                 return typeInfo(JITtype2tiType(ciType));
4019             }
4020     }
4021     return tiResult;
4022 }
4023
4024 /******************************************************************************/
4025
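// A sketch of the mapping implemented below (for illustration): a primitive value
// class such as System.Int32 maps to typeInfo(TI_INT), a user-defined struct maps
// to typeInfo(TI_STRUCT, clsHnd), an ordinary reference type maps to
// typeInfo(TI_REF, clsHnd), and when bashStructToRef is true a struct handle is
// reported as TI_REF instead of TI_STRUCT.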
4026 typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */)
4027 {
4028     if (clsHnd == nullptr)
4029     {
4030         return typeInfo();
4031     }
4032
4033     // Byrefs should only occur in method and local signatures, which are accessed
4034     // using ICorClassInfo and ICorClassInfo.getChildType.
4035     // So findClass() and getClassAttribs() should not be called for byrefs
4036
4037     if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF)
4038     {
4039         assert(!"Did findClass() return a Byref?");
4040         return typeInfo();
4041     }
4042
4043     unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd);
4044
4045     if (attribs & CORINFO_FLG_VALUECLASS)
4046     {
4047         CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd);
4048
4049         // Meta-data validation should ensure that CORINFO_TYPE_BYREF does
4050         // not occur here, so we may want to change this to an assert instead.
4051         if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR)
4052         {
4053             return typeInfo();
4054         }
4055
4056 #ifdef _TARGET_64BIT_
4057         if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT)
4058         {
4059             return typeInfo::nativeInt();
4060         }
4061 #endif // _TARGET_64BIT_
4062
4063         if (t != CORINFO_TYPE_UNDEF)
4064         {
4065             return (typeInfo(JITtype2tiType(t)));
4066         }
4067         else if (bashStructToRef)
4068         {
4069             return (typeInfo(TI_REF, clsHnd));
4070         }
4071         else
4072         {
4073             return (typeInfo(TI_STRUCT, clsHnd));
4074         }
4075     }
4076     else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE)
4077     {
4078         // See comment in _typeInfo.h for why we do it this way.
4079         return (typeInfo(TI_REF, clsHnd, true));
4080     }
4081     else
4082     {
4083         return (typeInfo(TI_REF, clsHnd));
4084     }
4085 }
4086
4087 /******************************************************************************/
4088 BOOL Compiler::verIsSDArray(typeInfo ti)
4089 {
4090     if (ti.IsNullObjRef())
4091     { // nulls are SD arrays
4092         return TRUE;
4093     }
4094
4095     if (!ti.IsType(TI_REF))
4096     {
4097         return FALSE;
4098     }
4099
4100     if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef()))
4101     {
4102         return FALSE;
4103     }
4104     return TRUE;
4105 }
4106
4107 /******************************************************************************/
4108 /* Given 'arrayObjectType' which is an array type, fetch the element type. */
4109 /* Returns an error type if anything goes wrong */
4110
4111 typeInfo Compiler::verGetArrayElemType(typeInfo arrayObjectType)
4112 {
4113     assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case
4114
4115     if (!verIsSDArray(arrayObjectType))
4116     {
4117         return typeInfo();
4118     }
4119
4120     CORINFO_CLASS_HANDLE childClassHandle = nullptr;
4121     CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle);
4122
4123     return verMakeTypeInfo(ciType, childClassHandle);
4124 }
4125
4126 /*****************************************************************************
4127  */
4128 typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args)
4129 {
4130     CORINFO_CLASS_HANDLE classHandle;
4131     CorInfoType          ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle));
4132
4133     var_types type = JITtype2varType(ciType);
4134     if (varTypeIsGC(type))
4135     {
4136         // For efficiency, getArgType only returns something in classHandle for
4137         // value types.  For other types that have additional type info, you
4138         // have to call back explicitly
4139         classHandle = info.compCompHnd->getArgClass(sig, args);
4140         if (!classHandle)
4141         {
4142             NO_WAY("Could not figure out Class specified in argument or local signature");
4143         }
4144     }
4145
4146     return verMakeTypeInfo(ciType, classHandle);
4147 }
4148
4149 /*****************************************************************************/
4150
4151 // This does the expensive check to figure out whether the method
4152 // needs to be verified. It is called only when we fail verification,
4153 // just before throwing the verification exception.
4154
4155 BOOL Compiler::verNeedsVerification()
4156 {
4157     // If we have previously determined that verification is NOT needed
4158     // (for example in Compiler::compCompile), that means verification is really not needed.
4159     // Return the same decision we made before.
4160     // (Note: This literally means that tiVerificationNeeded can never go from 0 to 1.)
4161
4162     if (!tiVerificationNeeded)
4163     {
4164         return tiVerificationNeeded;
4165     }
4166
4167     assert(tiVerificationNeeded);
4168
4169     // Ok, we haven't concluded that verification is NOT needed. Consult the EE now to
4170     // obtain the answer.
4171     CorInfoCanSkipVerificationResult canSkipVerificationResult =
4172         info.compCompHnd->canSkipMethodVerification(info.compMethodHnd);
4173
4174     // canSkipVerification will return one of the following three values:
4175     //    CORINFO_VERIFICATION_CANNOT_SKIP = 0,       // Cannot skip verification during jit time.
4176     //    CORINFO_VERIFICATION_CAN_SKIP = 1,          // Can skip verification during jit time.
4177     //    CORINFO_VERIFICATION_RUNTIME_CHECK = 2,     // Skip verification during jit time,
4178     //     but need to insert a callout to the VM to ask during runtime
4179     //     whether to skip verification or not.
4180
4181     // Set tiRuntimeCalloutNeeded if canSkipVerification() instructs us to insert a callout for runtime check
4182     if (canSkipVerificationResult == CORINFO_VERIFICATION_RUNTIME_CHECK)
4183     {
4184         tiRuntimeCalloutNeeded = true;
4185     }
4186
4187     if (canSkipVerificationResult == CORINFO_VERIFICATION_DONT_JIT)
4188     {
4189         // Dev10 706080 - Testers don't like the assert, so just silence it
4190         // by not using the macros that invoke debugAssert.
4191         badCode();
4192     }
4193
4194     // When tiVerificationNeeded is true, JIT will do the verification during JIT time.
4195     // The following line means we will NOT do jit time verification if canSkipVerification
4196     // returns CORINFO_VERIFICATION_CAN_SKIP or CORINFO_VERIFICATION_RUNTIME_CHECK.
4197     tiVerificationNeeded = (canSkipVerificationResult == CORINFO_VERIFICATION_CANNOT_SKIP);
4198     return tiVerificationNeeded;
4199 }
4200
4201 BOOL Compiler::verIsByRefLike(const typeInfo& ti)
4202 {
4203     if (ti.IsByRef())
4204     {
4205         return TRUE;
4206     }
4207     if (!ti.IsType(TI_STRUCT))
4208     {
4209         return FALSE;
4210     }
4211     return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR;
4212 }
4213
4214 BOOL Compiler::verIsSafeToReturnByRef(const typeInfo& ti)
4215 {
4216     if (ti.IsPermanentHomeByRef())
4217     {
4218         return TRUE;
4219     }
4220     else
4221     {
4222         return FALSE;
4223     }
4224 }
4225
4226 BOOL Compiler::verIsBoxable(const typeInfo& ti)
4227 {
4228     return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables
4229             || ti.IsUnboxedGenericTypeVar() ||
4230             (ti.IsType(TI_STRUCT) &&
4231              // exclude byreflike structs
4232              !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR)));
4233 }
4234
4235 // Is it a boxed value type?
4236 bool Compiler::verIsBoxedValueType(typeInfo ti)
4237 {
4238     if (ti.GetType() == TI_REF)
4239     {
4240         CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef();
4241         return !!eeIsValueClass(clsHnd);
4242     }
4243     else
4244     {
4245         return false;
4246     }
4247 }
4248
4249 /*****************************************************************************
4250  *
4251  *  Check if a TailCall is legal.
4252  */
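// For example (illustrative C#, not taken from a test in this codebase):
//
//     static int Callee(ref int x) => x;
//     static int Caller() { int local = 42; return Callee(ref local); }
//
// A "tail." prefix on the call to Callee would be rejected by the checks below, since the byref
// argument points into Caller's frame, which is gone once the tail call transfers control.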
4253
4254 bool Compiler::verCheckTailCallConstraint(
4255     OPCODE                  opcode,
4256     CORINFO_RESOLVED_TOKEN* pResolvedToken,
4257     CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter?
4258     bool                    speculative                // If true, won't throw if verification fails. Instead it will
4259                                                        // return false to the caller.
4260                                                        // If false, it will throw.
4261     )
4262 {
4263     DWORD            mflags;
4264     CORINFO_SIG_INFO sig;
4265     unsigned int     popCount = 0; // we can't pop the stack since impImportCall needs it, so
4266                                    // this counter is used to keep track of how many items have been
4267                                    // virtually popped
4268
4269     CORINFO_METHOD_HANDLE methodHnd       = nullptr;
4270     CORINFO_CLASS_HANDLE  methodClassHnd  = nullptr;
4271     unsigned              methodClassFlgs = 0;
4272
4273     assert(impOpcodeIsCallOpcode(opcode));
4274
4275     if (compIsForInlining())
4276     {
4277         return false;
4278     }
4279
4280     // for calli we have no target method handle; use the call-site signature and infer the flags
4281     if (opcode == CEE_CALLI)
4282     {
4283         /* Get the call sig */
4284         eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4285
4286         // We don't know the target method, so we have to infer the flags, or
4287         // assume the worst-case.
4288         mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
4289     }
4290     else
4291     {
4292         methodHnd = pResolvedToken->hMethod;
4293
4294         mflags = info.compCompHnd->getMethodAttribs(methodHnd);
4295
4296         // When verifying generic code we pair the method handle with its
4297         // owning class to get the exact method signature.
4298         methodClassHnd = pResolvedToken->hClass;
4299         assert(methodClassHnd);
4300
4301         eeGetMethodSig(methodHnd, &sig, methodClassHnd);
4302
4303         // opcode specific check
4304         methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd);
4305     }
4306
4307     // We must have obtained the methodClassHnd if opcode is not CEE_CALLI
4308     assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI);
4309
4310     if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4311     {
4312         eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4313     }
4314
4315     // check compatibility of the arguments
4316     unsigned int argCount;
4317     argCount = sig.numArgs;
4318     CORINFO_ARG_LIST_HANDLE args;
4319     args = sig.args;
4320     while (argCount--)
4321     {
4322         typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack();
4323
4324         // check that the argument is not a byref for tailcalls
4325         VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative);
4326
4327         // For unsafe code, we might have parameters containing a pointer to a stack location.
4328         // Disallow the tailcall in this case.
4329         CORINFO_CLASS_HANDLE classHandle;
4330         CorInfoType          ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle));
4331         VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative);
4332
4333         args = info.compCompHnd->getArgNext(args);
4334     }
4335
4336     // update popCount
4337     popCount += sig.numArgs;
4338
4339     // check for 'this' which is on non-static methods, not called via NEWOBJ
4340     if (!(mflags & CORINFO_FLG_STATIC))
4341     {
4342         // Always update the popCount.
4343         // This is crucial for the stack calculation to be correct.
4344         typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4345         popCount++;
4346
4347         if (opcode == CEE_CALLI)
4348         {
4349             // For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object
4350             // on the stack.
4351             if (tiThis.IsValueClass())
4352             {
4353                 tiThis.MakeByRef();
4354             }
4355             VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative);
4356         }
4357         else
4358         {
4359             // Check type compatibility of the this argument
4360             typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd);
4361             if (tiDeclaredThis.IsValueClass())
4362             {
4363                 tiDeclaredThis.MakeByRef();
4364             }
4365
4366             VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative);
4367         }
4368     }
4369
4370     // Tail calls on constrained calls should be illegal too:
4371     // when instantiated at a value type, a constrained call may pass the address of a stack allocated value
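    // For example, a "constrained." callvirt to Object::ToString() in shared generic code could,
    // when the type parameter is instantiated at a value type, pass a managed pointer into the
    // caller's frame as 'this'.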
4372     VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative);
4373
4374     // Get the exact view of the signature for an array method
4375     if (sig.retType != CORINFO_TYPE_VOID)
4376     {
4377         if (methodClassFlgs & CORINFO_FLG_ARRAY)
4378         {
4379             assert(opcode != CEE_CALLI);
4380             eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4381         }
4382     }
4383
4384     typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass);
4385     typeInfo tiCallerRetType =
4386         verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
4387
4388     // A void return type gets morphed into the error type, so we have to treat it specially here
4389     if (sig.retType == CORINFO_TYPE_VOID)
4390     {
4391         VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch",
4392                                   speculative);
4393     }
4394     else
4395     {
4396         VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType),
4397                                                    NormaliseForStack(tiCallerRetType), true),
4398                                   "tailcall return mismatch", speculative);
4399     }
4400
4401     // for tailcall, stack must be empty
4402     VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative);
4403
4404     return true; // Yes, tailcall is legal
4405 }
4406
4407 /*****************************************************************************
4408  *
4409  *  Checks the IL verification rules for the call
4410  */
4411
4412 void Compiler::verVerifyCall(OPCODE                  opcode,
4413                              CORINFO_RESOLVED_TOKEN* pResolvedToken,
4414                              CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
4415                              bool                    tailCall,
4416                              bool                    readonlyCall,
4417                              const BYTE*             delegateCreateStart,
4418                              const BYTE*             codeAddr,
4419                              CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName))
4420 {
4421     DWORD             mflags;
4422     CORINFO_SIG_INFO* sig      = nullptr;
4423     unsigned int      popCount = 0; // we can't pop the stack since impImportCall needs it, so
4424                                     // this counter is used to keep track of how many items have been
4425                                     // virtually popped
4426
4427     // calli is never verifiable; reject it outright
4428     if (opcode == CEE_CALLI)
4429     {
4430         Verify(false, "Calli not verifiable");
4431         return;
4432     }
4433
4434     //<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item.
4435     mflags = callInfo->verMethodFlags;
4436
4437     sig = &callInfo->verSig;
4438
4439     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4440     {
4441         eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
4442     }
4443
4444     // opcode specific check
4445     unsigned methodClassFlgs = callInfo->classFlags;
4446     switch (opcode)
4447     {
4448         case CEE_CALLVIRT:
4449             // cannot do callvirt on valuetypes
4450             VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class");
4451             VerifyOrReturn(sig->hasThis(), "CallVirt on static method");
4452             break;
4453
4454         case CEE_NEWOBJ:
4455         {
4456             assert(!tailCall); // Importer should not allow this
4457             VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC),
4458                            "newobj must be on instance");
4459
4460             if (methodClassFlgs & CORINFO_FLG_DELEGATE)
4461             {
4462                 VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor");
4463                 typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
4464                 typeInfo tiDeclaredFtn =
4465                     verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack();
4466                 VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type");
4467
4468                 assert(popCount == 0);
4469                 typeInfo tiActualObj = impStackTop(1).seTypeInfo;
4470                 typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
4471
4472                 VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg");
4473                 VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch");
4474                 VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF),
4475                                "delegate object type mismatch");
4476
4477                 CORINFO_CLASS_HANDLE objTypeHandle =
4478                     tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef();
4479
4480                 // the method signature must be compatible with the delegate's invoke method
4481
4482                 // check that for virtual functions, the type of the object used to get the
4483                 // ftn ptr is the same as the type of the object passed to the delegate ctor.
4484                 // since this is a bit of work to determine in general, we pattern match stylized
4485                 // code sequences
4486
4487                 // the delegate creation code check, which used to be done later, is now done here
4488                 // so we can read delegateMethodRef directly
4489                 // from the preceding LDFTN or CEE_LDVIRTFTN instruction sequence;
4490                 // we then use it in our call to isCompatibleDelegate().
4491
4492                 mdMemberRef delegateMethodRef = mdMemberRefNil;
4493                 VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef),
4494                                "must create delegates with certain IL");
4495
4496                 CORINFO_RESOLVED_TOKEN delegateResolvedToken;
4497                 delegateResolvedToken.tokenContext = impTokenLookupContextHandle;
4498                 delegateResolvedToken.tokenScope   = info.compScopeHnd;
4499                 delegateResolvedToken.token        = delegateMethodRef;
4500                 delegateResolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
4501                 info.compCompHnd->resolveToken(&delegateResolvedToken);
4502
4503                 CORINFO_CALL_INFO delegateCallInfo;
4504                 eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */,
4505                               addVerifyFlag(CORINFO_CALLINFO_SECURITYCHECKS), &delegateCallInfo);
4506
4507                 BOOL isOpenDelegate = FALSE;
4508                 VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass,
4509                                                                       tiActualFtn.GetMethod(), pResolvedToken->hClass,
4510                                                                       &isOpenDelegate),
4511                                "function incompatible with delegate");
4512
4513                 // check the constraints on the target method
4514                 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass),
4515                                "delegate target has unsatisfied class constraints");
4516                 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass,
4517                                                                             tiActualFtn.GetMethod()),
4518                                "delegate target has unsatisfied method constraints");
4519
4520                 // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch)
4521                 // for additional verification rules for delegates
4522                 CORINFO_METHOD_HANDLE actualMethodHandle  = tiActualFtn.GetMethod();
4523                 DWORD                 actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle);
4524                 if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
4525                 {
4526
4527                     if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0)
4528 #ifdef DEBUG
4529                         && StrictCheckForNonVirtualCallToVirtualMethod()
4530 #endif
4531                             )
4532                     {
4533                         if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
4534                         {
4535                             VerifyOrReturn(tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly() ||
4536                                                verIsBoxedValueType(tiActualObj),
4537                                            "The 'this' parameter to the call must be either the calling method's "
4538                                            "'this' parameter or "
4539                                            "a boxed value type.");
4540                         }
4541                     }
4542                 }
4543
4544                 if (actualMethodAttribs & CORINFO_FLG_PROTECTED)
4545                 {
4546                     BOOL targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC;
4547
4548                     Verify(targetIsStatic || !isOpenDelegate,
4549                            "Unverifiable creation of an open instance delegate for a protected member.");
4550
4551                     CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic)
4552                                                                 ? info.compClassHnd
4553                                                                 : tiActualObj.GetClassHandleForObjRef();
4554
4555                     // In the case of protected methods, it is a requirement that the 'this'
4556                     // pointer be a subclass of the current context.  Perform this check.
4557                     Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
4558                            "Accessing protected method through wrong type.");
4559                 }
4560                 goto DONE_ARGS;
4561             }
4562         }
4563         // fall thru to default checks
4564         default:
4565             VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract");
4566     }
4567     VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)),
4568                    "can only newobj a delegate constructor");
4569
4570     // check compatibility of the arguments
4571     unsigned int argCount;
4572     argCount = sig->numArgs;
4573     CORINFO_ARG_LIST_HANDLE args;
4574     args = sig->args;
4575     while (argCount--)
4576     {
4577         typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo;
4578
4579         typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack();
4580         VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch");
4581
4582         args = info.compCompHnd->getArgNext(args);
4583     }
4584
4585 DONE_ARGS:
4586
4587     // update popCount
4588     popCount += sig->numArgs;
4589
4590     // check for 'this' which is on non-static methods, not called via NEWOBJ
4591     CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
4592     if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ))
4593     {
4594         typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4595         popCount++;
4596
4597         // If it is null, we assume we can access it (since it will AV shortly)
4598         // If it is anything but a reference class, there is no hierarchy, so
4599         // again, we don't need the precise instance class to compute 'protected' access
4600         if (tiThis.IsType(TI_REF))
4601         {
4602             instanceClassHnd = tiThis.GetClassHandleForObjRef();
4603         }
4604
4605         // Check type compatibility of the this argument
4606         typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass);
4607         if (tiDeclaredThis.IsValueClass())
4608         {
4609             tiDeclaredThis.MakeByRef();
4610         }
4611
4612         // If this is a call to the base class .ctor, set thisPtr Init for
4613         // this block.
4614         if (mflags & CORINFO_FLG_CONSTRUCTOR)
4615         {
4616             if (verTrackObjCtorInitState && tiThis.IsThisPtr() &&
4617                 verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass))
4618             {
4619                 assert(verCurrentState.thisInitialized !=
4620                        TIS_Bottom); // This should never be the case just from the logic of the verifier.
4621                 VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit,
4622                                "Call to base class constructor when 'this' is possibly initialized");
4623                 // Otherwise, 'this' is now initialized.
4624                 verCurrentState.thisInitialized = TIS_Init;
4625                 tiThis.SetInitialisedObjRef();
4626             }
4627             else
4628             {
4629                 // We allow direct calls to value type constructors
4630                 // NB: we have to check that the contents of tiThis is a value type, otherwise we could use a
4631                 // constrained callvirt to illegally re-enter a .ctor on a value of reference type.
4632                 VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(),
4633                                "Bad call to a constructor");
4634             }
4635         }
4636
4637         if (pConstrainedResolvedToken != nullptr)
4638         {
4639             VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call");
4640
4641             typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass);
4642
4643             // We just dereference this and test for equality
4644             tiThis.DereferenceByRef();
4645             VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint),
4646                            "this type mismatch with constrained type operand");
4647
4648             // Now pretend the this type is the boxed constrained type, for the sake of subsequent checks
4649             tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass);
4650         }
4651
4652         // To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too
4653         if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef())
4654         {
4655             tiDeclaredThis.SetIsReadonlyByRef();
4656         }
4657
4658         VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch");
4659
4660         if (tiThis.IsByRef())
4661         {
4662             // Find the actual type where the method exists (as opposed to what is declared
4663             // in the metadata). This is to prevent passing a byref as the "this" argument
4664             // while calling methods like System.ValueType.GetHashCode() which expect boxed objects.
4665
4666             CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod);
4667             VerifyOrReturn(eeIsValueClass(actualClassHnd),
4668                            "Call to base type of valuetype (which is never a valuetype)");
4669         }
4670
4671         // Rules for non-virtual call to a non-final virtual method:
4672
4673         // Define:
4674         // The "this" pointer is considered to be "possibly written" if
4675         //   1. Its address has been taken (LDARGA 0) anywhere in the method.
4676         //   (or)
4677         //   2. It has been stored to (STARG.0) anywhere in the method.
4678
4679         // A non-virtual call to a non-final virtual method is only allowed if
4680         //   1. The this pointer passed to the callee is an instance of a boxed value type.
4681         //   (or)
4682         //   2. The this pointer passed to the callee is the current method's this pointer.
4683         //      (and) The current method's this pointer is not "possibly written".
4684
4685         // Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to
4686         // virtual methods.  (Luckily this does not affect .ctors, since they are not virtual.)
4687         // This is stronger than is strictly needed, but implementing a laxer rule is significantly
4688         // harder and more error prone.
4689
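        // For example (illustrative C#, not from this codebase):
        //
        //     class Base            { public virtual void M() { } }
        //     class Derived : Base  { public override void M() { base.M(); } }
        //
        // 'base.M()' compiles to a non-virtual 'call' of a virtual method; it is only verifiable
        // because the 'this' passed along is Derived's own, never-reassigned 'this' pointer.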
4690         if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0)
4691 #ifdef DEBUG
4692             && StrictCheckForNonVirtualCallToVirtualMethod()
4693 #endif
4694                 )
4695         {
4696             if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
4697             {
4698                 VerifyOrReturn(
4699                     tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly() || verIsBoxedValueType(tiThis),
4700                     "The 'this' parameter to the call must be either the calling method's 'this' parameter or "
4701                     "a boxed value type.");
4702             }
4703         }
4704     }
4705
4706     // check any constraints on the callee's class and type parameters
4707     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass),
4708                    "method has unsatisfied class constraints");
4709     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod),
4710                    "method has unsatisfied method constraints");
4711
4712     if (mflags & CORINFO_FLG_PROTECTED)
4713     {
4714         VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
4715                        "Can't access protected method");
4716     }
4717
4718     // Get the exact view of the signature for an array method
4719     if (sig->retType != CORINFO_TYPE_VOID)
4720     {
4721         eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass);
4722     }
4723
4724     // "readonly." prefixed calls only allowed for the Address operation on arrays.
4725     // The methods supported by array types are under the control of the EE
4726     // so we can trust that only the Address operation returns a byref.
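    // For example (illustrative IL), "readonly." may legally prefix a call to a multi-dimensional
    // array's Address accessor, e.g. "readonly. call instance int32& int32[,]::Address(int32, int32)".
    // The returned byref is then treated as read-only, so stores through it are rejected
    // (see verVerifySTIND further below).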
4727     if (readonlyCall)
4728     {
4729         typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass);
4730         VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(),
4731                        "unexpected use of readonly prefix");
4732     }
4733
4734     // Verify the tailcall
4735     if (tailCall)
4736     {
4737         verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false);
4738     }
4739 }
4740
4741 /*****************************************************************************
4742  *  Checks that a delegate creation is done using the following pattern:
4743  *     dup
4744  *     ldvirtftn targetMemberRef
4745  *  OR
4746  *     ldftn targetMemberRef
4747  *
4748  * 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if
4749  *  not in this basic block)
4750  *
4751  *  targetMemberRef is read from the code sequence.
4752  *  targetMemberRef is validated iff verificationNeeded.
4753  */
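// For example, a typical closed-instance delegate creation sequence looks like:
//
//     ldloc.0                                                            // the target object
//     dup
//     ldvirtftn  instance void C::M()
//     newobj     instance void SomeDelegate::.ctor(object, native int)
//
// Here 'delegateCreateStart' points at the dup, and targetMemberRef is the token of C::M.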
4754
4755 BOOL Compiler::verCheckDelegateCreation(const BYTE*  delegateCreateStart,
4756                                         const BYTE*  codeAddr,
4757                                         mdMemberRef& targetMemberRef)
4758 {
4759     if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
4760     {
4761         targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]);
4762         return TRUE;
4763     }
4764     else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr))
4765     {
4766         targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]);
4767         return TRUE;
4768     }
4769
4770     return FALSE;
4771 }
4772
4773 typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType)
4774 {
4775     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
4776     typeInfo ptrVal     = verVerifyLDIND(tiTo, instrType);
4777     typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack();
4778     if (!tiCompatibleWith(value, normPtrVal, true))
4779     {
4780         Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch");
4781         compUnsafeCastUsed = true;
4782     }
4783     return ptrVal;
4784 }
4785
4786 typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType)
4787 {
4788     assert(!instrType.IsStruct());
4789
4790     typeInfo ptrVal;
4791     if (ptr.IsByRef())
4792     {
4793         ptrVal = DereferenceByRef(ptr);
4794         if (instrType.IsObjRef() && !ptrVal.IsObjRef())
4795         {
4796             Verify(false, "bad pointer");
4797             compUnsafeCastUsed = true;
4798         }
4799         else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal))
4800         {
4801             Verify(false, "pointer not consistent with instr");
4802             compUnsafeCastUsed = true;
4803         }
4804     }
4805     else
4806     {
4807         Verify(false, "pointer not byref");
4808         compUnsafeCastUsed = true;
4809     }
4810
4811     return ptrVal;
4812 }
4813
4814 // Verify that the field is used properly.  'tiThis' is NULL for statics,
4815 // 'fieldFlags' is the field's attributes, and mutator is TRUE if it is a
4816 // ld*flda or a st*fld.
4817 // 'enclosingClass' is given if we are accessing a field in some specific type.
4818
4819 void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN*   pResolvedToken,
4820                               const CORINFO_FIELD_INFO& fieldInfo,
4821                               const typeInfo*           tiThis,
4822                               BOOL                      mutator,
4823                               BOOL                      allowPlainStructAsThis)
4824 {
4825     CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass;
4826     unsigned             fieldFlags     = fieldInfo.fieldFlags;
4827     CORINFO_CLASS_HANDLE instanceClass =
4828         info.compClassHnd; // for statics, we imagine the instance is the current class.
4829
4830     bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0);
4831     if (mutator)
4832     {
4833         Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA based static");
4834         if ((fieldFlags & CORINFO_FLG_FIELD_FINAL))
4835         {
4836             Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd &&
4837                        info.compIsStatic == isStaticField,
4838                    "bad use of initonly field (set or address taken)");
4839         }
4840     }
4841
4842     if (tiThis == nullptr)
4843     {
4844         Verify(isStaticField, "used static opcode with non-static field");
4845     }
4846     else
4847     {
4848         typeInfo tThis = *tiThis;
4849
4850         if (allowPlainStructAsThis && tThis.IsValueClass())
4851         {
4852             tThis.MakeByRef();
4853         }
4854
4855         // If it is null, we assume we can access it (since it will AV shortly)
4856         // If it is anything but a reference class, there is no hierarchy, so
4857         // again, we don't need the precise instance class to compute 'protected' access
4858         if (tiThis->IsType(TI_REF))
4859         {
4860             instanceClass = tiThis->GetClassHandleForObjRef();
4861         }
4862
4863         // Note that even if the field is static, we require that the this pointer
4864         // satisfy the same constraints as a non-static field  This happens to
4865         // satisfy the same constraints as a non-static field.  This happens to
4866         typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass);
4867         if (tiDeclaredThis.IsValueClass())
4868         {
4869             tiDeclaredThis.MakeByRef();
4870
4871             // we allow read-only tThis, on any field access (even stores!), because if the
4872             // class implementor wants to prohibit stores he should make the field private.
4873             // we do this by setting the read-only bit on the type we compare tThis to.
4874             tiDeclaredThis.SetIsReadonlyByRef();
4875         }
4876         else if (verTrackObjCtorInitState && tThis.IsThisPtr())
4877         {
4878             // Any field access is legal on "uninitialized" this pointers.
4879             // The easiest way to implement this is to simply set the
4880             // initialized bit for the duration of the type check on the
4881             // field access only.  It does not change the state of the "this"
4882             // for the function as a whole. Note that the "tThis" is a copy
4883             // of the original "this" type (*tiThis) passed in.
4884             tThis.SetInitialisedObjRef();
4885         }
4886
4887         Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch");
4888     }
4889
4890     // Presently the JIT does not check that we don't store or take the address of init-only fields
4891     // since we cannot guarantee their immutability and it is not a security issue.
4892
4893     // check any constraints on the field's class --- accessing the field might cause a class constructor to run.
4894     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass),
4895                    "field has unsatisfied class constraints");
4896     if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED)
4897     {
4898         Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass),
4899                "Accessing protected method through wrong type.");
4900     }
4901 }
4902
4903 void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode)
4904 {
4905     if (tiOp1.IsNumberType())
4906     {
4907 #ifdef _TARGET_64BIT_
4908         Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch");
4909 #else  // !_TARGET_64BIT_
4910         // [10/17/2013] Consider changing this: to put on my verification lawyer hat,
4911         // this is non-conforming to the ECMA Spec: types don't have to be equivalent,
4912         // but compatible, since we can coalesce native int with int32 (see section III.1.5).
4913         Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch");
4914 #endif // !_TARGET_64BIT_
4915     }
4916     else if (tiOp1.IsObjRef())
4917     {
4918         switch (opcode)
4919         {
4920             case CEE_BEQ_S:
4921             case CEE_BEQ:
4922             case CEE_BNE_UN_S:
4923             case CEE_BNE_UN:
4924             case CEE_CEQ:
4925             case CEE_CGT_UN:
4926                 break;
4927             default:
4928                 Verify(FALSE, "Cond not allowed on object types");
4929         }
4930         Verify(tiOp2.IsObjRef(), "Cond type mismatch");
4931     }
4932     else if (tiOp1.IsByRef())
4933     {
4934         Verify(tiOp2.IsByRef(), "Cond type mismatch");
4935     }
4936     else
4937     {
4938         Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch");
4939     }
4940 }
4941
4942 void Compiler::verVerifyThisPtrInitialised()
4943 {
4944     if (verTrackObjCtorInitState)
4945     {
4946         Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized");
4947     }
4948 }
4949
4950 BOOL Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target)
4951 {
4952     // Either target == context, in this case calling an alternate .ctor
4953     // Or target is the immediate parent of context
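    // (In C# terms: "this(...)" chains to an alternate .ctor of the same class, while "base(...)"
    // chains to a .ctor of the immediate parent.)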
4954
4955     return ((target == context) || (target == info.compCompHnd->getParentType(context)));
4956 }
4957
4958 GenTreePtr Compiler::impImportLdvirtftn(GenTreePtr              thisPtr,
4959                                         CORINFO_RESOLVED_TOKEN* pResolvedToken,
4960                                         CORINFO_CALL_INFO*      pCallInfo)
4961 {
4962     if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE))
4963     {
4964         NO_WAY("Virtual call to a function added via EnC is not supported");
4965     }
4966
4967 #ifdef FEATURE_READYTORUN_COMPILER
4968     if (opts.IsReadyToRun())
4969     {
4970         if (!pCallInfo->exactContextNeedsRuntimeLookup)
4971         {
4972             GenTreeCall* call = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, GTF_EXCEPT,
4973                                                     gtNewArgList(thisPtr));
4974
4975             call->setEntryPoint(pCallInfo->codePointerLookup.constLookup);
4976
4977             return call;
4978         }
4979
4980         // We need a runtime lookup. CoreRT has a ReadyToRun helper for that too.
4981         if (IsTargetAbi(CORINFO_CORERT_ABI))
4982         {
4983             GenTreePtr ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind);
4984
4985             return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
4986                                              gtNewArgList(ctxTree), &pCallInfo->codePointerLookup.lookupKind);
4987         }
4988     }
4989 #endif
4990
4991     // Get the exact descriptor for the static callsite
4992     GenTreePtr exactTypeDesc = impParentClassTokenToHandle(pResolvedToken);
4993     if (exactTypeDesc == nullptr)
4994     { // compDonotInline()
4995         return nullptr;
4996     }
4997
4998     GenTreePtr exactMethodDesc = impTokenToHandle(pResolvedToken);
4999     if (exactMethodDesc == nullptr)
5000     { // compDonotInline()
5001         return nullptr;
5002     }
5003
5004     GenTreeArgList* helpArgs = gtNewArgList(exactMethodDesc);
5005
5006     helpArgs = gtNewListNode(exactTypeDesc, helpArgs);
5007
5008     helpArgs = gtNewListNode(thisPtr, helpArgs);
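    // The argument list is built back to front, so the effective helper signature is
    // CORINFO_HELP_VIRTUAL_FUNC_PTR(thisPtr, exactTypeDesc, exactMethodDesc).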
5009
5010     // Call helper function.  This gets the target address of the final destination callsite.
5011
5012     return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, GTF_EXCEPT, helpArgs);
5013 }
5014
5015 /*****************************************************************************
5016  *
5017  *  Build and import a box node
5018  */
5019
5020 void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
5021 {
5022     // Get the tree for the type handle for the boxed object.  In the case
5023     // of shared generic code or ngen'd code this might be an embedded
5024     // computation.
5025     // Note we can only do this if the class constructor has been called;
5026     // we can always do it on primitive types.
5027
5028     GenTreePtr op1 = nullptr;
5029     GenTreePtr op2 = nullptr;
5030     var_types  lclTyp;
5031
5032     impSpillSpecialSideEff();
5033
5034     // Now get the expression to box from the stack.
5035     CORINFO_CLASS_HANDLE operCls;
5036     GenTreePtr           exprToBox = impPopStack(operCls).val;
5037
5038     CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
5039     if (boxHelper == CORINFO_HELP_BOX)
5040     {
5041         // we are doing 'normal' boxing.  This means that we can inline the box operation
5042         // Box(expr) gets morphed into
5043         // temp = new(clsHnd)
5044         // cpobj(temp+4, expr, clsHnd)
5045         // push temp
5046         // The code paths differ slightly below for structs and primitives because
5047         // "cpobj" differs in these cases.  In one case you get
5048         //    impAssignStructPtr(temp+4, expr, clsHnd)
5049         // and the other you get
5050         //    *(temp+4) = expr
5051
5052         if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM)
5053         {
5054             impBoxTemp = lvaGrabTemp(true DEBUGARG("Box Helper"));
5055         }
5056
5057         // needs to stay in use until this box expression is appended to
5058         // some other node.  We approximate this by keeping it alive until
5059         // the opcode stack becomes empty
5060         impBoxTempInUse = true;
5061
5062 #ifdef FEATURE_READYTORUN_COMPILER
5063         bool usingReadyToRunHelper = false;
5064
5065         if (opts.IsReadyToRun())
5066         {
5067             op1                   = impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
5068             usingReadyToRunHelper = (op1 != nullptr);
5069         }
5070
5071         if (!usingReadyToRunHelper)
5072 #endif
5073         {
5074             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
5075             // and the newfast call with a single call to a dynamic R2R cell that will:
5076             //      1) Load the context
5077             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
5078             //      3) Allocate and return the new object for boxing
5079             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
5080
5081             // Ensure that the value class is restored
5082             op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5083             if (op2 == nullptr)
5084             { // compDonotInline()
5085                 return;
5086             }
5087
5088             op1 = gtNewHelperCallNode(info.compCompHnd->getNewHelper(pResolvedToken, info.compMethodHnd), TYP_REF, 0,
5089                                       gtNewArgList(op2));
5090         }
5091
5092         /* Remember that this basic block contains 'new' of an object */
5093         compCurBB->bbFlags |= BBF_HAS_NEWOBJ;
5094
5095         GenTreePtr asg = gtNewTempAssign(impBoxTemp, op1);
5096
5097         GenTreePtr asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
5098
5099         op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
5100         op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
5101         op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2);
5102
5103         if (varTypeIsStruct(exprToBox))
5104         {
5105             assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls));
5106             op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL);
5107         }
5108         else
5109         {
5110             lclTyp = exprToBox->TypeGet();
5111             if (lclTyp == TYP_BYREF)
5112             {
5113                 lclTyp = TYP_I_IMPL;
5114             }
5115             CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass);
5116             if (impIsPrimitive(jitType))
5117             {
5118                 lclTyp = JITtype2varType(jitType);
5119             }
5120             assert(genActualType(exprToBox->TypeGet()) == genActualType(lclTyp) ||
5121                    varTypeIsFloating(lclTyp) == varTypeIsFloating(exprToBox->TypeGet()));
5122             var_types srcTyp = exprToBox->TypeGet();
5123             var_types dstTyp = lclTyp;
5124
5125             if (srcTyp != dstTyp)
5126             {
5127                 assert((varTypeIsFloating(srcTyp) && varTypeIsFloating(dstTyp)) ||
5128                        (varTypeIsIntegral(srcTyp) && varTypeIsIntegral(dstTyp)));
5129                 exprToBox = gtNewCastNode(dstTyp, exprToBox, dstTyp);
5130             }
5131             op1 = gtNewAssignNode(gtNewOperNode(GT_IND, lclTyp, op1), exprToBox);
5132         }
5133
5134         op2 = gtNewLclvNode(impBoxTemp, TYP_REF);
5135         op1 = gtNewOperNode(GT_COMMA, TYP_REF, op1, op2);
5136
5137         // Record that this is a "box" node.
5138         op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt);
5139
5140         // If it is a value class, mark the "box" node.  We can use this information
5141         // to optimise several cases:
5142         //    "box(x) == null" --> false
5143         //    "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod"
5144         //    "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod"
5145
5146         op1->gtFlags |= GTF_BOX_VALUE;
5147         assert(op1->IsBoxedValue());
5148         assert(asg->gtOper == GT_ASG);
5149     }
5150     else
5151     {
5152         // Don't optimize, just call the helper and be done with it
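        // (For instance, boxing a Nullable<T> goes through a dedicated helper such as
        // CORINFO_HELP_BOX_NULLABLE, which can return null and therefore cannot be expanded inline.)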
5153
5154         // Ensure that the value class is restored
5155         op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5156         if (op2 == nullptr)
5157         { // compDonotInline()
5158             return;
5159         }
5160
5161         GenTreeArgList* args = gtNewArgList(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true));
5162         op1                  = gtNewHelperCallNode(boxHelper, TYP_REF, GTF_EXCEPT, args);
5163     }
5164
5165     /* Push the result back on the stack, */
5166     /* even if clsHnd is a value class we want the TI_REF */
5167     typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass));
5168     impPushOnStack(op1, tiRetVal);
5169 }
5170
5171 //------------------------------------------------------------------------
5172 // impImportNewObjArray: Build and import `new` of a multi-dimensional array
5173 //
5174 // Arguments:
5175 //    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5176 //                     by a call to CEEInfo::resolveToken().
5177 //    pCallInfo - The CORINFO_CALL_INFO that has been initialized
5178 //                by a call to CEEInfo::getCallInfo().
5179 //
5180 // Assumptions:
5181 //    The multi-dimensional array constructor arguments (array dimensions) are
5182 //    pushed on the IL stack on entry to this method.
5183 //
5184 // Notes:
5185 //    Multi-dimensional array constructors are imported as calls to a JIT
5186 //    helper, not as regular calls.
5187
5188 void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
5189 {
5190     GenTreePtr classHandle = impParentClassTokenToHandle(pResolvedToken);
5191     if (classHandle == nullptr)
5192     { // compDonotInline()
5193         return;
5194     }
5195
5196     assert(pCallInfo->sig.numArgs);
5197
5198     GenTreePtr      node;
5199     GenTreeArgList* args;
5200
5201     //
5202     // There are two different JIT helpers that can be used to allocate
5203     // multi-dimensional arrays:
5204     //
5205     // - CORINFO_HELP_NEW_MDARR - takes the array dimensions as varargs.
5206     //      This variant is deprecated. It should be eventually removed.
5207     //
5208     // - CORINFO_HELP_NEW_MDARR_NONVARARG - takes the array dimensions as
5209     //      pointer to block of int32s. This variant is more portable.
5210     //
5211     // The non-varargs helper is enabled for CoreRT only for now. Enabling this
5212     // unconditionally would require ReadyToRun version bump.
5213     //
5214     CLANG_FORMAT_COMMENT_ANCHOR;
5215
5216 #if COR_JIT_EE_VERSION > 460
5217     if (!opts.IsReadyToRun() || IsTargetAbi(CORINFO_CORERT_ABI))
5218     {
5219         LclVarDsc* newObjArrayArgsVar;
5220
5221         // Reuse the temp used to pass the array dimensions to avoid bloating
5222         // the stack frame in case there are multiple calls to multi-dim array
5223         // constructors within a single method.
5224         if (lvaNewObjArrayArgs == BAD_VAR_NUM)
5225         {
5226             lvaNewObjArrayArgs                       = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs"));
5227             lvaTable[lvaNewObjArrayArgs].lvType      = TYP_BLK;
5228             lvaTable[lvaNewObjArrayArgs].lvExactSize = 0;
5229         }
5230
5231         // Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers
5232         // for our call to CORINFO_HELP_NEW_MDARR_NONVARARG.
5233         lvaTable[lvaNewObjArrayArgs].lvExactSize =
5234             max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32));
5235
5236         // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects
5237         // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments
5238         // to one allocation at a time.
5239         impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray"));
5240
5241         //
5242         // The arguments of the CORINFO_HELP_NEW_MDARR_NONVARARG helper are:
5243         //  - Array class handle
5244         //  - Number of dimension arguments
5245         //  - Pointer to block of int32 dimensions - address  of lvaNewObjArrayArgs temp.
5246         //
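        // For example, for a two-dimensional 'new int[d1, d2]' the importer produces (roughly):
        //     CORINFO_HELP_NEW_MDARR_NONVARARG(arrayClsHnd, 2, &lvaNewObjArrayArgs)
        // after first storing d1 and d2 into the lvaNewObjArrayArgs block via the loop below.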
5247
5248         node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5249         node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node);
5250
5251         // Pop dimension arguments from the stack one at a time and store them
5252         // into the lvaNewObjArrayArgs temp.
5253         for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--)
5254         {
5255             GenTreePtr arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT);
5256
5257             GenTreePtr dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5258             dest            = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest);
5259             dest            = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest,
5260                                  new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i));
5261             dest = gtNewOperNode(GT_IND, TYP_INT, dest);
5262
5263             node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node);
5264         }
5265
5266         args = gtNewArgList(node);
5267
5268         // pass number of arguments to the helper
5269         args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5270
5271         args = gtNewListNode(classHandle, args);
5272
5273         node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR_NONVARARG, TYP_REF, 0, args);
5274     }
5275     else
5276 #endif
5277     {
5278         //
5279         // The varargs helper needs the type and method handles as last
5280         // and  last-1 param (this is a cdecl call, so args will be
5281         // pushed in reverse order on the CPU stack)
5282         //
5283
5284         args = gtNewArgList(classHandle);
5285
5286         // pass number of arguments to the helper
5287         args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5288
5289         unsigned argFlags = 0;
5290         args              = impPopList(pCallInfo->sig.numArgs, &argFlags, &pCallInfo->sig, args);
5291
5292         node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, 0, args);
5293
5294         // varargs, so we pop the arguments
5295         node->gtFlags |= GTF_CALL_POP_ARGS;
5296
5297 #ifdef DEBUG
5298         // At the present time we don't track Caller pop arguments
5299         // that have GC references in them
5300         for (GenTreeArgList* temp = args; temp; temp = temp->Rest())
5301         {
5302             assert(temp->Current()->gtType != TYP_REF);
5303         }
5304 #endif
5305     }
5306
5307     node->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5308     node->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass;
5309
5310     // Remember that this basic block contains 'new' of a md array
5311     compCurBB->bbFlags |= BBF_HAS_NEWARRAY;
5312
5313     impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass));
5314 }
5315
5316 GenTreePtr Compiler::impTransformThis(GenTreePtr              thisPtr,
5317                                       CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
5318                                       CORINFO_THIS_TRANSFORM  transform)
5319 {
5320     switch (transform)
5321     {
5322         case CORINFO_DEREF_THIS:
5323         {
5324             GenTreePtr obj = thisPtr;
5325
5326             // This does a LDIND on the obj, which should be a byref pointing to a ref
5327             impBashVarAddrsToI(obj);
5328             assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF);
5329             CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5330
5331             obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj);
5332             // ldind could point anywhere, for example a boxed class static int
5333             obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
5334
5335             return obj;
5336         }
5337
5338         case CORINFO_BOX_THIS:
5339         {
5340             // Constraint calls where there might be no
5341             // unboxed entry point require us to implement the call via helper.
5342             // These only occur when a possible target of the call
5343             // may have inherited an implementation of an interface
5344             // method from System.Object or System.ValueType.  The EE does not provide us with
5345             // "unboxed" versions of these methods.
5346
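            // For example (illustrative C#, not from this codebase):
            //
            //     struct S { }                                      // S does not override ToString
            //     static string F<T>(T t) where T : struct => t.ToString();
            //
            // When T == S, the constrained call can only dispatch to Object::ToString, so the value
            // must be boxed and the boxed copy used as 'this'.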
5347             GenTreePtr obj = thisPtr;
5348
5349             assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL);
5350             obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj);
5351             obj->gtFlags |= GTF_EXCEPT;
5352
5353             CorInfoType jitTyp  = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5354             var_types   objType = JITtype2varType(jitTyp);
5355             if (impIsPrimitive(jitTyp))
5356             {
5357                 if (obj->OperIsBlk())
5358                 {
5359                     obj->ChangeOperUnchecked(GT_IND);
5360
5361                     // Obj could point anywhere, for example a boxed class static int
5362                     obj->gtFlags |= GTF_IND_TGTANYWHERE;
5363                     obj->gtOp.gtOp2 = nullptr; // must be zero for tree walkers
5364                 }
5365
5366                 obj->gtType = JITtype2varType(jitTyp);
5367                 assert(varTypeIsArithmetic(obj->gtType));
5368             }
5369
5370             // This pushes on the dereferenced byref
5371             // This is then used immediately to box.
5372             impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack());
5373
5374             // This pops off the byref-to-a-value-type remaining on the stack and
5375             // replaces it with a boxed object.
5376             // This is then used as the object to the virtual call immediately below.
5377             impImportAndPushBox(pConstrainedResolvedToken);
5378             if (compDonotInline())
5379             {
5380                 return nullptr;
5381             }
5382
5383             obj = impPopStack().val;
5384             return obj;
5385         }
5386         case CORINFO_NO_THIS_TRANSFORM:
5387         default:
5388             return thisPtr;
5389     }
5390 }
5391
5392 //------------------------------------------------------------------------
5393 // impCanPInvokeInline: check whether PInvoke inlining should be enabled in the current method.
5394 //
5395 // Return Value:
5396 //    true if PInvoke inlining should be enabled in current method, false otherwise
5397 //
5398 // Notes:
5399 //    Checks a number of ambient conditions where we could pinvoke but choose not to
5400
5401 bool Compiler::impCanPInvokeInline()
5402 {
5403     return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (compCodeOpt() != SMALL_CODE) &&
5404            (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
5405         ;
5406 }
5407
5408 //------------------------------------------------------------------------
5409 // impCanPInvokeInlineCallSite: basic legality checks using information
5410 // from a call to see if the call qualifies as an inline pinvoke.
5411 //
5412 // Arguments:
5413 //    block      - block containing the call, or for inlinees, block
5414 //                 containing the call being inlined
5415 //
5416 // Return Value:
5417 //    true if this call can legally qualify as an inline pinvoke, false otherwise
5418 //
5419 // Notes:
5420 //    For runtimes that support exception handling interop there are
5421 //    restrictions on using inline pinvoke in handler regions.
5422 //
5423 //    * We have to disable pinvoke inlining inside of filters because
5424 //    in case the main execution (i.e. in the try block) is inside
5425 //    unmanaged code, we cannot reuse the inlined stub (we still need
5426 //    the original state until we are in the catch handler)
5427 //
5428 //    * We disable pinvoke inlining inside handlers since the GSCookie
5429 //    is in the inlined Frame (see
5430 //    CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but
5431 //    this would not protect framelets/return-address of handlers.
5432 //
5433 //    These restrictions are currently also in place for CoreCLR but
5434 //    can be relaxed when coreclr/#8459 is addressed.
5435
5436 bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block)
5437 {
5438     if (block->hasHndIndex())
5439     {
5440         return false;
5441     }
5442
5443     // The remaining limitations do not apply to CoreRT
5444     if (IsTargetAbi(CORINFO_CORERT_ABI))
5445     {
5446         return true;
5447     }
5448
5449 #ifdef _TARGET_AMD64_
5450     // On x64, we disable pinvoke inlining inside of try regions.
5451     // Here is the comment from JIT64 explaining why:
5452     //
5453     //   [VSWhidbey: 611015] - because the jitted code links in the
5454     //   Frame (instead of the stub) we rely on the Frame not being
5455     //   'active' until inside the stub.  This normally happens by the
5456     //   stub setting the return address pointer in the Frame object
5457     //   inside the stub.  On a normal return, the return address
5458     //   pointer is zeroed out so the Frame can be safely re-used, but
5459     //   if an exception occurs, nobody zeros out the return address
5460     //   pointer.  Thus if we re-used the Frame object, it would go
5461     //   'active' as soon as we link it into the Frame chain.
5462     //
5463     //   Technically we only need to disable PInvoke inlining if we're
5464     //   in a handler or if we're in a try body with a catch or
5465     //   filter/except where other non-handler code in this method
5466     //   might run and try to re-use the dirty Frame object.
5467     //
5468     //   A desktop test case where this seems to matter is
5469     //   jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe
5470     if (block->hasTryIndex())
5471     {
5472         return false;
5473     }
5474 #endif // _TARGET_AMD64_
5475
5476     return true;
5477 }
5478
5479 //------------------------------------------------------------------------
5480 // impCheckForPInvokeCall: examine a call to see if it is a pinvoke and, if so,
5481 // whether it can be expressed as an inline pinvoke.
5482 //
5483 // Arguments:
5484 //    call       - tree for the call
5485 //    methHnd    - handle for the method being called (may be null)
5486 //    sig        - signature of the method being called
5487 //    mflags     - method flags for the method being called
5488 //    block      - block containing the call, or for inlinees, block
5489 //                 containing the call being inlined
5490 //
5491 // Notes:
5492 //   Sets GTF_CALL_M_PINVOKE on the call for pinvokes.
5493 //
5494 //   Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the
5495 //   call passes a combination of legality and profitability checks.
5496 //
5497 //   If GTF_CALL_UNMANAGED is set, increments info.compCallUnmanaged
5498
5499 void Compiler::impCheckForPInvokeCall(
5500     GenTreePtr call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block)
5501 {
5502     CorInfoUnmanagedCallConv unmanagedCallConv;
5503
5504     // If the VM flagged it as a PInvoke, flag the call node accordingly
5505     if ((mflags & CORINFO_FLG_PINVOKE) != 0)
5506     {
5507         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_PINVOKE;
5508     }
5509
5510     if (methHnd)
5511     {
5512         if ((mflags & CORINFO_FLG_PINVOKE) == 0 || (mflags & CORINFO_FLG_NOSECURITYWRAP) == 0)
5513         {
5514             return;
5515         }
5516
5517         unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd);
5518     }
5519     else
5520     {
5521         CorInfoCallConv callConv = CorInfoCallConv(sig->callConv & CORINFO_CALLCONV_MASK);
5522         if (callConv == CORINFO_CALLCONV_NATIVEVARARG)
5523         {
5524             // Used by the IL Stubs.
5525             callConv = CORINFO_CALLCONV_C;
5526         }
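        // The managed and unmanaged calling-convention enum values line up for C, stdcall and
        // thiscall, so the managed value can be reinterpreted directly; the asserts below guard
        // that assumption.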
5527         static_assert_no_msg((unsigned)CORINFO_CALLCONV_C == (unsigned)CORINFO_UNMANAGED_CALLCONV_C);
5528         static_assert_no_msg((unsigned)CORINFO_CALLCONV_STDCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_STDCALL);
5529         static_assert_no_msg((unsigned)CORINFO_CALLCONV_THISCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_THISCALL);
5530         unmanagedCallConv = CorInfoUnmanagedCallConv(callConv);
5531
5532         assert(!call->gtCall.gtCallCookie);
5533     }
5534
5535     if (unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_C && unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_STDCALL &&
5536         unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_THISCALL)
5537     {
5538         return;
5539     }
5540     optNativeCallCount++;
5541
5542     if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && methHnd == nullptr)
5543     {
5544         // PInvoke CALLI in IL stubs must be inlined
5545     }
5546     else
5547     {
5548         // Check legality
5549         if (!impCanPInvokeInlineCallSite(block))
5550         {
5551             return;
5552         }
5553
5554         // PInvoke CALL in IL stubs must be inlined on CoreRT. Skip the ambient conditions checks and
5555         // profitability checks
5556         if (!(opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && IsTargetAbi(CORINFO_CORERT_ABI)))
5557         {
5558             if (impCanPInvokeInline())
5559             {
5560                 return;
5561             }
5562
5563             // Size-speed tradeoff: don't use inline pinvoke at rarely
5564             // executed call sites.  The non-inline version is more
5565             // compact.
5566             if (block->isRunRarely())
5567             {
5568                 return;
5569             }
5570         }
5571
5572         // The expensive check should be last
5573         if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig))
5574         {
5575             return;
5576         }
5577     }
5578
5579     JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s", info.compFullName));
5580
5581     call->gtFlags |= GTF_CALL_UNMANAGED;
5582     info.compCallUnmanaged++;
5583
5584     // AMD64 convention is same for native and managed
5585     if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_C)
5586     {
5587         call->gtFlags |= GTF_CALL_POP_ARGS;
5588     }
5589
5590     if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_THISCALL)
5591     {
5592         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL;
5593     }
5594 }
5595
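//------------------------------------------------------------------------
// impImportIndirectCall: create the call node for a CALLI (indirect) call
//
// Arguments:
//    sig      - call site signature
//    ilOffset - IL offset of the call
//
// Return Value:
//    The new indirect call node. The function pointer is popped from the
//    stack, spilling it first if it is not already a simple local.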
5596 GenTreePtr Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, IL_OFFSETX ilOffset)
5597 {
5598     var_types callRetTyp = JITtype2varType(sig->retType);
5599
5600     /* The function pointer is on top of the stack - It may be a
5601      * complex expression. As it is evaluated after the args,
5602      * it may cause registered args to be spilled. Simply spill it.
5603      */
5604
5605     // Ignore this trivial case.
5606     if (impStackTop().val->gtOper != GT_LCL_VAR)
5607     {
5608         impSpillStackEntry(verCurrentState.esStackDepth - 1,
5609                            BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall"));
5610     }
5611
5612     /* Get the function pointer */
5613
5614     GenTreePtr fptr = impPopStack().val;
5615     assert(genActualType(fptr->gtType) == TYP_I_IMPL);
5616
5617 #ifdef DEBUG
5618     // This temporary must never be converted to a double in stress mode,
5619     // because that can introduce a call to the cast helper after the
5620     // arguments have already been evaluated.
5621
5622     if (fptr->OperGet() == GT_LCL_VAR)
5623     {
5624         lvaTable[fptr->gtLclVarCommon.gtLclNum].lvKeepType = 1;
5625     }
5626 #endif
5627
5628     /* Create the call node */
5629
5630     GenTreePtr call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
5631
5632     call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
5633
5634     return call;
5635 }
5636
5637 /*****************************************************************************/
5638
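//------------------------------------------------------------------------
// impPopArgsForUnmanagedCall: pop the arguments for an unmanaged (pinvoke) call
//
// Arguments:
//    call - the unmanaged call node
//    sig  - call site signature
//
// Notes:
//    Arguments are popped in reverse (right-to-left) order, spilling stack
//    entries as needed so that side effects are not reordered. For x86
//    "thiscall", the first argument is left in place since it is passed in
//    a register.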
5639 void Compiler::impPopArgsForUnmanagedCall(GenTreePtr call, CORINFO_SIG_INFO* sig)
5640 {
5641     assert(call->gtFlags & GTF_CALL_UNMANAGED);
5642
5643     /* Since we push the arguments in reverse order (i.e. right -> left),
5644      * spill any side effects from the stack.
5645      *
5646      * OBS: If there is only one side effect, we do not need to spill it;
5647      *      thus we have to spill all side effects except the last one.
5648      */
5649
5650     unsigned lastLevelWithSideEffects = UINT_MAX;
5651
5652     unsigned argsToReverse = sig->numArgs;
5653
5654     // For "thiscall", the first argument goes in a register. Since its
5655     // order does not need to be changed, we do not need to spill it
5656
5657     if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
5658     {
5659         assert(argsToReverse);
5660         argsToReverse--;
5661     }
5662
5663 #ifndef _TARGET_X86_
5664     // Don't reverse args on ARM or x64 - first four args always placed in regs in order
5665     argsToReverse = 0;
5666 #endif
5667
5668     for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++)
5669     {
5670         if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF)
5671         {
5672             assert(lastLevelWithSideEffects == UINT_MAX);
5673
5674             impSpillStackEntry(level,
5675                                BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect"));
5676         }
5677         else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT)
5678         {
5679             if (lastLevelWithSideEffects != UINT_MAX)
5680             {
5681                 /* We had a previous side effect - must spill it */
5682                 impSpillStackEntry(lastLevelWithSideEffects,
5683                                    BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect"));
5684
5685                 /* Record the level for the current side effect in case we will spill it */
5686                 lastLevelWithSideEffects = level;
5687             }
5688             else
5689             {
5690                 /* This is the first side effect encountered - record its level */
5691
5692                 lastLevelWithSideEffects = level;
5693             }
5694         }
5695     }
5696
5697     /* The argument list is now "clean" - no out-of-order side effects
5698      * Pop the argument list in reverse order */
5699
5700     unsigned   argFlags = 0;
5701     GenTreePtr args     = call->gtCall.gtCallArgs =
5702         impPopRevList(sig->numArgs, &argFlags, sig, sig->numArgs - argsToReverse);
5703
5704     if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
5705     {
5706         GenTreePtr thisPtr = args->Current();
5707         impBashVarAddrsToI(thisPtr);
5708         assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF);
5709     }
5710
5711     if (args)
5712     {
5713         call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5714     }
5715 }
5716
5717 //------------------------------------------------------------------------
5718 // impInitClass: Build a node to initialize the class before accessing the
5719 //               field if necessary
5720 //
5721 // Arguments:
5722 //    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5723 //                     by a call to CEEInfo::resolveToken().
5724 //
5725 // Return Value: If needed, a pointer to the node that will perform the class
5726 //               initialization.  Otherwise, nullptr.
5727 //
5728
5729 GenTreePtr Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken)
5730 {
5731     CorInfoInitClassResult initClassResult =
5732         info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle);
5733
5734     if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0)
5735     {
5736         return nullptr;
5737     }
5738     BOOL runtimeLookup;
5739
5740     GenTreePtr node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup);
5741
5742     if (node == nullptr)
5743     {
5744         assert(compDonotInline());
5745         return nullptr;
5746     }
5747
5748     if (runtimeLookup)
5749     {
5750         node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, 0, gtNewArgList(node));
5751     }
5752     else
5753     {
5754         // Call the shared non-GC static helper, as it's the fastest
5755         node = fgGetSharedCCtor(pResolvedToken->hClass);
5756     }
5757
5758     return node;
5759 }
5760
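//------------------------------------------------------------------------
// impImportStaticReadOnlyField: fold the value of a static readonly field
//     into a constant node
//
// Arguments:
//    fldAddr - address of the field's data
//    lclTyp  - type of the field
//
// Return Value:
//    A constant node (integer, long or double) holding the field's current value.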
5761 GenTreePtr Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp)
5762 {
5763     GenTreePtr op1 = nullptr;
5764
5765     switch (lclTyp)
5766     {
5767         int     ival;
5768         __int64 lval;
5769         double  dval;
5770
5771         case TYP_BOOL:
5772             ival = *((bool*)fldAddr);
5773             goto IVAL_COMMON;
5774
5775         case TYP_BYTE:
5776             ival = *((signed char*)fldAddr);
5777             goto IVAL_COMMON;
5778
5779         case TYP_UBYTE:
5780             ival = *((unsigned char*)fldAddr);
5781             goto IVAL_COMMON;
5782
5783         case TYP_SHORT:
5784             ival = *((short*)fldAddr);
5785             goto IVAL_COMMON;
5786
5787         case TYP_CHAR:
5788         case TYP_USHORT:
5789             ival = *((unsigned short*)fldAddr);
5790             goto IVAL_COMMON;
5791
5792         case TYP_UINT:
5793         case TYP_INT:
5794             ival = *((int*)fldAddr);
5795         IVAL_COMMON:
5796             op1 = gtNewIconNode(ival);
5797             break;
5798
5799         case TYP_LONG:
5800         case TYP_ULONG:
5801             lval = *((__int64*)fldAddr);
5802             op1  = gtNewLconNode(lval);
5803             break;
5804
5805         case TYP_FLOAT:
5806             dval = *((float*)fldAddr);
5807             op1  = gtNewDconNode(dval);
5808 #if !FEATURE_X87_DOUBLES
5809             // X87 stack doesn't differentiate between float/double
5810             // so R4 is treated as R8, but everybody else does
5811             op1->gtType = TYP_FLOAT;
5812 #endif // FEATURE_X87_DOUBLES
5813             break;
5814
5815         case TYP_DOUBLE:
5816             dval = *((double*)fldAddr);
5817             op1  = gtNewDconNode(dval);
5818             break;
5819
5820         default:
5821             assert(!"Unexpected lclTyp");
5822             break;
5823     }
5824
5825     return op1;
5826 }
5827
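//------------------------------------------------------------------------
// impImportStaticFieldAccess: import an access to a static field
//
// Arguments:
//    pResolvedToken - resolved token for the field
//    access         - whether the address or the value of the field is required
//    pFieldInfo     - EE-supplied info describing how to access the field
//    lclTyp         - type of the field
//
// Return Value:
//    Tree yielding the field's address (CORINFO_ACCESS_ADDRESS) or value,
//    built according to the field accessor kind reported by the EE.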
5828 GenTreePtr Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
5829                                                 CORINFO_ACCESS_FLAGS    access,
5830                                                 CORINFO_FIELD_INFO*     pFieldInfo,
5831                                                 var_types               lclTyp)
5832 {
5833     GenTreePtr op1;
5834
5835     switch (pFieldInfo->fieldAccessor)
5836     {
5837         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
5838         {
5839             assert(!compIsForInlining());
5840
5841             // We first call a special helper to get the statics base pointer
5842             op1 = impParentClassTokenToHandle(pResolvedToken);
5843
5844             // compIsForInlining() is false, so we should never get NULL here
5845             assert(op1 != nullptr);
5846
5847             var_types type = TYP_BYREF;
5848
5849             switch (pFieldInfo->helper)
5850             {
5851                 case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
5852                     type = TYP_I_IMPL;
5853                     break;
5854                 case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
5855                 case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
5856                 case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
5857                     break;
5858                 default:
5859                     assert(!"unknown generic statics helper");
5860                     break;
5861             }
5862
5863             op1 = gtNewHelperCallNode(pFieldInfo->helper, type, 0, gtNewArgList(op1));
5864
5865             FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5866             op1              = gtNewOperNode(GT_ADD, type, op1,
5867                                 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
5868         }
5869         break;
5870
5871         case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
5872         {
5873 #ifdef FEATURE_READYTORUN_COMPILER
5874             if (opts.IsReadyToRun())
5875             {
5876                 unsigned callFlags = 0;
5877
5878                 if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
5879                 {
5880                     callFlags |= GTF_CALL_HOISTABLE;
5881                 }
5882
5883                 op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF, callFlags);
5884
5885                 op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
5886             }
5887             else
5888 #endif
5889             {
5890                 op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper);
5891             }
5892
5893             {
5894                 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5895                 op1              = gtNewOperNode(GT_ADD, op1->TypeGet(), op1,
5896                                     new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, pFieldInfo->offset, fs));
5897             }
5898             break;
5899         }
5900 #if COR_JIT_EE_VERSION > 460
5901         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
5902         {
5903 #ifdef FEATURE_READYTORUN_COMPILER
5904             noway_assert(opts.IsReadyToRun());
5905             CORINFO_LOOKUP_KIND kind = info.compCompHnd->getLocationOfThisType(info.compMethodHnd);
5906             assert(kind.needsRuntimeLookup);
5907
5908             GenTreePtr      ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);
5909             GenTreeArgList* args    = gtNewArgList(ctxTree);
5910
5911             unsigned callFlags = 0;
5912
5913             if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
5914             {
5915                 callFlags |= GTF_CALL_HOISTABLE;
5916             }
5917             var_types type = TYP_BYREF;
5918             op1            = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, callFlags, args);
5919
5920             op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
5921             FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5922             op1              = gtNewOperNode(GT_ADD, type, op1,
5923                                 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
5924 #else
5925             unreached();
5926 #endif // FEATURE_READYTORUN_COMPILER
5927         }
5928         break;
5929 #endif // COR_JIT_EE_VERSION > 460
5930         default:
5931         {
5932             if (!(access & CORINFO_ACCESS_ADDRESS))
5933             {
5934                 // In the future, it may be better to just create the right tree here instead of folding it later.
5935                 op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField);
5936
5937                 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
5938                 {
5939                     op1->gtType = TYP_REF; // points at boxed object
5940                     FieldSeqNode* firstElemFldSeq =
5941                         GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
5942                     op1 =
5943                         gtNewOperNode(GT_ADD, TYP_BYREF, op1,
5944                                       new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), firstElemFldSeq));
5945
5946                     if (varTypeIsStruct(lclTyp))
5947                     {
5948                         // Constructor adds GTF_GLOB_REF.  Note that this is *not* GTF_EXCEPT.
5949                         op1 = gtNewObjNode(pFieldInfo->structType, op1);
5950                     }
5951                     else
5952                     {
5953                         op1 = gtNewOperNode(GT_IND, lclTyp, op1);
5954                         op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING;
5955                     }
5956                 }
5957
5958                 return op1;
5959             }
5960             else
5961             {
5962                 void** pFldAddr = nullptr;
5963                 void*  fldAddr  = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr);
5964
5965                 FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5966
5967                 /* Create the data member node */
5968                 if (pFldAddr == nullptr)
5969                 {
5970                     op1 = gtNewIconHandleNode((size_t)fldAddr, GTF_ICON_STATIC_HDL, fldSeq);
5971                 }
5972                 else
5973                 {
5974                     op1 = gtNewIconHandleNode((size_t)pFldAddr, GTF_ICON_STATIC_HDL, fldSeq);
5975
5976                     // There are two cases here: either the static is RVA-based,
5977                     // in which case the type of the FIELD node is not a GC type
5978                     // and the handle to the RVA is a TYP_I_IMPL, or the FIELD node is
5979                     // a GC type and the handle to it is a TYP_BYREF into the GC heap
5980                     // (handles to statics now go into the large object heap).
5981
5982                     var_types handleTyp = (var_types)(varTypeIsGC(lclTyp) ? TYP_BYREF : TYP_I_IMPL);
5983                     op1                 = gtNewOperNode(GT_IND, handleTyp, op1);
5984                     op1->gtFlags |= GTF_IND_INVARIANT | GTF_IND_NONFAULTING;
5985                 }
5986             }
5987             break;
5988         }
5989     }
5990
5991     if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
5992     {
5993         op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
5994
5995         FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
5996
5997         op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
5998                             new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), fldSeq));
5999     }
6000
6001     if (!(access & CORINFO_ACCESS_ADDRESS))
6002     {
6003         op1 = gtNewOperNode(GT_IND, lclTyp, op1);
6004         op1->gtFlags |= GTF_GLOB_REF;
6005     }
6006
6007     return op1;
6008 }
6009
6010 // In general, try to call this before most of the verification work.  Most people expect the access
6011 // exceptions before the verification exceptions.  If you do this afterwards, that usually doesn't happen.  It
6012 // turns out that if you can't access something, we also think that you're unverifiable for other reasons.
6013 void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6014 {
6015     if (result != CORINFO_ACCESS_ALLOWED)
6016     {
6017         impHandleAccessAllowedInternal(result, helperCall);
6018     }
6019 }
6020
6021 void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6022 {
6023     switch (result)
6024     {
6025         case CORINFO_ACCESS_ALLOWED:
6026             break;
6027         case CORINFO_ACCESS_ILLEGAL:
6028             // if we're verifying, then we need to reject the illegal access to ensure that we don't think the
6029             // method is verifiable.  Otherwise, delay the exception to runtime.
6030             if (compIsForImportOnly())
6031             {
6032                 info.compCompHnd->ThrowExceptionForHelper(helperCall);
6033             }
6034             else
6035             {
6036                 impInsertHelperCall(helperCall);
6037             }
6038             break;
6039         case CORINFO_ACCESS_RUNTIME_CHECK:
6040             impInsertHelperCall(helperCall);
6041             break;
6042     }
6043 }
6044
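//------------------------------------------------------------------------
// impInsertHelperCall: append a call to the helper described by helperInfo
//
// Arguments:
//    helperInfo - EE-supplied descriptor of the helper and its arguments
//
// Notes:
//    The argument list is built in reverse from the descriptor, embedding
//    field/method/class/module handles or constants as required, and the
//    resulting helper call is appended to the current statement list.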
6045 void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo)
6046 {
6047     // Construct the argument list
6048     GenTreeArgList* args = nullptr;
6049     assert(helperInfo->helperNum != CORINFO_HELP_UNDEF);
6050     for (unsigned i = helperInfo->numArgs; i > 0; --i)
6051     {
6052         const CORINFO_HELPER_ARG& helperArg  = helperInfo->args[i - 1];
6053         GenTreePtr                currentArg = nullptr;
6054         switch (helperArg.argType)
6055         {
6056             case CORINFO_HELPER_ARG_TYPE_Field:
6057                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
6058                     info.compCompHnd->getFieldClass(helperArg.fieldHandle));
6059                 currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle);
6060                 break;
6061             case CORINFO_HELPER_ARG_TYPE_Method:
6062                 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle);
6063                 currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle);
6064                 break;
6065             case CORINFO_HELPER_ARG_TYPE_Class:
6066                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle);
6067                 currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle);
6068                 break;
6069             case CORINFO_HELPER_ARG_TYPE_Module:
6070                 currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle);
6071                 break;
6072             case CORINFO_HELPER_ARG_TYPE_Const:
6073                 currentArg = gtNewIconNode(helperArg.constant);
6074                 break;
6075             default:
6076                 NO_WAY("Illegal helper arg type");
6077         }
6078         args = (args == nullptr) ? gtNewArgList(currentArg) : gtNewListNode(currentArg, args);
6079     }
6080
6081     /* TODO-Review:
6082      * Mark as CSE'able, and hoistable.  Consider marking hoistable unless you're in the inlinee.
6083      * Also, consider sticking this in the first basic block.
6084      */
6085     GenTreePtr callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, GTF_EXCEPT, args);
6086     impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6087 }
6088
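//------------------------------------------------------------------------
// impInsertCalloutForDelegate: on CoreCLR, insert a security callout before
//     creating a delegate when the EE reports that delegate creation may be
//     disallowed by the CLR host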
6089 void Compiler::impInsertCalloutForDelegate(CORINFO_METHOD_HANDLE callerMethodHnd,
6090                                            CORINFO_METHOD_HANDLE calleeMethodHnd,
6091                                            CORINFO_CLASS_HANDLE  delegateTypeHnd)
6092 {
6093 #ifdef FEATURE_CORECLR
6094     if (!info.compCompHnd->isDelegateCreationAllowed(delegateTypeHnd, calleeMethodHnd))
6095     {
6096         // Call the JIT_DelegateSecurityCheck helper before calling the actual function.
6097         // This helper throws an exception if the CLR host disallows the call.
6098
6099         GenTreePtr helper = gtNewHelperCallNode(CORINFO_HELP_DELEGATE_SECURITY_CHECK, TYP_VOID, GTF_EXCEPT,
6100                                                 gtNewArgList(gtNewIconEmbClsHndNode(delegateTypeHnd),
6101                                                              gtNewIconEmbMethHndNode(calleeMethodHnd)));
6102         // Append the callout statement
6103         impAppendTree(helper, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6104     }
6105 #endif // FEATURE_CORECLR
6106 }
6107
6108 // Checks whether the return types of the caller and callee are compatible
6109 // so that the callee can be tail called. Note that we don't check
6110 // compatibility in the IL Verifier sense here, but rather whether the return
6111 // type sizes are equal and the values are returned in the same return register.
6112 bool Compiler::impTailCallRetTypeCompatible(var_types            callerRetType,
6113                                             CORINFO_CLASS_HANDLE callerRetTypeClass,
6114                                             var_types            calleeRetType,
6115                                             CORINFO_CLASS_HANDLE calleeRetTypeClass)
6116 {
6117     // Note that we cannot relax this condition with genActualType() as the
6118     // calling convention dictates that the caller of a function with a small
6119     // typed return value is responsible for normalizing the return value.
6120     if (callerRetType == calleeRetType)
6121     {
6122         return true;
6123     }
6124
6125 #if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
6126     // Jit64 compat:
6127     if (callerRetType == TYP_VOID)
6128     {
6129         // This needs to be allowed to support the following IL pattern that Jit64 allows:
6130         //     tail.call
6131         //     pop
6132         //     ret
6133         //
6134         // Note that the above IL pattern is not valid as per IL verification rules.
6135         // Therefore, only full trust code can take advantage of this pattern.
6136         return true;
6137     }
6138
6139     // These checks return true if the return value type sizes are the same and
6140     // get returned in the same return register i.e. caller doesn't need to normalize
6141     // return value. Some of the tail calls permitted by below checks would have
6142     // been rejected by IL Verifier before we reached here.  Therefore, only full
6143     // trust code can make those tail calls.
6144     unsigned callerRetTypeSize = 0;
6145     unsigned calleeRetTypeSize = 0;
6146     bool     isCallerRetTypMBEnreg =
6147         VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize, true);
6148     bool isCalleeRetTypMBEnreg =
6149         VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize, true);
6150
6151     if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg)
6152     {
6153         return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
6154     }
6155 #endif // _TARGET_AMD64_ || _TARGET_ARM64_
6156
6157     return false;
6158 }
6159
6160 // For prefixFlags
6161 enum
6162 {
6163     PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix
6164     PREFIX_TAILCALL_IMPLICIT =
6165         0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix
6166     PREFIX_TAILCALL    = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT),
6167     PREFIX_VOLATILE    = 0x00000100,
6168     PREFIX_UNALIGNED   = 0x00001000,
6169     PREFIX_CONSTRAINED = 0x00010000,
6170     PREFIX_READONLY    = 0x00100000
6171 };
6172
6173 /********************************************************************************
6174  *
6175  * Returns true if the current opcode and the opcodes following it correspond
6176  * to a supported tail call IL pattern.
6177  *
6178  */
6179 bool Compiler::impIsTailCallILPattern(bool        tailPrefixed,
6180                                       OPCODE      curOpcode,
6181                                       const BYTE* codeAddrOfNextOpcode,
6182                                       const BYTE* codeEnd,
6183                                       bool        isRecursive,
6184                                       bool*       isCallPopAndRet /* = nullptr */)
6185 {
6186     // Bail out if the current opcode is not a call.
6187     if (!impOpcodeIsCallOpcode(curOpcode))
6188     {
6189         return false;
6190     }
6191
6192 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6193     // If shared ret tail opt is not enabled, we will enable
6194     // it for recursive methods.
6195     if (isRecursive)
6196 #endif
6197     {
6198         // We can actually handle the case where the ret is in a fall-through block, as long as that is the only
6199         // part of the sequence. Make sure we don't go past the end of the IL, however.
6200         codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize);
6201     }
6202
6203     // Bail out if there is no next opcode after call
6204     if (codeAddrOfNextOpcode >= codeEnd)
6205     {
6206         return false;
6207     }
6208
6209     // Scan the opcodes to look for the following IL patterns if either
6210     //   i) the call is not tail prefixed (i.e. implicit tail call), or
6211     //  ii) the call is tail prefixed and IL verification is not needed for the method.
6212     //
6213     // Only in these two cases can we allow the tail call patterns below,
6214     // which violate the ECMA spec.
6215     //
6216     // Pattern1:
6217     //       call
6218     //       nop*
6219     //       ret
6220     //
6221     // Pattern2:
6222     //       call
6223     //       nop*
6224     //       pop
6225     //       nop*
6226     //       ret
6227     int    cntPop = 0;
6228     OPCODE nextOpcode;
6229
6230 #ifdef _TARGET_AMD64_
6231     do
6232     {
6233         nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6234         codeAddrOfNextOpcode += sizeof(__int8);
6235     } while ((codeAddrOfNextOpcode < codeEnd) &&         // Haven't reached end of method
6236              (!tailPrefixed || !tiVerificationNeeded) && // Not ".tail" prefixed or method requires no IL verification
6237              ((nextOpcode == CEE_NOP) || ((nextOpcode == CEE_POP) && (++cntPop == 1)))); // Next opcode = nop or exactly
6238                                                                                          // one pop seen so far.
6239 #else
6240     nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6241 #endif
6242
6243     if (isCallPopAndRet)
6244     {
6245         // Allow call+pop+ret to be tail call optimized if caller ret type is void
6246         *isCallPopAndRet = (nextOpcode == CEE_RET) && (cntPop == 1);
6247     }
6248
6249 #ifdef _TARGET_AMD64_
6250     // Jit64 Compat:
6251     // Tail call IL pattern could be either of the following
6252     // 1) call/callvirt/calli + ret
6253     // 2) call/callvirt/calli + pop + ret in a method returning void.
6254     return (nextOpcode == CEE_RET) && ((cntPop == 0) || ((cntPop == 1) && (info.compRetType == TYP_VOID)));
6255 #else //!_TARGET_AMD64_
6256     return (nextOpcode == CEE_RET) && (cntPop == 0);
6257 #endif
6258 }
6259
6260 /*****************************************************************************
6261  *
6262  * Determine whether the call could be converted to an implicit tail call
6263  *
6264  */
6265 bool Compiler::impIsImplicitTailCallCandidate(
6266     OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive)
6267 {
6268
6269 #if FEATURE_TAILCALL_OPT
6270     if (!opts.compTailCallOpt)
6271     {
6272         return false;
6273     }
6274
6275     if (opts.compDbgCode || opts.MinOpts())
6276     {
6277         return false;
6278     }
6279
6280     // must not be tail prefixed
6281     if (prefixFlags & PREFIX_TAILCALL_EXPLICIT)
6282     {
6283         return false;
6284     }
6285
6286 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6287     // The block containing the call must be marked as BBJ_RETURN.
6288     // We allow shared ret tail call optimization on recursive calls even under
6289     // !FEATURE_TAILCALL_OPT_SHARED_RETURN.
6290     if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN))
6291         return false;
6292 #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN
6293
6294     // must be call+ret or call+pop+ret
6295     if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive))
6296     {
6297         return false;
6298     }
6299
6300     return true;
6301 #else
6302     return false;
6303 #endif // FEATURE_TAILCALL_OPT
6304 }
6305
6306 //------------------------------------------------------------------------
6307 // impImportCall: import a call-inspiring opcode
6308 //
6309 // Arguments:
6310 //    opcode                    - opcode that inspires the call
6311 //    pResolvedToken            - resolved token for the call target
6312 //    pConstrainedResolvedToken - resolved constraint token (or nullptr)
6313 //    newobjThis                - tree for this pointer or uninitialized newobj temp (or nullptr)
6314 //    prefixFlags               - IL prefix flags for the call
6315 //    callInfo                  - EE supplied info for the call
6316 //    rawILOffset               - IL offset of the opcode
6317 //
6318 // Returns:
6319 //    Type of the call's return value.
6320 //
6321 // Notes:
6322 //    opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ.
6323 //
6324 //    For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated
6325 //    uninitialized object.
6326
6327 #ifdef _PREFAST_
6328 #pragma warning(push)
6329 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
6330 #endif
6331
6332 var_types Compiler::impImportCall(OPCODE                  opcode,
6333                                   CORINFO_RESOLVED_TOKEN* pResolvedToken,
6334                                   CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
6335                                   GenTreePtr              newobjThis,
6336                                   int                     prefixFlags,
6337                                   CORINFO_CALL_INFO*      callInfo,
6338                                   IL_OFFSET               rawILOffset)
6339 {
6340     assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI);
6341
6342     IL_OFFSETX             ilOffset                       = impCurILOffset(rawILOffset, true);
6343     var_types              callRetTyp                     = TYP_COUNT;
6344     CORINFO_SIG_INFO*      sig                            = nullptr;
6345     CORINFO_METHOD_HANDLE  methHnd                        = nullptr;
6346     CORINFO_CLASS_HANDLE   clsHnd                         = nullptr;
6347     unsigned               clsFlags                       = 0;
6348     unsigned               mflags                         = 0;
6349     unsigned               argFlags                       = 0;
6350     GenTreePtr             call                           = nullptr;
6351     GenTreeArgList*        args                           = nullptr;
6352     CORINFO_THIS_TRANSFORM constraintCallThisTransform    = CORINFO_NO_THIS_TRANSFORM;
6353     CORINFO_CONTEXT_HANDLE exactContextHnd                = nullptr;
6354     BOOL                   exactContextNeedsRuntimeLookup = FALSE;
6355     bool                   canTailCall                    = true;
6356     const char*            szCanTailCallFailReason        = nullptr;
6357     int                    tailCall                       = prefixFlags & PREFIX_TAILCALL;
6358     bool                   readonlyCall                   = (prefixFlags & PREFIX_READONLY) != 0;
6359
6360     // Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could
6361     // do that before tailcalls, but that is probably not the intended
6362     // semantic. So just disallow tailcalls from synchronized methods.
6363     // Also, popping arguments in a varargs function is more work and NYI
6364     // If we have a security object, we have to keep our frame around for callers
6365     // to see any imperative security.
6366     if (info.compFlags & CORINFO_FLG_SYNCH)
6367     {
6368         canTailCall             = false;
6369         szCanTailCallFailReason = "Caller is synchronized";
6370     }
6371 #if !FEATURE_FIXED_OUT_ARGS
6372     else if (info.compIsVarArgs)
6373     {
6374         canTailCall             = false;
6375         szCanTailCallFailReason = "Caller is varargs";
6376     }
6377 #endif // FEATURE_FIXED_OUT_ARGS
6378     else if (opts.compNeedSecurityCheck)
6379     {
6380         canTailCall             = false;
6381         szCanTailCallFailReason = "Caller requires a security check.";
6382     }
6383
6384     // We only need to cast the return value of pinvoke inlined calls that return small types
6385
6386     // TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop
6387     // widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there.
6388     // The existing x64 JIT doesn't bother widening all types to int, so we have to assume for
6389     // the time being that the callee might be compiled by the other JIT and thus the return
6390     // value will need to be widened by us (or not widened at all...)
6391
6392     // ReadyToRun code sticks with default calling convention that does not widen small return types.
6393
6394     bool checkForSmallType  = opts.IsJit64Compat() || opts.IsReadyToRun();
6395     bool bIntrinsicImported = false;
6396
6397     CORINFO_SIG_INFO calliSig;
6398     GenTreeArgList*  extraArg = nullptr;
6399
6400     /*-------------------------------------------------------------------------
6401      * First create the call node
6402      */
6403
6404     if (opcode == CEE_CALLI)
6405     {
6406         /* Get the call site sig */
6407         eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &calliSig);
6408
6409         callRetTyp = JITtype2varType(calliSig.retType);
6410
6411         call = impImportIndirectCall(&calliSig, ilOffset);
6412
6413         // We don't know the target method, so we have to infer the flags, or
6414         // assume the worst-case.
6415         mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
6416
6417 #ifdef DEBUG
6418         if (verbose)
6419         {
6420             unsigned structSize =
6421                 (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0;
6422             printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
6423                    opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
6424         }
6425 #endif
6426         // This should be checked in impImportBlockCode.
6427         assert(!compIsForInlining() || !(impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY));
6428
6429         sig = &calliSig;
6430
6431 #ifdef DEBUG
6432         // We cannot lazily obtain the signature of a CALLI call because it has no method
6433         // handle that we can use, so we need to save its full call signature here.
6434         assert(call->gtCall.callSig == nullptr);
6435         call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
6436         *call->gtCall.callSig = calliSig;
6437 #endif // DEBUG
6438     }
6439     else // (opcode != CEE_CALLI)
6440     {
6441         CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Count;
6442
6443         // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to
6444         // supply the instantiation parameters necessary to make direct calls to underlying
6445         // shared generic code, rather than calling through instantiating stubs.  If the
6446         // returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT
6447         // must indeed pass an instantiation parameter.
6448
6449         methHnd = callInfo->hMethod;
6450
6451         sig        = &(callInfo->sig);
6452         callRetTyp = JITtype2varType(sig->retType);
6453
6454         mflags = callInfo->methodFlags;
6455
6456 #ifdef DEBUG
6457         if (verbose)
6458         {
6459             unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0;
6460             printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
6461                    opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
6462         }
6463 #endif
6464         if (compIsForInlining())
6465         {
6466             /* Does this call site have security boundary restrictions? */
6467
6468             if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
6469             {
6470                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
6471                 return callRetTyp;
6472             }
6473
6474             /* Does the inlinee need a security check token on the frame */
6475
6476             if (mflags & CORINFO_FLG_SECURITYCHECK)
6477             {
6478                 compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
6479                 return callRetTyp;
6480             }
6481
6482             /* Does the inlinee use StackCrawlMark */
6483
6484             if (mflags & CORINFO_FLG_DONT_INLINE_CALLER)
6485             {
6486                 compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK);
6487                 return callRetTyp;
6488             }
6489
6490             /* For now ignore delegate invoke */
6491
6492             if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
6493             {
6494                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_DELEGATE_INVOKE);
6495                 return callRetTyp;
6496             }
6497
6498             /* For now ignore varargs */
6499             if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6500             {
6501                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS);
6502                 return callRetTyp;
6503             }
6504
6505             if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
6506             {
6507                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
6508                 return callRetTyp;
6509             }
6510
6511             if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT))
6512             {
6513                 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL);
6514                 return callRetTyp;
6515             }
6516         }
6517
6518         clsHnd = pResolvedToken->hClass;
6519
6520         clsFlags = callInfo->classFlags;
6521
6522 #ifdef DEBUG
6523         // If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute.
6524
6525         // This recognition should really be done by knowing the methHnd of the relevant Mark method(s).
6526         // These should be in mscorlib.h, and available through a JIT/EE interface call.
6527         const char* modName;
6528         const char* className;
6529         const char* methodName;
6530         if ((className = eeGetClassName(clsHnd)) != nullptr &&
6531             strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 &&
6532             (methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0)
6533         {
6534             return impImportJitTestLabelMark(sig->numArgs);
6535         }
6536 #endif // DEBUG
6537
6538         // <NICE> Factor this into getCallInfo </NICE>
6539         if ((mflags & CORINFO_FLG_INTRINSIC) && !pConstrainedResolvedToken)
6540         {
6541             call = impIntrinsic(clsHnd, methHnd, sig, pResolvedToken->token, readonlyCall,
6542                                 (canTailCall && (tailCall != 0)), &intrinsicID);
6543
6544             if (call != nullptr)
6545             {
6546                 assert(!(mflags & CORINFO_FLG_VIRTUAL) || (mflags & CORINFO_FLG_FINAL) ||
6547                        (clsFlags & CORINFO_FLG_FINAL));
6548
6549 #ifdef FEATURE_READYTORUN_COMPILER
6550                 if (call->OperGet() == GT_INTRINSIC)
6551                 {
6552                     if (opts.IsReadyToRun())
6553                     {
6554                         noway_assert(callInfo->kind == CORINFO_CALL);
6555                         call->gtIntrinsic.gtEntryPoint = callInfo->codePointerLookup.constLookup;
6556                     }
6557                     else
6558                     {
6559                         call->gtIntrinsic.gtEntryPoint.addr = nullptr;
6560                     }
6561                 }
6562 #endif
6563
6564                 bIntrinsicImported = true;
6565                 goto DONE_CALL;
6566             }
6567         }
6568
6569 #ifdef FEATURE_SIMD
6570         if (featureSIMD)
6571         {
6572             call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, pResolvedToken->token);
6573             if (call != nullptr)
6574             {
6575                 bIntrinsicImported = true;
6576                 goto DONE_CALL;
6577             }
6578         }
6579 #endif // FEATURE_SIMD
6580
6581         if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT))
6582         {
6583             NO_WAY("Virtual call to a function added via EnC is not supported");
6584             goto DONE_CALL;
6585         }
6586
6587         if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT &&
6588             (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6589             (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG)
6590         {
6591             BADCODE("Bad calling convention");
6592         }
6593
6594         //-------------------------------------------------------------------------
6595         //  Construct the call node
6596         //
6597         // Work out what sort of call we're making.
6598         // Dispense with virtual calls implemented via LDVIRTFTN immediately.
6599
6600         constraintCallThisTransform = callInfo->thisTransform;
6601
6602         exactContextHnd                = callInfo->contextHandle;
6603         exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup;
6604
6605         // A recursive call is treated as a loop back to the beginning of the method.
6606         if (methHnd == info.compMethodHnd)
6607         {
6608 #ifdef DEBUG
6609             if (verbose)
6610             {
6611                 JITDUMP("\nFound recursive call in the method. Mark BB%02u to BB%02u as having a backward branch.\n",
6612                         fgFirstBB->bbNum, compCurBB->bbNum);
6613             }
6614 #endif
6615             fgMarkBackwardJump(fgFirstBB, compCurBB);
6616         }
6617
6618         switch (callInfo->kind)
6619         {
6620
6621             case CORINFO_VIRTUALCALL_STUB:
6622             {
6623                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6624                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6625                 if (callInfo->stubLookup.lookupKind.needsRuntimeLookup)
6626                 {
6627
6628                     if (compIsForInlining())
6629                     {
6630                         // Don't import runtime lookups when inlining
6631                         // Inlining has to be aborted in such a case
6632                         /* XXX Fri 3/20/2009
6633                          * By the way, this would never succeed.  If the handle lookup is into the generic
6634                          * dictionary for a candidate, you'll generate different dictionary offsets and the
6635                          * inlined code will crash.
6636                          *
6637                          * To anyone reviewing this code: when could this ever succeed in the future?  It'll
6638                          * always have a handle lookup.  These lookups are safe intra-module, but we're just
6639                          * failing here.
6640                          */
6641                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE);
6642                         return callRetTyp;
6643                     }
6644
6645                     GenTreePtr stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd);
6646                     assert(!compDonotInline());
6647
6648                     // This is the rough code to set up an indirect stub call
6649                     assert(stubAddr != nullptr);
6650
6651                     // The stubAddr may be a
6652                     // complex expression. As it is evaluated after the args,
6653                     // it may cause registered args to be spilled. Simply spill it.
6654
6655                     unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup"));
6656                     impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_ALL);
6657                     stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6658
6659                     // Create the actual call node
6660
6661                     assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6662                            (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6663
6664                     call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr);
6665
6666                     call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT);
6667                     call->gtFlags |= GTF_CALL_VIRT_STUB;
6668
6669 #ifdef _TARGET_X86_
6670                     // No tailcalls allowed for these yet...
6671                     canTailCall             = false;
6672                     szCanTailCallFailReason = "VirtualCall with runtime lookup";
6673 #endif
6674                 }
6675                 else
6676                 {
6677                     // OK, the stub is available at compile time.
6678
6679                     call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6680                     call->gtCall.gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr;
6681                     call->gtFlags |= GTF_CALL_VIRT_STUB;
6682                     assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE);
6683                     if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE)
6684                     {
6685                         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
6686                     }
6687                 }
6688
6689 #ifdef FEATURE_READYTORUN_COMPILER
6690                 if (opts.IsReadyToRun())
6691                 {
6692                     // Null check is sometimes needed for ready to run to handle
6693                     // non-virtual <-> virtual changes between versions
6694                     if (callInfo->nullInstanceCheck)
6695                     {
6696                         call->gtFlags |= GTF_CALL_NULLCHECK;
6697                     }
6698                 }
6699 #endif
6700
6701                 break;
6702             }
6703
6704             case CORINFO_VIRTUALCALL_VTABLE:
6705             {
6706                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6707                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6708                 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6709                 call->gtFlags |= GTF_CALL_VIRT_VTABLE;
6710                 break;
6711             }
6712
6713             case CORINFO_VIRTUALCALL_LDVIRTFTN:
6714             {
6715                 if (compIsForInlining())
6716                 {
6717                     compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN);
6718                     return callRetTyp;
6719                 }
6720
6721                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6722                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6723                 // OK, we've been told to call via LDVIRTFTN, so just
6724                 // take the call now....
6725
6726                 args = impPopList(sig->numArgs, &argFlags, sig);
6727
6728                 GenTreePtr thisPtr = impPopStack().val;
6729                 thisPtr            = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
6730                 if (compDonotInline())
6731                 {
6732                     return callRetTyp;
6733                 }
6734
6735                 // Clone the (possibly transformed) "this" pointer
6736                 GenTreePtr thisPtrCopy;
6737                 thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
6738                                        nullptr DEBUGARG("LDVIRTFTN this pointer"));
6739
6740                 GenTreePtr fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
6741                 if (compDonotInline())
6742                 {
6743                     return callRetTyp;
6744                 }
6745
6746                 thisPtr = nullptr; // can't reuse it
6747
6748                 // Now make an indirect call through the function pointer
6749
6750                 unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer"));
6751                 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
6752                 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6753
6754                 // Create the actual call node
6755
6756                 call                    = gtNewIndCallNode(fptr, callRetTyp, args, ilOffset);
6757                 call->gtCall.gtCallObjp = thisPtrCopy;
6758                 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6759
6760 #ifdef FEATURE_READYTORUN_COMPILER
6761                 if (opts.IsReadyToRun())
6762                 {
6763                     // Null check is needed for ready to run to handle
6764                     // non-virtual <-> virtual changes between versions
6765                     call->gtFlags |= GTF_CALL_NULLCHECK;
6766                 }
6767 #endif
6768
6769                 // Since we are jumping over some code, check that it's OK to skip that code
6770                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6771                        (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6772                 goto DONE;
6773             }
6774
6775             case CORINFO_CALL:
6776             {
6777                 // This is for a non-virtual, non-interface etc. call
6778                 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6779
6780                 // We remove the nullcheck for the GetType call intrinsic.
6781                 // TODO-CQ: JIT64 does not introduce the null check for many more helper calls
6782                 // and intrinsics.
6783                 if (callInfo->nullInstanceCheck &&
6784                     !((mflags & CORINFO_FLG_INTRINSIC) != 0 && (intrinsicID == CORINFO_INTRINSIC_Object_GetType)))
6785                 {
6786                     call->gtFlags |= GTF_CALL_NULLCHECK;
6787                 }
6788
6789 #ifdef FEATURE_READYTORUN_COMPILER
6790                 if (opts.IsReadyToRun())
6791                 {
6792                     call->gtCall.setEntryPoint(callInfo->codePointerLookup.constLookup);
6793                 }
6794 #endif
6795                 break;
6796             }
6797
6798             case CORINFO_CALL_CODE_POINTER:
6799             {
6800                 // The EE has asked us to call by computing a code pointer and then doing an
6801                 // indirect call.  This is because a runtime lookup is required to get the code entry point.
6802
6803                 // These calls always follow a uniform calling convention, i.e. no extra hidden params
6804                 assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0);
6805
6806                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG);
6807                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6808
6809                 GenTreePtr fptr =
6810                     impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod);
6811
6812                 if (compDonotInline())
6813                 {
6814                     return callRetTyp;
6815                 }
6816
6817                 // Now make an indirect call through the function pointer
6818
6819                 unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer"));
6820                 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
6821                 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6822
6823                 call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
6824                 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6825                 if (callInfo->nullInstanceCheck)
6826                 {
6827                     call->gtFlags |= GTF_CALL_NULLCHECK;
6828                 }
6829
6830                 break;
6831             }
6832
6833             default:
6834                 assert(!"unknown call kind");
6835                 break;
6836         }
6837
6838         //-------------------------------------------------------------------------
6839         // Set more flags
6840
6841         PREFIX_ASSUME(call != nullptr);
6842
6843         if (mflags & CORINFO_FLG_NOGCCHECK)
6844         {
6845             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK;
6846         }
6847
6848         // Mark call if it's one of the ones we will maybe treat as an intrinsic
6849         if (intrinsicID == CORINFO_INTRINSIC_Object_GetType || intrinsicID == CORINFO_INTRINSIC_TypeEQ ||
6850             intrinsicID == CORINFO_INTRINSIC_TypeNEQ || intrinsicID == CORINFO_INTRINSIC_GetCurrentManagedThread ||
6851             intrinsicID == CORINFO_INTRINSIC_GetManagedThreadId)
6852         {
6853             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC;
6854         }
6855     }
6856     assert(sig);
6857     assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set.
6858
6859     /* Some sanity checks */
6860
6861     // CALL_VIRT and NEWOBJ must have a THIS pointer
6862     assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS));
6863     // static bit and hasThis are negations of one another
6864     assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0));
6865     assert(call != nullptr);
6866
6867     /*-------------------------------------------------------------------------
6868      * Check special-cases etc
6869      */
6870
6871     /* Special case - Check if it is a call to Delegate.Invoke(). */
6872
6873     if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
6874     {
6875         assert(!compIsForInlining());
6876         assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6877         assert(mflags & CORINFO_FLG_FINAL);
6878
6879         /* Set the delegate flag */
6880         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV;
6881
6882         if (callInfo->secureDelegateInvoke)
6883         {
6884             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SECURE_DELEGATE_INV;
6885         }
6886
6887         if (opcode == CEE_CALLVIRT)
6888         {
6889             assert(mflags & CORINFO_FLG_FINAL);
6890
6891             /* It should have the GTF_CALL_NULLCHECK flag set. Reset it */
6892             assert(call->gtFlags & GTF_CALL_NULLCHECK);
6893             call->gtFlags &= ~GTF_CALL_NULLCHECK;
6894         }
6895     }
6896
6897     CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass;
6898     actualMethodRetTypeSigClass = sig->retTypeSigClass;
6899     if (varTypeIsStruct(callRetTyp))
6900     {
6901         callRetTyp   = impNormStructType(actualMethodRetTypeSigClass);
6902         call->gtType = callRetTyp;
6903     }
6904
6905 #if !FEATURE_VARARG
6906     /* Check for varargs */
6907     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
6908         (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6909     {
6910         BADCODE("Varargs not supported.");
6911     }
6912 #endif // !FEATURE_VARARG
6913
6914     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
6915         (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6916     {
6917         assert(!compIsForInlining());
6918
6919         /* Set the right flags */
6920
6921         call->gtFlags |= GTF_CALL_POP_ARGS;
6922         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VARARGS;
6923
6924         /* Can't allow tailcall for varargs as it is caller-pop. The caller
6925            will be expecting to pop a certain number of arguments, but if we
6926            tailcall to a function with a different number of arguments, we
6927            are hosed. There are ways around this (caller remembers esp value,
6928            varargs is not caller-pop, etc), but not worth it. */
6929         CLANG_FORMAT_COMMENT_ANCHOR;
6930
6931 #ifdef _TARGET_X86_
6932         if (canTailCall)
6933         {
6934             canTailCall             = false;
6935             szCanTailCallFailReason = "Callee is varargs";
6936         }
6937 #endif
6938
6939         /* Get the total number of arguments - this is already correct
6940          * for CALLI - for methods we have to get it from the call site */
6941
6942         if (opcode != CEE_CALLI)
6943         {
6944 #ifdef DEBUG
6945             unsigned numArgsDef = sig->numArgs;
6946 #endif
6947             eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, sig);
6948
6949 #ifdef DEBUG
6950             // We cannot lazily obtain the signature of a vararg call because using its method
6951             // handle will give us only the declared argument list, not the full argument list.
6952             assert(call->gtCall.callSig == nullptr);
6953             call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
6954             *call->gtCall.callSig = *sig;
6955 #endif
6956
6957             // For vararg calls we must be sure to load the return type of the
6958             // method actually being called, as well as the return type specified
6959             // in the vararg signature. With type equivalency, these types
6960             // may not be the same.
6961             if (sig->retTypeSigClass != actualMethodRetTypeSigClass)
6962             {
6963                 if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
6964                     sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR &&
6965                     sig->retType != CORINFO_TYPE_VAR)
6966                 {
6967                     // Make sure that all valuetypes (including enums) that we push are loaded.
6968                     // This is to guarantee that if a GC is triggered from the prestub of this method,
6969                     // all valuetypes in the method signature are already loaded.
6970                     // We need to be able to find the size of the valuetypes, but we cannot
6971                     // do a class-load from within GC.
6972                     info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass);
6973                 }
6974             }
6975
6976             assert(numArgsDef <= sig->numArgs);
6977         }
6978
6979         /* We will have "cookie" as the last argument but we cannot push
6980          * it on the operand stack because we may overflow, so we append it
6981          * to the arg list next after we pop them */
6982     }
6983
6984     if (mflags & CORINFO_FLG_SECURITYCHECK)
6985     {
6986         assert(!compIsForInlining());
6987
6988         // Need security prolog/epilog callouts when there is
6989         // imperative security in the method. This is to give security a
6990         // chance to do any setup in the prolog and cleanup in the epilog if needed.
6991
6992         if (compIsForInlining())
6993         {
6994             // Cannot handle this when the method being imported is an inlinee,
6995             // because an inlinee method does not have its own frame.
6996
6997             compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
6998             return callRetTyp;
6999         }
7000         else
7001         {
7002             tiSecurityCalloutNeeded = true;
7003
7004             // If the current method calls a method which needs a security check,
7005             // (i.e. the method being compiled has imperative security)
7006             // we need to reserve a slot for the security object in
7007             // the current method's stack frame
7008             opts.compNeedSecurityCheck = true;
7009         }
7010     }
7011
7012     //--------------------------- Inline NDirect ------------------------------
7013
7014     // For inline cases we technically should look at both the current
7015     // block and the call site block (or just the latter if we've
7016     // fused the EH trees). However the block-related checks pertain to
7017     // EH and we currently won't inline a method with EH. So for
7018     // inlinees, just checking the call site block is sufficient.
7019     {
7020         // New lexical block here to avoid compilation errors because of GOTOs.
7021         BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
7022         impCheckForPInvokeCall(call, methHnd, sig, mflags, block);
7023     }
7024
7025     if (call->gtFlags & GTF_CALL_UNMANAGED)
7026     {
7027         // We set up the unmanaged call by linking the frame, disabling GC, etc
7028         // This needs to be cleaned up on return
7029         if (canTailCall)
7030         {
7031             canTailCall             = false;
7032             szCanTailCallFailReason = "Callee is native";
7033         }
7034
7035         checkForSmallType = true;
7036
7037         impPopArgsForUnmanagedCall(call, sig);
7038
7039         goto DONE;
7040     }
7041     else if ((opcode == CEE_CALLI) && (((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_STDCALL) ||
7042                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_C) ||
7043                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_THISCALL) ||
7044                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_FASTCALL)))
7045     {
7046         if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig))
7047         {
7048             // Normally this only happens with inlining.
7049             // However, a generic method (or type) being NGENd into another module
7050             // can run into this issue as well.  There's not an easy fall-back for NGEN
7051             // so instead we fall back to JIT.
7052             if (compIsForInlining())
7053             {
7054                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE);
7055             }
7056             else
7057             {
7058                 IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)");
7059             }
7060
7061             return callRetTyp;
7062         }
7063
7064         GenTreePtr cookie = eeGetPInvokeCookie(sig);
7065
7066         // This cookie is required to be either a simple GT_CNS_INT or
7067         // an indirection of a GT_CNS_INT
7068         //
7069         GenTreePtr cookieConst = cookie;
7070         if (cookie->gtOper == GT_IND)
7071         {
7072             cookieConst = cookie->gtOp.gtOp1;
7073         }
7074         assert(cookieConst->gtOper == GT_CNS_INT);
7075
7076         // Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that
7077         // we won't allow this tree to participate in any CSE logic
7078         //
7079         cookie->gtFlags |= GTF_DONT_CSE;
7080         cookieConst->gtFlags |= GTF_DONT_CSE;
7081
7082         call->gtCall.gtCallCookie = cookie;
7083
7084         if (canTailCall)
7085         {
7086             canTailCall             = false;
7087             szCanTailCallFailReason = "PInvoke calli";
7088         }
7089     }
7090
7091     /*-------------------------------------------------------------------------
7092      * Create the argument list
7093      */
7094
7095     //-------------------------------------------------------------------------
7096     // Special case - for varargs we have an implicit last argument
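    // For example, for a call site such as (illustrative only)
    //     Print("fmt", __arglist(1, 2.0))
    // the varargs "cookie" describing the call-site signature is appended as a
    // hidden trailing argument so the callee can walk the variable portion of
    // the argument list.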
7097
7098     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
7099     {
7100         assert(!compIsForInlining());
7101
7102         void *varCookie, *pVarCookie;
7103         if (!info.compCompHnd->canGetVarArgsHandle(sig))
7104         {
7105             compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE);
7106             return callRetTyp;
7107         }
7108
7109         varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie);
7110         assert((!varCookie) != (!pVarCookie));
7111         GenTreePtr cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL);
7112
7113         assert(extraArg == nullptr);
7114         extraArg = gtNewArgList(cookie);
7115     }
7116
7117     //-------------------------------------------------------------------------
7118     // Extra arg for shared generic code and array methods
7119     //
7120     // Extra argument containing instantiation information is passed in the
7121     // following circumstances:
7122     // (a) To the "Address" method on array classes; the extra parameter is
7123     //     the array's type handle (a TypeDesc)
7124     // (b) To shared-code instance methods in generic structs; the extra parameter
7125     //     is the struct's type handle (a vtable ptr)
7126     // (c) To shared-code per-instantiation non-generic static methods in generic
7127     //     classes and structs; the extra parameter is the type handle
7128     // (d) To shared-code generic methods; the extra parameter is an
7129     //     exact-instantiation MethodDesc
7130     //
7131     // We also set the exact type context associated with the call so we can
7132     // inline the call correctly later on.
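    //
    // As an illustration only: for a shared generic method such as
    //     static void M<T>(T x) { ... }
    // a call to M<string> passes the exact-instantiation MethodDesc for M<string>
    // as the hidden argument (case (d) above), while an instance method on a
    // generic struct receives the struct's type handle (case (b) above).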
7133
7134     if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE)
7135     {
7136         assert(call->gtCall.gtCallType == CT_USER_FUNC);
7137         if (clsHnd == nullptr)
7138         {
7139             NO_WAY("CALLI on parameterized type");
7140         }
7141
7142         assert(opcode != CEE_CALLI);
7143
7144         GenTreePtr instParam;
7145         BOOL       runtimeLookup;
7146
7147         // Instantiated generic method
7148         if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD)
7149         {
7150             CORINFO_METHOD_HANDLE exactMethodHandle =
7151                 (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7152
7153             if (!exactContextNeedsRuntimeLookup)
7154             {
7155 #ifdef FEATURE_READYTORUN_COMPILER
7156                 if (opts.IsReadyToRun())
7157                 {
7158                     instParam =
7159                         impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle);
7160                     if (instParam == nullptr)
7161                     {
7162                         return callRetTyp;
7163                     }
7164                 }
7165                 else
7166 #endif
7167                 {
7168                     instParam = gtNewIconEmbMethHndNode(exactMethodHandle);
7169                     info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle);
7170                 }
7171             }
7172             else
7173             {
7174                 instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7175                 if (instParam == nullptr)
7176                 {
7177                     return callRetTyp;
7178                 }
7179             }
7180         }
7181
7182         // otherwise must be an instance method in a generic struct,
7183         // a static method in a generic type, or a runtime-generated array method
7184         else
7185         {
7186             assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS);
7187             CORINFO_CLASS_HANDLE exactClassHandle =
7188                 (CORINFO_CLASS_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7189
7190             if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0)
7191             {
7192                 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD);
7193                 return callRetTyp;
7194             }
7195
7196             if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall)
7197             {
7198                 // We indicate "readonly" to the Address operation by using a null
7199                 // instParam.
7200                 instParam = gtNewIconNode(0, TYP_REF);
7201             }
7202
7203             if (!exactContextNeedsRuntimeLookup)
7204             {
7205 #ifdef FEATURE_READYTORUN_COMPILER
7206                 if (opts.IsReadyToRun())
7207                 {
7208                     instParam =
7209                         impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle);
7210                     if (instParam == nullptr)
7211                     {
7212                         return callRetTyp;
7213                     }
7214                 }
7215                 else
7216 #endif
7217                 {
7218                     instParam = gtNewIconEmbClsHndNode(exactClassHandle);
7219                     info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle);
7220                 }
7221             }
7222             else
7223             {
7224                 instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7225                 if (instParam == nullptr)
7226                 {
7227                     return callRetTyp;
7228                 }
7229             }
7230         }
7231
7232         assert(extraArg == nullptr);
7233         extraArg = gtNewArgList(instParam);
7234     }
7235
7236     // Inlining may need the exact type context (exactContextHnd) if we're inlining shared generic code, in particular
7237     // to inline 'polytypic' operations such as static field accesses, type tests and method calls which
7238     // rely on the exact context. The exactContextHnd is passed back to the JitInterface at appropriate points.
7239     // exactContextHnd is not currently required when inlining shared generic code into shared
7240     // generic code, since the inliner aborts whenever shared code polytypic operations are encountered
7241     // (e.g. anything marked needsRuntimeLookup)
7242     if (exactContextNeedsRuntimeLookup)
7243     {
7244         exactContextHnd = nullptr;
7245     }
7246
7247     //-------------------------------------------------------------------------
7248     // The main group of arguments
7249
7250     args = call->gtCall.gtCallArgs = impPopList(sig->numArgs, &argFlags, sig, extraArg);
7251
7252     if (args)
7253     {
7254         call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
7255     }
7256
7257     //-------------------------------------------------------------------------
7258     // The "this" pointer
7259
7260     if (!(mflags & CORINFO_FLG_STATIC) && !((opcode == CEE_NEWOBJ) && (newobjThis == nullptr)))
7261     {
7262         GenTreePtr obj;
7263
7264         if (opcode == CEE_NEWOBJ)
7265         {
7266             obj = newobjThis;
7267         }
7268         else
7269         {
7270             obj = impPopStack().val;
7271             obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform);
7272             if (compDonotInline())
7273             {
7274                 return callRetTyp;
7275             }
7276         }
7277
7278         /* Is this a virtual or interface call? */
7279
7280         if ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT)
7281         {
7282             /* only true object pointers can be virtual */
7283
7284             assert(obj->gtType == TYP_REF);
7285         }
7286         else
7287         {
7288             if (impIsThis(obj))
7289             {
7290                 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS;
7291             }
7292         }
7293
7294         /* Store the "this" value in the call */
7295
7296         call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT;
7297         call->gtCall.gtCallObjp = obj;
7298     }
7299
7300     //-------------------------------------------------------------------------
7301     // The "this" pointer for "newobj"
7302
7303     if (opcode == CEE_NEWOBJ)
7304     {
7305         if (clsFlags & CORINFO_FLG_VAROBJSIZE)
7306         {
7307             assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately
7308             // This is a 'new' of a variable sized object, where
7309             // the constructor is to return the object.  In this case
7310             // the constructor claims to return VOID but we know it
7311             // actually returns the new object
7312             assert(callRetTyp == TYP_VOID);
7313             callRetTyp   = TYP_REF;
7314             call->gtType = TYP_REF;
7315             impSpillSpecialSideEff();
7316
7317             impPushOnStack(call, typeInfo(TI_REF, clsHnd));
7318         }
7319         else
7320         {
7321             if (clsFlags & CORINFO_FLG_DELEGATE)
7322             {
7323                 // The new inliner morphs it in impImportCall.
7324                 // This will allow us to inline the call to the delegate constructor.
7325                 call = fgOptimizeDelegateConstructor(call, &exactContextHnd);
7326             }
7327
7328             if (!bIntrinsicImported)
7329             {
7330
7331 #if defined(DEBUG) || defined(INLINE_DATA)
7332
7333                 // Keep track of the raw IL offset of the call
7334                 call->gtCall.gtRawILOffset = rawILOffset;
7335
7336 #endif // defined(DEBUG) || defined(INLINE_DATA)
7337
7338                 // Is it an inline candidate?
7339                 impMarkInlineCandidate(call, exactContextHnd, callInfo);
7340             }
7341
7342             // append the call node.
7343             impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7344
7345             // Now push the value of the 'new' onto the stack
7346
7347             // This is a 'new' of a non-variable sized object.
7348             // Append the new node (op1) to the statement list,
7349             // and then push the local holding the value of this
7350             // new instruction on the stack.
7351
7352             if (clsFlags & CORINFO_FLG_VALUECLASS)
7353             {
7354                 assert(newobjThis->gtOper == GT_ADDR && newobjThis->gtOp.gtOp1->gtOper == GT_LCL_VAR);
7355
7356                 unsigned tmp = newobjThis->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
7357                 impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack());
7358             }
7359             else
7360             {
7361                 if (newobjThis->gtOper == GT_COMMA)
7362                 {
7363                     // In coreclr the callout can be inserted even if verification is disabled
7364                     // so we cannot rely on tiVerificationNeeded alone
7365
7366                     // We must have inserted the callout. Get the real newobj.
7367                     newobjThis = newobjThis->gtOp.gtOp2;
7368                 }
7369
7370                 assert(newobjThis->gtOper == GT_LCL_VAR);
7371                 impPushOnStack(gtNewLclvNode(newobjThis->gtLclVarCommon.gtLclNum, TYP_REF), typeInfo(TI_REF, clsHnd));
7372             }
7373         }
7374         return callRetTyp;
7375     }
7376
7377 DONE:
7378
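    // Recap: an explicit tail call comes from the IL "tail." prefix
    // (PREFIX_TAILCALL_EXPLICIT); an implicit one (PREFIX_TAILCALL_IMPLICIT) is a
    // call the JIT itself considers for opportunistic tail calling even though the
    // IL carries no prefix. The checks below treat the two cases differently.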
7379     if (tailCall)
7380     {
7381         // This check cannot be performed for implicit tail calls for the reason
7382         // that impIsImplicitTailCallCandidate() is not checking whether return
7383         // types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT.
7384         // As a result it is possible that in the following case, we find that
7385         // the type stack is non-empty if Callee() is considered for implicit
7386         // tail calling.
7387         //      int Caller(..) { .... void Callee(); ret val; ... }
7388         //
7389         // Note that we cannot check return type compatibility before impImportCall()
7390         // as we don't have the required info, or we would need to duplicate some of
7391         // the logic of impImportCall().
7392         //
7393         // For implicit tail calls, we perform this check after return types are
7394         // known to be compatible.
7395         if ((tailCall & PREFIX_TAILCALL_EXPLICIT) && (verCurrentState.esStackDepth != 0))
7396         {
7397             BADCODE("Stack should be empty after tailcall");
7398         }
7399
7400         // Note that we cannot relax this condition with genActualType() as
7401         // the calling convention dictates that the caller of a function with
7402         // a small-typed return value is responsible for normalizing the return value.
7403
7404         if (canTailCall &&
7405             !impTailCallRetTypeCompatible(info.compRetType, info.compMethodInfo->args.retTypeClass, callRetTyp,
7406                                           callInfo->sig.retTypeClass))
7407         {
7408             canTailCall             = false;
7409             szCanTailCallFailReason = "Return types are not tail call compatible";
7410         }
7411
7412         // Stack empty check for implicit tail calls.
7413         if (canTailCall && (tailCall & PREFIX_TAILCALL_IMPLICIT) && (verCurrentState.esStackDepth != 0))
7414         {
7415 #ifdef _TARGET_AMD64_
7416             // JIT64 Compatibility:  Opportunistic tail call stack mismatch throws a VerificationException
7417             // in JIT64, not an InvalidProgramException.
7418             Verify(false, "Stack should be empty after tailcall");
7419 #else  // !_TARGET_AMD64_
7420             BADCODE("Stack should be empty after tailcall");
7421 #endif // !_TARGET_AMD64_
7422         }
7423
7424         // assert(compCurBB is not a catch, finally or filter block);
7425         // assert(compCurBB is not a try block protected by a finally block);
7426
7427         // Check for permission to tailcall
7428         bool explicitTailCall = (tailCall & PREFIX_TAILCALL_EXPLICIT) != 0;
7429
7430         assert(!explicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN);
7431
7432         if (canTailCall)
7433         {
7434             // True virtual or indirect calls shouldn't pass in a callee handle.
7435             CORINFO_METHOD_HANDLE exactCalleeHnd = ((call->gtCall.gtCallType != CT_USER_FUNC) ||
7436                                                     ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT))
7437                                                        ? nullptr
7438                                                        : methHnd;
7439             GenTreePtr thisArg = call->gtCall.gtCallObjp;
7440
7441             if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, explicitTailCall))
7442             {
7443                 canTailCall = true;
7444                 if (explicitTailCall)
7445                 {
7446                     // In case of explicit tail calls, mark it so that it is not considered
7447                     // for in-lining.
7448                     call->gtCall.gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL;
7449 #ifdef DEBUG
7450                     if (verbose)
7451                     {
7452                         printf("\nGTF_CALL_M_EXPLICIT_TAILCALL bit set for call ");
7453                         printTreeID(call);
7454                         printf("\n");
7455                     }
7456 #endif
7457                 }
7458                 else
7459                 {
7460 #if FEATURE_TAILCALL_OPT
7461                     // Must be an implicit tail call.
7462                     assert((tailCall & PREFIX_TAILCALL_IMPLICIT) != 0);
7463
7464                     // It is possible that a call node is both an inline candidate and marked
7465                     // for opportunistic tail calling.  Inlining happens before morphing of
7466                     // trees.  If inlining of an inline candidate gets aborted for whatever
7467                     // reason, it will survive to the morphing stage at which point it will be
7468                     // transformed into a tail call after performing additional checks.
7469
7470                     call->gtCall.gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL;
7471 #ifdef DEBUG
7472                     if (verbose)
7473                     {
7474                         printf("\nGTF_CALL_M_IMPLICIT_TAILCALL bit set for call ");
7475                         printTreeID(call);
7476                         printf("\n");
7477                     }
7478 #endif
7479
7480 #else //! FEATURE_TAILCALL_OPT
7481                     NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls");
7482
7483 #endif // FEATURE_TAILCALL_OPT
7484                 }
7485
7486                 // we can't report success just yet...
7487             }
7488             else
7489             {
7490                 canTailCall = false;
7491 // canTailCall reported its reasons already
7492 #ifdef DEBUG
7493                 if (verbose)
7494                 {
7495                     printf("\ninfo.compCompHnd->canTailCall returned false for call ");
7496                     printTreeID(call);
7497                     printf("\n");
7498                 }
7499 #endif
7500             }
7501         }
7502         else
7503         {
7504             // If this assert fires it means that canTailCall was set to false without setting a reason!
7505             assert(szCanTailCallFailReason != nullptr);
7506
7507 #ifdef DEBUG
7508             if (verbose)
7509             {
7510                 printf("\nRejecting %splicit tail call for call ", explicitTailCall ? "ex" : "im");
7511                 printTreeID(call);
7512                 printf(": %s\n", szCanTailCallFailReason);
7513             }
7514 #endif
7515             info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, explicitTailCall, TAILCALL_FAIL,
7516                                                      szCanTailCallFailReason);
7517         }
7518     }
7519
7520 // Note: we assume that small return types are already normalized by the managed callee
7521 // or by the pinvoke stub for calls to unmanaged code.
7522
7523 DONE_CALL:
7524
7525     if (!bIntrinsicImported)
7526     {
7527         //
7528         // Things needed to be checked when bIntrinsicImported is false.
7529         //
7530
7531         assert(call->gtOper == GT_CALL);
7532         assert(sig != nullptr);
7533
7534         // Tail calls require us to save the call site's sig info so we can obtain an argument
7535         // copying thunk from the EE later on.
7536         if (call->gtCall.callSig == nullptr)
7537         {
7538             call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7539             *call->gtCall.callSig = *sig;
7540         }
7541
7542         if (compIsForInlining() && opcode == CEE_CALLVIRT)
7543         {
7544             GenTreePtr callObj = call->gtCall.gtCallObjp;
7545             assert(callObj != nullptr);
7546
7547             unsigned callKind = call->gtFlags & GTF_CALL_VIRT_KIND_MASK;
7548
7549             if (((callKind != GTF_CALL_NONVIRT) || (call->gtFlags & GTF_CALL_NULLCHECK)) &&
7550                 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(call->gtCall.gtCallArgs, callObj,
7551                                                                    impInlineInfo->inlArgInfo))
7552             {
7553                 impInlineInfo->thisDereferencedFirst = true;
7554             }
7555         }
7556
7557 #if defined(DEBUG) || defined(INLINE_DATA)
7558
7559         // Keep track of the raw IL offset of the call
7560         call->gtCall.gtRawILOffset = rawILOffset;
7561
7562 #endif // defined(DEBUG) || defined(INLINE_DATA)
7563
7564         // Is it an inline candidate?
7565         impMarkInlineCandidate(call, exactContextHnd, callInfo);
7566     }
7567
7568     // Push or append the result of the call
7569     if (callRetTyp == TYP_VOID)
7570     {
7571         if (opcode == CEE_NEWOBJ)
7572         {
7573             // we actually did push something, so don't spill the thing we just pushed.
7574             assert(verCurrentState.esStackDepth > 0);
7575             impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtOffs);
7576         }
7577         else
7578         {
7579             impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7580         }
7581     }
7582     else
7583     {
7584         impSpillSpecialSideEff();
7585
7586         if (clsFlags & CORINFO_FLG_ARRAY)
7587         {
7588             eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
7589         }
7590
7591         // Find the return type used for verification by interpreting the method signature.
7592         // NB: we are clobbering the already established sig.
7593         if (tiVerificationNeeded)
7594         {
7595             // Actually, we never get the sig for the original method.
7596             sig = &(callInfo->verSig);
7597         }
7598
7599         typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass);
7600         tiRetVal.NormaliseForStack();
7601
7602         // The CEE_READONLY prefix modifies the verification semantics of an Address
7603         // operation on an array type.
7604         if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall && tiRetVal.IsByRef())
7605         {
7606             tiRetVal.SetIsReadonlyByRef();
7607         }
7608
7609         if (tiVerificationNeeded)
7610         {
7611             // We assume all calls return permanent home byrefs. If they
7612             // didn't they wouldn't be verifiable. This is also covering
7613             // the Address() helper for multidimensional arrays.
7614             if (tiRetVal.IsByRef())
7615             {
7616                 tiRetVal.SetIsPermanentHomeByRef();
7617             }
7618         }
7619
7620         if (call->gtOper == GT_CALL)
7621         {
7622             // Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call)
7623             if (varTypeIsStruct(callRetTyp))
7624             {
7625                 call = impFixupCallStructReturn(call, sig->retTypeClass);
7626             }
7627
7628             if ((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0)
7629             {
7630                 assert(opts.OptEnabled(CLFLG_INLINING));
7631
7632                 // Make the call its own tree (spill the stack if needed).
7633                 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7634
7635                 // TODO: Still using the widened type.
7636                 call = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp));
7637             }
7638             else
7639             {
7640                 // For non-candidates we must also spill, since we
7641                 // might have locals live on the eval stack that this
7642                 // call can modify.
7643                 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call"));
7644             }
7645         }
7646
7647         if (!bIntrinsicImported)
7648         {
7649             //-------------------------------------------------------------------------
7650             //
7651             /* If the call is of a small type and the callee is managed, the callee will normalize the result
7652                 before returning.
7653                 However, we need to normalize small type values returned by unmanaged
7654                 functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here
7655                 if we use the shorter inlined pinvoke stub. */
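            // That is: insert an explicit normalizing cast here for small integral
            // results that may come back un-normalized through the inlined pinvoke path.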
7656
7657             if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT))
7658             {
7659                 call = gtNewCastNode(genActualType(callRetTyp), call, callRetTyp);
7660             }
7661         }
7662
7663         impPushOnStack(call, tiRetVal);
7664     }
7665
7666     // VSD functions get a new call target each time we getCallInfo, so clear the cache.
7667     // Also, the call info cache for CALLI instructions is largely incomplete, so clear it out.
7668     // if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB))
7669     //  callInfoCache.uncacheCallInfo();
7670
7671     return callRetTyp;
7672 }
7673 #ifdef _PREFAST_
7674 #pragma warning(pop)
7675 #endif
7676
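//-----------------------------------------------------------------------------------
//  impMethodInfo_hasRetBuffArg: check whether a method returns its struct result
//  via a hidden return buffer argument.
//
//  Arguments:
//    methInfo - method info for the method in question
//
//  Return Value:
//    true if the struct return is done by reference (SPK_ByReference), false otherwise.
//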
7677 bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo)
7678 {
7679     CorInfoType corType = methInfo->args.retType;
7680
7681     if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY))
7682     {
7683         // We have some kind of STRUCT being returned
7684
7685         structPassingKind howToReturnStruct = SPK_Unknown;
7686
7687         var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, &howToReturnStruct);
7688
7689         if (howToReturnStruct == SPK_ByReference)
7690         {
7691             return true;
7692         }
7693     }
7694
7695     return false;
7696 }
7697
7698 #ifdef DEBUG
7699 //
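// impImportJitTestLabelMark: (DEBUG only) pop the constant arguments of a test-label
// marker call, build the corresponding TestLabelAndNum annotation, attach it to the
// tree on top of the stack via the node test data table, and push that tree back.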
7700 var_types Compiler::impImportJitTestLabelMark(int numArgs)
7701 {
7702     TestLabelAndNum tlAndN;
7703     if (numArgs == 2)
7704     {
7705         tlAndN.m_num  = 0;
7706         StackEntry se = impPopStack();
7707         assert(se.seTypeInfo.GetType() == TI_INT);
7708         GenTreePtr val = se.val;
7709         assert(val->IsCnsIntOrI());
7710         tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
7711     }
7712     else if (numArgs == 3)
7713     {
7714         StackEntry se = impPopStack();
7715         assert(se.seTypeInfo.GetType() == TI_INT);
7716         GenTreePtr val = se.val;
7717         assert(val->IsCnsIntOrI());
7718         tlAndN.m_num = val->AsIntConCommon()->IconValue();
7719         se           = impPopStack();
7720         assert(se.seTypeInfo.GetType() == TI_INT);
7721         val = se.val;
7722         assert(val->IsCnsIntOrI());
7723         tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
7724     }
7725     else
7726     {
7727         assert(false);
7728     }
7729
7730     StackEntry expSe = impPopStack();
7731     GenTreePtr node  = expSe.val;
7732
7733     // There are a small number of special cases, where we actually put the annotation on a subnode.
7734     if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100)
7735     {
7736         // A loop hoist annotation with value >= 100 means that the expression should be a static field access,
7737         // a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some
7738         // offset within the static field block whose address is returned by the helper call.
7739         // The annotation is saying that this address calculation, but not the entire access, should be hoisted.
7740         GenTreePtr helperCall = nullptr;
7741         assert(node->OperGet() == GT_IND);
7742         tlAndN.m_num -= 100;
7743         GetNodeTestData()->Set(node->gtOp.gtOp1, tlAndN);
7744         GetNodeTestData()->Remove(node);
7745     }
7746     else
7747     {
7748         GetNodeTestData()->Set(node, tlAndN);
7749     }
7750
7751     impPushOnStack(node, expSe.seTypeInfo);
7752     return node->TypeGet();
7753 }
7754 #endif // DEBUG
7755
7756 //-----------------------------------------------------------------------------------
7757 //  impFixupCallStructReturn: For a call node that returns a struct type, either
7758 //  adjust the return type to an enregisterable type or set the flag to indicate
7759 //  a struct return via the retbuf arg.
7760 //
7761 //  Arguments:
7762 //    call       -  GT_CALL GenTree node
7763 //    retClsHnd  -  Class handle of return type of the call
7764 //
7765 //  Return Value:
7766 //    Returns new GenTree node after fixing struct return of call node
7767 //
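//  Notes:
//    A multi-register struct return is forced into the form "tmp = call" (by
//    assigning the call to a temp) unless the call is a tail call or an inline
//    candidate; a struct that is not returned in registers is marked with
//    GTF_CALL_M_RETBUFFARG so it is returned via the hidden retbuf argument.
//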
7768 GenTreePtr Compiler::impFixupCallStructReturn(GenTreePtr call, CORINFO_CLASS_HANDLE retClsHnd)
7769 {
7770     assert(call->gtOper == GT_CALL);
7771
7772     if (!varTypeIsStruct(call))
7773     {
7774         return call;
7775     }
7776
7777     call->gtCall.gtRetClsHnd = retClsHnd;
7778
7779     GenTreeCall* callNode = call->AsCall();
7780
7781 #if FEATURE_MULTIREG_RET
7782     // Initialize Return type descriptor of call node
7783     ReturnTypeDesc* retTypeDesc = callNode->GetReturnTypeDesc();
7784     retTypeDesc->InitializeStructReturnType(this, retClsHnd);
7785 #endif // FEATURE_MULTIREG_RET
7786
7787 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
7788
7789     // Not allowed for FEATURE_CORECLR, which is the only SKU available for System V OSs.
7790     assert(!callNode->IsVarargs() && "varargs not allowed for System V OSs.");
7791
7792     // The return type will remain as the incoming struct type unless normalized to a
7793     // single eightbyte return type below.
7794     callNode->gtReturnType = call->gtType;
7795
7796     unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7797     if (retRegCount != 0)
7798     {
7799         if (retRegCount == 1)
7800         {
7801             // struct returned in a single register
7802             callNode->gtReturnType = retTypeDesc->GetReturnRegType(0);
7803         }
7804         else
7805         {
7806             // must be a struct returned in two registers
7807             assert(retRegCount == 2);
7808
7809             if ((!callNode->CanTailCall()) && (!callNode->IsInlineCandidate()))
7810             {
7811                 // Force a call returning multi-reg struct to be always of the IR form
7812                 //   tmp = call
7813                 //
7814                 // No need to assign a multi-reg struct to a local var if:
7815                 //  - It is a tail call or
7816                 //  - The call is marked for in-lining later
7817                 return impAssignMultiRegTypeToVar(call, retClsHnd);
7818             }
7819         }
7820     }
7821     else
7822     {
7823         // struct not returned in registers, i.e. returned via hidden retbuf arg.
7824         callNode->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
7825     }
7826
7827 #else // not FEATURE_UNIX_AMD64_STRUCT_PASSING
7828
7829 #if FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
7830     // There is no fixup necessary if the return type is a HFA struct.
7831     // HFA structs are returned in registers for ARM32 and ARM64
7832     //
7833     if (!call->gtCall.IsVarargs() && IsHfa(retClsHnd))
7834     {
7835         if (call->gtCall.CanTailCall())
7836         {
7837             if (info.compIsVarArgs)
7838             {
7839                 // We cannot tail call because control needs to return to fixup the calling
7840                 // convention for result return.
7841                 call->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
7842             }
7843             else
7844             {
7845                 // If we can tail call returning HFA, then don't assign it to
7846                 // a variable back and forth.
7847                 return call;
7848             }
7849         }
7850
7851         if (call->gtFlags & GTF_CALL_INLINE_CANDIDATE)
7852         {
7853             return call;
7854         }
7855
7856         unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7857         if (retRegCount >= 2)
7858         {
7859             return impAssignMultiRegTypeToVar(call, retClsHnd);
7860         }
7861     }
7862 #endif // FEATURE_MULTIREG_RET && _TARGET_ARM_
7863
7864     // Check for TYP_STRUCT type that wraps a primitive type
7865     // Such structs are returned using a single register
7866     // and we change the return type on those calls here.
7867     //
7868     structPassingKind howToReturnStruct;
7869     var_types         returnType = getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
7870
7871     if (howToReturnStruct == SPK_ByReference)
7872     {
7873         assert(returnType == TYP_UNKNOWN);
7874         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
7875     }
7876     else
7877     {
7878         assert(returnType != TYP_UNKNOWN);
7879         call->gtCall.gtReturnType = returnType;
7880
7881         // ToDo: Refactor this common code sequence into its own method as it is used 4+ times
7882         if ((returnType == TYP_LONG) && (compLongUsed == false))
7883         {
7884             compLongUsed = true;
7885         }
7886         else if (((returnType == TYP_FLOAT) || (returnType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
7887         {
7888             compFloatingPointUsed = true;
7889         }
7890
7891 #if FEATURE_MULTIREG_RET
7892         unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7893         assert(retRegCount != 0);
7894
7895         if (retRegCount >= 2)
7896         {
7897             if ((!callNode->CanTailCall()) && (!callNode->IsInlineCandidate()))
7898             {
7899                 // Force a call returning multi-reg struct to be always of the IR form
7900                 //   tmp = call
7901                 //
7902                 // No need to assign a multi-reg struct to a local var if:
7903                 //  - It is a tail call or
7904                 //  - The call is marked for in-lining later
7905                 return impAssignMultiRegTypeToVar(call, retClsHnd);
7906             }
7907         }
7908 #endif // FEATURE_MULTIREG_RET
7909     }
7910
7911 #endif // not FEATURE_UNIX_AMD64_STRUCT_PASSING
7912
7913     return call;
7914 }
7915
7916 /*****************************************************************************
7917    For struct return values, re-type the operand in the case where the ABI
7918    does not use a struct return buffer
7919    Note that this method is only called for !_TARGET_X86_.
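   On multi-reg return targets (System V AMD64 struct passing, ARM HFA returns, ARM64)
   the operand is kept if it is already a local or a call, or is spilled to a
   multi-reg temp; otherwise the operand is re-typed in place to the primitive native
   return type (possibly converting GT_LCL_VAR to GT_LCL_FLD or GT_OBJ to GT_IND).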
7920  */
7921
7922 GenTreePtr Compiler::impFixupStructReturnType(GenTreePtr op, CORINFO_CLASS_HANDLE retClsHnd)
7923 {
7924     assert(varTypeIsStruct(info.compRetType));
7925     assert(info.compRetBuffArg == BAD_VAR_NUM);
7926
7927 #if defined(_TARGET_XARCH_)
7928
7929 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
7930     // No VarArgs for CoreCLR on x64 Unix
7931     assert(!info.compIsVarArgs);
7932
7933     // Is method returning a multi-reg struct?
7934     if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd))
7935     {
7936         // In case of multi-reg struct return, we force IR to be one of the following:
7937         // GT_RETURN(lclvar) or GT_RETURN(call).  If op is anything other than a
7938         // lclvar or call, it is assigned to a temp to create: temp = op and GT_RETURN(tmp).
7939
7940         if (op->gtOper == GT_LCL_VAR)
7941         {
7942             // Make sure that this struct stays in memory and doesn't get promoted.
7943             unsigned lclNum                  = op->gtLclVarCommon.gtLclNum;
7944             lvaTable[lclNum].lvIsMultiRegRet = true;
7945
7946             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
7947             op->gtFlags |= GTF_DONT_CSE;
7948
7949             return op;
7950         }
7951
7952         if (op->gtOper == GT_CALL)
7953         {
7954             return op;
7955         }
7956
7957         return impAssignMultiRegTypeToVar(op, retClsHnd);
7958     }
7959 #else  // !FEATURE_UNIX_AMD64_STRUCT_PASSING
7960     assert(info.compRetNativeType != TYP_STRUCT);
7961 #endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
7962
7963 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
7964
7965     if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd))
7966     {
7967         if (op->gtOper == GT_LCL_VAR)
7968         {
7969             // This LCL_VAR is an HFA return value, it stays as a TYP_STRUCT
7970             unsigned lclNum = op->gtLclVarCommon.gtLclNum;
7971             // Make sure this struct type stays as struct so that we can return it as an HFA
7972             lvaTable[lclNum].lvIsMultiRegRet = true;
7973
7974             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
7975             op->gtFlags |= GTF_DONT_CSE;
7976
7977             return op;
7978         }
7979
7980         if (op->gtOper == GT_CALL)
7981         {
7982             if (op->gtCall.IsVarargs())
7983             {
7984                 // We cannot tail call because control needs to return to fixup the calling
7985                 // convention for result return.
7986                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
7987                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
7988             }
7989             else
7990             {
7991                 return op;
7992             }
7993         }
7994         return impAssignMultiRegTypeToVar(op, retClsHnd);
7995     }
7996
7997 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM64_)
7998
7999     // Is method returning a multi-reg struct?
8000     if (IsMultiRegReturnedType(retClsHnd))
8001     {
8002         if (op->gtOper == GT_LCL_VAR)
8003         {
8004             // This LCL_VAR stays as a TYP_STRUCT
8005             unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8006
8007             // Make sure this struct type is not struct promoted
8008             lvaTable[lclNum].lvIsMultiRegRet = true;
8009
8010             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8011             op->gtFlags |= GTF_DONT_CSE;
8012
8013             return op;
8014         }
8015
8016         if (op->gtOper == GT_CALL)
8017         {
8018             if (op->gtCall.IsVarargs())
8019             {
8020                 // We cannot tail call because control needs to return to fixup the calling
8021                 // convention for result return.
8022                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
8023                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8024             }
8025             else
8026             {
8027                 return op;
8028             }
8029         }
8030         return impAssignMultiRegTypeToVar(op, retClsHnd);
8031     }
8032
8033 #endif // _TARGET_XARCH_ / _TARGET_ARM_ / _TARGET_ARM64_ (multi-reg struct return)
8034
8035 REDO_RETURN_NODE:
8036     // Adjust the type away from struct to an integral type;
8037     // no normalization is performed.
8038     if (op->gtOper == GT_LCL_VAR)
8039     {
8040         op->ChangeOper(GT_LCL_FLD);
8041     }
8042     else if (op->gtOper == GT_OBJ)
8043     {
8044         GenTreePtr op1 = op->AsObj()->Addr();
8045
8046         // We will fold away OBJ/ADDR
8047         // except for OBJ/ADDR/INDEX
8048         //     as the array type influences the array element's offset
8049         //     Later in this method we change op->gtType to info.compRetNativeType
8050         //     This is not correct when op is a GT_INDEX as the starting offset
8051         //     for the array elements 'elemOffs' is different for an array of
8052         //     TYP_REF than an array of TYP_STRUCT (which simply wraps a TYP_REF)
8053         //     Also refer to the GTF_INX_REFARR_LAYOUT flag
8054         //
8055         if ((op1->gtOper == GT_ADDR) && (op1->gtOp.gtOp1->gtOper != GT_INDEX))
8056         {
8057             // Change '*(&X)' to 'X' and see if we can do better
8058             op = op1->gtOp.gtOp1;
8059             goto REDO_RETURN_NODE;
8060         }
8061         op->gtObj.gtClass = NO_CLASS_HANDLE;
8062         op->ChangeOperUnchecked(GT_IND);
8063         op->gtFlags |= GTF_IND_TGTANYWHERE;
8064     }
8065     else if (op->gtOper == GT_CALL)
8066     {
8067         if (op->AsCall()->TreatAsHasRetBufArg(this))
8068         {
8069             // This must be one of those 'special' helpers that don't
8070             // really have a return buffer, but instead use it as a way
8071             // to keep the trees cleaner with fewer address-taken temps.
8072             //
8073             // Well now we have to materialize the return buffer as
8074             // an address-taken temp. Then we can return the temp.
8075             //
8076             // NOTE: this code assumes that since the call directly
8077             // feeds the return, then the call must be returning the
8078             // same structure/class/type.
8079             //
8080             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer"));
8081
8082             // No need to spill anything as we're about to return.
8083             impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE);
8084
8085             // Don't create both a GT_ADDR & GT_OBJ just to undo all of that; instead,
8086             // jump directly to a GT_LCL_FLD.
8087             op = gtNewLclvNode(tmpNum, info.compRetNativeType);
8088             op->ChangeOper(GT_LCL_FLD);
8089         }
8090         else
8091         {
8092             assert(info.compRetNativeType == op->gtCall.gtReturnType);
8093
8094             // Don't change the gtType of the node just yet, it will get changed later.
8095             return op;
8096         }
8097     }
8098     else if (op->gtOper == GT_COMMA)
8099     {
8100         op->gtOp.gtOp2 = impFixupStructReturnType(op->gtOp.gtOp2, retClsHnd);
8101     }
8102
8103     op->gtType = info.compRetNativeType;
8104
8105     return op;
8106 }
8107
8108 /*****************************************************************************
8109    CEE_LEAVE may be jumping out of a protected block, viz, a catch or a
8110    finally-protected try. We find the finally blocks protecting the current
8111    offset (in order) by walking over the complete exception table and
8112    finding enclosing clauses. This assumes that the table is sorted.
8113    This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS.
8114
8115    If we are leaving a catch handler, we need to attach the
8116    CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks.
8117
8118    After this function, the BBJ_LEAVE block has been converted to a different type.
8119  */
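/* As an illustrative sketch only (block and label names below are hypothetical),
   a leave that exits two nested finally-protected trys, e.g.

       try {
           try {
               leave L;          // the BBJ_LEAVE block
           } finally { F1 }
       } finally { F2 }
       L: ...

   is roughly rewritten into the chain

       BBJ_CALLFINALLY (calls F1) -> BBJ_CALLFINALLY (calls F2) -> BBJ_ALWAYS -> L

   with any pending CORINFO_HELP_ENDCATCH calls appended when the leave also exits
   catch handlers.
 */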
8120
8121 #if !FEATURE_EH_FUNCLETS
8122
8123 void Compiler::impImportLeave(BasicBlock* block)
8124 {
8125 #ifdef DEBUG
8126     if (verbose)
8127     {
8128         printf("\nBefore import CEE_LEAVE:\n");
8129         fgDispBasicBlocks();
8130         fgDispHandlerTab();
8131     }
8132 #endif // DEBUG
8133
8134     bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8135     unsigned    blkAddr         = block->bbCodeOffs;
8136     BasicBlock* leaveTarget     = block->bbJumpDest;
8137     unsigned    jmpAddr         = leaveTarget->bbCodeOffs;
8138
8139     // LEAVE clears the stack: spill any side effects and set the stack depth to 0
8140
8141     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8142     verCurrentState.esStackDepth = 0;
8143
8144     assert(block->bbJumpKind == BBJ_LEAVE);
8145     assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary
8146
8147     BasicBlock* step         = DUMMY_INIT(NULL);
8148     unsigned    encFinallies = 0; // Number of enclosing finallies.
8149     GenTreePtr  endCatches   = NULL;
8150     GenTreePtr  endLFin      = NULL; // The statement tree to indicate the end of locally-invoked finally.
8151
8152     unsigned  XTnum;
8153     EHblkDsc* HBtab;
8154
8155     for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8156     {
8157         // Grab the handler offsets
8158
8159         IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8160         IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8161         IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8162         IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8163
8164         /* Is this a catch-handler we are CEE_LEAVEing out of?
8165          * If so, we need to call CORINFO_HELP_ENDCATCH.
8166          */
8167
8168         if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8169         {
8170             // Can't CEE_LEAVE out of a finally/fault handler
8171             if (HBtab->HasFinallyOrFaultHandler())
8172                 BADCODE("leave out of fault/finally block");
8173
8174             // Create the call to CORINFO_HELP_ENDCATCH
8175             GenTreePtr endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID);
8176
8177             // Make a list of all the currently pending endCatches
8178             if (endCatches)
8179                 endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch);
8180             else
8181                 endCatches = endCatch;
8182
8183 #ifdef DEBUG
8184             if (verbose)
8185             {
8186                 printf("impImportLeave - BB%02u jumping out of catch handler EH#%u, adding call to "
8187                        "CORINFO_HELP_ENDCATCH\n",
8188                        block->bbNum, XTnum);
8189             }
8190 #endif
8191         }
8192         else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8193                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8194         {
8195             /* This is a finally-protected try we are jumping out of */
8196
8197             /* If there are any pending endCatches, and we have already
8198                jumped out of a finally-protected try, then the endCatches
8199                have to be put in a block in an outer try for async
8200                exceptions to work correctly.
8201                Else, just append to the original block */
8202
8203             BasicBlock* callBlock;
8204
8205             assert(!encFinallies == !endLFin); // if we have finallies, we better have an endLFin tree, and vice-versa
8206
8207             if (encFinallies == 0)
8208             {
8209                 assert(step == DUMMY_INIT(NULL));
8210                 callBlock             = block;
8211                 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8212
8213                 if (endCatches)
8214                     impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8215
8216 #ifdef DEBUG
8217                 if (verbose)
8218                 {
8219                     printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY "
8220                            "block BB%02u [%08p]\n",
8221                            callBlock->bbNum, dspPtr(callBlock));
8222                 }
8223 #endif
8224             }
8225             else
8226             {
8227                 assert(step != DUMMY_INIT(NULL));
8228
8229                 /* Calling the finally block */
8230                 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step);
8231                 assert(step->bbJumpKind == BBJ_ALWAYS);
8232                 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8233                                               // finally in the chain)
8234                 step->bbJumpDest->bbRefs++;
8235
8236                 /* The new block will inherit this block's weight */
8237                 callBlock->setBBWeight(block->bbWeight);
8238                 callBlock->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8239
8240 #ifdef DEBUG
8241                 if (verbose)
8242                 {
8243                     printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block BB%02u "
8244                            "[%08p]\n",
8245                            callBlock->bbNum, dspPtr(callBlock));
8246                 }
8247 #endif
8248
8249                 GenTreePtr lastStmt;
8250
8251                 if (endCatches)
8252                 {
8253                     lastStmt         = gtNewStmt(endCatches);
8254                     endLFin->gtNext  = lastStmt;
8255                     lastStmt->gtPrev = endLFin;
8256                 }
8257                 else
8258                 {
8259                     lastStmt = endLFin;
8260                 }
8261
8262                 // note that this sets BBF_IMPORTED on the block
8263                 impEndTreeList(callBlock, endLFin, lastStmt);
8264             }
8265
8266             step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8267             /* The new block will inherit this block's weight */
8268             step->setBBWeight(block->bbWeight);
8269             step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8270
8271 #ifdef DEBUG
8272             if (verbose)
8273             {
8274                 printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block "
8275                        "BB%02u [%08p]\n",
8276                        step->bbNum, dspPtr(step));
8277             }
8278 #endif
8279
8280             unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel;
8281             assert(finallyNesting <= compHndBBtabCount);
8282
8283             callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8284             endLFin               = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting);
8285             endLFin               = gtNewStmt(endLFin);
8286             endCatches            = NULL;
8287
8288             encFinallies++;
8289
8290             invalidatePreds = true;
8291         }
8292     }
8293
8294     /* Append any remaining endCatches, if any */
8295
8296     assert(!encFinallies == !endLFin);
8297
8298     if (encFinallies == 0)
8299     {
8300         assert(step == DUMMY_INIT(NULL));
8301         block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8302
8303         if (endCatches)
8304             impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8305
8306 #ifdef DEBUG
8307         if (verbose)
8308         {
8309             printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS "
8310                    "block BB%02u [%08p]\n",
8311                    block->bbNum, dspPtr(block));
8312         }
8313 #endif
8314     }
8315     else
8316     {
8317         // If leaveTarget is the start of another try block, we want to make sure that
8318         // we do not insert finalStep into that try block. Hence, we find the enclosing
8319         // try block.
8320         unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget);
8321
8322         // Insert a new BB either in the try region indicated by tryIndex or
8323         // the handler region indicated by leaveTarget->bbHndIndex,
8324         // depending on which is the inner region.
8325         BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step);
8326         finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS;
8327         step->bbJumpDest = finalStep;
8328
8329         /* The new block will inherit this block's weight */
8330         finalStep->setBBWeight(block->bbWeight);
8331         finalStep->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8332
8333 #ifdef DEBUG
8334         if (verbose)
8335         {
8336             printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block BB%02u [%08p]\n",
8337                    encFinallies, finalStep->bbNum, dspPtr(finalStep));
8338         }
8339 #endif
8340
8341         GenTreePtr lastStmt;
8342
8343         if (endCatches)
8344         {
8345             lastStmt         = gtNewStmt(endCatches);
8346             endLFin->gtNext  = lastStmt;
8347             lastStmt->gtPrev = endLFin;
8348         }
8349         else
8350         {
8351             lastStmt = endLFin;
8352         }
8353
8354         impEndTreeList(finalStep, endLFin, lastStmt);
8355
8356         finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
8357
8358         // Queue up the jump target for importing
8359
8360         impImportBlockPending(leaveTarget);
8361
8362         invalidatePreds = true;
8363     }
8364
8365     if (invalidatePreds && fgComputePredsDone)
8366     {
8367         JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
8368         fgRemovePreds();
8369     }
8370
8371 #ifdef DEBUG
8372     fgVerifyHandlerTab();
8373
8374     if (verbose)
8375     {
8376         printf("\nAfter import CEE_LEAVE:\n");
8377         fgDispBasicBlocks();
8378         fgDispHandlerTab();
8379     }
8380 #endif // DEBUG
8381 }
8382
8383 #else // FEATURE_EH_FUNCLETS
8384
8385 void Compiler::impImportLeave(BasicBlock* block)
8386 {
8387 #ifdef DEBUG
8388     if (verbose)
8389     {
8390         printf("\nBefore import CEE_LEAVE in BB%02u (targeting BB%02u):\n", block->bbNum, block->bbJumpDest->bbNum);
8391         fgDispBasicBlocks();
8392         fgDispHandlerTab();
8393     }
8394 #endif // DEBUG
8395
8396     bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8397     unsigned    blkAddr         = block->bbCodeOffs;
8398     BasicBlock* leaveTarget     = block->bbJumpDest;
8399     unsigned    jmpAddr         = leaveTarget->bbCodeOffs;
8400
8401     // LEAVE clears the stack: spill any side effects and set the stack depth to 0
8402
8403     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8404     verCurrentState.esStackDepth = 0;
8405
8406     assert(block->bbJumpKind == BBJ_LEAVE);
8407     assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary
8408
8409     BasicBlock* step = nullptr;
8410
8411     enum StepType
8412     {
8413         // No step type; step == NULL.
8414         ST_None,
8415
8416         // Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair?
8417         // That is, is step->bbJumpDest where a finally will return to?
8418         ST_FinallyReturn,
8419
8420         // The step block is a catch return.
8421         ST_Catch,
8422
8423         // The step block is in a "try", created as the target for a finally return or the target for a catch return.
8424         ST_Try
8425     };
8426     StepType stepType = ST_None;
8427
8428     unsigned  XTnum;
8429     EHblkDsc* HBtab;
8430
8431     for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8432     {
8433         // Grab the handler offsets
8434
8435         IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8436         IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8437         IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8438         IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8439
8440         /* Is this a catch-handler we are CEE_LEAVEing out of?
8441          */
8442
8443         if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8444         {
8445             // Can't CEE_LEAVE out of a finally/fault handler
8446             if (HBtab->HasFinallyOrFaultHandler())
8447             {
8448                 BADCODE("leave out of fault/finally block");
8449             }
8450
8451             /* We are jumping out of a catch */
8452
8453             if (step == nullptr)
8454             {
8455                 step             = block;
8456                 step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET
8457                 stepType         = ST_Catch;
8458
8459 #ifdef DEBUG
8460                 if (verbose)
8461                 {
8462                     printf("impImportLeave - jumping out of a catch (EH#%u), convert block BB%02u to BBJ_EHCATCHRET "
8463                            "block\n",
8464                            XTnum, step->bbNum);
8465                 }
8466 #endif
8467             }
8468             else
8469             {
8470                 BasicBlock* exitBlock;
8471
8472                 /* Create a new catch exit block in the catch region for the existing step block to jump to in this
8473                  * scope */
8474                 exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step);
8475
8476                 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
8477                 step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch
8478                                               // exit) returns to this block
8479                 step->bbJumpDest->bbRefs++;
8480
8481 #if defined(_TARGET_ARM_)
8482                 if (stepType == ST_FinallyReturn)
8483                 {
8484                     assert(step->bbJumpKind == BBJ_ALWAYS);
8485                     // Mark the target of a finally return
8486                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8487                 }
8488 #endif // defined(_TARGET_ARM_)
8489
8490                 /* The new block will inherit this block's weight */
8491                 exitBlock->setBBWeight(block->bbWeight);
8492                 exitBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8493
8494                 /* This exit block is the new step */
8495                 step     = exitBlock;
8496                 stepType = ST_Catch;
8497
8498                 invalidatePreds = true;
8499
8500 #ifdef DEBUG
8501                 if (verbose)
8502                 {
8503                     printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block BB%02u\n", XTnum,
8504                            exitBlock->bbNum);
8505                 }
8506 #endif
8507             }
8508         }
8509         else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8510                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8511         {
8512             /* We are jumping out of a finally-protected try */
8513
8514             BasicBlock* callBlock;
8515
8516             if (step == nullptr)
8517             {
8518 #if FEATURE_EH_CALLFINALLY_THUNKS
8519
8520                 // Put the call to the finally in the enclosing region.
8521                 unsigned callFinallyTryIndex =
8522                     (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
8523                 unsigned callFinallyHndIndex =
8524                     (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
8525                 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block);
8526
8527                 // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because
8528                 // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE,
8529                 // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the
8530                 // next block, and flow optimizations will remove it.
8531                 block->bbJumpKind = BBJ_ALWAYS;
8532                 block->bbJumpDest = callBlock;
8533                 block->bbJumpDest->bbRefs++;
8534
8535                 /* The new block will inherit this block's weight */
8536                 callBlock->setBBWeight(block->bbWeight);
8537                 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8538
8539 #ifdef DEBUG
8540                 if (verbose)
8541                 {
8542                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
8543                            "BBJ_ALWAYS, add BBJ_CALLFINALLY block BB%02u\n",
8544                            XTnum, block->bbNum, callBlock->bbNum);
8545                 }
8546 #endif
8547
8548 #else // !FEATURE_EH_CALLFINALLY_THUNKS
8549
8550                 callBlock             = block;
8551                 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8552
8553 #ifdef DEBUG
8554                 if (verbose)
8555                 {
8556                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
8557                            "BBJ_CALLFINALLY block\n",
8558                            XTnum, callBlock->bbNum);
8559                 }
8560 #endif
8561
8562 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
8563             }
8564             else
8565             {
8566                 // Calling the finally block. We already have a step block that is either the call-to-finally from a
8567                 // more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by
8568                 // a 'finally'), or the step block is the return from a catch.
8569                 //
8570                 // Due to ThreadAbortException, we can't have the catch return target the call-to-finally block
8571                 // directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will
8572                 // automatically re-raise the exception, using the return address of the catch (that is, the target
8573                 // block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will
8574                 // refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64,
8575                 // we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a
8576                 // finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a
8577                 // BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly
8578                 // within the 'try' region protected by the finally, since we generate code in such a way that execution
8579                 // never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on
8580                 // stack walks.)
8581
8582                 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
8583
8584 #if FEATURE_EH_CALLFINALLY_THUNKS
8585                 if (step->bbJumpKind == BBJ_EHCATCHRET)
8586                 {
8587                     // Need to create another step block in the 'try' region that will actually branch to the
8588                     // call-to-finally thunk.
8589                     BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
8590                     step->bbJumpDest  = step2;
8591                     step->bbJumpDest->bbRefs++;
8592                     step2->setBBWeight(block->bbWeight);
8593                     step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8594
8595 #ifdef DEBUG
8596                     if (verbose)
8597                     {
8598                         printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is "
8599                                "BBJ_EHCATCHRET (BB%02u), new BBJ_ALWAYS step-step block BB%02u\n",
8600                                XTnum, step->bbNum, step2->bbNum);
8601                     }
8602 #endif
8603
8604                     step = step2;
8605                     assert(stepType == ST_Catch); // Leave it as catch type for now.
8606                 }
8607 #endif // FEATURE_EH_CALLFINALLY_THUNKS
8608
8609 #if FEATURE_EH_CALLFINALLY_THUNKS
8610                 unsigned callFinallyTryIndex =
8611                     (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
8612                 unsigned callFinallyHndIndex =
8613                     (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
8614 #else  // !FEATURE_EH_CALLFINALLY_THUNKS
8615                 unsigned callFinallyTryIndex = XTnum + 1;
8616                 unsigned callFinallyHndIndex = 0; // don't care
8617 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
8618
8619                 callBlock        = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step);
8620                 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8621                                               // finally in the chain)
8622                 step->bbJumpDest->bbRefs++;
8623
8624 #if defined(_TARGET_ARM_)
8625                 if (stepType == ST_FinallyReturn)
8626                 {
8627                     assert(step->bbJumpKind == BBJ_ALWAYS);
8628                     // Mark the target of a finally return
8629                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8630                 }
8631 #endif // defined(_TARGET_ARM_)
8632
8633                 /* The new block will inherit this block's weight */
8634                 callBlock->setBBWeight(block->bbWeight);
8635                 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8636
8637 #ifdef DEBUG
8638                 if (verbose)
8639                 {
8640                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY block "
8641                            "BB%02u\n",
8642                            XTnum, callBlock->bbNum);
8643                 }
8644 #endif
8645             }
8646
8647             step     = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8648             stepType = ST_FinallyReturn;
8649
8650             /* The new block will inherit this block's weight */
8651             step->setBBWeight(block->bbWeight);
8652             step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8653
8654 #ifdef DEBUG
8655             if (verbose)
8656             {
8657                 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) "
8658                        "block BB%02u\n",
8659                        XTnum, step->bbNum);
8660             }
8661 #endif
8662
8663             callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8664
8665             invalidatePreds = true;
8666         }
8667         else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8668                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8669         {
8670             // We are jumping out of a catch-protected try.
8671             //
8672             // If we are returning from a call to a finally, then we must have a step block within a try
8673             // that is protected by a catch. This is so when unwinding from that finally (e.g., if code within the
8674             // finally raises an exception), the VM will find this step block, notice that it is in a protected region,
8675             // and invoke the appropriate catch.
8676             //
8677             // We also need to handle a special case with the handling of ThreadAbortException. If a try/catch
8678             // catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception),
8679             // and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM,
8680             // the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target
8681             // address of the catch return as the new exception address. That is, the re-raised exception appears to
8682             // occur at the catch return address. If this exception return address skips an enclosing try/catch that
8683             // catches ThreadAbortException, then the enclosing try/catch will not catch the exception, as it should.
8684             // For example:
8685             //
8686             // try {
8687             //    try {
8688             //       // something here raises ThreadAbortException
8689             //       LEAVE LABEL_1; // no need to stop at LABEL_2
8690             //    } catch (Exception) {
8691             //       // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so
8692             //       // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode.
8693             //       // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised
8694             //       // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only
8695             //       // need to do this transformation if the current EH block is a try/catch that catches
8696             //       // ThreadAbortException (or one of its parents), however we might not be able to find that
8697             //       // information, so currently we do it for all catch types.
8698             //       LEAVE LABEL_1; // Convert this to LEAVE LABEL2;
8699             //    }
8700             //    LABEL_2: LEAVE LABEL_1; // inserted by this step creation code
8701             // } catch (ThreadAbortException) {
8702             // }
8703             // LABEL_1:
8704             //
8705             // Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C#
8706             // compiler.
8707
8708             if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch))
8709             {
8710                 BasicBlock* catchStep;
8711
8712                 assert(step);
8713
8714                 if (stepType == ST_FinallyReturn)
8715                 {
8716                     assert(step->bbJumpKind == BBJ_ALWAYS);
8717                 }
8718                 else
8719                 {
8720                     assert(stepType == ST_Catch);
8721                     assert(step->bbJumpKind == BBJ_EHCATCHRET);
8722                 }
8723
8724                 /* Create a new exit block in the try region for the existing step block to jump to in this scope */
8725                 catchStep        = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
8726                 step->bbJumpDest = catchStep;
8727                 step->bbJumpDest->bbRefs++;
8728
8729 #if defined(_TARGET_ARM_)
8730                 if (stepType == ST_FinallyReturn)
8731                 {
8732                     // Mark the target of a finally return
8733                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8734                 }
8735 #endif // defined(_TARGET_ARM_)
8736
8737                 /* The new block will inherit this block's weight */
8738                 catchStep->setBBWeight(block->bbWeight);
8739                 catchStep->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8740
8741 #ifdef DEBUG
8742                 if (verbose)
8743                 {
8744                     if (stepType == ST_FinallyReturn)
8745                     {
8746                         printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new "
8747                                "BBJ_ALWAYS block BB%02u\n",
8748                                XTnum, catchStep->bbNum);
8749                     }
8750                     else
8751                     {
8752                         assert(stepType == ST_Catch);
8753                         printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new "
8754                                "BBJ_ALWAYS block BB%02u\n",
8755                                XTnum, catchStep->bbNum);
8756                     }
8757                 }
8758 #endif // DEBUG
8759
8760                 /* This block is the new step */
8761                 step     = catchStep;
8762                 stepType = ST_Try;
8763
8764                 invalidatePreds = true;
8765             }
8766         }
8767     }
8768
8769     if (step == nullptr)
8770     {
8771         block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8772
8773 #ifdef DEBUG
8774         if (verbose)
8775         {
8776             printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE "
8777                    "block BB%02u to BBJ_ALWAYS\n",
8778                    block->bbNum);
8779         }
8780 #endif
8781     }
8782     else
8783     {
8784         step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
8785
8786 #if defined(_TARGET_ARM_)
8787         if (stepType == ST_FinallyReturn)
8788         {
8789             assert(step->bbJumpKind == BBJ_ALWAYS);
8790             // Mark the target of a finally return
8791             step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8792         }
8793 #endif // defined(_TARGET_ARM_)
8794
8795 #ifdef DEBUG
8796         if (verbose)
8797         {
8798             printf("impImportLeave - final destination of step blocks set to BB%02u\n", leaveTarget->bbNum);
8799         }
8800 #endif
8801
8802         // Queue up the jump target for importing
8803
8804         impImportBlockPending(leaveTarget);
8805     }
8806
8807     if (invalidatePreds && fgComputePredsDone)
8808     {
8809         JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
8810         fgRemovePreds();
8811     }
8812
8813 #ifdef DEBUG
8814     fgVerifyHandlerTab();
8815
8816     if (verbose)
8817     {
8818         printf("\nAfter import CEE_LEAVE:\n");
8819         fgDispBasicBlocks();
8820         fgDispHandlerTab();
8821     }
8822 #endif // DEBUG
8823 }
8824
8825 #endif // FEATURE_EH_FUNCLETS
8826
8827 /*****************************************************************************/
8828 // This is called when reimporting a leave block. It resets the JumpKind,
8829 // JumpDest, and bbNext to the original values
8830
8831 void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr)
8832 {
8833 #if FEATURE_EH_FUNCLETS
8834     // With EH Funclets, while importing a leave opcode we create another block ending with BBJ_ALWAYS (call it B1),
8835     // and the block containing the leave (say B0) is marked as BBJ_CALLFINALLY.  If for some reason we reimport B0,
8836     // it is reset (in this routine) to end with BBJ_LEAVE, and further down, when B0 is reimported, we
8837     // create another BBJ_ALWAYS (call it B2). In this process B1 gets orphaned, and any blocks to which B1 is the
8838     // only predecessor are also considered orphans and attempted to be deleted.
8839     //
8840     //  try  {
8841     //     ....
8842     //     try
8843     //     {
8844     //         ....
8845     //         leave OUTSIDE;  // B0 is the block containing this leave, following this would be B1
8846     //     } finally { }
8847     //  } finally { }
8848     //  OUTSIDE:
8849     //
8850     // In the above nested try-finally example, we create a step block (call it Bstep) which branches to a block
8851     // where a finally would branch to (and such block is marked as finally target).  Block B1 branches to step block.
8852     // Because of re-import of B0, Bstep is also orphaned. Since Bstep is a finally target it cannot be removed.  To
8853     // work around this we will duplicate B0 (call it B0Dup) before resetting. B0Dup is marked as BBJ_CALLFINALLY and
8854     // only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block deletion B0Dup and B1
8855     // will be treated as a pair and handled correctly.
8856     if (block->bbJumpKind == BBJ_CALLFINALLY)
8857     {
8858         BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind);
8859         dupBlock->bbFlags    = block->bbFlags;
8860         dupBlock->bbJumpDest = block->bbJumpDest;
8861         dupBlock->copyEHRegion(block);
8862         dupBlock->bbCatchTyp = block->bbCatchTyp;
8863
8864         // Mark this block as
8865         //  a) not referenced by any other block to make sure that it gets deleted
8866         //  b) weight zero
8867         //  c) prevent from being imported
8868         //  d) as internal
8869         //  e) as rarely run
8870         dupBlock->bbRefs   = 0;
8871         dupBlock->bbWeight = 0;
8872         dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY;
8873
8874         // Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS
8875         // will be next to each other.
8876         fgInsertBBafter(block, dupBlock);
8877
8878 #ifdef DEBUG
8879         if (verbose)
8880         {
8881             printf("New Basic Block BB%02u duplicate of BB%02u created.\n", dupBlock->bbNum, block->bbNum);
8882         }
8883 #endif
8884     }
8885 #endif // FEATURE_EH_FUNCLETS
8886
8887     block->bbJumpKind = BBJ_LEAVE;
8888     fgInitBBLookup();
8889     block->bbJumpDest = fgLookupBB(jmpAddr);
8890
8891     // We will leave the BBJ_ALWAYS block we introduced. When it's reimported
8892     // the BBJ_ALWAYS block will be unreachable, and will be removed afterwards. The
8893     // reason we don't want to remove the block at this point is that if we called
8894     // fgInitBBLookup() again we would get it wrong, as the BBJ_ALWAYS block wouldn't be
8895     // added and the linked list length would differ from fgBBcount.
8896 }
8897
8898 /*****************************************************************************/
8899 // Get the first non-prefix opcode. Used for verification of valid combinations
8900 // of prefixes and actual opcodes.
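// For example (illustrative only): given the IL sequence "unaligned. 1 volatile. ldind.i4",
// this skips over both prefixes (including the alignment operand) and returns CEE_LDIND_I4.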
8901
8902 static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp)
8903 {
8904     while (codeAddr < codeEndp)
8905     {
8906         OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
8907         codeAddr += sizeof(__int8);
8908
8909         if (opcode == CEE_PREFIX1)
8910         {
8911             if (codeAddr >= codeEndp)
8912             {
8913                 break;
8914             }
8915             opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
8916             codeAddr += sizeof(__int8);
8917         }
8918
8919         switch (opcode)
8920         {
8921             case CEE_UNALIGNED:
8922             case CEE_VOLATILE:
8923             case CEE_TAILCALL:
8924             case CEE_CONSTRAINED:
8925             case CEE_READONLY:
8926                 break;
8927             default:
8928                 return opcode;
8929         }
8930
8931         codeAddr += opcodeSizes[opcode];
8932     }
8933
8934     return CEE_ILLEGAL;
8935 }
8936
8937 /*****************************************************************************/
8938 // Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes
8939
8940 static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix)
8941 {
8942     OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
8943
8944     if (!(
8945             // The opcodes for all the ldind and stind instructions happen to be contiguous, except stind.i.
8946             ((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) ||
8947             (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) ||
8948             (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) ||
8949             // the volatile. prefix is also allowed with ldsfld and stsfld
8950             (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD)))))
8951     {
8952         BADCODE("Invalid opcode for unaligned. or volatile. prefix");
8953     }
8954 }
8955
8956 /*****************************************************************************/
8957
8958 #ifdef DEBUG
8959
8960 #undef RETURN // undef contracts RETURN macro
8961
8962 enum controlFlow_t
8963 {
8964     NEXT,
8965     CALL,
8966     RETURN,
8967     THROW,
8968     BRANCH,
8969     COND_BRANCH,
8970     BREAK,
8971     PHI,
8972     META,
8973 };
8974
8975 const static controlFlow_t controlFlow[] = {
8976 #define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow,
8977 #include "opcode.def"
8978 #undef OPDEF
8979 };
8980
8981 #endif // DEBUG
8982
8983 /*****************************************************************************
8984  *  Determine the result type of an arithmetic operation
8985  *  On 64-bit targets, inserts upcasts when native int is mixed with int32
8986  */
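// For orientation only (the code below is authoritative), the results computed here are roughly:
//
//     byref  - byref                => native int
//     int    - byref                => native int
//     byref  - [native] int         => byref
//     byref  + [native] int         => byref (and the commuted form)
//     native int mixed with int32   => native int (an upcast is inserted on 64-bit targets)
//     float mixed with double       => double
//     otherwise                     => the actual type of op1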
8987 var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTreePtr* pOp1, GenTreePtr* pOp2)
8988 {
8989     var_types  type = TYP_UNDEF;
8990     GenTreePtr op1 = *pOp1, op2 = *pOp2;
8991
8992     // Arithmetic operations are generally only allowed with
8993     // primitive types, but certain operations are allowed
8994     // with byrefs
8995
8996     if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
8997     {
8998         if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF))
8999         {
9000             // byref1-byref2 => gives a native int
9001             type = TYP_I_IMPL;
9002         }
9003         else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9004         {
9005             // [native] int - byref => gives a native int
9006
9007             //
9008             // The reason is that it is possible, in managed C++,
9009             // to have a tree like this:
9010             //
9011             //              -
9012             //             / \
9013             //            /   \
9014             //           /     \
9015             //          /       \
9016             // const(h) int     addr byref
9017             //
9018             // <BUGNUM> VSW 318822 </BUGNUM>
9019             //
9020             // So here we decide to make the resulting type to be a native int.
9021             CLANG_FORMAT_COMMENT_ANCHOR;
9022
9023 #ifdef _TARGET_64BIT_
9024             if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9025             {
9026                 // insert an explicit upcast
9027                 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9028             }
9029 #endif // _TARGET_64BIT_
9030
9031             type = TYP_I_IMPL;
9032         }
9033         else
9034         {
9035             // byref - [native] int => gives a byref
9036             assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet()));
9037
9038 #ifdef _TARGET_64BIT_
9039             if ((genActualType(op2->TypeGet()) != TYP_I_IMPL))
9040             {
9041                 // insert an explicit upcast
9042                 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9043             }
9044 #endif // _TARGET_64BIT_
9045
9046             type = TYP_BYREF;
9047         }
9048     }
9049     else if ((oper == GT_ADD) &&
9050              (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9051     {
9052         // byref + [native] int => gives a byref
9053         // (or)
9054         // [native] int + byref => gives a byref
9055
9056         // only one can be a byref : byref op byref not allowed
9057         assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF);
9058         assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet()));
9059
9060 #ifdef _TARGET_64BIT_
9061         if (genActualType(op2->TypeGet()) == TYP_BYREF)
9062         {
9063             if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9064             {
9065                 // insert an explicit upcast
9066                 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9067             }
9068         }
9069         else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9070         {
9071             // insert an explicit upcast
9072             op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9073         }
9074 #endif // _TARGET_64BIT_
9075
9076         type = TYP_BYREF;
9077     }
9078 #ifdef _TARGET_64BIT_
9079     else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL)
9080     {
9081         assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9082
9083         // int + long => gives long
9084         // long + int => gives long
9085         // we get this because in the IL this 'long' isn't an Int64, it's just a native int (IntPtr)
9086
9087         if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9088         {
9089             // insert an explicit upcast
9090             op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9091         }
9092         else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9093         {
9094             // insert an explicit upcast
9095             op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9096         }
9097
9098         type = TYP_I_IMPL;
9099     }
9100 #else  // 32-bit TARGET
9101     else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG)
9102     {
9103         assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9104
9105         // int + long => gives long
9106         // long + int => gives long
9107
9108         type = TYP_LONG;
9109     }
9110 #endif // _TARGET_64BIT_
9111     else
9112     {
9113         // int + int => gives an int
9114         assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF);
9115
9116         assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
9117               (varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType)));
9118
9119         type = genActualType(op1->gtType);
9120
9121 #if FEATURE_X87_DOUBLES
9122
9123         // For x87, since we only have 1 size of registers, prefer double
9124         // For everybody else, be more precise
9125         if (type == TYP_FLOAT)
9126             type = TYP_DOUBLE;
9127
9128 #else // !FEATURE_X87_DOUBLES
9129
9130         // If both operands are TYP_FLOAT, then leave it as TYP_FLOAT.
9131         // Otherwise, turn floats into doubles
9132         if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT))
9133         {
9134             assert(genActualType(op2->gtType) == TYP_DOUBLE);
9135             type = TYP_DOUBLE;
9136         }
9137
9138 #endif // FEATURE_X87_DOUBLES
9139     }
9140
9141 #if FEATURE_X87_DOUBLES
9142     assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_LONG || type == TYP_INT);
9143 #else  // FEATURE_X87_DOUBLES
9144     assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT);
9145 #endif // FEATURE_X87_DOUBLES
9146
9147     return type;
9148 }
9149
9150 /*****************************************************************************
9151  * Casting Helper Function to service both CEE_CASTCLASS and CEE_ISINST
9152  *
9153  * typeRef contains the token, op1 contains the value being cast,
9154  * and op2 contains code that creates the type handle corresponding to typeRef.
9155  * isCastClass = true means CEE_CASTCLASS, false means CEE_ISINST
9156  */
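// For orientation only, the inline expansion built below behaves roughly like the following
// pseudo-code (the actual trees are QMARK/COLON nodes; "*op1" denotes the method table load):
//
//     result = (op1 == null) ? op1
//            : (*op1 != op2) ? (isCastClass ? CORINFO_HELP_CHKCASTCLASS_SPECIAL(op2, op1) : null)
//                            : op1;
//
// When we don't expand inline, we simply call the casting helper returned by getCastingHelper.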
9157 GenTreePtr Compiler::impCastClassOrIsInstToTree(GenTreePtr              op1,
9158                                                 GenTreePtr              op2,
9159                                                 CORINFO_RESOLVED_TOKEN* pResolvedToken,
9160                                                 bool                    isCastClass)
9161 {
9162     bool expandInline;
9163
9164     assert(op1->TypeGet() == TYP_REF);
9165
9166     CorInfoHelpFunc helper = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass);
9167
9168     if (isCastClass)
9169     {
9170         // We only want to expand inline the normal CHKCASTCLASS helper;
9171         expandInline = (helper == CORINFO_HELP_CHKCASTCLASS);
9172     }
9173     else
9174     {
9175         if (helper == CORINFO_HELP_ISINSTANCEOFCLASS)
9176         {
9177             // Get the Class Handle and class attributes for the type we are casting to
9178             //
9179             DWORD flags = info.compCompHnd->getClassAttribs(pResolvedToken->hClass);
9180
9181             //
9182             // If the class handle is marked as final we can also expand the IsInst check inline
9183             //
9184             expandInline = ((flags & CORINFO_FLG_FINAL) != 0);
9185
9186             //
9187             // But don't expand inline these two cases
9188             //
9189             if (flags & CORINFO_FLG_MARSHAL_BYREF)
9190             {
9191                 expandInline = false;
9192             }
9193             else if (flags & CORINFO_FLG_CONTEXTFUL)
9194             {
9195                 expandInline = false;
9196             }
9197         }
9198         else
9199         {
9200             //
9201             // We can't expand inline any other helpers
9202             //
9203             expandInline = false;
9204         }
9205     }
9206
9207     if (expandInline)
9208     {
9209         if (compCurBB->isRunRarely())
9210         {
9211             expandInline = false; // not worth the code expansion in a rarely run block
9212         }
9213
9214         if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals())
9215         {
9216             expandInline = false; // not worth creating an untracked local variable
9217         }
9218     }
9219
9220     if (!expandInline)
9221     {
9222         // If we CSE this class handle we prevent assertionProp from making SubType assertions
9223         // so instead we force the CSE logic to not consider CSE-ing this class handle.
9224         //
9225         op2->gtFlags |= GTF_DONT_CSE;
9226
9227         return gtNewHelperCallNode(helper, TYP_REF, 0, gtNewArgList(op2, op1));
9228     }
9229
9230     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2"));
9231
9232     GenTreePtr temp;
9233     GenTreePtr condMT;
9234     //
9235     // expand the methodtable match:
9236     //
9237     //  condMT ==>   GT_NE
9238     //               /    \
9239     //           GT_IND   op2 (typically CNS_INT)
9240     //              |
9241     //           op1Copy
9242     //
9243
9244     // This can replace op1 with a GT_COMMA that evaluates op1 into a local
9245     //
9246     op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1"));
9247     //
9248     // op1 is now known to be a non-complex tree
9249     // thus we can use gtClone(op1) from now on
9250     //
9251
9252     GenTreePtr op2Var = op2;
9253     if (isCastClass)
9254     {
9255         op2Var                                                  = fgInsertCommaFormTemp(&op2);
9256         lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true;
9257     }
9258     temp = gtNewOperNode(GT_IND, TYP_I_IMPL, temp);
9259     temp->gtFlags |= GTF_EXCEPT;
9260     condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2);
9261
9262     GenTreePtr condNull;
9263     //
9264     // expand the null check:
9265     //
9266     //  condNull ==>   GT_EQ
9267     //                 /    \
9268     //             op1Copy CNS_INT
9269     //                      null
9270     //
9271     condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF));
9272
9273     //
9274     // expand the true and false trees for the condMT
9275     //
9276     GenTreePtr condFalse = gtClone(op1);
9277     GenTreePtr condTrue;
9278     if (isCastClass)
9279     {
9280         //
9281         // use the special helper that skips the cases checked by our inlined cast
9282         //
9283         helper = CORINFO_HELP_CHKCASTCLASS_SPECIAL;
9284
9285         condTrue = gtNewHelperCallNode(helper, TYP_REF, 0, gtNewArgList(op2Var, gtClone(op1)));
9286     }
9287     else
9288     {
9289         condTrue = gtNewIconNode(0, TYP_REF);
9290     }
9291
9292 #define USE_QMARK_TREES
9293
9294 #ifdef USE_QMARK_TREES
9295     GenTreePtr qmarkMT;
9296     //
9297     // Generate first QMARK - COLON tree
9298     //
9299     //  qmarkMT ==>   GT_QMARK
9300     //                 /     \
9301     //            condMT   GT_COLON
9302     //                      /     \
9303     //                condFalse  condTrue
9304     //
9305     temp    = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse);
9306     qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp);
9307     condMT->gtFlags |= GTF_RELOP_QMARK;
9308
9309     GenTreePtr qmarkNull;
9310     //
9311     // Generate second QMARK - COLON tree
9312     //
9313     //  qmarkNull ==>  GT_QMARK
9314     //                 /     \
9315     //           condNull  GT_COLON
9316     //                      /     \
9317     //                qmarkMT   op1Copy
9318     //
9319     temp      = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT);
9320     qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp);
9321     qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF;
9322     condNull->gtFlags |= GTF_RELOP_QMARK;
9323
9324     // Make QMark node a top level node by spilling it.
9325     unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2"));
9326     impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE);
9327     return gtNewLclvNode(tmp, TYP_REF);
9328 #endif
9329 }
9330
9331 #ifndef DEBUG
9332 #define assertImp(cond) ((void)0)
9333 #else
9334 #define assertImp(cond)                                                                                                \
9335     do                                                                                                                 \
9336     {                                                                                                                  \
9337         if (!(cond))                                                                                                   \
9338         {                                                                                                              \
9339             const int cchAssertImpBuf = 600;                                                                           \
9340             char*     assertImpBuf    = (char*)alloca(cchAssertImpBuf);                                                \
9341             _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1,                                            \
9342                         "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond,         \
9343                         impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL",                      \
9344                         op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth);                     \
9345             assertAbort(assertImpBuf, __FILE__, __LINE__);                                                             \
9346         }                                                                                                              \
9347     } while (0)
9348 #endif // DEBUG
9349
9350 #ifdef _PREFAST_
9351 #pragma warning(push)
9352 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
9353 #endif
9354 /*****************************************************************************
9355  *  Import the instr for the given basic block
9356  */
9357 void Compiler::impImportBlockCode(BasicBlock* block)
9358 {
9359 #define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind)
9360
9361 #ifdef DEBUG
9362
9363     if (verbose)
9364     {
9365         printf("\nImporting BB%02u (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName);
9366     }
9367 #endif
9368
9369     unsigned  nxtStmtIndex = impInitBlockLineInfo();
9370     IL_OFFSET nxtStmtOffs;
9371
9372     GenTreePtr                   arrayNodeFrom, arrayNodeTo, arrayNodeToIndex;
9373     bool                         expandInline;
9374     CorInfoHelpFunc              helper;
9375     CorInfoIsAccessAllowedResult accessAllowedResult;
9376     CORINFO_HELPER_DESC          calloutHelper;
9377     const BYTE*                  lastLoadToken = nullptr;
9378
9379     // reject cyclic constraints
9380     if (tiVerificationNeeded)
9381     {
9382         Verify(!info.hasCircularClassConstraints, "Method parent has circular class type parameter constraints.");
9383         Verify(!info.hasCircularMethodConstraints, "Method has circular method type parameter constraints.");
9384     }
9385
9386     /* Get the tree list started */
9387
9388     impBeginTreeList();
9389
9390     /* Walk the opcodes that comprise the basic block */
9391
9392     const BYTE* codeAddr = info.compCode + block->bbCodeOffs;
9393     const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd;
9394
9395     IL_OFFSET opcodeOffs    = block->bbCodeOffs;
9396     IL_OFFSET lastSpillOffs = opcodeOffs;
9397
9398     signed jmpDist;
9399
9400     /* remember the start of the delegate creation sequence (used for verification) */
9401     const BYTE* delegateCreateStart = nullptr;
9402
9403     int  prefixFlags = 0;
9404     bool explicitTailCall, constraintCall, readonlyCall;
9405
9406     bool     insertLdloc = false; // set by CEE_DUP and cleared by following store
9407     typeInfo tiRetVal;
9408
9409     unsigned numArgs = info.compArgsCount;
9410
9411     /* Now process all the opcodes in the block */
9412
9413     var_types callTyp    = TYP_COUNT;
9414     OPCODE    prevOpcode = CEE_ILLEGAL;
9415
9416     if (block->bbCatchTyp)
9417     {
9418         if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
9419         {
9420             impCurStmtOffsSet(block->bbCodeOffs);
9421         }
9422
9423         // We will spill the GT_CATCH_ARG and the input of the BB_QMARK block
9424         // to a temp. This is a trade-off for code simplicity
9425         impSpillSpecialSideEff();
9426     }
9427
9428     while (codeAddr < codeEndp)
9429     {
9430         bool                   usingReadyToRunHelper = false;
9431         CORINFO_RESOLVED_TOKEN resolvedToken;
9432         CORINFO_RESOLVED_TOKEN constrainedResolvedToken;
9433         CORINFO_CALL_INFO      callInfo;
9434         CORINFO_FIELD_INFO     fieldInfo;
9435
9436         tiRetVal = typeInfo(); // Default type info
9437
9438         //---------------------------------------------------------------------
9439
9440         /* We need to restrict the max tree depth as many of the Compiler
9441            functions are recursive. We do this by spilling the stack */
9442
9443         if (verCurrentState.esStackDepth)
9444         {
9445             /* Has it been a while since we last saw an empty stack (which
9446                would guarantee that the tree depth isn't accumulating)? */
9447
9448             if ((opcodeOffs - lastSpillOffs) > 200)
9449             {
9450                 impSpillStackEnsure();
9451                 lastSpillOffs = opcodeOffs;
9452             }
9453         }
9454         else
9455         {
9456             lastSpillOffs   = opcodeOffs;
9457             impBoxTempInUse = false; // nothing on the stack, box temp OK to use again
9458         }
9459
9460         /* Compute the current instr offset */
9461
9462         opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
9463
9464 #ifndef DEBUG
9465         if (opts.compDbgInfo)
9466 #endif
9467         {
9468             if (!compIsForInlining())
9469             {
9470                 nxtStmtOffs =
9471                     (nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET;
9472
9473                 /* Have we reached the next stmt boundary ? */
9474
9475                 if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs)
9476                 {
9477                     assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]);
9478
9479                     if (verCurrentState.esStackDepth != 0 && opts.compDbgCode)
9480                     {
9481                         /* We need to provide accurate IP-mapping at this point.
9482                            So spill anything on the stack so that it will form
9483                            gtStmts with the correct stmt offset noted */
9484
9485                         impSpillStackEnsure(true);
9486                     }
9487
9488                     // Has impCurStmtOffs been reported in any tree?
9489
9490                     if (impCurStmtOffs != BAD_IL_OFFSET && opts.compDbgCode)
9491                     {
9492                         GenTreePtr placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
9493                         impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9494
9495                         assert(impCurStmtOffs == BAD_IL_OFFSET);
9496                     }
9497
9498                     if (impCurStmtOffs == BAD_IL_OFFSET)
9499                     {
9500                         /* Make sure that nxtStmtIndex is in sync with opcodeOffs.
9501                            If opcodeOffs has gone past nxtStmtIndex, catch up */
9502
9503                         while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount &&
9504                                info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs)
9505                         {
9506                             nxtStmtIndex++;
9507                         }
9508
9509                         /* Go to the new stmt */
9510
9511                         impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]);
9512
9513                         /* Update the stmt boundary index */
9514
9515                         nxtStmtIndex++;
9516                         assert(nxtStmtIndex <= info.compStmtOffsetsCount);
9517
9518                         /* Are there any more line# entries after this one? */
9519
9520                         if (nxtStmtIndex < info.compStmtOffsetsCount)
9521                         {
9522                             /* Remember where the next line# starts */
9523
9524                             nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex];
9525                         }
9526                         else
9527                         {
9528                             /* No more line# entries */
9529
9530                             nxtStmtOffs = BAD_IL_OFFSET;
9531                         }
9532                     }
9533                 }
9534                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) &&
9535                          (verCurrentState.esStackDepth == 0))
9536                 {
9537                     /* At stack-empty locations, we have already added the tree to
9538                        the stmt list with the last offset. We just need to update
9539                        impCurStmtOffs
9540                      */
9541
9542                     impCurStmtOffsSet(opcodeOffs);
9543                 }
9544                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) &&
9545                          impOpcodeIsCallSiteBoundary(prevOpcode))
9546                 {
9547                     /* Make sure we have a type cached */
9548                     assert(callTyp != TYP_COUNT);
9549
9550                     if (callTyp == TYP_VOID)
9551                     {
9552                         impCurStmtOffsSet(opcodeOffs);
9553                     }
9554                     else if (opts.compDbgCode)
9555                     {
9556                         impSpillStackEnsure(true);
9557                         impCurStmtOffsSet(opcodeOffs);
9558                     }
9559                 }
9560                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP))
9561                 {
9562                     if (opts.compDbgCode)
9563                     {
9564                         impSpillStackEnsure(true);
9565                     }
9566
9567                     impCurStmtOffsSet(opcodeOffs);
9568                 }
9569
9570                 assert(impCurStmtOffs == BAD_IL_OFFSET || nxtStmtOffs == BAD_IL_OFFSET ||
9571                        jitGetILoffs(impCurStmtOffs) <= nxtStmtOffs);
9572             }
9573         }
9574
9575         CORINFO_CLASS_HANDLE clsHnd       = DUMMY_INIT(NULL);
9576         CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL);
9577         CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL);
9578
9579         var_types       lclTyp, ovflType = TYP_UNKNOWN;
9580         GenTreePtr      op1           = DUMMY_INIT(NULL);
9581         GenTreePtr      op2           = DUMMY_INIT(NULL);
9582         GenTreeArgList* args          = nullptr; // What good do these "DUMMY_INIT"s do?
9583         GenTreePtr      newObjThisPtr = DUMMY_INIT(NULL);
9584         bool            uns           = DUMMY_INIT(false);
9585
9586         /* Get the next opcode and the size of its parameters */
9587
9588         OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
9589         codeAddr += sizeof(__int8);
9590
9591 #ifdef DEBUG
9592         impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
9593         JITDUMP("\n    [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs);
9594 #endif
9595
9596     DECODE_OPCODE:
9597
9598         // Return if any previous code has caused inline to fail.
9599         if (compDonotInline())
9600         {
9601             return;
9602         }
9603
9604         /* Get the size of additional parameters */
9605
9606         signed int sz = opcodeSizes[opcode];
9607
9608 #ifdef DEBUG
9609         clsHnd  = NO_CLASS_HANDLE;
9610         lclTyp  = TYP_COUNT;
9611         callTyp = TYP_COUNT;
9612
9613         impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
9614         impCurOpcName = opcodeNames[opcode];
9615
9616         if (verbose && (opcode != CEE_PREFIX1))
9617         {
9618             printf("%s", impCurOpcName);
9619         }
9620
9621         /* Use assertImp() to display the opcode */
9622
9623         op1 = op2 = nullptr;
9624 #endif
9625
9626         /* See what kind of an opcode we have, then */
9627
9628         unsigned mflags   = 0;
9629         unsigned clsFlags = 0;
9630
9631         switch (opcode)
9632         {
9633             unsigned  lclNum;
9634             var_types type;
9635
9636             GenTreePtr op3;
9637             genTreeOps oper;
9638             unsigned   size;
9639
9640             int val;
9641
9642             CORINFO_SIG_INFO     sig;
9643             unsigned             flags;
9644             IL_OFFSET            jmpAddr;
9645             bool                 ovfl, unordered, callNode;
9646             bool                 ldstruct;
9647             CORINFO_CLASS_HANDLE tokenType;
9648
9649             union {
9650                 int     intVal;
9651                 float   fltVal;
9652                 __int64 lngVal;
9653                 double  dblVal;
9654             } cval;
9655
9656             case CEE_PREFIX1:
9657                 opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
9658                 codeAddr += sizeof(__int8);
9659                 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
9660                 goto DECODE_OPCODE;
9661
9662             SPILL_APPEND:
9663
9664                 // We need to call impSpillLclRefs() for a struct type lclVar.
9665                 // This is done for non-block assignments in the handling of stloc.
9666                 if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtOp.gtOp1) &&
9667                     (op1->gtOp.gtOp1->gtOper == GT_LCL_VAR))
9668                 {
9669                     impSpillLclRefs(op1->gtOp.gtOp1->AsLclVarCommon()->gtLclNum);
9670                 }
9671
9672                 /* Append 'op1' to the list of statements */
9673                 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
9674                 goto DONE_APPEND;
9675
9676             APPEND:
9677
9678                 /* Append 'op1' to the list of statements */
9679
9680                 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9681                 goto DONE_APPEND;
9682
9683             DONE_APPEND:
9684
9685 #ifdef DEBUG
9686                 // Remember at which BC offset the tree was finished
9687                 impNoteLastILoffs();
9688 #endif
9689                 break;
9690
9691             case CEE_LDNULL:
9692                 impPushNullObjRefOnStack();
9693                 break;
9694
9695             case CEE_LDC_I4_M1:
9696             case CEE_LDC_I4_0:
9697             case CEE_LDC_I4_1:
9698             case CEE_LDC_I4_2:
9699             case CEE_LDC_I4_3:
9700             case CEE_LDC_I4_4:
9701             case CEE_LDC_I4_5:
9702             case CEE_LDC_I4_6:
9703             case CEE_LDC_I4_7:
9704             case CEE_LDC_I4_8:
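                // Note: these short-form opcodes are contiguous in the opcode enumeration,
                // so the constant is simply the distance from CEE_LDC_I4_0; CEE_LDC_I4_M1
                // immediately precedes it and therefore yields -1 (see the assert below).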
9705                 cval.intVal = (opcode - CEE_LDC_I4_0);
9706                 assert(-1 <= cval.intVal && cval.intVal <= 8);
9707                 goto PUSH_I4CON;
9708
9709             case CEE_LDC_I4_S:
9710                 cval.intVal = getI1LittleEndian(codeAddr);
9711                 goto PUSH_I4CON;
9712             case CEE_LDC_I4:
9713                 cval.intVal = getI4LittleEndian(codeAddr);
9714                 goto PUSH_I4CON;
9715             PUSH_I4CON:
9716                 JITDUMP(" %d", cval.intVal);
9717                 impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT));
9718                 break;
9719
9720             case CEE_LDC_I8:
9721                 cval.lngVal = getI8LittleEndian(codeAddr);
9722                 JITDUMP(" 0x%016llx", cval.lngVal);
9723                 impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG));
9724                 break;
9725
9726             case CEE_LDC_R8:
9727                 cval.dblVal = getR8LittleEndian(codeAddr);
9728                 JITDUMP(" %#.17g", cval.dblVal);
9729                 impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE));
9730                 break;
9731
9732             case CEE_LDC_R4:
9733                 cval.dblVal = getR4LittleEndian(codeAddr);
9734                 JITDUMP(" %#.17g", cval.dblVal);
9735                 {
9736                     GenTreePtr cnsOp = gtNewDconNode(cval.dblVal);
9737 #if !FEATURE_X87_DOUBLES
9738                     // The x87 FP stack doesn't differentiate between float and double,
9739                     // so on x87 targets R4 is treated as R8; every other target keeps the constant as TYP_FLOAT.
9740                     cnsOp->gtType = TYP_FLOAT;
9741 #endif // FEATURE_X87_DOUBLES
9742                     impPushOnStack(cnsOp, typeInfo(TI_DOUBLE));
9743                 }
9744                 break;
9745
9746             case CEE_LDSTR:
9747
9748                 if (compIsForInlining())
9749                 {
9750                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_NO_CALLEE_LDSTR)
9751                     {
9752                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_LDSTR_RESTRICTION);
9753                         return;
9754                     }
9755                 }
9756
9757                 val = getU4LittleEndian(codeAddr);
9758                 JITDUMP(" %08X", val);
9759                 if (tiVerificationNeeded)
9760                 {
9761                     Verify(info.compCompHnd->isValidStringRef(info.compScopeHnd, val), "bad string");
9762                     tiRetVal = typeInfo(TI_REF, impGetStringClass());
9763                 }
9764                 impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal);
9765
9766                 break;
9767
9768             case CEE_LDARG:
9769                 lclNum = getU2LittleEndian(codeAddr);
9770                 JITDUMP(" %u", lclNum);
9771                 impLoadArg(lclNum, opcodeOffs + sz + 1);
9772                 break;
9773
9774             case CEE_LDARG_S:
9775                 lclNum = getU1LittleEndian(codeAddr);
9776                 JITDUMP(" %u", lclNum);
9777                 impLoadArg(lclNum, opcodeOffs + sz + 1);
9778                 break;
9779
9780             case CEE_LDARG_0:
9781             case CEE_LDARG_1:
9782             case CEE_LDARG_2:
9783             case CEE_LDARG_3:
9784                 lclNum = (opcode - CEE_LDARG_0);
9785                 assert(lclNum >= 0 && lclNum < 4);
9786                 impLoadArg(lclNum, opcodeOffs + sz + 1);
9787                 break;
9788
9789             case CEE_LDLOC:
9790                 lclNum = getU2LittleEndian(codeAddr);
9791                 JITDUMP(" %u", lclNum);
9792                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9793                 break;
9794
9795             case CEE_LDLOC_S:
9796                 lclNum = getU1LittleEndian(codeAddr);
9797                 JITDUMP(" %u", lclNum);
9798                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9799                 break;
9800
9801             case CEE_LDLOC_0:
9802             case CEE_LDLOC_1:
9803             case CEE_LDLOC_2:
9804             case CEE_LDLOC_3:
9805                 lclNum = (opcode - CEE_LDLOC_0);
9806                 assert(lclNum >= 0 && lclNum < 4);
9807                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9808                 break;
9809
9810             case CEE_STARG:
9811                 lclNum = getU2LittleEndian(codeAddr);
9812                 goto STARG;
9813
9814             case CEE_STARG_S:
9815                 lclNum = getU1LittleEndian(codeAddr);
9816             STARG:
9817                 JITDUMP(" %u", lclNum);
9818
9819                 if (tiVerificationNeeded)
9820                 {
9821                     Verify(lclNum < info.compILargsCount, "bad arg num");
9822                 }
9823
9824                 if (compIsForInlining())
9825                 {
9826                     op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
9827                     noway_assert(op1->gtOper == GT_LCL_VAR);
9828                     lclNum = op1->AsLclVar()->gtLclNum;
9829
9830                     goto VAR_ST_VALID;
9831                 }
9832
9833                 lclNum = compMapILargNum(lclNum); // account for possible hidden param
9834                 assertImp(lclNum < numArgs);
9835
9836                 if (lclNum == info.compThisArg)
9837                 {
9838                     lclNum = lvaArg0Var;
9839                 }
9840                 lvaTable[lclNum].lvArgWrite = 1;
9841
9842                 if (tiVerificationNeeded)
9843                 {
9844                     typeInfo& tiLclVar = lvaTable[lclNum].lvVerTypeInfo;
9845                     Verify(tiCompatibleWith(impStackTop().seTypeInfo, NormaliseForStack(tiLclVar), true),
9846                            "type mismatch");
9847
9848                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
9849                     {
9850                         Verify(!tiLclVar.IsThisPtr(), "storing to uninit this ptr");
9851                     }
9852                 }
9853
9854                 goto VAR_ST;
9855
9856             case CEE_STLOC:
9857                 lclNum = getU2LittleEndian(codeAddr);
9858                 JITDUMP(" %u", lclNum);
9859                 goto LOC_ST;
9860
9861             case CEE_STLOC_S:
9862                 lclNum = getU1LittleEndian(codeAddr);
9863                 JITDUMP(" %u", lclNum);
9864                 goto LOC_ST;
9865
9866             case CEE_STLOC_0:
9867             case CEE_STLOC_1:
9868             case CEE_STLOC_2:
9869             case CEE_STLOC_3:
9870                 lclNum = (opcode - CEE_STLOC_0);
9871                 assert(lclNum >= 0 && lclNum < 4);
9872
9873             LOC_ST:
9874                 if (tiVerificationNeeded)
9875                 {
9876                     Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
9877                     Verify(tiCompatibleWith(impStackTop().seTypeInfo,
9878                                             NormaliseForStack(lvaTable[lclNum + numArgs].lvVerTypeInfo), true),
9879                            "type mismatch");
9880                 }
9881
9882                 if (compIsForInlining())
9883                 {
9884                     lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
9885
9886                     /* Have we allocated a temp for this local? */
9887
9888                     lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp"));
9889
9890                     goto _PopValue;
9891                 }
9892
9893                 lclNum += numArgs;
9894
9895             VAR_ST:
9896
9897                 if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var)
9898                 {
9899                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
9900                     BADCODE("Bad IL");
9901                 }
9902
9903             VAR_ST_VALID:
9904
9905                 /* if it is a struct assignment, make certain we don't overflow the buffer */
9906                 assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd));
9907
9908                 if (lvaTable[lclNum].lvNormalizeOnLoad())
9909                 {
9910                     lclTyp = lvaGetRealType(lclNum);
9911                 }
9912                 else
9913                 {
9914                     lclTyp = lvaGetActualType(lclNum);
9915                 }
9916
9917             _PopValue:
9918                 /* Pop the value being assigned */
9919
9920                 {
9921                     StackEntry se = impPopStack(clsHnd);
9922                     op1           = se.val;
9923                     tiRetVal      = se.seTypeInfo;
9924                 }
9925
9926 #ifdef FEATURE_SIMD
9927                 if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet()))
9928                 {
9929                     assert(op1->TypeGet() == TYP_STRUCT);
9930                     op1->gtType = lclTyp;
9931                 }
9932 #endif // FEATURE_SIMD
9933
9934                 op1 = impImplicitIorI4Cast(op1, lclTyp);
9935
9936 #ifdef _TARGET_64BIT_
9937                 // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
9938                 if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT))
9939                 {
9940                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
9941                     op1 = gtNewCastNode(TYP_INT, op1, TYP_INT);
9942                 }
9943 #endif // _TARGET_64BIT_
9944
9945                 // We had better assign it a value of the correct type
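                // (Allowed, in order: an exact actual-type match; "&var" or a ref/byref stored
                //  to a native-int local; a native int stored to a byref local; float/double
                //  interchange; and a ref stored to a byref local.)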
9946                 assertImp(
9947                     genActualType(lclTyp) == genActualType(op1->gtType) ||
9948                     genActualType(lclTyp) == TYP_I_IMPL && op1->IsVarAddr() ||
9949                     (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) ||
9950                     (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) ||
9951                     (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) ||
9952                     ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF));
9953
9954                 /* If op1 is "&var" then its type is the transient "*" and it can
9955                    be used either as TYP_BYREF or TYP_I_IMPL */
9956
9957                 if (op1->IsVarAddr())
9958                 {
9959                     assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF);
9960
9961                     /* When "&var" is created, we assume it is a byref. If it is
9962                        being assigned to a TYP_I_IMPL var, change the type to
9963                        prevent unnecessary GC info */
9964
9965                     if (genActualType(lclTyp) == TYP_I_IMPL)
9966                     {
9967                         op1->gtType = TYP_I_IMPL;
9968                     }
9969                 }
9970
9971                 /* Filter out simple assignments to itself */
9972
9973                 if (op1->gtOper == GT_LCL_VAR && lclNum == op1->gtLclVarCommon.gtLclNum)
9974                 {
9975                     if (insertLdloc)
9976                     {
9977                         // This is a sequence of (ldloc, dup, stloc).  Can simplify
9978                         // to (ldloc, stloc).  Goto LDVAR to reconstruct the ldloc node.
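                        // Illustrative IL:
                        //     ldloc.0
                        //     dup
                        //     stloc.0
                        // The store writes the local's own value back, so we drop the
                        // assignment and simply reload the local for the dup'ed value.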
9979                         CLANG_FORMAT_COMMENT_ANCHOR;
9980
9981 #ifdef DEBUG
9982                         if (tiVerificationNeeded)
9983                         {
9984                             assert(
9985                                 typeInfo::AreEquivalent(tiRetVal, NormaliseForStack(lvaTable[lclNum].lvVerTypeInfo)));
9986                         }
9987 #endif
9988
9989                         op1         = nullptr;
9990                         insertLdloc = false;
9991
9992                         impLoadVar(lclNum, opcodeOffs + sz + 1);
9993                         break;
9994                     }
9995                     else if (opts.compDbgCode)
9996                     {
9997                         op1 = gtNewNothingNode();
9998                         goto SPILL_APPEND;
9999                     }
10000                     else
10001                     {
10002                         break;
10003                     }
10004                 }
10005
10006                 /* Create the assignment node */
10007
10008                 op2 = gtNewLclvNode(lclNum, lclTyp, opcodeOffs + sz + 1);
10009
10010                 /* If the local is aliased, we need to spill calls and
10011                    indirections from the stack. */
10012
10013                 if ((lvaTable[lclNum].lvAddrExposed || lvaTable[lclNum].lvHasLdAddrOp) &&
10014                     verCurrentState.esStackDepth > 0)
10015                 {
10016                     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased"));
10017                 }
10018
10019                 /* Spill any refs to the local from the stack */
10020
10021                 impSpillLclRefs(lclNum);
10022
10023 #if !FEATURE_X87_DOUBLES
10024                 // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
10025                 // We insert a cast to the dest 'op2' type
10026                 //
10027                 if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
10028                     varTypeIsFloating(op2->gtType))
10029                 {
10030                     op1 = gtNewCastNode(op2->TypeGet(), op1, op2->TypeGet());
10031                 }
10032 #endif // !FEATURE_X87_DOUBLES
10033
10034                 if (varTypeIsStruct(lclTyp))
10035                 {
10036                     op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL);
10037                 }
10038                 else
10039                 {
10040                     // The code generator generates GC tracking information
10041                     // based on the RHS of the assignment.  Later the LHS (which is
10042                     // a BYREF) gets used and the emitter checks that that variable
10043                     // is being tracked.  It is not (since the RHS was an int and did
10044                     // not need tracking).  To keep this assert happy, we change the RHS
10045                     if (lclTyp == TYP_BYREF && !varTypeIsGC(op1->gtType))
10046                     {
10047                         op1->gtType = TYP_BYREF;
10048                     }
10049                     op1 = gtNewAssignNode(op2, op1);
10050                 }
10051
10052                 /* If insertLdloc is true, then we need to insert a ldloc following the
10053                    stloc.  This is done when converting a (dup, stloc) sequence into
10054                    a (stloc, ldloc) sequence. */
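                /* Illustrative IL:
                       <compute value>
                       dup
                       stloc.0
                   Rather than spilling the duplicated value, we append the stloc and then
                   reload the local, i.e. the equivalent (stloc.0, ldloc.0) ordering. */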
10055
10056                 if (insertLdloc)
10057                 {
10058                     // From SPILL_APPEND
10059                     impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
10060
10061 #ifdef DEBUG
10062                     // From DONE_APPEND
10063                     impNoteLastILoffs();
10064 #endif
10065                     op1         = nullptr;
10066                     insertLdloc = false;
10067
10068                     impLoadVar(lclNum, opcodeOffs + sz + 1, tiRetVal);
10069                     break;
10070                 }
10071
10072                 goto SPILL_APPEND;
10073
10074             case CEE_LDLOCA:
10075                 lclNum = getU2LittleEndian(codeAddr);
10076                 goto LDLOCA;
10077
10078             case CEE_LDLOCA_S:
10079                 lclNum = getU1LittleEndian(codeAddr);
10080             LDLOCA:
10081                 JITDUMP(" %u", lclNum);
10082                 if (tiVerificationNeeded)
10083                 {
10084                     Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
10085                     Verify(info.compInitMem, "initLocals not set");
10086                 }
10087
10088                 if (compIsForInlining())
10089                 {
10090                     // Get the local type
10091                     lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
10092
10093                     /* Have we allocated a temp for this local? */
10094
10095                     lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp"));
10096
10097                     op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum));
10098
10099                     goto _PUSH_ADRVAR;
10100                 }
10101
10102                 lclNum += numArgs;
10103                 assertImp(lclNum < info.compLocalsCount);
10104                 goto ADRVAR;
10105
10106             case CEE_LDARGA:
10107                 lclNum = getU2LittleEndian(codeAddr);
10108                 goto LDARGA;
10109
10110             case CEE_LDARGA_S:
10111                 lclNum = getU1LittleEndian(codeAddr);
10112             LDARGA:
10113                 JITDUMP(" %u", lclNum);
10114                 Verify(lclNum < info.compILargsCount, "bad arg num");
10115
10116                 if (compIsForInlining())
10117                 {
10118                     // In IL, LDARGA(_S) is used to load the byref managed pointer of a struct argument,
10119                     // followed by an ldfld to load the field.
10120
10121                     op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
10122                     if (op1->gtOper != GT_LCL_VAR)
10123                     {
10124                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR);
10125                         return;
10126                     }
10127
10128                     assert(op1->gtOper == GT_LCL_VAR);
10129
10130                     goto _PUSH_ADRVAR;
10131                 }
10132
10133                 lclNum = compMapILargNum(lclNum); // account for possible hidden param
10134                 assertImp(lclNum < numArgs);
10135
10136                 if (lclNum == info.compThisArg)
10137                 {
10138                     lclNum = lvaArg0Var;
10139                 }
10140
10141                 goto ADRVAR;
10142
10143             ADRVAR:
10144
10145                 op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum), opcodeOffs + sz + 1);
10146
10147             _PUSH_ADRVAR:
10148                 assert(op1->gtOper == GT_LCL_VAR);
10149
10150                 /* Note that this is supposed to create the transient type "*"
10151                    which may be used as a TYP_I_IMPL. However we catch places
10152                    where it is used as a TYP_I_IMPL and change the node if needed.
10153                    Thus we are pessimistic and may report byrefs in the GC info
10154                    where it was not absolutely needed, but it is safer this way.
10155                  */
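                // For example, the address created here may later be stored to a native-int
                // local; in that case the stloc/starg handling above retypes it to TYP_I_IMPL
                // so we don't report an unnecessary byref to the GC.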
10156                 op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10157
10158                 // &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does
10159                 assert((op1->gtFlags & GTF_GLOB_REF) == 0);
10160
10161                 tiRetVal = lvaTable[lclNum].lvVerTypeInfo;
10162                 if (tiVerificationNeeded)
10163                 {
10164                     // Don't allow taking address of uninit this ptr.
10165                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
10166                     {
10167                         Verify(!tiRetVal.IsThisPtr(), "address of uninit this ptr");
10168                     }
10169
10170                     if (!tiRetVal.IsByRef())
10171                     {
10172                         tiRetVal.MakeByRef();
10173                     }
10174                     else
10175                     {
10176                         Verify(false, "byref to byref");
10177                     }
10178                 }
10179
10180                 impPushOnStack(op1, tiRetVal);
10181                 break;
10182
10183             case CEE_ARGLIST:
10184
10185                 if (!info.compIsVarArgs)
10186                 {
10187                     BADCODE("arglist in non-vararg method");
10188                 }
10189
10190                 if (tiVerificationNeeded)
10191                 {
10192                     tiRetVal = typeInfo(TI_STRUCT, impGetRuntimeArgumentHandle());
10193                 }
10194                 assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG);
10195
10196                 /* The ARGLIST cookie is a hidden 'last' parameter; we have already
10197                    adjusted the arg count because this is like fetching the last param */
10198                 assertImp(0 < numArgs);
10199                 assert(lvaTable[lvaVarargsHandleArg].lvAddrExposed);
10200                 lclNum = lvaVarargsHandleArg;
10201                 op1    = gtNewLclvNode(lclNum, TYP_I_IMPL, opcodeOffs + sz + 1);
10202                 op1    = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10203                 impPushOnStack(op1, tiRetVal);
10204                 break;
10205
10206             case CEE_ENDFINALLY:
10207
10208                 if (compIsForInlining())
10209                 {
10210                     assert(!"Shouldn't have exception handlers in the inliner!");
10211                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY);
10212                     return;
10213                 }
10214
10215                 if (verCurrentState.esStackDepth > 0)
10216                 {
10217                     impEvalSideEffects();
10218                 }
10219
10220                 if (info.compXcptnsCount == 0)
10221                 {
10222                     BADCODE("endfinally outside finally");
10223                 }
10224
10225                 assert(verCurrentState.esStackDepth == 0);
10226
10227                 op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr);
10228                 goto APPEND;
10229
10230             case CEE_ENDFILTER:
10231
10232                 if (compIsForInlining())
10233                 {
10234                     assert(!"Shouldn't have exception handlers in the inliner!");
10235                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER);
10236                     return;
10237                 }
10238
10239                 block->bbSetRunRarely(); // filters are rare
10240
10241                 if (info.compXcptnsCount == 0)
10242                 {
10243                     BADCODE("endfilter outside filter");
10244                 }
10245
10246                 if (tiVerificationNeeded)
10247                 {
10248                     Verify(impStackTop().seTypeInfo.IsType(TI_INT), "bad endfilt arg");
10249                 }
10250
10251                 op1 = impPopStack().val;
10252                 assertImp(op1->gtType == TYP_INT);
10253                 if (!bbInFilterILRange(block))
10254                 {
10255                     BADCODE("EndFilter outside a filter handler");
10256                 }
10257
10258                 /* Mark current bb as end of filter */
10259
10260                 assert(compCurBB->bbFlags & BBF_DONT_REMOVE);
10261                 assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET);
10262
10263                 /* Mark catch handler as successor */
10264
10265                 op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1);
10266                 if (verCurrentState.esStackDepth != 0)
10267                 {
10268                     verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__)
10269                                                 DEBUGARG(__LINE__));
10270                 }
10271                 goto APPEND;
10272
10273             case CEE_RET:
10274                 prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it
10275             RET:
10276                 if (!impReturnInstruction(block, prefixFlags, opcode))
10277                 {
10278                     return; // abort
10279                 }
10280                 else
10281                 {
10282                     break;
10283                 }
10284
10285             case CEE_JMP:
10286
10287                 assert(!compIsForInlining());
10288
10289                 if (tiVerificationNeeded)
10290                 {
10291                     Verify(false, "Invalid opcode: CEE_JMP");
10292                 }
10293
10294                 if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex())
10295                 {
10296                     /* CEE_JMP does not make sense in some "protected" regions. */
10297
10298                     BADCODE("Jmp not allowed in protected region");
10299                 }
10300
10301                 if (verCurrentState.esStackDepth != 0)
10302                 {
10303                     BADCODE("Stack must be empty after CEE_JMPs");
10304                 }
10305
10306                 _impResolveToken(CORINFO_TOKENKIND_Method);
10307
10308                 JITDUMP(" %08X", resolvedToken.token);
10309
10310                 /* The signature of the target has to be identical to ours.
10311                    At least check that argCnt and returnType match */
10312
10313                 eeGetMethodSig(resolvedToken.hMethod, &sig);
10314                 if (sig.numArgs != info.compMethodInfo->args.numArgs ||
10315                     sig.retType != info.compMethodInfo->args.retType ||
10316                     sig.callConv != info.compMethodInfo->args.callConv)
10317                 {
10318                     BADCODE("Incompatible target for CEE_JMPs");
10319                 }
10320
10321 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARMARCH_)
10322
10323                 op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod);
10324
10325                 /* Mark the basic block as being a JUMP instead of RETURN */
10326
10327                 block->bbFlags |= BBF_HAS_JMP;
10328
10329                 /* Set this flag to make sure register arguments have a location assigned
10330                  * even if we don't use them inside the method */
10331
10332                 compJmpOpUsed = true;
10333
10334                 fgNoStructPromotion = true;
10335
10336                 goto APPEND;
10337
10338 #else // !_TARGET_XARCH_ && !_TARGET_ARMARCH_
10339
10340                 // Import this just like a series of LDARGs + tail. + call + ret
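                // Illustratively, for a method with two IL arguments "jmp <target>" becomes:
                //     ldarg.0
                //     ldarg.1
                //     tail. call <target>
                //     ret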
10341
10342                 if (info.compIsVarArgs)
10343                 {
10344                     // For now we don't implement true tail calls, so this breaks varargs.
10345                     // So warn the user instead of generating bad code.
10346                     // This is a semi-temporary workaround for DevDiv 173860, until we can properly
10347                     // implement true tail calls.
10348                     IMPL_LIMITATION("varargs + CEE_JMP doesn't work yet");
10349                 }
10350
10351                 // First load up the arguments (0 - N)
10352                 for (unsigned argNum = 0; argNum < info.compILargsCount; argNum++)
10353                 {
10354                     impLoadArg(argNum, opcodeOffs + sz + 1);
10355                 }
10356
10357                 // Now generate the tail call
10358                 noway_assert(prefixFlags == 0);
10359                 prefixFlags = PREFIX_TAILCALL_EXPLICIT;
10360                 opcode      = CEE_CALL;
10361
10362                 eeGetCallInfo(&resolvedToken, NULL,
10363                               combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS), &callInfo);
10364
10365                 // All calls and delegates need a security callout.
10366                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
10367
10368                 callTyp = impImportCall(CEE_CALL, &resolvedToken, NULL, NULL, PREFIX_TAILCALL_EXPLICIT, &callInfo,
10369                                         opcodeOffs);
10370
10371                 // And finish with the ret
10372                 goto RET;
10373
10374 #endif // _TARGET_XARCH_ || _TARGET_ARMARCH_
10375
10376             case CEE_LDELEMA:
10377                 assertImp(sz == sizeof(unsigned));
10378
10379                 _impResolveToken(CORINFO_TOKENKIND_Class);
10380
10381                 JITDUMP(" %08X", resolvedToken.token);
10382
10383                 ldelemClsHnd = resolvedToken.hClass;
10384
10385                 if (tiVerificationNeeded)
10386                 {
10387                     typeInfo tiArray = impStackTop(1).seTypeInfo;
10388                     typeInfo tiIndex = impStackTop().seTypeInfo;
10389
10390                     // As per ECMA 'index' specified can be either int32 or native int.
10391                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10392
10393                     typeInfo arrayElemType = verMakeTypeInfo(ldelemClsHnd);
10394                     Verify(tiArray.IsNullObjRef() ||
10395                                typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElemType),
10396                            "bad array");
10397
10398                     tiRetVal = arrayElemType;
10399                     tiRetVal.MakeByRef();
10400                     if (prefixFlags & PREFIX_READONLY)
10401                     {
10402                         tiRetVal.SetIsReadonlyByRef();
10403                     }
10404
10405                     // an array interior pointer is always in the heap
10406                     tiRetVal.SetIsPermanentHomeByRef();
10407                 }
10408
10409                 // If it's a value class array we just do a simple address-of
10410                 if (eeIsValueClass(ldelemClsHnd))
10411                 {
10412                     CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd);
10413                     if (cit == CORINFO_TYPE_UNDEF)
10414                     {
10415                         lclTyp = TYP_STRUCT;
10416                     }
10417                     else
10418                     {
10419                         lclTyp = JITtype2varType(cit);
10420                     }
10421                     goto ARR_LD_POST_VERIFY;
10422                 }
10423
10424                 // Similarly, if it's a readonly access, we can do a simple address-of
10425                 // without doing a runtime type-check
10426                 if (prefixFlags & PREFIX_READONLY)
10427                 {
10428                     lclTyp = TYP_REF;
10429                     goto ARR_LD_POST_VERIFY;
10430                 }
10431
10432                 // Otherwise we need the full helper function with run-time type check
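                // The helper receives (array, index, element-type handle), performs the
                // run-time element-type check, and returns a byref to the element.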
10433                 op1 = impTokenToHandle(&resolvedToken);
10434                 if (op1 == nullptr)
10435                 { // compDonotInline()
10436                     return;
10437                 }
10438
10439                 args = gtNewArgList(op1);                      // Type
10440                 args = gtNewListNode(impPopStack().val, args); // index
10441                 args = gtNewListNode(impPopStack().val, args); // array
10442                 op1  = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, GTF_EXCEPT, args);
10443
10444                 impPushOnStack(op1, tiRetVal);
10445                 break;
10446
10447             // ldelem for reference and value types
10448             case CEE_LDELEM:
10449                 assertImp(sz == sizeof(unsigned));
10450
10451                 _impResolveToken(CORINFO_TOKENKIND_Class);
10452
10453                 JITDUMP(" %08X", resolvedToken.token);
10454
10455                 ldelemClsHnd = resolvedToken.hClass;
10456
10457                 if (tiVerificationNeeded)
10458                 {
10459                     typeInfo tiArray = impStackTop(1).seTypeInfo;
10460                     typeInfo tiIndex = impStackTop().seTypeInfo;
10461
10462                     // As per ECMA 'index' specified can be either int32 or native int.
10463                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10464                     tiRetVal = verMakeTypeInfo(ldelemClsHnd);
10465
10466                     Verify(tiArray.IsNullObjRef() || tiCompatibleWith(verGetArrayElemType(tiArray), tiRetVal, false),
10467                            "type of array incompatible with type operand");
10468                     tiRetVal.NormaliseForStack();
10469                 }
10470
10471                 // If it's a reference type or generic variable type
10472                 // then just generate code as though it's a ldelem.ref instruction
10473                 if (!eeIsValueClass(ldelemClsHnd))
10474                 {
10475                     lclTyp = TYP_REF;
10476                     opcode = CEE_LDELEM_REF;
10477                 }
10478                 else
10479                 {
10480                     CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd);
10481                     lclTyp             = JITtype2varType(jitTyp);
10482                     tiRetVal           = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct
10483                     tiRetVal.NormaliseForStack();
10484                 }
10485                 goto ARR_LD_POST_VERIFY;
10486
10487             case CEE_LDELEM_I1:
10488                 lclTyp = TYP_BYTE;
10489                 goto ARR_LD;
10490             case CEE_LDELEM_I2:
10491                 lclTyp = TYP_SHORT;
10492                 goto ARR_LD;
10493             case CEE_LDELEM_I:
10494                 lclTyp = TYP_I_IMPL;
10495                 goto ARR_LD;
10496
10497             // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter
10498             // and treating it as TYP_INT avoids other asserts.
10499             case CEE_LDELEM_U4:
10500                 lclTyp = TYP_INT;
10501                 goto ARR_LD;
10502
10503             case CEE_LDELEM_I4:
10504                 lclTyp = TYP_INT;
10505                 goto ARR_LD;
10506             case CEE_LDELEM_I8:
10507                 lclTyp = TYP_LONG;
10508                 goto ARR_LD;
10509             case CEE_LDELEM_REF:
10510                 lclTyp = TYP_REF;
10511                 goto ARR_LD;
10512             case CEE_LDELEM_R4:
10513                 lclTyp = TYP_FLOAT;
10514                 goto ARR_LD;
10515             case CEE_LDELEM_R8:
10516                 lclTyp = TYP_DOUBLE;
10517                 goto ARR_LD;
10518             case CEE_LDELEM_U1:
10519                 lclTyp = TYP_UBYTE;
10520                 goto ARR_LD;
10521             case CEE_LDELEM_U2:
10522                 lclTyp = TYP_CHAR;
10523                 goto ARR_LD;
10524
10525             ARR_LD:
10526
10527                 if (tiVerificationNeeded)
10528                 {
10529                     typeInfo tiArray = impStackTop(1).seTypeInfo;
10530                     typeInfo tiIndex = impStackTop().seTypeInfo;
10531
10532                     // As per ECMA 'index' specified can be either int32 or native int.
10533                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10534                     if (tiArray.IsNullObjRef())
10535                     {
10536                         if (lclTyp == TYP_REF)
10537                         { // we will say a deref of a null array yields a null ref
10538                             tiRetVal = typeInfo(TI_NULL);
10539                         }
10540                         else
10541                         {
10542                             tiRetVal = typeInfo(lclTyp);
10543                         }
10544                     }
10545                     else
10546                     {
10547                         tiRetVal             = verGetArrayElemType(tiArray);
10548                         typeInfo arrayElemTi = typeInfo(lclTyp);
10549 #ifdef _TARGET_64BIT_
10550                         if (opcode == CEE_LDELEM_I)
10551                         {
10552                             arrayElemTi = typeInfo::nativeInt();
10553                         }
10554
10555                         if (lclTyp != TYP_REF && lclTyp != TYP_STRUCT)
10556                         {
10557                             Verify(typeInfo::AreEquivalent(tiRetVal, arrayElemTi), "bad array");
10558                         }
10559                         else
10560 #endif // _TARGET_64BIT_
10561                         {
10562                             Verify(tiRetVal.IsType(arrayElemTi.GetType()), "bad array");
10563                         }
10564                     }
10565                     tiRetVal.NormaliseForStack();
10566                 }
10567             ARR_LD_POST_VERIFY:
10568
10569                 /* Pull the index value and array address */
10570                 op2 = impPopStack().val;
10571                 op1 = impPopStack().val;
10572                 assertImp(op1->gtType == TYP_REF);
10573
10574                 /* Check for null pointer - in the inliner case we simply abort */
10575
10576                 if (compIsForInlining())
10577                 {
10578                     if (op1->gtOper == GT_CNS_INT)
10579                     {
10580                         compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM);
10581                         return;
10582                     }
10583                 }
10584
10585                 op1 = impCheckForNullPointer(op1);
10586
10587                 /* Mark the block as containing an index expression */
10588
10589                 if (op1->gtOper == GT_LCL_VAR)
10590                 {
10591                     if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD)
10592                     {
10593                         block->bbFlags |= BBF_HAS_IDX_LEN;
10594                         optMethodFlags |= OMF_HAS_ARRAYREF;
10595                     }
10596                 }
10597
10598                 /* Create the index node and push it on the stack */
10599
10600                 op1 = gtNewIndexRef(lclTyp, op1, op2);
10601
10602                 ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT);
10603
10604                 if ((opcode == CEE_LDELEMA) || ldstruct ||
10605                     (ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd)))
10606                 {
10607                     assert(ldelemClsHnd != DUMMY_INIT(NULL));
10608
10609                     // remember the element size
10610                     if (lclTyp == TYP_REF)
10611                     {
10612                         op1->gtIndex.gtIndElemSize = sizeof(void*);
10613                     }
10614                     else
10615                     {
10616                         // If ldElemClass is precisely a primitive type, use that; otherwise, preserve the struct type.
10617                         if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF)
10618                         {
10619                             op1->gtIndex.gtStructElemClass = ldelemClsHnd;
10620                         }
10621                         assert(lclTyp != TYP_STRUCT || op1->gtIndex.gtStructElemClass != nullptr);
10622                         if (lclTyp == TYP_STRUCT)
10623                         {
10624                             size                       = info.compCompHnd->getClassSize(ldelemClsHnd);
10625                             op1->gtIndex.gtIndElemSize = size;
10626                             op1->gtType                = lclTyp;
10627                         }
10628                     }
10629
10630                     if ((opcode == CEE_LDELEMA) || ldstruct)
10631                     {
10632                         // wrap it in a &
10633                         lclTyp = TYP_BYREF;
10634
10635                         op1 = gtNewOperNode(GT_ADDR, lclTyp, op1);
10636                     }
10637                     else
10638                     {
10639                         assert(lclTyp != TYP_STRUCT);
10640                     }
10641                 }
10642
10643                 if (ldstruct)
10644                 {
10645                     // Create an OBJ for the result
10646                     op1 = gtNewObjNode(ldelemClsHnd, op1);
10647                     op1->gtFlags |= GTF_EXCEPT;
10648                 }
10649                 impPushOnStack(op1, tiRetVal);
10650                 break;
10651
10652             // stelem for reference and value types
10653             case CEE_STELEM:
10654
10655                 assertImp(sz == sizeof(unsigned));
10656
10657                 _impResolveToken(CORINFO_TOKENKIND_Class);
10658
10659                 JITDUMP(" %08X", resolvedToken.token);
10660
10661                 stelemClsHnd = resolvedToken.hClass;
10662
10663                 if (tiVerificationNeeded)
10664                 {
10665                     typeInfo tiArray = impStackTop(2).seTypeInfo;
10666                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
10667                     typeInfo tiValue = impStackTop().seTypeInfo;
10668
10669                     // As per ECMA 'index' specified can be either int32 or native int.
10670                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10671                     typeInfo arrayElem = verMakeTypeInfo(stelemClsHnd);
10672
10673                     Verify(tiArray.IsNullObjRef() || tiCompatibleWith(arrayElem, verGetArrayElemType(tiArray), false),
10674                            "type operand incompatible with array element type");
10675                     arrayElem.NormaliseForStack();
10676                     Verify(tiCompatibleWith(tiValue, arrayElem, true), "value incompatible with type operand");
10677                 }
10678
10679                 // If it's a reference type just behave as though it's a stelem.ref instruction
10680                 if (!eeIsValueClass(stelemClsHnd))
10681                 {
10682                     goto STELEM_REF_POST_VERIFY;
10683                 }
10684
10685                 // Otherwise extract the type
10686                 {
10687                     CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd);
10688                     lclTyp             = JITtype2varType(jitTyp);
10689                     goto ARR_ST_POST_VERIFY;
10690                 }
10691
10692             case CEE_STELEM_REF:
10693
10694                 if (tiVerificationNeeded)
10695                 {
10696                     typeInfo tiArray = impStackTop(2).seTypeInfo;
10697                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
10698                     typeInfo tiValue = impStackTop().seTypeInfo;
10699
10700                     // As per ECMA 'index' specified can be either int32 or native int.
10701                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10702                     Verify(tiValue.IsObjRef(), "bad value");
10703
10704                     // we only check that it is an object reference. The helper does additional checks
10705                     Verify(tiArray.IsNullObjRef() || verGetArrayElemType(tiArray).IsType(TI_REF), "bad array");
10706                 }
10707
10708                 arrayNodeTo      = impStackTop(2).val;
10709                 arrayNodeToIndex = impStackTop(1).val;
10710                 arrayNodeFrom    = impStackTop().val;
10711
10712                 //
10713                 // Note that it is not legal to optimize away CORINFO_HELP_ARRADDR_ST in a
10714                 // lot of cases because of covariance, i.e. foo[] can be cast to object[].
10715                 //
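                // Illustratively (C#): a string[] passed as object[] must still reject the store
                // of an arbitrary object (ArrayTypeMismatchException), which is why the helper's
                // run-time element-type check cannot be elided in general.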
10716
10717                 // Check for assignment to the same array, i.e. arrLcl[i] = arrLcl[j].
10718                 // This does not need CORINFO_HELP_ARRADDR_ST.
10719
10720                 if (arrayNodeFrom->OperGet() == GT_INDEX && arrayNodeFrom->gtOp.gtOp1->gtOper == GT_LCL_VAR &&
10721                     arrayNodeTo->gtOper == GT_LCL_VAR &&
10722                     arrayNodeTo->gtLclVarCommon.gtLclNum == arrayNodeFrom->gtOp.gtOp1->gtLclVarCommon.gtLclNum &&
10723                     !lvaTable[arrayNodeTo->gtLclVarCommon.gtLclNum].lvAddrExposed)
10724                 {
10725                     lclTyp = TYP_REF;
10726                     goto ARR_ST_POST_VERIFY;
10727                 }
10728
10729                 // Check for assignment of NULL. This does not need CORINFO_HELP_ARRADDR_ST
10730
10731                 if (arrayNodeFrom->OperGet() == GT_CNS_INT)
10732                 {
10733                     assert(arrayNodeFrom->gtType == TYP_REF && arrayNodeFrom->gtIntCon.gtIconVal == 0);
10734
10735                     lclTyp = TYP_REF;
10736                     goto ARR_ST_POST_VERIFY;
10737                 }
10738
10739             STELEM_REF_POST_VERIFY:
10740
10741                 /* Call a helper function to do the assignment */
10742                 op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, 0, impPopList(3, &flags, nullptr));
10743
10744                 goto SPILL_APPEND;
10745
10746             case CEE_STELEM_I1:
10747                 lclTyp = TYP_BYTE;
10748                 goto ARR_ST;
10749             case CEE_STELEM_I2:
10750                 lclTyp = TYP_SHORT;
10751                 goto ARR_ST;
10752             case CEE_STELEM_I:
10753                 lclTyp = TYP_I_IMPL;
10754                 goto ARR_ST;
10755             case CEE_STELEM_I4:
10756                 lclTyp = TYP_INT;
10757                 goto ARR_ST;
10758             case CEE_STELEM_I8:
10759                 lclTyp = TYP_LONG;
10760                 goto ARR_ST;
10761             case CEE_STELEM_R4:
10762                 lclTyp = TYP_FLOAT;
10763                 goto ARR_ST;
10764             case CEE_STELEM_R8:
10765                 lclTyp = TYP_DOUBLE;
10766                 goto ARR_ST;
10767
10768             ARR_ST:
10769
10770                 if (tiVerificationNeeded)
10771                 {
10772                     typeInfo tiArray = impStackTop(2).seTypeInfo;
10773                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
10774                     typeInfo tiValue = impStackTop().seTypeInfo;
10775
10776                     // As per ECMA, the 'index' specified can be either int32 or native int.
10777                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10778                     typeInfo arrayElem = typeInfo(lclTyp);
10779 #ifdef _TARGET_64BIT_
10780                     if (opcode == CEE_STELEM_I)
10781                     {
10782                         arrayElem = typeInfo::nativeInt();
10783                     }
10784 #endif // _TARGET_64BIT_
10785                     Verify(tiArray.IsNullObjRef() || typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElem),
10786                            "bad array");
10787
10788                     Verify(tiCompatibleWith(NormaliseForStack(tiValue), arrayElem.NormaliseForStack(), true),
10789                            "bad value");
10790                 }
10791
10792             ARR_ST_POST_VERIFY:
10793                 /* The strict order of evaluation is LHS-operands, RHS-operands,
10794                    range-check, and then assignment. However, codegen currently
10795                    does the range-check before evaluation the RHS-operands. So to
10796                    does the range-check before evaluating the RHS-operands. So to
10797
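                      // For example (illustrative): in "arr[i] = Foo()" where i is out of range and
                      // Foo() throws, Foo()'s exception must surface first; spilling the side-effecting
                      // RHS here preserves that ordering even though codegen range-checks early.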
10798                 if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT)
10799                 {
10800                     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
10801                                                    "Strict ordering of exceptions for Array store"));
10802                 }
10803
10804                 /* Pull the new value from the stack */
10805                 op2 = impPopStack().val;
10806
10807                 /* Pull the index value */
10808                 op1 = impPopStack().val;
10809
10810                 /* Pull the array address */
10811                 op3 = impPopStack().val;
10812
10813                 assertImp(op3->gtType == TYP_REF);
10814                 if (op2->IsVarAddr())
10815                 {
10816                     op2->gtType = TYP_I_IMPL;
10817                 }
10818
10819                 op3 = impCheckForNullPointer(op3);
10820
10821                 // Mark the block as containing an index expression
10822
10823                 if (op3->gtOper == GT_LCL_VAR)
10824                 {
10825                     if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD)
10826                     {
10827                         block->bbFlags |= BBF_HAS_IDX_LEN;
10828                         optMethodFlags |= OMF_HAS_ARRAYREF;
10829                     }
10830                 }
10831
10832                 /* Create the index node */
10833
10834                 op1 = gtNewIndexRef(lclTyp, op3, op1);
10835
10836                 /* Create the assignment node and append it */
10837
10838                 if (lclTyp == TYP_STRUCT)
10839                 {
10840                     assert(stelemClsHnd != DUMMY_INIT(NULL));
10841
10842                     op1->gtIndex.gtStructElemClass = stelemClsHnd;
10843                     op1->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(stelemClsHnd);
10844                 }
10845                 if (varTypeIsStruct(op1))
10846                 {
10847                     op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL);
10848                 }
10849                 else
10850                 {
10851                     op2 = impImplicitR4orR8Cast(op2, op1->TypeGet());
10852                     op1 = gtNewAssignNode(op1, op2);
10853                 }
10854
10855                 /* Mark the expression as containing an assignment */
10856
10857                 op1->gtFlags |= GTF_ASG;
10858
10859                 goto SPILL_APPEND;
10860
10861             case CEE_ADD:
10862                 oper = GT_ADD;
10863                 goto MATH_OP2;
10864
10865             case CEE_ADD_OVF:
10866                 uns = false;
10867                 goto ADD_OVF;
10868             case CEE_ADD_OVF_UN:
10869                 uns = true;
10870                 goto ADD_OVF;
10871
10872             ADD_OVF:
10873                 ovfl     = true;
10874                 callNode = false;
10875                 oper     = GT_ADD;
10876                 goto MATH_OP2_FLAGS;
10877
10878             case CEE_SUB:
10879                 oper = GT_SUB;
10880                 goto MATH_OP2;
10881
10882             case CEE_SUB_OVF:
10883                 uns = false;
10884                 goto SUB_OVF;
10885             case CEE_SUB_OVF_UN:
10886                 uns = true;
10887                 goto SUB_OVF;
10888
10889             SUB_OVF:
10890                 ovfl     = true;
10891                 callNode = false;
10892                 oper     = GT_SUB;
10893                 goto MATH_OP2_FLAGS;
10894
10895             case CEE_MUL:
10896                 oper = GT_MUL;
10897                 goto MATH_MAYBE_CALL_NO_OVF;
10898
10899             case CEE_MUL_OVF:
10900                 uns = false;
10901                 goto MUL_OVF;
10902             case CEE_MUL_OVF_UN:
10903                 uns = true;
10904                 goto MUL_OVF;
10905
10906             MUL_OVF:
10907                 ovfl = true;
10908                 oper = GT_MUL;
10909                 goto MATH_MAYBE_CALL_OVF;
10910
10911             // Other binary math operations
10912
10913             case CEE_DIV:
10914                 oper = GT_DIV;
10915                 goto MATH_MAYBE_CALL_NO_OVF;
10916
10917             case CEE_DIV_UN:
10918                 oper = GT_UDIV;
10919                 goto MATH_MAYBE_CALL_NO_OVF;
10920
10921             case CEE_REM:
10922                 oper = GT_MOD;
10923                 goto MATH_MAYBE_CALL_NO_OVF;
10924
10925             case CEE_REM_UN:
10926                 oper = GT_UMOD;
10927                 goto MATH_MAYBE_CALL_NO_OVF;
10928
10929             MATH_MAYBE_CALL_NO_OVF:
10930                 ovfl = false;
10931             MATH_MAYBE_CALL_OVF:
10932                 // Morpher has some complex logic about when to turn different
10933                 // typed nodes on different platforms into helper calls. We
10934                 // need to either duplicate that logic here, or just
10935                 // pessimistically make all the nodes large enough to become
10936                 // call nodes.  Since call nodes aren't that much larger and
10937                 // these opcodes are infrequent enough that I chose the latter.
10938                 callNode = true;
10939                 goto MATH_OP2_FLAGS;
10940
10941             case CEE_AND:
10942                 oper = GT_AND;
10943                 goto MATH_OP2;
10944             case CEE_OR:
10945                 oper = GT_OR;
10946                 goto MATH_OP2;
10947             case CEE_XOR:
10948                 oper = GT_XOR;
10949                 goto MATH_OP2;
10950
10951             MATH_OP2: // For default values of 'ovfl' and 'callNode'
10952
10953                 ovfl     = false;
10954                 callNode = false;
10955
10956             MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set
10957
10958                 /* Pull two values and push back the result */
10959
10960                 if (tiVerificationNeeded)
10961                 {
10962                     const typeInfo& tiOp1 = impStackTop(1).seTypeInfo;
10963                     const typeInfo& tiOp2 = impStackTop().seTypeInfo;
10964
10965                     Verify(tiCompatibleWith(tiOp1, tiOp2, true), "different arg type");
10966                     if (oper == GT_ADD || oper == GT_DIV || oper == GT_SUB || oper == GT_MUL || oper == GT_MOD)
10967                     {
10968                         Verify(tiOp1.IsNumberType(), "not number");
10969                     }
10970                     else
10971                     {
10972                         Verify(tiOp1.IsIntegerType(), "not integer");
10973                     }
10974
10975                     Verify(!ovfl || tiOp1.IsIntegerType(), "not integer");
10976
10977                     tiRetVal = tiOp1;
10978
10979 #ifdef _TARGET_64BIT_
10980                     if (tiOp2.IsNativeIntType())
10981                     {
10982                         tiRetVal = tiOp2;
10983                     }
10984 #endif // _TARGET_64BIT_
10985                 }
10986
10987                 op2 = impPopStack().val;
10988                 op1 = impPopStack().val;
10989
10990 #if !CPU_HAS_FP_SUPPORT
10991                 if (varTypeIsFloating(op1->gtType))
10992                 {
10993                     callNode = true;
10994                 }
10995 #endif
10996                 /* Can't do arithmetic with references */
10997                 assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF);
10998
10999                 // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change it if it's a true byref, only
11000                 // if it is on the stack)
11001                 impBashVarAddrsToI(op1, op2);
11002
11003                 type = impGetByRefResultType(oper, uns, &op1, &op2);
11004
11005                 assert(!ovfl || !varTypeIsFloating(op1->gtType));
11006
11007                 /* Special case: "int+0", "int-0", "int*1", "int/1" */
11008
11009                 if (op2->gtOper == GT_CNS_INT)
11010                 {
11011                     if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) ||
11012                         (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV)))
11013
11014                     {
11015                         impPushOnStack(op1, tiRetVal);
11016                         break;
11017                     }
11018                 }
11019
11020 #if !FEATURE_X87_DOUBLES
11021                 // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand
11022                 //
11023                 if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))
11024                 {
11025                     if (op1->TypeGet() != type)
11026                     {
11027                         // We insert a cast of op1 to 'type'
11028                         op1 = gtNewCastNode(type, op1, type);
11029                     }
11030                     if (op2->TypeGet() != type)
11031                     {
11032                         // We insert a cast of op2 to 'type'
11033                         op2 = gtNewCastNode(type, op2, type);
11034                     }
11035                 }
11036 #endif // !FEATURE_X87_DOUBLES
11037
11038 #if SMALL_TREE_NODES
11039                 if (callNode)
11040                 {
11041                     /* These operators can later be transformed into 'GT_CALL' */
11042
11043                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]);
11044 #ifndef _TARGET_ARM_
11045                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]);
11046                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]);
11047                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]);
11048                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]);
11049 #endif
11050                     // It's tempting to use LargeOpOpcode() here, but this logic is *not* saying
11051                     // that we'll need to transform into a general large node, but rather specifically
11052                     // to a call: by doing it this way, things keep working if there are multiple sizes,
11053                     // and a CALL is no longer the largest.
11054                     // That said, as of now it *is* a large node, so we'll do this with an assert rather
11055                     // than an "if".
11056                     assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE);
11057                     op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true));
11058                 }
11059                 else
11060 #endif // SMALL_TREE_NODES
11061                 {
11062                     op1 = gtNewOperNode(oper, type, op1, op2);
11063                 }
11064
11065                 /* Special case: integer/long division may throw an exception */
11066
11067                 if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow())
11068                 {
11069                     op1->gtFlags |= GTF_EXCEPT;
11070                 }
11071
11072                 if (ovfl)
11073                 {
11074                     assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL);
11075                     if (ovflType != TYP_UNKNOWN)
11076                     {
11077                         op1->gtType = ovflType;
11078                     }
11079                     op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW);
11080                     if (uns)
11081                     {
11082                         op1->gtFlags |= GTF_UNSIGNED;
11083                     }
11084                 }
11085
11086                 impPushOnStack(op1, tiRetVal);
11087                 break;
11088
11089             case CEE_SHL:
11090                 oper = GT_LSH;
11091                 goto CEE_SH_OP2;
11092
11093             case CEE_SHR:
11094                 oper = GT_RSH;
11095                 goto CEE_SH_OP2;
11096             case CEE_SHR_UN:
11097                 oper = GT_RSZ;
11098                 goto CEE_SH_OP2;
11099
11100             CEE_SH_OP2:
11101                 if (tiVerificationNeeded)
11102                 {
11103                     const typeInfo& tiVal   = impStackTop(1).seTypeInfo;
11104                     const typeInfo& tiShift = impStackTop(0).seTypeInfo;
11105                     Verify(tiVal.IsIntegerType() && tiShift.IsType(TI_INT), "Bad shift args");
11106                     tiRetVal = tiVal;
11107                 }
11108                 op2 = impPopStack().val;
11109                 op1 = impPopStack().val; // operand to be shifted
11110                 impBashVarAddrsToI(op1, op2);
11111
11112                 type = genActualType(op1->TypeGet());
11113                 op1  = gtNewOperNode(oper, type, op1, op2);
11114
11115                 impPushOnStack(op1, tiRetVal);
11116                 break;
11117
11118             case CEE_NOT:
11119                 if (tiVerificationNeeded)
11120                 {
11121                     tiRetVal = impStackTop().seTypeInfo;
11122                     Verify(tiRetVal.IsIntegerType(), "bad int value");
11123                 }
11124
11125                 op1 = impPopStack().val;
11126                 impBashVarAddrsToI(op1, nullptr);
11127                 type = genActualType(op1->TypeGet());
11128                 impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal);
11129                 break;
11130
11131             case CEE_CKFINITE:
11132                 if (tiVerificationNeeded)
11133                 {
11134                     tiRetVal = impStackTop().seTypeInfo;
11135                     Verify(tiRetVal.IsType(TI_DOUBLE), "bad R value");
11136                 }
11137                 op1  = impPopStack().val;
11138                 type = op1->TypeGet();
11139                 op1  = gtNewOperNode(GT_CKFINITE, type, op1);
11140                 op1->gtFlags |= GTF_EXCEPT;
11141
11142                 impPushOnStack(op1, tiRetVal);
11143                 break;
11144
11145             case CEE_LEAVE:
11146
11147                 val     = getI4LittleEndian(codeAddr); // jump distance
11148                 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val);
11149                 goto LEAVE;
11150
11151             case CEE_LEAVE_S:
11152                 val     = getI1LittleEndian(codeAddr); // jump distance
11153                 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val);
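                      // (IL branch targets are relative to the start of the instruction following the
                      //  branch, hence the "+ sizeof(...)" term in the target computations above.)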
11154
11155             LEAVE:
11156
11157                 if (compIsForInlining())
11158                 {
11159                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE);
11160                     return;
11161                 }
11162
11163                 JITDUMP(" %04X", jmpAddr);
11164                 if (block->bbJumpKind != BBJ_LEAVE)
11165                 {
11166                     impResetLeaveBlock(block, jmpAddr);
11167                 }
11168
11169                 assert(jmpAddr == block->bbJumpDest->bbCodeOffs);
11170                 impImportLeave(block);
11171                 impNoteBranchOffs();
11172
11173                 break;
11174
11175             case CEE_BR:
11176             case CEE_BR_S:
11177                 jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
11178
11179                 if (compIsForInlining() && jmpDist == 0)
11180                 {
11181                     break; /* NOP */
11182                 }
11183
11184                 impNoteBranchOffs();
11185                 break;
11186
11187             case CEE_BRTRUE:
11188             case CEE_BRTRUE_S:
11189             case CEE_BRFALSE:
11190             case CEE_BRFALSE_S:
11191
11192                 /* Pop the comparand (now there's a neat term) from the stack */
11193                 if (tiVerificationNeeded)
11194                 {
11195                     typeInfo& tiVal = impStackTop().seTypeInfo;
11196                     Verify(tiVal.IsObjRef() || tiVal.IsByRef() || tiVal.IsIntegerType() || tiVal.IsMethod(),
11197                            "bad value");
11198                 }
11199
11200                 op1  = impPopStack().val;
11201                 type = op1->TypeGet();
11202
11203                 // brfalse and brtrue are only allowed on I4, refs, and byrefs.
11204                 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11205                 {
11206                     block->bbJumpKind = BBJ_NONE;
11207
11208                     if (op1->gtFlags & GTF_GLOB_EFFECT)
11209                     {
11210                         op1 = gtUnusedValNode(op1);
11211                         goto SPILL_APPEND;
11212                     }
11213                     else
11214                     {
11215                         break;
11216                     }
11217                 }
11218
11219                 if (op1->OperIsCompare())
11220                 {
11221                     if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S)
11222                     {
11223                         // Flip the sense of the compare
11224
11225                         op1 = gtReverseCond(op1);
11226                     }
11227                 }
11228                 else
11229                 {
11230                     /* We'll compare against an equally-sized integer 0 */
11231                     /* For small types, we always compare against int   */
11232                     op2 = gtNewZeroConNode(genActualType(op1->gtType));
11233
11234                     /* Create the comparison operator and try to fold it */
11235
11236                     oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? GT_NE : GT_EQ;
11237                     op1  = gtNewOperNode(oper, TYP_INT, op1, op2);
11238                 }
11239
11240             // fall through
11241
11242             COND_JUMP:
11243
11244                 /* Fold comparison if we can */
11245
11246                 op1 = gtFoldExpr(op1);
11247
11248                 /* Try to fold the really simple cases like 'iconst *, ifne/ifeq'*/
11249                 /* Don't make any blocks unreachable in import only mode */
11250
11251                 if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly())
11252                 {
11253                     /* gtFoldExpr() should prevent this as we don't want to make any blocks
11254                        unreachable under compDbgCode */
11255                     assert(!opts.compDbgCode);
11256
11257                     BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->gtIntCon.gtIconVal ? BBJ_ALWAYS : BBJ_NONE);
11258                     assertImp((block->bbJumpKind == BBJ_COND)            // normal case
11259                               || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the
11260                                                                          // block for the second time
11261
11262                     block->bbJumpKind = foldedJumpKind;
11263 #ifdef DEBUG
11264                     if (verbose)
11265                     {
11266                         if (op1->gtIntCon.gtIconVal)
11267                         {
11268                             printf("\nThe conditional jump becomes an unconditional jump to BB%02u\n",
11269                                    block->bbJumpDest->bbNum);
11270                         }
11271                         else
11272                         {
11273                             printf("\nThe block falls through into the next BB%02u\n", block->bbNext->bbNum);
11274                         }
11275                     }
11276 #endif
11277                     break;
11278                 }
11279
11280                 op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1);
11281
11282                 /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt'
11283                    in impImportBlock(block). For correct line numbers, spill stack. */
11284
11285                 if (opts.compDbgCode && impCurStmtOffs != BAD_IL_OFFSET)
11286                 {
11287                     impSpillStackEnsure(true);
11288                 }
11289
11290                 goto SPILL_APPEND;
11291
11292             case CEE_CEQ:
11293                 oper = GT_EQ;
11294                 uns  = false;
11295                 goto CMP_2_OPs;
11296             case CEE_CGT_UN:
11297                 oper = GT_GT;
11298                 uns  = true;
11299                 goto CMP_2_OPs;
11300             case CEE_CGT:
11301                 oper = GT_GT;
11302                 uns  = false;
11303                 goto CMP_2_OPs;
11304             case CEE_CLT_UN:
11305                 oper = GT_LT;
11306                 uns  = true;
11307                 goto CMP_2_OPs;
11308             case CEE_CLT:
11309                 oper = GT_LT;
11310                 uns  = false;
11311                 goto CMP_2_OPs;
11312
11313             CMP_2_OPs:
11314                 if (tiVerificationNeeded)
11315                 {
11316                     verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
11317                     tiRetVal = typeInfo(TI_INT);
11318                 }
11319
11320                 op2 = impPopStack().val;
11321                 op1 = impPopStack().val;
11322
11323 #ifdef _TARGET_64BIT_
11324                 if (varTypeIsI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_INT))
11325                 {
11326                     op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11327                 }
11328                 else if (varTypeIsI(op2->TypeGet()) && (genActualType(op1->TypeGet()) == TYP_INT))
11329                 {
11330                     op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11331                 }
11332 #endif // _TARGET_64BIT_
11333
11334                 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
11335                           varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
11336                           varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
11337
11338                 /* Create the comparison node */
11339
11340                 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
11341
11342                 /* TODO: setting both flags when only one is appropriate */
11343                 if (opcode == CEE_CGT_UN || opcode == CEE_CLT_UN)
11344                 {
11345                     op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED;
11346                 }
11347
11348                 impPushOnStack(op1, tiRetVal);
11349                 break;
11350
11351             case CEE_BEQ_S:
11352             case CEE_BEQ:
11353                 oper = GT_EQ;
11354                 goto CMP_2_OPs_AND_BR;
11355
11356             case CEE_BGE_S:
11357             case CEE_BGE:
11358                 oper = GT_GE;
11359                 goto CMP_2_OPs_AND_BR;
11360
11361             case CEE_BGE_UN_S:
11362             case CEE_BGE_UN:
11363                 oper = GT_GE;
11364                 goto CMP_2_OPs_AND_BR_UN;
11365
11366             case CEE_BGT_S:
11367             case CEE_BGT:
11368                 oper = GT_GT;
11369                 goto CMP_2_OPs_AND_BR;
11370
11371             case CEE_BGT_UN_S:
11372             case CEE_BGT_UN:
11373                 oper = GT_GT;
11374                 goto CMP_2_OPs_AND_BR_UN;
11375
11376             case CEE_BLE_S:
11377             case CEE_BLE:
11378                 oper = GT_LE;
11379                 goto CMP_2_OPs_AND_BR;
11380
11381             case CEE_BLE_UN_S:
11382             case CEE_BLE_UN:
11383                 oper = GT_LE;
11384                 goto CMP_2_OPs_AND_BR_UN;
11385
11386             case CEE_BLT_S:
11387             case CEE_BLT:
11388                 oper = GT_LT;
11389                 goto CMP_2_OPs_AND_BR;
11390
11391             case CEE_BLT_UN_S:
11392             case CEE_BLT_UN:
11393                 oper = GT_LT;
11394                 goto CMP_2_OPs_AND_BR_UN;
11395
11396             case CEE_BNE_UN_S:
11397             case CEE_BNE_UN:
11398                 oper = GT_NE;
11399                 goto CMP_2_OPs_AND_BR_UN;
11400
11401             CMP_2_OPs_AND_BR_UN:
11402                 uns       = true;
11403                 unordered = true;
11404                 goto CMP_2_OPs_AND_BR_ALL;
11405             CMP_2_OPs_AND_BR:
11406                 uns       = false;
11407                 unordered = false;
11408                 goto CMP_2_OPs_AND_BR_ALL;
11409             CMP_2_OPs_AND_BR_ALL:
11410
11411                 if (tiVerificationNeeded)
11412                 {
11413                     verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
11414                 }
11415
11416                 /* Pull two values */
11417                 op2 = impPopStack().val;
11418                 op1 = impPopStack().val;
11419
11420 #ifdef _TARGET_64BIT_
11421                 if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT))
11422                 {
11423                     op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11424                 }
11425                 else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT))
11426                 {
11427                     op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11428                 }
11429 #endif // _TARGET_64BIT_
11430
11431                 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
11432                           varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
11433                           varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
11434
11435                 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11436                 {
11437                     block->bbJumpKind = BBJ_NONE;
11438
11439                     if (op1->gtFlags & GTF_GLOB_EFFECT)
11440                     {
11441                         impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11442                                                        "Branch to next Optimization, op1 side effect"));
11443                         impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
11444                     }
11445                     if (op2->gtFlags & GTF_GLOB_EFFECT)
11446                     {
11447                         impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11448                                                        "Branch to next Optimization, op2 side effect"));
11449                         impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
11450                     }
11451
11452 #ifdef DEBUG
11453                     if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT)
11454                     {
11455                         impNoteLastILoffs();
11456                     }
11457 #endif
11458                     break;
11459                 }
11460 #if !FEATURE_X87_DOUBLES
11461                 // We can generate a compare of different-sized floating point op1 and op2;
11462                 // we insert a cast.
11463                 //
11464                 if (varTypeIsFloating(op1->TypeGet()))
11465                 {
11466                     if (op1->TypeGet() != op2->TypeGet())
11467                     {
11468                         assert(varTypeIsFloating(op2->TypeGet()));
11469
11470                         // say op1=double, op2=float. To avoid loss of precision
11471                         // while comparing, op2 is converted to double and double
11472                         // comparison is done.
11473                         if (op1->TypeGet() == TYP_DOUBLE)
11474                         {
11475                             // We insert a cast of op2 to TYP_DOUBLE
11476                             op2 = gtNewCastNode(TYP_DOUBLE, op2, TYP_DOUBLE);
11477                         }
11478                         else if (op2->TypeGet() == TYP_DOUBLE)
11479                         {
11480                             // We insert a cast of op1 to TYP_DOUBLE
11481                             op1 = gtNewCastNode(TYP_DOUBLE, op1, TYP_DOUBLE);
11482                         }
11483                     }
11484                 }
11485 #endif // !FEATURE_X87_DOUBLES
11486
11487                 /* Create and append the operator */
11488
11489                 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
11490
11491                 if (uns)
11492                 {
11493                     op1->gtFlags |= GTF_UNSIGNED;
11494                 }
11495
11496                 if (unordered)
11497                 {
11498                     op1->gtFlags |= GTF_RELOP_NAN_UN;
11499                 }
11500
11501                 goto COND_JUMP;
11502
11503             case CEE_SWITCH:
11504                 assert(!compIsForInlining());
11505
11506                 if (tiVerificationNeeded)
11507                 {
11508                     Verify(impStackTop().seTypeInfo.IsType(TI_INT), "Bad switch val");
11509                 }
11510                 /* Pop the switch value off the stack */
11511                 op1 = impPopStack().val;
11512                 assertImp(genActualTypeIsIntOrI(op1->TypeGet()));
11513
11514 #ifdef _TARGET_64BIT_
11515                 // Widen 'op1' on 64-bit targets
11516                 if (op1->TypeGet() != TYP_I_IMPL)
11517                 {
11518                     if (op1->OperGet() == GT_CNS_INT)
11519                     {
11520                         op1->gtType = TYP_I_IMPL;
11521                     }
11522                     else
11523                     {
11524                         op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
11525                     }
11526                 }
11527 #endif // _TARGET_64BIT_
11528                 assert(genActualType(op1->TypeGet()) == TYP_I_IMPL);
11529
11530                 /* We can create a switch node */
11531
11532                 op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1);
11533
11534                 val = (int)getU4LittleEndian(codeAddr);
11535                 codeAddr += 4 + val * 4; // skip over the switch-table
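                      // (The switch operand is a 4-byte case count followed by that many 4-byte
                      //  relative jump targets, which is why 4 + val * 4 bytes are skipped here.)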
11536
11537                 goto SPILL_APPEND;
11538
11539             /************************** Casting OPCODES ***************************/
11540
11541             case CEE_CONV_OVF_I1:
11542                 lclTyp = TYP_BYTE;
11543                 goto CONV_OVF;
11544             case CEE_CONV_OVF_I2:
11545                 lclTyp = TYP_SHORT;
11546                 goto CONV_OVF;
11547             case CEE_CONV_OVF_I:
11548                 lclTyp = TYP_I_IMPL;
11549                 goto CONV_OVF;
11550             case CEE_CONV_OVF_I4:
11551                 lclTyp = TYP_INT;
11552                 goto CONV_OVF;
11553             case CEE_CONV_OVF_I8:
11554                 lclTyp = TYP_LONG;
11555                 goto CONV_OVF;
11556
11557             case CEE_CONV_OVF_U1:
11558                 lclTyp = TYP_UBYTE;
11559                 goto CONV_OVF;
11560             case CEE_CONV_OVF_U2:
11561                 lclTyp = TYP_CHAR;
11562                 goto CONV_OVF;
11563             case CEE_CONV_OVF_U:
11564                 lclTyp = TYP_U_IMPL;
11565                 goto CONV_OVF;
11566             case CEE_CONV_OVF_U4:
11567                 lclTyp = TYP_UINT;
11568                 goto CONV_OVF;
11569             case CEE_CONV_OVF_U8:
11570                 lclTyp = TYP_ULONG;
11571                 goto CONV_OVF;
11572
11573             case CEE_CONV_OVF_I1_UN:
11574                 lclTyp = TYP_BYTE;
11575                 goto CONV_OVF_UN;
11576             case CEE_CONV_OVF_I2_UN:
11577                 lclTyp = TYP_SHORT;
11578                 goto CONV_OVF_UN;
11579             case CEE_CONV_OVF_I_UN:
11580                 lclTyp = TYP_I_IMPL;
11581                 goto CONV_OVF_UN;
11582             case CEE_CONV_OVF_I4_UN:
11583                 lclTyp = TYP_INT;
11584                 goto CONV_OVF_UN;
11585             case CEE_CONV_OVF_I8_UN:
11586                 lclTyp = TYP_LONG;
11587                 goto CONV_OVF_UN;
11588
11589             case CEE_CONV_OVF_U1_UN:
11590                 lclTyp = TYP_UBYTE;
11591                 goto CONV_OVF_UN;
11592             case CEE_CONV_OVF_U2_UN:
11593                 lclTyp = TYP_CHAR;
11594                 goto CONV_OVF_UN;
11595             case CEE_CONV_OVF_U_UN:
11596                 lclTyp = TYP_U_IMPL;
11597                 goto CONV_OVF_UN;
11598             case CEE_CONV_OVF_U4_UN:
11599                 lclTyp = TYP_UINT;
11600                 goto CONV_OVF_UN;
11601             case CEE_CONV_OVF_U8_UN:
11602                 lclTyp = TYP_ULONG;
11603                 goto CONV_OVF_UN;
11604
11605             CONV_OVF_UN:
11606                 uns = true;
11607                 goto CONV_OVF_COMMON;
11608             CONV_OVF:
11609                 uns = false;
11610                 goto CONV_OVF_COMMON;
11611
11612             CONV_OVF_COMMON:
11613                 ovfl = true;
11614                 goto _CONV;
11615
11616             case CEE_CONV_I1:
11617                 lclTyp = TYP_BYTE;
11618                 goto CONV;
11619             case CEE_CONV_I2:
11620                 lclTyp = TYP_SHORT;
11621                 goto CONV;
11622             case CEE_CONV_I:
11623                 lclTyp = TYP_I_IMPL;
11624                 goto CONV;
11625             case CEE_CONV_I4:
11626                 lclTyp = TYP_INT;
11627                 goto CONV;
11628             case CEE_CONV_I8:
11629                 lclTyp = TYP_LONG;
11630                 goto CONV;
11631
11632             case CEE_CONV_U1:
11633                 lclTyp = TYP_UBYTE;
11634                 goto CONV;
11635             case CEE_CONV_U2:
11636                 lclTyp = TYP_CHAR;
11637                 goto CONV;
11638 #if (REGSIZE_BYTES == 8)
11639             case CEE_CONV_U:
11640                 lclTyp = TYP_U_IMPL;
11641                 goto CONV_UN;
11642 #else
11643             case CEE_CONV_U:
11644                 lclTyp = TYP_U_IMPL;
11645                 goto CONV;
11646 #endif
11647             case CEE_CONV_U4:
11648                 lclTyp = TYP_UINT;
11649                 goto CONV;
11650             case CEE_CONV_U8:
11651                 lclTyp = TYP_ULONG;
11652                 goto CONV_UN;
11653
11654             case CEE_CONV_R4:
11655                 lclTyp = TYP_FLOAT;
11656                 goto CONV;
11657             case CEE_CONV_R8:
11658                 lclTyp = TYP_DOUBLE;
11659                 goto CONV;
11660
11661             case CEE_CONV_R_UN:
11662                 lclTyp = TYP_DOUBLE;
11663                 goto CONV_UN;
11664
11665             CONV_UN:
11666                 uns  = true;
11667                 ovfl = false;
11668                 goto _CONV;
11669
11670             CONV:
11671                 uns  = false;
11672                 ovfl = false;
11673                 goto _CONV;
11674
11675             _CONV:
11676                 // just check that we have a number on the stack
11677                 if (tiVerificationNeeded)
11678                 {
11679                     const typeInfo& tiVal = impStackTop().seTypeInfo;
11680                     Verify(tiVal.IsNumberType(), "bad arg");
11681
11682 #ifdef _TARGET_64BIT_
11683                     bool isNative = false;
11684
11685                     switch (opcode)
11686                     {
11687                         case CEE_CONV_OVF_I:
11688                         case CEE_CONV_OVF_I_UN:
11689                         case CEE_CONV_I:
11690                         case CEE_CONV_OVF_U:
11691                         case CEE_CONV_OVF_U_UN:
11692                         case CEE_CONV_U:
11693                             isNative = true;
11694                         default:
11695                             // leave 'isNative' = false;
11696                             break;
11697                     }
11698                     if (isNative)
11699                     {
11700                         tiRetVal = typeInfo::nativeInt();
11701                     }
11702                     else
11703 #endif // _TARGET_64BIT_
11704                     {
11705                         tiRetVal = typeInfo(lclTyp).NormaliseForStack();
11706                     }
11707                 }
11708
11709                 // Only conversions from FLOAT or DOUBLE to an integer type,
11710                 // and conversions from ULONG (or LONG on ARM) to DOUBLE, are morphed to calls.
11711
11712                 if (varTypeIsFloating(lclTyp))
11713                 {
11714                     callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl
11715 #ifdef _TARGET_64BIT_
11716                                // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK?
11717                                // TYP_BYREF could be used as TYP_I_IMPL which is long.
11718                                // TODO-CQ: remove this when we lower casts long/ulong --> float/double
11719                                // and generate SSE2 code instead of going through helper calls.
11720                                || (impStackTop().val->TypeGet() == TYP_BYREF)
11721 #endif
11722                         ;
11723                 }
11724                 else
11725                 {
11726                     callNode = varTypeIsFloating(impStackTop().val->TypeGet());
11727                 }
11728
11729                 // At this point uns, ovf, callNode all set
11730
11731                 op1 = impPopStack().val;
11732                 impBashVarAddrsToI(op1);
11733
11734                 if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND)
11735                 {
11736                     op2 = op1->gtOp.gtOp2;
11737
11738                     if (op2->gtOper == GT_CNS_INT)
11739                     {
11740                         ssize_t ival = op2->gtIntCon.gtIconVal;
11741                         ssize_t mask, umask;
11742
11743                         switch (lclTyp)
11744                         {
11745                             case TYP_BYTE:
11746                             case TYP_UBYTE:
11747                                 mask  = 0x00FF;
11748                                 umask = 0x007F;
11749                                 break;
11750                             case TYP_CHAR:
11751                             case TYP_SHORT:
11752                                 mask  = 0xFFFF;
11753                                 umask = 0x7FFF;
11754                                 break;
11755
11756                             default:
11757                                 assert(!"unexpected type");
11758                                 return;
11759                         }
11760
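                              // For example (illustrative): "conv.i1" applied to "x & 0x7F" can drop
                              // the cast, since the masked value already fits in a signed byte, while
                              // "conv.i1" applied to "x & 0xFF" can instead drop the redundant mask.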
11761                         if (((ival & umask) == ival) || ((ival & mask) == ival && uns))
11762                         {
11763                             /* Toss the cast, it's a waste of time */
11764
11765                             impPushOnStack(op1, tiRetVal);
11766                             break;
11767                         }
11768                         else if (ival == mask)
11769                         {
11770                             /* Toss the masking, it's a waste of time, since
11771                                we sign-extend from the small value anyway */
11772
11773                             op1 = op1->gtOp.gtOp1;
11774                         }
11775                     }
11776                 }
11777
11778                 /*  The 'op2' sub-operand of a cast is the 'real' type number,
11779                     since the result of a cast to one of the 'small' integer
11780                     types is an integer.
11781                  */
11782
11783                 type = genActualType(lclTyp);
11784
11785 #if SMALL_TREE_NODES
11786                 if (callNode)
11787                 {
11788                     op1 = gtNewCastNodeL(type, op1, lclTyp);
11789                 }
11790                 else
11791 #endif // SMALL_TREE_NODES
11792                 {
11793                     op1 = gtNewCastNode(type, op1, lclTyp);
11794                 }
11795
11796                 if (ovfl)
11797                 {
11798                     op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT);
11799                 }
11800                 if (uns)
11801                 {
11802                     op1->gtFlags |= GTF_UNSIGNED;
11803                 }
11804                 impPushOnStack(op1, tiRetVal);
11805                 break;
11806
11807             case CEE_NEG:
11808                 if (tiVerificationNeeded)
11809                 {
11810                     tiRetVal = impStackTop().seTypeInfo;
11811                     Verify(tiRetVal.IsNumberType(), "Bad arg");
11812                 }
11813
11814                 op1 = impPopStack().val;
11815                 impBashVarAddrsToI(op1, nullptr);
11816                 impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal);
11817                 break;
11818
11819             case CEE_POP:
11820                 if (tiVerificationNeeded)
11821                 {
11822                     impStackTop(0);
11823                 }
11824
11825                 /* Pull the top value from the stack */
11826
11827                 op1 = impPopStack(clsHnd).val;
11828
11829                 /* Get hold of the type of the value being duplicated */
11830
11831                 lclTyp = genActualType(op1->gtType);
11832
11833                 /* Does the value have any side effects? */
11834
11835                 if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode)
11836                 {
11837                     // Since we are throwing away the value, just normalize
11838                     // it to its address.  This is more efficient.
11839
11840                     if (varTypeIsStruct(op1))
11841                     {
11842 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
11843                         // Non-calls, such as obj or ret_expr, have to go through this.
11844                         // Calls with large struct return value have to go through this.
11845                         // Helper calls with small struct return value also have to go
11846                         // through this since they do not follow Unix calling convention.
11847                         if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd) ||
11848                             op1->AsCall()->gtCallType == CT_HELPER)
11849 #endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
11850                         {
11851                             op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false);
11852                         }
11853                     }
11854
11855                     // If op1 is non-overflow cast, throw it away since it is useless.
11856                     // Another reason for throwing away the useless cast is in the context of
11857                     // implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)).
11858                     // The cast gets added as part of importing GT_CALL, which gets in the way
11859                     // of fgMorphCall() on the forms of tail call nodes that we assert.
11860                     if ((op1->gtOper == GT_CAST) && !op1->gtOverflow())
11861                     {
11862                         op1 = op1->gtOp.gtOp1;
11863                     }
11864
11865                     // If 'op1' is an expression, create an assignment node.
11866                     // Helps analyses (like CSE) to work fine.
11867
11868                     if (op1->gtOper != GT_CALL)
11869                     {
11870                         op1 = gtUnusedValNode(op1);
11871                     }
11872
11873                     /* Append the value to the tree list */
11874                     goto SPILL_APPEND;
11875                 }
11876
11877                 /* No side effects - just throw the <BEEP> thing away */
11878                 break;
11879
11880             case CEE_DUP:
11881
11882                 if (tiVerificationNeeded)
11883                 {
11884                     // Dup could start the beginning of a delegate creation sequence; remember that
11885                     delegateCreateStart = codeAddr - 1;
11886                     impStackTop(0);
11887                 }
11888
11889                 // Convert a (dup, stloc) sequence into a (stloc, ldloc) sequence in the following cases:
11890                 // - If this is non-debug code - so that CSE will recognize the two as equal.
11891                 //   This helps eliminate a redundant bounds check in cases such as:
11892                 //       ariba[i+3] += some_value;
11893                 // - If the top of the stack is a non-leaf that may be expensive to clone.
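                      // For example (illustrative IL): "dup; stloc.0" becomes "stloc.0; ldloc.0", so
                      // both uses of the value are reads of the same local and CSE can match them.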
11894
11895                 if (codeAddr < codeEndp)
11896                 {
11897                     OPCODE nextOpcode = (OPCODE)getU1LittleEndian(codeAddr);
11898                     if (impIsAnySTLOC(nextOpcode))
11899                     {
11900                         if (!opts.compDbgCode)
11901                         {
11902                             insertLdloc = true;
11903                             break;
11904                         }
11905                         GenTree* stackTop = impStackTop().val;
11906                         if (!stackTop->IsIntegralConst(0) && !stackTop->IsFPZero() && !stackTop->IsLocal())
11907                         {
11908                             insertLdloc = true;
11909                             break;
11910                         }
11911                     }
11912                 }
11913
11914                 /* Pull the top value from the stack */
11915                 op1 = impPopStack(tiRetVal);
11916
11917                 /* Clone the value */
11918                 op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL,
11919                                    nullptr DEBUGARG("DUP instruction"));
11920
11921                 /* Either the tree started with no global effects, or impCloneExpr
11922                    evaluated the tree to a temp and returned two copies of that
11923                    temp. Either way, neither op1 nor op2 should have side effects.
11924                 */
11925                 assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT));
11926
11927                 /* Push the tree/temp back on the stack */
11928                 impPushOnStack(op1, tiRetVal);
11929
11930                 /* Push the copy on the stack */
11931                 impPushOnStack(op2, tiRetVal);
11932
11933                 break;
11934
11935             case CEE_STIND_I1:
11936                 lclTyp = TYP_BYTE;
11937                 goto STIND;
11938             case CEE_STIND_I2:
11939                 lclTyp = TYP_SHORT;
11940                 goto STIND;
11941             case CEE_STIND_I4:
11942                 lclTyp = TYP_INT;
11943                 goto STIND;
11944             case CEE_STIND_I8:
11945                 lclTyp = TYP_LONG;
11946                 goto STIND;
11947             case CEE_STIND_I:
11948                 lclTyp = TYP_I_IMPL;
11949                 goto STIND;
11950             case CEE_STIND_REF:
11951                 lclTyp = TYP_REF;
11952                 goto STIND;
11953             case CEE_STIND_R4:
11954                 lclTyp = TYP_FLOAT;
11955                 goto STIND;
11956             case CEE_STIND_R8:
11957                 lclTyp = TYP_DOUBLE;
11958                 goto STIND;
11959             STIND:
11960
11961                 if (tiVerificationNeeded)
11962                 {
11963                     typeInfo instrType(lclTyp);
11964 #ifdef _TARGET_64BIT_
11965                     if (opcode == CEE_STIND_I)
11966                     {
11967                         instrType = typeInfo::nativeInt();
11968                     }
11969 #endif // _TARGET_64BIT_
11970                     verVerifySTIND(impStackTop(1).seTypeInfo, impStackTop(0).seTypeInfo, instrType);
11971                 }
11972                 else
11973                 {
11974                     compUnsafeCastUsed = true; // Have to go conservative
11975                 }
11976
11977             STIND_POST_VERIFY:
11978
11979                 op2 = impPopStack().val; // value to store
11980                 op1 = impPopStack().val; // address to store to
11981
11982                 // you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF
11983                 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
11984
11985                 impBashVarAddrsToI(op1, op2);
11986
11987                 op2 = impImplicitR4orR8Cast(op2, lclTyp);
11988
11989 #ifdef _TARGET_64BIT_
11990                 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
11991                 if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
11992                 {
11993                     op2->gtType = TYP_I_IMPL;
11994                 }
11995                 else
11996                 {
11997                     // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
11998                     //
11999                     if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
12000                     {
12001                         assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12002                         op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
12003                     }
12004                     // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12005                     //
12006                     if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
12007                     {
12008                         assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12009                         op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
12010                     }
12011                 }
12012 #endif // _TARGET_64BIT_
12013
12014                 if (opcode == CEE_STIND_REF)
12015                 {
12016                     // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF
12017                     assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType));
12018                     lclTyp = genActualType(op2->TypeGet());
12019                 }
12020
12021 // Check target type.
12022 #ifdef DEBUG
12023                 if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF)
12024                 {
12025                     if (op2->gtType == TYP_BYREF)
12026                     {
12027                         assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL);
12028                     }
12029                     else if (lclTyp == TYP_BYREF)
12030                     {
12031                         assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType));
12032                     }
12033                 }
12034                 else
12035                 {
12036                     assertImp(genActualType(op2->gtType) == genActualType(lclTyp) ||
12037                               ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) ||
12038                               (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp)));
12039                 }
12040 #endif
12041
12042                 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12043
12044                 // stind could point anywhere, e.g. a boxed class static int
12045                 op1->gtFlags |= GTF_IND_TGTANYWHERE;
12046
12047                 if (prefixFlags & PREFIX_VOLATILE)
12048                 {
12049                     assert(op1->OperGet() == GT_IND);
12050                     op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
12051                     op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12052                     op1->gtFlags |= GTF_IND_VOLATILE;
12053                 }
12054
12055                 if (prefixFlags & PREFIX_UNALIGNED)
12056                 {
12057                     assert(op1->OperGet() == GT_IND);
12058                     op1->gtFlags |= GTF_IND_UNALIGNED;
12059                 }
12060
12061                 op1 = gtNewAssignNode(op1, op2);
12062                 op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
12063
12064                 // Spill side-effects AND global-data-accesses
12065                 if (verCurrentState.esStackDepth > 0)
12066                 {
12067                     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND"));
12068                 }
12069
12070                 goto APPEND;
12071
12072             case CEE_LDIND_I1:
12073                 lclTyp = TYP_BYTE;
12074                 goto LDIND;
12075             case CEE_LDIND_I2:
12076                 lclTyp = TYP_SHORT;
12077                 goto LDIND;
12078             case CEE_LDIND_U4:
12079             case CEE_LDIND_I4:
12080                 lclTyp = TYP_INT;
12081                 goto LDIND;
12082             case CEE_LDIND_I8:
12083                 lclTyp = TYP_LONG;
12084                 goto LDIND;
12085             case CEE_LDIND_REF:
12086                 lclTyp = TYP_REF;
12087                 goto LDIND;
12088             case CEE_LDIND_I:
12089                 lclTyp = TYP_I_IMPL;
12090                 goto LDIND;
12091             case CEE_LDIND_R4:
12092                 lclTyp = TYP_FLOAT;
12093                 goto LDIND;
12094             case CEE_LDIND_R8:
12095                 lclTyp = TYP_DOUBLE;
12096                 goto LDIND;
12097             case CEE_LDIND_U1:
12098                 lclTyp = TYP_UBYTE;
12099                 goto LDIND;
12100             case CEE_LDIND_U2:
12101                 lclTyp = TYP_CHAR;
12102                 goto LDIND;
12103             LDIND:
12104
12105                 if (tiVerificationNeeded)
12106                 {
12107                     typeInfo lclTiType(lclTyp);
12108 #ifdef _TARGET_64BIT_
12109                     if (opcode == CEE_LDIND_I)
12110                     {
12111                         lclTiType = typeInfo::nativeInt();
12112                     }
12113 #endif // _TARGET_64BIT_
12114                     tiRetVal = verVerifyLDIND(impStackTop().seTypeInfo, lclTiType);
12115                     tiRetVal.NormaliseForStack();
12116                 }
12117                 else
12118                 {
12119                     compUnsafeCastUsed = true; // Have to go conservative
12120                 }
12121
12122             LDIND_POST_VERIFY:
12123
12124                 op1 = impPopStack().val; // address to load from
12125                 impBashVarAddrsToI(op1);
12126
12127 #ifdef _TARGET_64BIT_
12128                 // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12129                 //
12130                 if (genActualType(op1->gtType) == TYP_INT)
12131                 {
12132                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12133                     op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
12134                 }
12135 #endif
12136
12137                 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12138
12139                 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12140
12141                 // ldind could point anywhere, e.g. a boxed class static int
12142                 op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
12143
12144                 if (prefixFlags & PREFIX_VOLATILE)
12145                 {
12146                     assert(op1->OperGet() == GT_IND);
12147                     op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
12148                     op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12149                     op1->gtFlags |= GTF_IND_VOLATILE;
12150                 }
12151
12152                 if (prefixFlags & PREFIX_UNALIGNED)
12153                 {
12154                     assert(op1->OperGet() == GT_IND);
12155                     op1->gtFlags |= GTF_IND_UNALIGNED;
12156                 }
12157
12158                 impPushOnStack(op1, tiRetVal);
12159
12160                 break;
12161
12162             case CEE_UNALIGNED:
12163
12164                 assert(sz == 1);
12165                 val = getU1LittleEndian(codeAddr);
12166                 ++codeAddr;
12167                 JITDUMP(" %u", val);
12168                 if ((val != 1) && (val != 2) && (val != 4))
12169                 {
12170                     BADCODE("Alignment unaligned. must be 1, 2, or 4");
12171                 }
12172
12173                 Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. prefixes");
12174                 prefixFlags |= PREFIX_UNALIGNED;
12175
12176                 impValidateMemoryAccessOpcode(codeAddr, codeEndp, false);
12177
12178             PREFIX:
12179                 opcode = (OPCODE)getU1LittleEndian(codeAddr);
12180                 codeAddr += sizeof(__int8);
12181                 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
12182                 goto DECODE_OPCODE;
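                      // Illustrative note (hypothetical IL for clarity): prefix opcodes loop back through
                      // PREFIX/DECODE_OPCODE, so a sequence such as
                      //     unaligned. 1
                      //     volatile.
                      //     ldind.i4
                      // accumulates PREFIX_UNALIGNED and PREFIX_VOLATILE in prefixFlags before the ldind
                      // itself is imported, which then sets GTF_IND_UNALIGNED / GTF_IND_VOLATILE on the load.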
12183
12184             case CEE_VOLATILE:
12185
12186                 Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes");
12187                 prefixFlags |= PREFIX_VOLATILE;
12188
12189                 impValidateMemoryAccessOpcode(codeAddr, codeEndp, true);
12190
12191                 assert(sz == 0);
12192                 goto PREFIX;
12193
12194             case CEE_LDFTN:
12195             {
12196                 // Need to do a lookup here so that we perform an access check
12197                 // and do a NOWAY if protections are violated
12198                 _impResolveToken(CORINFO_TOKENKIND_Method);
12199
12200                 JITDUMP(" %08X", resolvedToken.token);
12201
12202                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12203                               addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN)),
12204                               &callInfo);
12205
12206                 // This check really only applies to intrinsic Array.Address methods
12207                 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12208                 {
12209                     NO_WAY("Currently do not support LDFTN of Parameterized functions");
12210                 }
12211
12212                 // Do this before DO_LDFTN since CEE_LDVIRTFTN does it on its own.
12213                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12214
12215                 if (tiVerificationNeeded)
12216                 {
12217                     // LDFTN could be the beginning of a delegate creation sequence; remember that
12218                     delegateCreateStart = codeAddr - 2;
12219
12220                     // check any constraints on the callee's class and type parameters
12221                     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12222                                    "method has unsatisfied class constraints");
12223                     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12224                                                                                 resolvedToken.hMethod),
12225                                    "method has unsatisfied method constraints");
12226
12227                     mflags = callInfo.verMethodFlags;
12228                     Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDFTN on a constructor");
12229                 }
12230
12231             DO_LDFTN:
12232                 op1 = impMethodPointer(&resolvedToken, &callInfo);
12233                 if (compDonotInline())
12234                 {
12235                     return;
12236                 }
12237
12238                 impPushOnStack(op1, typeInfo(resolvedToken.hMethod));
12239
12240                 break;
12241             }
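                  // Illustrative note (hypothetical IL for clarity): in verifiable code ldftn typically
                  // appears as part of a delegate creation sequence, which is why delegateCreateStart is
                  // recorded above, e.g.
                  //     ldftn  instance void C::M()
                  //     newobj instance void SomeDelegate::.ctor(object, native int)
                  // The newobj/call handling below passes delegateCreateStart to the verifier when it
                  // checks the delegate constructor call.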
12242
12243             case CEE_LDVIRTFTN:
12244             {
12245                 /* Get the method token */
12246
12247                 _impResolveToken(CORINFO_TOKENKIND_Method);
12248
12249                 JITDUMP(" %08X", resolvedToken.token);
12250
12251                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */,
12252                               addVerifyFlag(combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN),
12253                                                     CORINFO_CALLINFO_CALLVIRT)),
12254                               &callInfo);
12255
12256                 // This check really only applies to intrinsic Array.Address methods
12257                 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12258                 {
12259                     NO_WAY("Currently do not support LDFTN of Parameterized functions");
12260                 }
12261
12262                 mflags = callInfo.methodFlags;
12263
12264                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12265
12266                 if (compIsForInlining())
12267                 {
12268                     if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12269                     {
12270                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL);
12271                         return;
12272                     }
12273                 }
12274
12275                 CORINFO_SIG_INFO& ftnSig = callInfo.sig;
12276
12277                 if (tiVerificationNeeded)
12278                 {
12279
12280                     Verify(ftnSig.hasThis(), "ldvirtftn on a static method");
12281                     Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDVIRTFTN on a constructor");
12282
12283                     // JIT32 verifier rejects verifiable ldvirtftn pattern
12284                     typeInfo declType =
12285                         verMakeTypeInfo(resolvedToken.hClass, true); // Change TI_STRUCT to TI_REF when necessary
12286
12287                     typeInfo arg = impStackTop().seTypeInfo;
12288                     Verify((arg.IsType(TI_REF) || arg.IsType(TI_NULL)) && tiCompatibleWith(arg, declType, true),
12289                            "bad ldvirtftn");
12290
12291                     CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
12292                     if (!(arg.IsType(TI_NULL) || (mflags & CORINFO_FLG_STATIC)))
12293                     {
12294                         instanceClassHnd = arg.GetClassHandleForObjRef();
12295                     }
12296
12297                     // check any constraints on the method's class and type parameters
12298                     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12299                                    "method has unsatisfied class constraints");
12300                     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12301                                                                                 resolvedToken.hMethod),
12302                                    "method has unsatisfied method constraints");
12303
12304                     if (mflags & CORINFO_FLG_PROTECTED)
12305                     {
12306                         Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
12307                                "Accessing protected method through wrong type.");
12308                     }
12309                 }
12310
12311                 /* Get the object-ref */
12312                 op1 = impPopStack().val;
12313                 assertImp(op1->gtType == TYP_REF);
12314
12315                 if (opts.IsReadyToRun())
12316                 {
12317                     if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN)
12318                     {
12319                         if (op1->gtFlags & GTF_SIDE_EFFECT)
12320                         {
12321                             op1 = gtUnusedValNode(op1);
12322                             impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12323                         }
12324                         goto DO_LDFTN;
12325                     }
12326                 }
12327                 else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12328                 {
12329                     if (op1->gtFlags & GTF_SIDE_EFFECT)
12330                     {
12331                         op1 = gtUnusedValNode(op1);
12332                         impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12333                     }
12334                     goto DO_LDFTN;
12335                 }
12336
12337                 GenTreePtr fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo);
12338                 if (compDonotInline())
12339                 {
12340                     return;
12341                 }
12342
12343                 impPushOnStack(fptr, typeInfo(resolvedToken.hMethod));
12344
12345                 break;
12346             }
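                  // Descriptive note: as the code above shows, ldvirtftn on a method that cannot actually be
                  // overridden (final, static, or not virtual; or, under ReadyToRun, when the EE reports a
                  // non-LDVIRTFTN call kind) is treated like ldftn: the object reference is popped, kept only
                  // for its side effects, and control jumps to DO_LDFTN. Only genuinely virtual targets go
                  // through impImportLdvirtftn to fetch the target at run time.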
12347
12348             case CEE_CONSTRAINED:
12349
12350                 assertImp(sz == sizeof(unsigned));
12351                 impResolveToken(codeAddr, &constrainedResolvedToken, CORINFO_TOKENKIND_Constrained);
12352                 codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually
12353                 JITDUMP(" (%08X) ", constrainedResolvedToken.token);
12354
12355                 Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes");
12356                 prefixFlags |= PREFIX_CONSTRAINED;
12357
12358                 {
12359                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12360                     if (actualOpcode != CEE_CALLVIRT)
12361                     {
12362                         BADCODE("constrained. has to be followed by callvirt");
12363                     }
12364                 }
12365
12366                 goto PREFIX;
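                      // Illustrative note (hypothetical IL for clarity): this importer requires constrained.
                      // to be immediately followed by callvirt, as checked above, e.g. for a generic T:
                      //     constrained. !!T
                      //     callvirt     instance string [mscorlib]System.Object::ToString()
                      // The constrainedResolvedToken captured here is consumed by the callvirt handling below.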
12367
12368             case CEE_READONLY:
12369                 JITDUMP(" readonly.");
12370
12371                 Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes");
12372                 prefixFlags |= PREFIX_READONLY;
12373
12374                 {
12375                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12376                     if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode))
12377                     {
12378                         BADCODE("readonly. has to be followed by ldelema or call");
12379                     }
12380                 }
12381
12382                 assert(sz == 0);
12383                 goto PREFIX;
12384
12385             case CEE_TAILCALL:
12386                 JITDUMP(" tail.");
12387
12388                 Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes");
12389                 prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
12390
12391                 {
12392                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12393                     if (!impOpcodeIsCallOpcode(actualOpcode))
12394                     {
12395                         BADCODE("tailcall. has to be followed by call, callvirt or calli");
12396                     }
12397                 }
12398                 assert(sz == 0);
12399                 goto PREFIX;
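                      // Illustrative note (hypothetical IL for clarity): tail. must immediately precede a
                      // call, callvirt or calli, and that call is expected to be followed by ret, e.g.
                      //     tail.
                      //     call   int32 C::M(int32)
                      //     ret
                      // Whether the explicit tail call can actually be dispatched as one is decided later,
                      // during the call import.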
12400
12401             case CEE_NEWOBJ:
12402
12403                 /* Since we will implicitly insert newObjThisPtr at the start of the
12404                    argument list, spill any GTF_ORDER_SIDEEFF */
12405                 impSpillSpecialSideEff();
12406
12407                 /* NEWOBJ does not respond to TAIL */
12408                 prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT;
12409
12410                 /* NEWOBJ does not respond to CONSTRAINED */
12411                 prefixFlags &= ~PREFIX_CONSTRAINED;
12412
12413 #if COR_JIT_EE_VERSION > 460
12414                 _impResolveToken(CORINFO_TOKENKIND_NewObj);
12415 #else
12416                 _impResolveToken(CORINFO_TOKENKIND_Method);
12417 #endif
12418
12419                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12420                               addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM)),
12421                               &callInfo);
12422
12423                 if (compIsForInlining())
12424                 {
12425                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
12426                     {
12427                         // Check to see if this call violates the boundary.
12428                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
12429                         return;
12430                     }
12431                 }
12432
12433                 mflags = callInfo.methodFlags;
12434
12435                 if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0)
12436                 {
12437                     BADCODE("newobj on static or abstract method");
12438                 }
12439
12440                 // Insert the security callout before any actual code is generated
12441                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12442
12443                 // There are three different cases for new.
12444                 // Object size is variable (depends on arguments) in the first two:
12445                 //      1) Object is an array (arrays are treated specially by the EE)
12446                 //      2) Object is some other variable-sized object (e.g. String)
12447                 //      3) Class size can be determined beforehand (normal case)
12448                 // In the first case we need to call a NEWOBJ helper (multinewarray),
12449                 // in the second case we call the constructor with a '0' this pointer,
12450                 // and in the third case we allocate the memory and then call the constructor.
12451
12452                 clsFlags = callInfo.classFlags;
12453                 if (clsFlags & CORINFO_FLG_ARRAY)
12454                 {
12455                     if (tiVerificationNeeded)
12456                     {
12457                         CORINFO_CLASS_HANDLE elemTypeHnd;
12458                         INDEBUG(CorInfoType corType =)
12459                         info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
12460                         assert(!(elemTypeHnd == nullptr && corType == CORINFO_TYPE_VALUECLASS));
12461                         Verify(elemTypeHnd == nullptr ||
12462                                    !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
12463                                "newarr of byref-like objects");
12464                         verVerifyCall(opcode, &resolvedToken, nullptr, ((prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0),
12465                                       ((prefixFlags & PREFIX_READONLY) != 0), delegateCreateStart, codeAddr - 1,
12466                                       &callInfo DEBUGARG(info.compFullName));
12467                     }
12468                     // Arrays need to call the NEWOBJ helper.
12469                     assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE);
12470
12471                     impImportNewObjArray(&resolvedToken, &callInfo);
12472                     if (compDonotInline())
12473                     {
12474                         return;
12475                     }
12476
12477                     callTyp = TYP_REF;
12478                     break;
12479                 }
12480                 // At present this can only be String
12481                 else if (clsFlags & CORINFO_FLG_VAROBJSIZE)
12482                 {
12483                     if (IsTargetAbi(CORINFO_CORERT_ABI))
12484                     {
12485                         // The dummy argument does not exist in CoreRT
12486                         newObjThisPtr = nullptr;
12487                     }
12488                     else
12489                     {
12490                         // This is the case for variable-sized objects that are not
12491                         // arrays.  In this case, call the constructor with a null 'this'
12492                         // pointer
12493                         newObjThisPtr = gtNewIconNode(0, TYP_REF);
12494                     }
12495
12496                     /* Remember that this basic block contains 'new' of an object */
12497                     block->bbFlags |= BBF_HAS_NEWOBJ;
12498                     optMethodFlags |= OMF_HAS_NEWOBJ;
12499                 }
12500                 else
12501                 {
12502                     // This is the normal case where the size of the object is
12503                     // fixed.  Allocate the memory and call the constructor.
12504
12505                     // Note: We cannot add a peep to avoid use of temp here
12506                     // because we don't have enough interference info to detect when
12507                     // the source and destination interfere, e.g.: s = new S(ref);
12508
12509                     // TODO: Find the correct place to introduce a general
12510                     // reverse copy prop for struct return values from newobj or
12511                     // any function returning structs.
12512
12513                     /* get a temporary for the new object */
12514                     lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp"));
12515
12516                     // In the value class case we only need clsHnd for size calcs.
12517                     //
12518                     // The lookup of the code pointer will be handled by CALL in this case
12519                     if (clsFlags & CORINFO_FLG_VALUECLASS)
12520                     {
12521                         if (compIsForInlining())
12522                         {
12523                             // If value class has GC fields, inform the inliner. It may choose to
12524                             // bail out on the inline.
12525                             DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
12526                             if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
12527                             {
12528                                 compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
12529                                 if (compInlineResult->IsFailure())
12530                                 {
12531                                     return;
12532                                 }
12533
12534                                 // Do further notification in the case where the call site is rare;
12535                                 // some policies do not track the relative hotness of call sites for
12536                                 // "always" inline cases.
12537                                 if (impInlineInfo->iciBlock->isRunRarely())
12538                                 {
12539                                     compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
12540                                     if (compInlineResult->IsFailure())
12541                                     {
12542                                         return;
12543                                     }
12544                                 }
12545                             }
12546                         }
12547
12548                         CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
12549                         unsigned    size   = info.compCompHnd->getClassSize(resolvedToken.hClass);
12550
12551                         if (impIsPrimitive(jitTyp))
12552                         {
12553                             lvaTable[lclNum].lvType = JITtype2varType(jitTyp);
12554                         }
12555                         else
12556                         {
12557                             // The local variable itself is the allocated space.
12558                             // Here we need the unsafe value class check, since the address of the struct is taken
12559                             // for further use and is potentially exploitable.
12560                             lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */);
12561                         }
12562
12563                         // Append a tree to zero-out the temp
12564                         newObjThisPtr = gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet());
12565
12566                         newObjThisPtr = gtNewBlkOpNode(newObjThisPtr,    // Dest
12567                                                        gtNewIconNode(0), // Value
12568                                                        size,             // Size
12569                                                        false,            // isVolatile
12570                                                        false);           // not copyBlock
12571                         impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
12572
12573                         // Obtain the address of the temp
12574                         newObjThisPtr =
12575                             gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet()));
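                              // Illustrative sketch (not authoritative; hypothetical IL for clarity): for
                              //     newobj instance void SomeStruct::.ctor(int32)
                              // the statements appended so far zero-initialize the struct temp, and
                              // newObjThisPtr is now the TYP_BYREF address of that temp; the CALL path below
                              // passes it as the 'this' argument to the constructor, and the constructed
                              // value is afterwards read back from the temp.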
12576                     }
12577                     else
12578                     {
12579 #ifdef FEATURE_READYTORUN_COMPILER
12580                         if (opts.IsReadyToRun())
12581                         {
12582                             op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
12583                             usingReadyToRunHelper = (op1 != nullptr);
12584                         }
12585
12586                         if (!usingReadyToRunHelper)
12587 #endif
12588                         {
12589                             op1 = impParentClassTokenToHandle(&resolvedToken, nullptr, TRUE);
12590                             if (op1 == nullptr)
12591                             { // compDonotInline()
12592                                 return;
12593                             }
12594
12595                             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
12596                             // and the newfast call with a single call to a dynamic R2R cell that will:
12597                             //      1) Load the context
12598                             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate
12599                             //      stub
12600                             //      3) Allocate and return the new object
12601                             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
12602
12603                             op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd),
12604                                                     resolvedToken.hClass, TYP_REF, op1);
12605                         }
12606
12607                         // Remember that this basic block contains 'new' of an object
12608                         block->bbFlags |= BBF_HAS_NEWOBJ;
12609                         optMethodFlags |= OMF_HAS_NEWOBJ;
12610
12611                         // Append the assignment to the temp/local. We don't need to spill
12612                         // anything, as we are just calling an EE/JIT helper which can only
12613                         // cause an (async) OutOfMemoryException.
12614
12615                         // We assign the newly allocated object (by a GT_ALLOCOBJ node)
12616                         // to a temp. Note that the pattern "temp = allocObj" is required
12617                         // by the ObjectAllocator phase to be able to identify GT_ALLOCOBJ nodes
12618                         // without an exhaustive walk over all expressions.
12619
12620                         impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE);
12621
12622                         newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF);
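                              // Illustrative sketch (not authoritative): for a reference type the import so
                              // far amounts to
                              //     tempN = GT_ALLOCOBJ(clsHnd)   (or the ReadyToRun new-object helper call)
                              // and newObjThisPtr is tempN, so the constructor invoked by the CALL path below
                              // runs against the freshly allocated object whose reference stays in tempN.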
12623                     }
12624                 }
12625                 goto CALL;
12626
12627             case CEE_CALLI:
12628
12629                 /* CALLI does not respond to CONSTRAINED */
12630                 prefixFlags &= ~PREFIX_CONSTRAINED;
12631
12632                 if (compIsForInlining())
12633                 {
12634                     // CALLI doesn't have a method handle, so assume the worst.
12635                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
12636                     {
12637                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_CALLI);
12638                         return;
12639                     }
12640                 }
12641
12642             // fall through
12643
12644             case CEE_CALLVIRT:
12645             case CEE_CALL:
12646
12647                 // We can't call getCallInfo on the token from a CALLI, but we need it in
12648                 // many other places.  We unfortunately embed that knowledge here.
12649                 if (opcode != CEE_CALLI)
12650                 {
12651                     _impResolveToken(CORINFO_TOKENKIND_Method);
12652
12653                     eeGetCallInfo(&resolvedToken,
12654                                   (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr,
12655                                   // this is how impImportCall invokes getCallInfo
12656                                   addVerifyFlag(
12657                                       combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS),
12658                                               (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT
12659                                                                        : CORINFO_CALLINFO_NONE)),
12660                                   &callInfo);
12661                 }
12662                 else
12663                 {
12664                     // Suppress uninitialized use warning.
12665                     memset(&resolvedToken, 0, sizeof(resolvedToken));
12666                     memset(&callInfo, 0, sizeof(callInfo));
12667
12668                     resolvedToken.token = getU4LittleEndian(codeAddr);
12669                 }
12670
12671             CALL: // memberRef should be set.
12672                 // newObjThisPtr should be set for CEE_NEWOBJ
12673
12674                 JITDUMP(" %08X", resolvedToken.token);
12675                 constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0;
12676
12677                 bool newBBcreatedForTailcallStress;
12678
12679                 newBBcreatedForTailcallStress = false;
12680
12681                 if (compIsForInlining())
12682                 {
12683                     if (compDonotInline())
12684                     {
12685                         return;
12686                     }
12687                     // We rule out inlinees with explicit tail calls in fgMakeBasicBlocks.
12688                     assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0);
12689                 }
12690                 else
12691                 {
12692                     if (compTailCallStress())
12693                     {
12694                         // Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()?
12695                         // Tail call stress only recognizes call+ret patterns and forces them to be
12696                         // explicit tail prefixed calls.  Also fgMakeBasicBlocks() under tail call stress
12697                         // doesn't import the 'ret' opcode following the call into the basic block containing
12698                         // the call; instead it imports it into a new basic block.  Note that fgMakeBasicBlocks()
12699                         // already checks that there is an opcode following the call, hence it is
12700                         // safe here to read the next opcode without a bounds check.
12701                         newBBcreatedForTailcallStress =
12702                             impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL, (not a CEE_NEWOBJ). So, don't
12703                                                              // make it jump to RET.
12704                             (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET
12705
12706                         if (newBBcreatedForTailcallStress &&
12707                             !(prefixFlags & PREFIX_TAILCALL_EXPLICIT) && // User hasn't set "tail." prefix yet.
12708                             verCheckTailCallConstraint(opcode, &resolvedToken,
12709                                                        constraintCall ? &constrainedResolvedToken : nullptr,
12710                                                        true) // Is it legal to do a tailcall?
12711                             )
12712                         {
12713                             // Stress the tailcall.
12714                             JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)");
12715                             prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
12716                         }
12717                     }
12718
12719                     // Note that when running under tail call stress, a call will be marked as explicit tail prefixed,
12720                     // and hence will not be considered for implicit tail calling.
12721                     bool isRecursive = (callInfo.hMethod == info.compMethodHnd);
12722                     if (impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive))
12723                     {
12724                         JITDUMP(" (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
12725                         prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
12726                     }
12727                 }
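                      // Illustrative note (hypothetical IL for clarity): an implicit tail call candidate is a
                      // call in tail position without the tail. prefix, e.g.
                      //     call   int32 C::M(int32)
                      //     ret
                      // When impIsImplicitTailCallCandidate accepts it, PREFIX_TAILCALL_IMPLICIT is set above
                      // so that impImportCall can consider dispatching the call as a tail call.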
12728
12729                 // Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call).
12730                 explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
12731                 readonlyCall     = (prefixFlags & PREFIX_READONLY) != 0;
12732
12733                 if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ)
12734                 {
12735                     // All calls and delegates need a security callout.
12736                     // For delegates, this is the call to the delegate constructor, not the access check on the
12737                     // LD(virt)FTN.
12738                     impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12739
12740 #if 0 // DevDiv 410397 - This breaks too many obfuscated apps to do this in an in-place release
12741      
12742                 // DevDiv 291703 - we need to check for accessibility between the caller of InitializeArray
12743                 // and the field it is reading, thus it is now unverifiable to not immediately precede with
12744                 // ldtoken <field token>, and we now check accessibility
12745                 if ((callInfo.methodFlags & CORINFO_FLG_INTRINSIC) &&
12746                     (info.compCompHnd->getIntrinsicID(callInfo.hMethod) == CORINFO_INTRINSIC_InitializeArray))
12747                 {
12748                     if (prevOpcode != CEE_LDTOKEN)
12749                     {
12750                         Verify(prevOpcode == CEE_LDTOKEN, "Need ldtoken for InitializeArray");
12751                     }
12752                     else
12753                     {
12754                         assert(lastLoadToken != NULL);
12755                         // Now that we know we have a token, verify that it is accessible for loading
12756                         CORINFO_RESOLVED_TOKEN resolvedLoadField;
12757                         impResolveToken(lastLoadToken, &resolvedLoadField, CORINFO_TOKENKIND_Field);
12758                         eeGetFieldInfo(&resolvedLoadField, CORINFO_ACCESS_INIT_ARRAY, &fieldInfo);
12759                         impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
12760                     }
12761                 }
12762
12763 #endif // DevDiv 410397
12764                 }
12765
12766                 if (tiVerificationNeeded)
12767                 {
12768                     verVerifyCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
12769                                   explicitTailCall, readonlyCall, delegateCreateStart, codeAddr - 1,
12770                                   &callInfo DEBUGARG(info.compFullName));
12771                 }
12772
12773                 // Insert delegate callout here.
12774                 if (opcode == CEE_NEWOBJ && (mflags & CORINFO_FLG_CONSTRUCTOR) && (clsFlags & CORINFO_FLG_DELEGATE))
12775                 {
12776 #ifdef DEBUG
12777                     // We should do this only if verification is enabled
12778                     // If verification is disabled, delegateCreateStart will not be initialized correctly
12779                     if (tiVerificationNeeded)
12780                     {
12781                         mdMemberRef delegateMethodRef = mdMemberRefNil;
12782                         // We should get here only for well formed delegate creation.
12783                         assert(verCheckDelegateCreation(delegateCreateStart, codeAddr - 1, delegateMethodRef));
12784                     }
12785 #endif
12786
12787 #ifdef FEATURE_CORECLR
12788                     // In coreclr the delegate transparency rule needs to be enforced even if verification is disabled
12789                     typeInfo              tiActualFtn          = impStackTop(0).seTypeInfo;
12790                     CORINFO_METHOD_HANDLE delegateMethodHandle = tiActualFtn.GetMethod2();
12791
12792                     impInsertCalloutForDelegate(info.compMethodHnd, delegateMethodHandle, resolvedToken.hClass);
12793 #endif // FEATURE_CORECLR
12794                 }
12795
12796                 callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
12797                                         newObjThisPtr, prefixFlags, &callInfo, opcodeOffs);
12798                 if (compDonotInline())
12799                 {
12800                     return;
12801                 }
12802
12803                 if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we
12804                                                                        // have created a new BB after the "call"
12805                 // instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless.
12806                 {
12807                     assert(!compIsForInlining());
12808                     goto RET;
12809                 }
12810
12811                 break;
12812
12813             case CEE_LDFLD:
12814             case CEE_LDSFLD:
12815             case CEE_LDFLDA:
12816             case CEE_LDSFLDA:
12817             {
12818
12819                 BOOL isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA);
12820                 BOOL isLoadStatic  = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA);
12821
12822                 /* Get the CP_Fieldref index */
12823                 assertImp(sz == sizeof(unsigned));
12824
12825                 _impResolveToken(CORINFO_TOKENKIND_Field);
12826
12827                 JITDUMP(" %08X", resolvedToken.token);
12828
12829                 int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET;
12830
12831                 GenTreePtr           obj     = nullptr;
12832                 typeInfo*            tiObj   = nullptr;
12833                 CORINFO_CLASS_HANDLE objType = nullptr; // used for fields
12834
12835                 if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA)
12836                 {
12837                     tiObj = &impStackTop().seTypeInfo;
12838                     obj   = impPopStack(objType).val;
12839
12840                     if (impIsThis(obj))
12841                     {
12842                         aflags |= CORINFO_ACCESS_THIS;
12843
12844                         // An optimization for Contextful classes:
12845                         // we unwrap the proxy when we have a 'this reference'
12846
12847                         if (info.compUnwrapContextful)
12848                         {
12849                             aflags |= CORINFO_ACCESS_UNWRAP;
12850                         }
12851                     }
12852                 }
12853
12854                 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
12855
12856                 // Figure out the type of the member.  We always call canAccessField, so you always need this
12857                 // handle
12858                 CorInfoType ciType = fieldInfo.fieldType;
12859                 clsHnd             = fieldInfo.structType;
12860
12861                 lclTyp = JITtype2varType(ciType);
12862
12863 #ifdef _TARGET_AMD64_
12864                 noway_assert(varTypeIsIntegralOrI(lclTyp) || varTypeIsFloating(lclTyp) || lclTyp == TYP_STRUCT);
12865 #endif // _TARGET_AMD64_
12866
12867                 if (compIsForInlining())
12868                 {
12869                     switch (fieldInfo.fieldAccessor)
12870                     {
12871                         case CORINFO_FIELD_INSTANCE_HELPER:
12872                         case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
12873                         case CORINFO_FIELD_STATIC_ADDR_HELPER:
12874                         case CORINFO_FIELD_STATIC_TLS:
12875
12876                             compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER);
12877                             return;
12878
12879                         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
12880 #if COR_JIT_EE_VERSION > 460
12881                         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
12882 #endif
12883                             /* We may be able to inline the field accessors in specific instantiations of generic
12884                              * methods */
12885                             compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER);
12886                             return;
12887
12888                         default:
12889                             break;
12890                     }
12891
12892                     if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT &&
12893                         clsHnd)
12894                     {
12895                         if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) &&
12896                             !(info.compFlags & CORINFO_FLG_FORCEINLINE))
12897                         {
12898                             // Loading a static valuetype field usually will cause a JitHelper to be called
12899                             // for the static base. This will bloat the code.
12900                             compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS);
12901
12902                             if (compInlineResult->IsFailure())
12903                             {
12904                                 return;
12905                             }
12906                         }
12907                     }
12908                 }
12909
12910                 tiRetVal = verMakeTypeInfo(ciType, clsHnd);
12911                 if (isLoadAddress)
12912                 {
12913                     tiRetVal.MakeByRef();
12914                 }
12915                 else
12916                 {
12917                     tiRetVal.NormaliseForStack();
12918                 }
12919
12920                 // Perform this check always to ensure that we get field access exceptions even with
12921                 // SkipVerification.
12922                 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
12923
12924                 if (tiVerificationNeeded)
12925                 {
12926                     // You can also pass the unboxed struct to  LDFLD
12927                     BOOL bAllowPlainValueTypeAsThis = FALSE;
12928                     if (opcode == CEE_LDFLD && impIsValueType(tiObj))
12929                     {
12930                         bAllowPlainValueTypeAsThis = TRUE;
12931                     }
12932
12933                     verVerifyField(&resolvedToken, fieldInfo, tiObj, isLoadAddress, bAllowPlainValueTypeAsThis);
12934
12935                     // If we're doing this on a heap object or from a 'safe' byref
12936                     // then the result is a safe byref too
12937                     if (isLoadAddress) // load address
12938                     {
12939                         if (fieldInfo.fieldFlags &
12940                             CORINFO_FLG_FIELD_STATIC) // statics marked as safe will have permanent home
12941                         {
12942                             if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_SAFESTATIC_BYREF_RETURN)
12943                             {
12944                                 tiRetVal.SetIsPermanentHomeByRef();
12945                             }
12946                         }
12947                         else if (tiObj->IsObjRef() || tiObj->IsPermanentHomeByRef())
12948                         {
12949                             // ldflda of byref is safe if done on a gc object or on  a
12950                             // safe byref
12951                             tiRetVal.SetIsPermanentHomeByRef();
12952                         }
12953                     }
12954                 }
12955                 else
12956                 {
12957                     // tiVerificationNeeded is false.
12958                     // Raise InvalidProgramException if static load accesses non-static field
12959                     if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
12960                     {
12961                         BADCODE("static access on an instance field");
12962                     }
12963                 }
12964
12965                 // We are using ldfld/a on a static field. We allow it, but need to get side-effect from obj.
12966                 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
12967                 {
12968                     if (obj->gtFlags & GTF_SIDE_EFFECT)
12969                     {
12970                         obj = gtUnusedValNode(obj);
12971                         impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12972                     }
12973                     obj = nullptr;
12974                 }
12975
12976                 /* Preserve 'small' int types */
12977                 if (lclTyp > TYP_INT)
12978                 {
12979                     lclTyp = genActualType(lclTyp);
12980                 }
12981
12982                 bool usesHelper = false;
12983
12984                 switch (fieldInfo.fieldAccessor)
12985                 {
12986                     case CORINFO_FIELD_INSTANCE:
12987 #ifdef FEATURE_READYTORUN_COMPILER
12988                     case CORINFO_FIELD_INSTANCE_WITH_BASE:
12989 #endif
12990                     {
12991                         bool nullcheckNeeded = false;
12992
12993                         obj = impCheckForNullPointer(obj);
12994
12995                         if (isLoadAddress && (obj->gtType == TYP_BYREF) && fgAddrCouldBeNull(obj))
12996                         {
12997                             nullcheckNeeded = true;
12998                         }
12999
13000                         // If the object is a struct, what we really want is
13001                         // for the field to operate on the address of the struct.
13002                         if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj))
13003                         {
13004                             assert(opcode == CEE_LDFLD && objType != nullptr);
13005
13006                             obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true);
13007                         }
13008
13009                         /* Create the data member node */
13010                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset, nullcheckNeeded);
13011
13012 #ifdef FEATURE_READYTORUN_COMPILER
13013                         if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
13014                         {
13015                             op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
13016                         }
13017 #endif
13018
13019                         op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
13020
13021                         if (fgAddrCouldBeNull(obj))
13022                         {
13023                             op1->gtFlags |= GTF_EXCEPT;
13024                         }
13025
13026                         // If gtFldObj is a BYREF then our target is a value class and
13027                         // it could point anywhere, e.g. a boxed class static int
13028                         if (obj->gtType == TYP_BYREF)
13029                         {
13030                             op1->gtFlags |= GTF_IND_TGTANYWHERE;
13031                         }
13032
13033                         DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13034                         if (StructHasOverlappingFields(typeFlags))
13035                         {
13036                             op1->gtField.gtFldMayOverlap = true;
13037                         }
13038
13039                         // wrap it in an address-of operator if necessary
13040                         if (isLoadAddress)
13041                         {
13042                             op1 = gtNewOperNode(GT_ADDR,
13043                                                 (var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1);
13044                         }
13045                         else
13046                         {
13047                             if (compIsForInlining() &&
13048                                 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, obj,
13049                                                                                    impInlineInfo->inlArgInfo))
13050                             {
13051                                 impInlineInfo->thisDereferencedFirst = true;
13052                             }
13053                         }
13054                     }
13055                     break;
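                          // Illustrative sketch (not authoritative; hypothetical IL for clarity): for
                          //     ldfld int32 C::m_x
                          // on an object reference obj, the case above produces roughly GT_FIELD<int>(obj, m_x),
                          // with GTF_EXCEPT when obj may be null; for ldflda the field node is additionally
                          // wrapped in GT_ADDR and typed TYP_BYREF (or TYP_I_IMPL for a non-GC base address).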
13056
13057                     case CORINFO_FIELD_STATIC_TLS:
13058 #ifdef _TARGET_X86_
13059                         // Legacy TLS access is implemented as intrinsic on x86 only
13060
13061                         /* Create the data member node */
13062                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
13063                         op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13064
13065                         if (isLoadAddress)
13066                         {
13067                             op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1);
13068                         }
13069                         break;
13070 #else
13071                         fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13072
13073                         __fallthrough;
13074 #endif
13075
13076                     case CORINFO_FIELD_STATIC_ADDR_HELPER:
13077                     case CORINFO_FIELD_INSTANCE_HELPER:
13078                     case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13079                         op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13080                                                clsHnd, nullptr);
13081                         usesHelper = true;
13082                         break;
13083
13084                     case CORINFO_FIELD_STATIC_ADDRESS:
13085                         // Replace static read-only fields with constant if possible
13086                         if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) &&
13087                             !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) &&
13088                             (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp)))
13089                         {
13090                             CorInfoInitClassResult initClassResult =
13091                                 info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd,
13092                                                             impTokenLookupContextHandle);
13093
13094                             if (initClassResult & CORINFO_INITCLASS_INITIALIZED)
13095                             {
13096                                 void** pFldAddr = nullptr;
13097                                 void*  fldAddr =
13098                                     info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr);
13099
13100                                 // We should always be able to access this static's address directly
13101                                 assert(pFldAddr == nullptr);
13102
13103                                 op1 = impImportStaticReadOnlyField(fldAddr, lclTyp);
13104                                 goto FIELD_DONE;
13105                             }
13106                         }
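                              // Illustrative note (hypothetical IL for clarity): when the class is already
                              // initialized, a load of an initonly primitive static such as
                              //     ldsfld float64 C::Pi
                              // is imported above as a constant read from the field's permanent address via
                              // impImportStaticReadOnlyField, instead of emitting a runtime load.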
13107
13108                         __fallthrough;
13109
13110                     case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13111                     case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13112                     case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13113 #if COR_JIT_EE_VERSION > 460
13114                     case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13115 #endif
13116                         op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13117                                                          lclTyp);
13118                         break;
13119
13120                     case CORINFO_FIELD_INTRINSIC_ZERO:
13121                     {
13122                         assert(aflags & CORINFO_ACCESS_GET);
13123                         op1 = gtNewIconNode(0, lclTyp);
13124                         goto FIELD_DONE;
13125                     }
13126                     break;
13127
13128                     case CORINFO_FIELD_INTRINSIC_EMPTY_STRING:
13129                     {
13130                         assert(aflags & CORINFO_ACCESS_GET);
13131
13132                         LPVOID         pValue;
13133                         InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue);
13134                         op1                = gtNewStringLiteralNode(iat, pValue);
13135                         goto FIELD_DONE;
13136                     }
13137                     break;
13138
13139                     default:
13140                         assert(!"Unexpected fieldAccessor");
13141                 }
13142
13143                 if (!isLoadAddress)
13144                 {
13145
13146                     if (prefixFlags & PREFIX_VOLATILE)
13147                     {
13148                         op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
13149                         op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13150
13151                         if (!usesHelper)
13152                         {
13153                             assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13154                                    (op1->OperGet() == GT_OBJ));
13155                             op1->gtFlags |= GTF_IND_VOLATILE;
13156                         }
13157                     }
13158
13159                     if (prefixFlags & PREFIX_UNALIGNED)
13160                     {
13161                         if (!usesHelper)
13162                         {
13163                             assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13164                                    (op1->OperGet() == GT_OBJ));
13165                             op1->gtFlags |= GTF_IND_UNALIGNED;
13166                         }
13167                     }
13168                 }
13169
13170                 /* Check if the class needs explicit initialization */
13171
13172                 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13173                 {
13174                     GenTreePtr helperNode = impInitClass(&resolvedToken);
13175                     if (compDonotInline())
13176                     {
13177                         return;
13178                     }
13179                     if (helperNode != nullptr)
13180                     {
13181                         op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13182                     }
13183                 }
13184
13185             FIELD_DONE:
13186                 impPushOnStack(op1, tiRetVal);
13187             }
13188             break;
13189
13190             case CEE_STFLD:
13191             case CEE_STSFLD:
13192             {
13193
13194                 BOOL isStoreStatic = (opcode == CEE_STSFLD);
13195
13196                 CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type)
13197
13198                 /* Get the CP_Fieldref index */
13199
13200                 assertImp(sz == sizeof(unsigned));
13201
13202                 _impResolveToken(CORINFO_TOKENKIND_Field);
13203
13204                 JITDUMP(" %08X", resolvedToken.token);
13205
13206                 int        aflags = CORINFO_ACCESS_SET;
13207                 GenTreePtr obj    = nullptr;
13208                 typeInfo*  tiObj  = nullptr;
13209                 typeInfo   tiVal;
13210
13211                 /* Pull the value from the stack */
13212                 op2    = impPopStack(tiVal);
13213                 clsHnd = tiVal.GetClassHandle();
13214
13215                 if (opcode == CEE_STFLD)
13216                 {
13217                     tiObj = &impStackTop().seTypeInfo;
13218                     obj   = impPopStack().val;
13219
13220                     if (impIsThis(obj))
13221                     {
13222                         aflags |= CORINFO_ACCESS_THIS;
13223
13224                         // An optimization for Contextful classes:
13225                         // we unwrap the proxy when we have a 'this reference'
13226
13227                         if (info.compUnwrapContextful)
13228                         {
13229                             aflags |= CORINFO_ACCESS_UNWRAP;
13230                         }
13231                     }
13232                 }
13233
13234                 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
13235
13236                 // Figure out the type of the member.  We always call canAccessField, so you always need this
13237                 // handle
13238                 CorInfoType ciType = fieldInfo.fieldType;
13239                 fieldClsHnd        = fieldInfo.structType;
13240
13241                 lclTyp = JITtype2varType(ciType);
13242
13243                 if (compIsForInlining())
13244                 {
13245                     /* Is this a 'special' (COM) field, a TLS ref static field, a field stored in the
13246                      * GC heap, or a per-inst static? */
13247
13248                     switch (fieldInfo.fieldAccessor)
13249                     {
13250                         case CORINFO_FIELD_INSTANCE_HELPER:
13251                         case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13252                         case CORINFO_FIELD_STATIC_ADDR_HELPER:
13253                         case CORINFO_FIELD_STATIC_TLS:
13254
13255                             compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER);
13256                             return;
13257
13258                         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13259 #if COR_JIT_EE_VERSION > 460
13260                         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13261 #endif
13262
13263                             /* We may be able to inline the field accessors in specific instantiations of generic
13264                              * methods */
13265                             compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER);
13266                             return;
13267
13268                         default:
13269                             break;
13270                     }
13271                 }
13272
13273                 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13274
13275                 if (tiVerificationNeeded)
13276                 {
13277                     verVerifyField(&resolvedToken, fieldInfo, tiObj, TRUE);
13278                     typeInfo fieldType = verMakeTypeInfo(ciType, fieldClsHnd);
13279                     Verify(tiCompatibleWith(tiVal, fieldType.NormaliseForStack(), true), "type mismatch");
13280                 }
13281                 else
13282                 {
13283                     // tiVerificationNeeded is false.
13284                     // Raise InvalidProgramException if static store accesses non-static field
13285                     if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
13286                     {
13287                         BADCODE("static access on an instance field");
13288                     }
13289                 }
13290
13291                 // We are using stfld on a static field.
13292                 // We allow it, but need to eval any side-effects for obj
13293                 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
13294                 {
13295                     if (obj->gtFlags & GTF_SIDE_EFFECT)
13296                     {
13297                         obj = gtUnusedValNode(obj);
13298                         impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13299                     }
13300                     obj = nullptr;
13301                 }
13302
13303                 /* Preserve 'small' int types */
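                      // Note: small int types precede TYP_INT in the var_types ordering, so they keep their
                      // exact (small) type here, while wider types are normalized to their actual stack type.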
13304                 if (lclTyp > TYP_INT)
13305                 {
13306                     lclTyp = genActualType(lclTyp);
13307                 }
13308
13309                 switch (fieldInfo.fieldAccessor)
13310                 {
13311                     case CORINFO_FIELD_INSTANCE:
13312 #ifdef FEATURE_READYTORUN_COMPILER
13313                     case CORINFO_FIELD_INSTANCE_WITH_BASE:
13314 #endif
13315                     {
13316                         obj = impCheckForNullPointer(obj);
13317
13318                         /* Create the data member node */
13319                         op1             = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
13320                         DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13321                         if (StructHasOverlappingFields(typeFlags))
13322                         {
13323                             op1->gtField.gtFldMayOverlap = true;
13324                         }
13325
13326 #ifdef FEATURE_READYTORUN_COMPILER
13327                         if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
13328                         {
13329                             op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
13330                         }
13331 #endif
13332
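                              // Propagate any global-effect flags (side effects, global refs) from the object
                              // expression to the field node.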
13333                         op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
13334
13335                         if (fgAddrCouldBeNull(obj))
13336                         {
13337                             op1->gtFlags |= GTF_EXCEPT;
13338                         }
13339
13340                         // If gtFldObj is a BYREF then our target is a value class and
13341                         // it could point anywhere, for example at a boxed class static int
13342                         if (obj->gtType == TYP_BYREF)
13343                         {
13344                             op1->gtFlags |= GTF_IND_TGTANYWHERE;
13345                         }
13346
13347                         if (compIsForInlining() &&
13348                             impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, obj, impInlineInfo->inlArgInfo))
13349                         {
13350                             impInlineInfo->thisDereferencedFirst = true;
13351                         }
13352                     }
13353                     break;
13354
13355                     case CORINFO_FIELD_STATIC_TLS:
13356 #ifdef _TARGET_X86_
13357                         // Legacy TLS access is implemented as intrinsic on x86 only
13358
13359                         /* Create the data member node */
13360                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
13361                         op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13362
13363                         break;
13364 #else
13365                         fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13366
13367                         __fallthrough;
13368 #endif
13369
13370                     case CORINFO_FIELD_STATIC_ADDR_HELPER:
13371                     case CORINFO_FIELD_INSTANCE_HELPER:
13372                     case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13373                         op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13374                                                clsHnd, op2);
13375                         goto SPILL_APPEND;
13376
13377                     case CORINFO_FIELD_STATIC_ADDRESS:
13378                     case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13379                     case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13380                     case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13381 #if COR_JIT_EE_VERSION > 460
13382                     case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13383 #endif
13384                         op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13385                                                          lclTyp);
13386                         break;
13387
13388                     default:
13389                         assert(!"Unexpected fieldAccessor");
13390                 }
13391
13392                 // Create the member assignment, unless we have a struct.
13393                 // TODO-1stClassStructs: This could be limited to TYP_STRUCT, to avoid extra copies.
13394                 bool deferStructAssign = varTypeIsStruct(lclTyp);
13395
13396                 if (!deferStructAssign)
13397                 {
13398                     if (prefixFlags & PREFIX_VOLATILE)
13399                     {
13400                         assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
13401                         op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
13402                         op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13403                         op1->gtFlags |= GTF_IND_VOLATILE;
13404                     }
13405                     if (prefixFlags & PREFIX_UNALIGNED)
13406                     {
13407                         assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
13408                         op1->gtFlags |= GTF_IND_UNALIGNED;
13409                     }
13410
13411                     /* V4.0 allows assignment of i4 constant values to i8 type vars when the IL verifier is
13412                        bypassed (full trust apps). The reason this works is that the JIT stores an i4 constant
13413                        in the GenTree union during importation and reads from the union as if it were a long
13414                        during code generation. Though this can potentially read garbage, one can get lucky to
13415                        have this working correctly.
13416
13417                        This code pattern is generated by the Dev10 MC++ compiler when storing to fields compiled
13418                        with the /O2 switch (the default when compiling retail configs in Dev10), and a customer
13419                        app has taken a dependency on it. To be backward compatible, we explicitly add an upward
13420                        cast here so that it always works correctly.
13421
13422                        Note that this is limited to x86 alone as there is no back compat to be addressed for the
13423                        Arm JIT for V4.0.
13424                     */
13429                     CLANG_FORMAT_COMMENT_ANCHOR;
13430
13431 #ifdef _TARGET_X86_
13432                     if (op1->TypeGet() != op2->TypeGet() && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) &&
13433                         varTypeIsLong(op1->TypeGet()))
13434                     {
13435                         op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
13436                     }
13437 #endif
13438
13439 #ifdef _TARGET_64BIT_
13440                     // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
13441                     if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
13442                     {
13443                         op2->gtType = TYP_I_IMPL;
13444                     }
13445                     else
13446                     {
13447                         // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
13448                         //
13449                         if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
13450                         {
13451                             op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
13452                         }
13453                         // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
13454                         //
13455                         if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
13456                         {
13457                             op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
13458                         }
13459                     }
13460 #endif
13461
13462 #if !FEATURE_X87_DOUBLES
13463                     // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
13464                     // We insert a cast to the dest 'op1' type
13465                     //
13466                     if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
13467                         varTypeIsFloating(op2->gtType))
13468                     {
13469                         op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
13470                     }
13471 #endif // !FEATURE_X87_DOUBLES
13472
13473                     op1 = gtNewAssignNode(op1, op2);
13474
13475                     /* Mark the expression as containing an assignment */
13476
13477                     op1->gtFlags |= GTF_ASG;
13478                 }
13479
13480                 /* Check if the class needs explicit initialization */
13481
13482                 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13483                 {
13484                     GenTreePtr helperNode = impInitClass(&resolvedToken);
13485                     if (compDonotInline())
13486                     {
13487                         return;
13488                     }
13489                     if (helperNode != nullptr)
13490                     {
13491                         op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13492                     }
13493                 }
13494
13495                 /* stfld can interfere with value classes (consider the sequence
13496                    ldloc, ldloca, ..., stfld, stloc).  We will be conservative and
13497                    spill all value class references from the stack. */
13498
13499                 if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL)))
13500                 {
13501                     assert(tiObj);
13502
13503                     if (impIsValueType(tiObj))
13504                     {
13505                         impSpillEvalStack();
13506                     }
13507                     else
13508                     {
13509                         impSpillValueClasses();
13510                     }
13511                 }
13512
13513                 /* Spill any refs to the same member from the stack */
13514
13515                 impSpillLclRefs((ssize_t)resolvedToken.hField);
13516
13517                 /* stsfld also interferes with indirect accesses (for aliased
13518                    statics) and calls. But don't need to spill other statics
13519                    as we have explicitly spilled this particular static field. */
13520
13521                 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD"));
13522
13523                 if (deferStructAssign)
13524                 {
13525                     op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL);
13526                 }
13527             }
13528                 goto APPEND;
13529
13530             case CEE_NEWARR:
13531             {
13532
13533                 /* Get the class type index operand */
13534
13535                 _impResolveToken(CORINFO_TOKENKIND_Newarr);
13536
13537                 JITDUMP(" %08X", resolvedToken.token);
13538
13539                 if (!opts.IsReadyToRun())
13540                 {
13541                     // Need to restore array classes before creating array objects on the heap
13542                     op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
13543                     if (op1 == nullptr)
13544                     { // compDonotInline()
13545                         return;
13546                     }
13547                 }
13548
13549                 if (tiVerificationNeeded)
13550                 {
13551                     // As per ECMA, the 'numElems' operand can be either int32 or native int.
13552                     Verify(impStackTop().seTypeInfo.IsIntOrNativeIntType(), "bad bound");
13553
13554                     CORINFO_CLASS_HANDLE elemTypeHnd;
13555                     info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
13556                     Verify(elemTypeHnd == nullptr ||
13557                                !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
13558                            "array of byref-like type");
13559                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13560                 }
13561
13562                 accessAllowedResult =
13563                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13564                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13565
13566                 /* Form the arglist: array class handle, size */
13567                 op2 = impPopStack().val;
13568                 assertImp(genActualTypeIsIntOrI(op2->gtType));
13569
13570 #ifdef FEATURE_READYTORUN_COMPILER
13571                 if (opts.IsReadyToRun())
13572                 {
13573                     op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF,
13574                                                     gtNewArgList(op2));
13575                     usingReadyToRunHelper = (op1 != nullptr);
13576
13577                     if (!usingReadyToRunHelper)
13578                     {
13579                         // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13580                         // and the newarr call with a single call to a dynamic R2R cell that will:
13581                         //      1) Load the context
13582                         //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
13583                         //      3) Allocate the new array
13584                         // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13585
13586                         // Need to restore array classes before creating array objects on the heap
13587                         op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
13588                         if (op1 == nullptr)
13589                         { // compDonotInline()
13590                             return;
13591                         }
13592                     }
13593                 }
13594
13595                 if (!usingReadyToRunHelper)
13596 #endif
13597                 {
13598                     args = gtNewArgList(op1, op2);
13599
13600                     /* Create a call to 'new' */
13601
13602                     // Note that this only works for shared generic code because the same helper is used for all
13603                     // reference array types
13604                     op1 =
13605                         gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, 0, args);
13606                 }
13607
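                      // Record the array class handle on the call; later phases can use it to recover the
                      // exact type of the array this helper allocates.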
13608                 op1->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass;
13609
13610                 /* Remember that this basic block contains 'new' of a single-dimension (SD) array */
13611
13612                 block->bbFlags |= BBF_HAS_NEWARRAY;
13613                 optMethodFlags |= OMF_HAS_NEWARRAY;
13614
13615                 /* Push the result of the call on the stack */
13616
13617                 impPushOnStack(op1, tiRetVal);
13618
13619                 callTyp = TYP_REF;
13620             }
13621             break;
13622
13623             case CEE_LOCALLOC:
13624                 assert(!compIsForInlining());
13625
13626                 if (tiVerificationNeeded)
13627                 {
13628                     Verify(false, "bad opcode");
13629                 }
13630
13631                 // We don't allow locallocs inside handlers
13632                 if (block->hasHndIndex())
13633                 {
13634                     BADCODE("Localloc can't be inside handler");
13635                 }
13636
13637                 /* The FP register may not be back to the original value at the end
13638                    of the method, even if the frame size is 0, as localloc may
13639                    have modified it. So we will HAVE to reset it */
13640
13641                 compLocallocUsed = true;
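                      // localloc forces a GS security cookie: the dynamically sized allocation is a
                      // potential target for buffer overruns.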
13642                 setNeedsGSSecurityCookie();
13643
13644                 // Get the size to allocate
13645
13646                 op2 = impPopStack().val;
13647                 assertImp(genActualTypeIsIntOrI(op2->gtType));
13648
13649                 if (verCurrentState.esStackDepth != 0)
13650                 {
13651                     BADCODE("Localloc can only be used when the stack is empty");
13652                 }
13653
13654                 op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2);
13655
13656                 // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd.
13657
13658                 op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE);
13659
13660                 impPushOnStack(op1, tiRetVal);
13661                 break;
13662
13663             case CEE_ISINST:
13664
13665                 /* Get the type token */
13666                 assertImp(sz == sizeof(unsigned));
13667
13668                 _impResolveToken(CORINFO_TOKENKIND_Casting);
13669
13670                 JITDUMP(" %08X", resolvedToken.token);
13671
13672                 if (!opts.IsReadyToRun())
13673                 {
13674                     op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
13675                     if (op2 == nullptr)
13676                     { // compDonotInline()
13677                         return;
13678                     }
13679                 }
13680
13681                 if (tiVerificationNeeded)
13682                 {
13683                     Verify(impStackTop().seTypeInfo.IsObjRef(), "obj reference needed");
13684                     // Even if this is a value class, we know it is boxed.
13685                     tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
13686                 }
13687                 accessAllowedResult =
13688                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13689                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13690
13691                 op1 = impPopStack().val;
13692
13693 #ifdef FEATURE_READYTORUN_COMPILER
13694                 if (opts.IsReadyToRun())
13695                 {
13696                     GenTreePtr opLookup =
13697                         impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF,
13698                                                   gtNewArgList(op1));
13699                     usingReadyToRunHelper = (opLookup != nullptr);
13700                     op1                   = (usingReadyToRunHelper ? opLookup : op1);
13701
13702                     if (!usingReadyToRunHelper)
13703                     {
13704                         // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13705                         // and the isinstanceof_any call with a single call to a dynamic R2R cell that will:
13706                         //      1) Load the context
13707                         //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
13708                         //      3) Perform the 'is instance' check on the input object
13709                         // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13710
13711                         op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
13712                         if (op2 == nullptr)
13713                         { // compDonotInline()
13714                             return;
13715                         }
13716                     }
13717                 }
13718
13719                 if (!usingReadyToRunHelper)
13720 #endif
13721                 {
13722                     op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false);
13723                 }
13724                 if (compDonotInline())
13725                 {
13726                     return;
13727                 }
13728
13729                 impPushOnStack(op1, tiRetVal);
13730
13731                 break;
13732
13733             case CEE_REFANYVAL:
13734
13735                 // get the class handle and make a ICON node out of it
13736
13737                 _impResolveToken(CORINFO_TOKENKIND_Class);
13738
13739                 JITDUMP(" %08X", resolvedToken.token);
13740
13741                 op2 = impTokenToHandle(&resolvedToken);
13742                 if (op2 == nullptr)
13743                 { // compDonotInline()
13744                     return;
13745                 }
13746
13747                 if (tiVerificationNeeded)
13748                 {
13749                     Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
13750                            "need refany");
13751                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass).MakeByRef();
13752                 }
13753
13754                 op1 = impPopStack().val;
13755                 // make certain it is normalized;
13756                 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
13757
13758                 // Call helper GETREFANY(classHandle, op1);
13759                 args = gtNewArgList(op2, op1);
13760                 op1  = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, 0, args);
13761
13762                 impPushOnStack(op1, tiRetVal);
13763                 break;
13764
13765             case CEE_REFANYTYPE:
13766
13767                 if (tiVerificationNeeded)
13768                 {
13769                     Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
13770                            "need refany");
13771                 }
13772
13773                 op1 = impPopStack().val;
13774
13775                 // make certain it is normalized;
13776                 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
13777
13778                 if (op1->gtOper == GT_OBJ)
13779                 {
13780                     // Get the address of the refany
13781                     op1 = op1->gtOp.gtOp1;
13782
13783                     // Fetch the type from the correct slot
13784                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
13785                                         gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL));
13786                     op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1);
13787                 }
13788                 else
13789                 {
13790                     assertImp(op1->gtOper == GT_MKREFANY);
13791
13792                     // The pointer may have side-effects
13793                     if (op1->gtOp.gtOp1->gtFlags & GTF_SIDE_EFFECT)
13794                     {
13795                         impAppendTree(op1->gtOp.gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13796 #ifdef DEBUG
13797                         impNoteLastILoffs();
13798 #endif
13799                     }
13800
13801                     // We already have the class handle
13802                     op1 = op1->gtOp.gtOp2;
13803                 }
13804
13805                 // convert native TypeHandle to RuntimeTypeHandle
13806                 {
13807                     GenTreeArgList* helperArgs = gtNewArgList(op1);
13808
13809                     op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL, TYP_STRUCT, GTF_EXCEPT,
13810                                               helperArgs);
13811
13812                     // The handle struct is returned in register
13813                     op1->gtCall.gtReturnType = TYP_REF;
13814
13815                     tiRetVal = typeInfo(TI_STRUCT, impGetTypeHandleClass());
13816                 }
13817
13818                 impPushOnStack(op1, tiRetVal);
13819                 break;
13820
13821             case CEE_LDTOKEN:
13822             {
13823                 /* Get the Class index */
13824                 assertImp(sz == sizeof(unsigned));
13825                 lastLoadToken = codeAddr;
13826                 _impResolveToken(CORINFO_TOKENKIND_Ldtoken);
13827
13828                 tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken);
13829
13830                 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
13831                 if (op1 == nullptr)
13832                 { // compDonotInline()
13833                     return;
13834                 }
13835
13836                 helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE;
13837                 assert(resolvedToken.hClass != nullptr);
13838
13839                 if (resolvedToken.hMethod != nullptr)
13840                 {
13841                     helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD;
13842                 }
13843                 else if (resolvedToken.hField != nullptr)
13844                 {
13845                     helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD;
13846                 }
13847
13848                 GenTreeArgList* helperArgs = gtNewArgList(op1);
13849
13850                 op1 = gtNewHelperCallNode(helper, TYP_STRUCT, GTF_EXCEPT, helperArgs);
13851
13852                 // The handle struct is returned in register
13853                 op1->gtCall.gtReturnType = TYP_REF;
13854
13855                 tiRetVal = verMakeTypeInfo(tokenType);
13856                 impPushOnStack(op1, tiRetVal);
13857             }
13858             break;
13859
13860             case CEE_UNBOX:
13861             case CEE_UNBOX_ANY:
13862             {
13863                 /* Get the Class index */
13864                 assertImp(sz == sizeof(unsigned));
13865
13866                 _impResolveToken(CORINFO_TOKENKIND_Class);
13867
13868                 JITDUMP(" %08X", resolvedToken.token);
13869
13870                 BOOL runtimeLookup;
13871                 op2 = impTokenToHandle(&resolvedToken, &runtimeLookup);
13872                 if (op2 == nullptr)
13873                 { // compDonotInline()
13874                     return;
13875                 }
13876
13877                 // Run this always so we can get access exceptions even with SkipVerification.
13878                 accessAllowedResult =
13879                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13880                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13881
13882                 if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass))
13883                 {
13884                     if (tiVerificationNeeded)
13885                     {
13886                         typeInfo tiUnbox = impStackTop().seTypeInfo;
13887                         Verify(tiUnbox.IsObjRef(), "bad unbox.any arg");
13888                         tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13889                         tiRetVal.NormaliseForStack();
13890                     }
13891                     op1 = impPopStack().val;
13892                     goto CASTCLASS;
13893                 }
13894
13895                 /* Pop the object and create the unbox helper call */
13896                 /* You might think that for UNBOX_ANY we need to push a different */
13897                 /* (non-byref) type, but here we're making the tiRetVal that is used */
13898                 /* for the intermediate pointer which we then transfer onto the OBJ */
13899                 /* instruction.  OBJ then creates the appropriate tiRetVal. */
13900                 if (tiVerificationNeeded)
13901                 {
13902                     typeInfo tiUnbox = impStackTop().seTypeInfo;
13903                     Verify(tiUnbox.IsObjRef(), "Bad unbox arg");
13904
13905                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13906                     Verify(tiRetVal.IsValueClass(), "not value class");
13907                     tiRetVal.MakeByRef();
13908
13909                     // We always come from an objref, so this is safe byref
13910                     tiRetVal.SetIsPermanentHomeByRef();
13911                     tiRetVal.SetIsReadonlyByRef();
13912                 }
13913
13914                 op1 = impPopStack().val;
13915                 assertImp(op1->gtType == TYP_REF);
13916
13917                 helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass);
13918                 assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE);
13919
13920                 // We only want to expand inline the normal UNBOX helper;
13921                 expandInline = (helper == CORINFO_HELP_UNBOX);
13922
13923                 if (expandInline)
13924                 {
13925                     if (compCurBB->isRunRarely())
13926                     {
13927                         expandInline = false; // not worth the code expansion
13928                     }
13929                 }
13930
13931                 if (expandInline)
13932                 {
13933                     // we are doing normal unboxing
13934                     // inline the common case of the unbox helper
13935                     // UNBOX(exp) morphs into
13936                     // clone = pop(exp);
13937                     // ((*clone == typeToken) ? nop : helper(clone, typeToken));
13938                     // push(clone + sizeof(void*))
13939                     //
13940                     GenTreePtr cloneOperand;
13941                     op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
13942                                        nullptr DEBUGARG("inline UNBOX clone1"));
13943                     op1 = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
13944
13945                     GenTreePtr condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2);
13946
13947                     op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
13948                                        nullptr DEBUGARG("inline UNBOX clone2"));
13949                     op2 = impTokenToHandle(&resolvedToken);
13950                     if (op2 == nullptr)
13951                     { // compDonotInline()
13952                         return;
13953                     }
13954                     args = gtNewArgList(op2, op1);
13955                     op1  = gtNewHelperCallNode(helper, TYP_VOID, 0, args);
13956
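                          // Shape the check as a QMARK/COLON tree: if the object's type handle matches the
                          // token, do nothing; otherwise fall back to the unbox helper.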
13957                     op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1);
13958                     op1 = gtNewQmarkNode(TYP_VOID, condBox, op1);
13959                     condBox->gtFlags |= GTF_RELOP_QMARK;
13960
13961                     // QMARK nodes cannot reside on the evaluation stack. Because there
13962                     // may be other trees on the evaluation stack that side-effect the
13963                     // sources of the UNBOX operation we must spill the stack.
13964
13965                     impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13966
13967                     // Create the address-expression to reference past the object header
13968                     // to the beginning of the value-type. Today this means adjusting
13969                     // past the base of the object's vtable field, which is pointer sized.
13970
13971                     op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
13972                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2);
13973                 }
13974                 else
13975                 {
13976                     unsigned callFlags = (helper == CORINFO_HELP_UNBOX) ? 0 : GTF_EXCEPT;
13977
13978                     // Don't optimize, just call the helper and be done with it
13979                     args = gtNewArgList(op2, op1);
13980                     op1  = gtNewHelperCallNode(helper,
13981                                               (var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT),
13982                                               callFlags, args);
13983                 }
13984
13985                 assert(helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF || // Unbox helper returns a byref.
13986                        helper == CORINFO_HELP_UNBOX_NULLABLE &&
13987                            varTypeIsStruct(op1) // UnboxNullable helper returns a struct.
13988                        );
13989
13990                 /*
13991                   ----------------------------------------------------------------------
13992                   | \ helper  |                         |                              |
13993                   |   \       |                         |                              |
13994                   |     \     | CORINFO_HELP_UNBOX      | CORINFO_HELP_UNBOX_NULLABLE  |
13995                   |       \   | (which returns a BYREF) | (which returns a STRUCT)     |
13996                   | opcode  \ |                         |                              |
13997                   |--------------------------------------------------------------------|
13998                   | UNBOX     | push the BYREF          | spill the STRUCT to a local, |
13999                   |           |                         | push the BYREF to this local |
14000                   |--------------------------------------------------------------------|
14001                   | UNBOX_ANY | push a GT_OBJ of        | push the STRUCT.             |
14002                   |           | the BYREF               | For Linux, when the struct   |
14003                   |           |                         | is returned in two registers |
14004                   |           |                         | create a temp whose address  |
14005                   |           |                         | is passed to the             |
14006                   |           |                         | unbox_nullable helper.       |
14007                   ----------------------------------------------------------------------
14008                 */
14009
14010                 if (opcode == CEE_UNBOX)
14011                 {
14012                     if (helper == CORINFO_HELP_UNBOX_NULLABLE)
14013                     {
14014                         // Unbox nullable helper returns a struct type.
14015                         // We need to spill it to a temp so that we can take the address of it.
14016                         // Here we need an unsafe value cls check, since the address of the struct is taken to be
14017                         // used further along and could potentially be exploited.
14018
14019                         unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable"));
14020                         lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14021
14022                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14023                         op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14024                         assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14025
14026                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14027                         op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14028                         op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14029                     }
14030
14031                     assert(op1->gtType == TYP_BYREF);
14032                     assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14033                 }
14034                 else
14035                 {
14036                     assert(opcode == CEE_UNBOX_ANY);
14037
14038                     if (helper == CORINFO_HELP_UNBOX)
14039                     {
14040                         // Normal unbox helper returns a TYP_BYREF.
14041                         impPushOnStack(op1, tiRetVal);
14042                         oper = GT_OBJ;
14043                         goto OBJ;
14044                     }
14045
14046                     assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!");
14047
14048 #if FEATURE_MULTIREG_RET
14049
14050                     if (varTypeIsStruct(op1) && IsMultiRegReturnedType(resolvedToken.hClass))
14051                     {
14052                         // Unbox nullable helper returns a TYP_STRUCT.
14053                         // For the multi-reg case we need to spill it to a temp so that
14054                         // we can pass the address to the unbox_nullable jit helper.
14055
14056                         unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable"));
14057                         lvaTable[tmp].lvIsMultiRegArg = true;
14058                         lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14059
14060                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14061                         op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14062                         assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14063
14064                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14065                         op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14066                         op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14067
14068                         // In this case the return value of the unbox helper is TYP_BYREF.
14069                         // Make sure the right type is placed on the operand type stack.
14070                         impPushOnStack(op1, tiRetVal);
14071
14072                         // Load the struct.
14073                         oper = GT_OBJ;
14074
14075                         assert(op1->gtType == TYP_BYREF);
14076                         assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14077
14078                         goto OBJ;
14079                     }
14080                     else
14081
14082 #endif // FEATURE_MULTIREG_RET
14083
14084                     {
14085                         // If the struct is not register passable, we have it materialized in the RetBuf.
14086                         assert(op1->gtType == TYP_STRUCT);
14087                         tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14088                         assert(tiRetVal.IsValueClass());
14089                     }
14090                 }
14091
14092                 impPushOnStack(op1, tiRetVal);
14093             }
14094             break;
14095
14096             case CEE_BOX:
14097             {
14098                 /* Get the Class index */
14099                 assertImp(sz == sizeof(unsigned));
14100
14101                 _impResolveToken(CORINFO_TOKENKIND_Box);
14102
14103                 JITDUMP(" %08X", resolvedToken.token);
14104
14105                 if (tiVerificationNeeded)
14106                 {
14107                     typeInfo tiActual = impStackTop().seTypeInfo;
14108                     typeInfo tiBox    = verMakeTypeInfo(resolvedToken.hClass);
14109
14110                     Verify(verIsBoxable(tiBox), "boxable type expected");
14111
14112                     // check the class constraints of the boxed type in case we are boxing an uninitialized value
14113                     Verify(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
14114                            "boxed type has unsatisfied class constraints");
14115
14116                     Verify(tiCompatibleWith(tiActual, tiBox.NormaliseForStack(), true), "type mismatch");
14117
14118                     // Observation: the following code introduces a boxed value class on the stack, but,
14119                     // according to the ECMA spec, one would simply expect: tiRetVal =
14120                     // typeInfo(TI_REF,impGetObjectClass());
14121
14122                     // Push the result back on the stack,
14123                     // even if clsHnd is a value class we want the TI_REF
14124                     // we call back to the EE to find out what type we should push (for nullable<T> we push T)
14125                     tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(resolvedToken.hClass));
14126                 }
14127
14128                 accessAllowedResult =
14129                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14130                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14131
14132                 // Note BOX can be used on things that are not value classes, in which
14133                 // case we get a NOP.  However the verifier's view of the type on the
14134                 // stack changes (in generic code a 'T' becomes a 'boxed T')
14135                 if (!eeIsValueClass(resolvedToken.hClass))
14136                 {
14137                     verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal;
14138                     break;
14139                 }
14140
14141                 // Look ahead for unbox.any
14142                 if (codeAddr + (sz + 1 + sizeof(mdToken)) <= codeEndp && codeAddr[sz] == CEE_UNBOX_ANY)
14143                 {
14144                     DWORD classAttribs = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
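                          // Only fold the box/unbox.any pair for exact (non-shared) instantiations, where equal
                          // tokens are guaranteed to refer to the same runtime type.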
14145                     if (!(classAttribs & CORINFO_FLG_SHAREDINST))
14146                     {
14147                         CORINFO_RESOLVED_TOKEN unboxResolvedToken;
14148
14149                         impResolveToken(codeAddr + (sz + 1), &unboxResolvedToken, CORINFO_TOKENKIND_Class);
14150
14151                         if (unboxResolvedToken.hClass == resolvedToken.hClass)
14152                         {
14153                             // Skip the next unbox.any instruction
14154                             sz += sizeof(mdToken) + 1;
14155                             break;
14156                         }
14157                     }
14158                 }
14159
14160                 impImportAndPushBox(&resolvedToken);
14161                 if (compDonotInline())
14162                 {
14163                     return;
14164                 }
14165             }
14166             break;
14167
14168             case CEE_SIZEOF:
14169
14170                 /* Get the Class index */
14171                 assertImp(sz == sizeof(unsigned));
14172
14173                 _impResolveToken(CORINFO_TOKENKIND_Class);
14174
14175                 JITDUMP(" %08X", resolvedToken.token);
14176
14177                 if (tiVerificationNeeded)
14178                 {
14179                     tiRetVal = typeInfo(TI_INT);
14180                 }
14181
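                      // sizeof is a JIT-time constant: ask the EE for the class size and push it as an
                      // integer constant node.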
14182                 op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass));
14183                 impPushOnStack(op1, tiRetVal);
14184                 break;
14185
14186             case CEE_CASTCLASS:
14187
14188                 /* Get the Class index */
14189
14190                 assertImp(sz == sizeof(unsigned));
14191
14192                 _impResolveToken(CORINFO_TOKENKIND_Casting);
14193
14194                 JITDUMP(" %08X", resolvedToken.token);
14195
14196                 if (!opts.IsReadyToRun())
14197                 {
14198                     op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14199                     if (op2 == nullptr)
14200                     { // compDonotInline()
14201                         return;
14202                     }
14203                 }
14204
14205                 if (tiVerificationNeeded)
14206                 {
14207                     Verify(impStackTop().seTypeInfo.IsObjRef(), "object ref expected");
14208                     // box it
14209                     tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
14210                 }
14211
14212                 accessAllowedResult =
14213                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14214                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14215
14216                 op1 = impPopStack().val;
14217
14218             /* Pop the address and create the 'checked cast' helper call */
14219
14220             // At this point we expect typeRef to contain the token, op1 to contain the value being cast,
14221             // and op2 to contain code that creates the type handle corresponding to typeRef
14222             CASTCLASS:
14223
14224 #ifdef FEATURE_READYTORUN_COMPILER
14225                 if (opts.IsReadyToRun())
14226                 {
14227                     GenTreePtr opLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST,
14228                                                                     TYP_REF, gtNewArgList(op1));
14229                     usingReadyToRunHelper = (opLookup != nullptr);
14230                     op1                   = (usingReadyToRunHelper ? opLookup : op1);
14231
14232                     if (!usingReadyToRunHelper)
14233                     {
14234                         // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14235                         // and the chkcastany call with a single call to a dynamic R2R cell that will:
14236                         //      1) Load the context
14237                         //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
14238                         //      3) Check the object on the stack for the type-cast
14239                         // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14240
14241                         op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14242                         if (op2 == nullptr)
14243                         { // compDonotInline()
14244                             return;
14245                         }
14246                     }
14247                 }
14248
14249                 if (!usingReadyToRunHelper)
14250 #endif
14251                 {
14252                     op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true);
14253                 }
14254                 if (compDonotInline())
14255                 {
14256                     return;
14257                 }
14258
14259                 /* Push the result back on the stack */
14260                 impPushOnStack(op1, tiRetVal);
14261                 break;
14262
14263             case CEE_THROW:
14264
14265                 if (compIsForInlining())
14266                 {
14267                     // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
14268                     // TODO: Will this be too strict, given that we will inline many basic blocks?
14269                     // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
14270
14271                     /* Do we have just the exception on the stack ?*/
14272
14273                     if (verCurrentState.esStackDepth != 1)
14274                     {
14275                         /* if not, just don't inline the method */
14276
14277                         compInlineResult->NoteFatal(InlineObservation::CALLEE_THROW_WITH_INVALID_STACK);
14278                         return;
14279                     }
14280                 }
14281
14282                 if (tiVerificationNeeded)
14283                 {
14284                     tiRetVal = impStackTop().seTypeInfo;
14285                     Verify(tiRetVal.IsObjRef(), "object ref expected");
14286                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
14287                     {
14288                         Verify(!tiRetVal.IsThisPtr(), "throw uninitialized this");
14289                     }
14290                 }
14291
14292                 block->bbSetRunRarely(); // any block with a throw is rare
14293                 /* Pop the exception object and create the 'throw' helper call */
14294
14295                 op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, GTF_EXCEPT, gtNewArgList(impPopStack().val));
14296
14297             EVAL_APPEND:
14298                 if (verCurrentState.esStackDepth > 0)
14299                 {
14300                     impEvalSideEffects();
14301                 }
14302
14303                 assert(verCurrentState.esStackDepth == 0);
14304
14305                 goto APPEND;
14306
14307             case CEE_RETHROW:
14308
14309                 assert(!compIsForInlining());
14310
14311                 if (info.compXcptnsCount == 0)
14312                 {
14313                     BADCODE("rethrow outside catch");
14314                 }
14315
14316                 if (tiVerificationNeeded)
14317                 {
14318                     Verify(block->hasHndIndex(), "rethrow outside catch");
14319                     if (block->hasHndIndex())
14320                     {
14321                         EHblkDsc* HBtab = ehGetDsc(block->getHndIndex());
14322                         Verify(!HBtab->HasFinallyOrFaultHandler(), "rethrow in finally or fault");
14323                         if (HBtab->HasFilter())
14324                         {
14325                             // we better be in the handler clause part, not the filter part
14326                             Verify(jitIsBetween(compCurBB->bbCodeOffs, HBtab->ebdHndBegOffs(), HBtab->ebdHndEndOffs()),
14327                                    "rethrow in filter");
14328                         }
14329                     }
14330                 }
14331
14332                 /* Create the 'rethrow' helper call */
14333
14334                 op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID, GTF_EXCEPT);
14335
14336                 goto EVAL_APPEND;
14337
14338             case CEE_INITOBJ:
14339
14340                 assertImp(sz == sizeof(unsigned));
14341
14342                 _impResolveToken(CORINFO_TOKENKIND_Class);
14343
14344                 JITDUMP(" %08X", resolvedToken.token);
14345
14346                 if (tiVerificationNeeded)
14347                 {
14348                     typeInfo tiTo    = impStackTop().seTypeInfo;
14349                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14350
14351                     Verify(tiTo.IsByRef(), "byref expected");
14352                     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
14353
14354                     Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
14355                            "type operand incompatible with type of address");
14356                 }
14357
14358                 size = info.compCompHnd->getClassSize(resolvedToken.hClass); // Size
14359                 op2  = gtNewIconNode(0);                                     // Value
14360                 op1  = impPopStack().val;                                    // Dest
14361                 op1  = gtNewBlockVal(op1, size);
14362                 op1  = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
14363                 goto SPILL_APPEND;
14364
14365             case CEE_INITBLK:
14366
14367                 if (tiVerificationNeeded)
14368                 {
14369                     Verify(false, "bad opcode");
14370                 }
14371
14372                 op3 = impPopStack().val; // Size
14373                 op2 = impPopStack().val; // Value
14374                 op1 = impPopStack().val; // Dest
14375
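                      // A constant size lets us build a fixed-size GT_BLK node; otherwise build a GT_DYN_BLK
                      // that carries the size tree as an operand.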
14376                 if (op3->IsCnsIntOrI())
14377                 {
14378                     size = (unsigned)op3->AsIntConCommon()->IconValue();
14379                     op1  = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
14380                 }
14381                 else
14382                 {
14383                     op1  = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
14384                     size = 0;
14385                 }
14386                 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
14387
14388                 goto SPILL_APPEND;
14389
14390             case CEE_CPBLK:
14391
14392                 if (tiVerificationNeeded)
14393                 {
14394                     Verify(false, "bad opcode");
14395                 }
14396                 op3 = impPopStack().val; // Size
14397                 op2 = impPopStack().val; // Src
14398                 op1 = impPopStack().val; // Dest
14399
14400                 if (op3->IsCnsIntOrI())
14401                 {
14402                     size = (unsigned)op3->AsIntConCommon()->IconValue();
14403                     op1  = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
14404                 }
14405                 else
14406                 {
14407                     op1  = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
14408                     size = 0;
14409                 }
14410                 if (op2->OperGet() == GT_ADDR)
14411                 {
14412                     op2 = op2->gtOp.gtOp1;
14413                 }
14414                 else
14415                 {
14416                     op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2);
14417                 }
14418
14419                 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, true);
14420                 goto SPILL_APPEND;
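                // Note on the cpblk expansion above: a constant size operand produces a fixed-size
                // GT_BLK destination, while a non-constant size produces a GT_DYN_BLK carrying the
                // size tree; the source address is wrapped in a TYP_STRUCT indirection unless it is
                // already a GT_ADDR node, in which case the underlying location is used directly.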
14421
14422             case CEE_CPOBJ:
14423
14424                 assertImp(sz == sizeof(unsigned));
14425
14426                 _impResolveToken(CORINFO_TOKENKIND_Class);
14427
14428                 JITDUMP(" %08X", resolvedToken.token);
14429
14430                 if (tiVerificationNeeded)
14431                 {
14432                     typeInfo tiFrom  = impStackTop().seTypeInfo;
14433                     typeInfo tiTo    = impStackTop(1).seTypeInfo;
14434                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14435
14436                     Verify(tiFrom.IsByRef(), "expected byref source");
14437                     Verify(tiTo.IsByRef(), "expected byref destination");
14438
14439                     Verify(tiCompatibleWith(tiFrom.DereferenceByRef(), tiInstr, false),
14440                            "type of source address incompatible with type operand");
14441                     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
14442                     Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
14443                            "type operand incompatible with type of destination address");
14444                 }
14445
14446                 if (!eeIsValueClass(resolvedToken.hClass))
14447                 {
14448                     op1 = impPopStack().val; // address to load from
14449
14450                     impBashVarAddrsToI(op1);
14451
14452                     assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
14453
14454                     op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
14455                     op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
14456
14457                     impPushOnStackNoType(op1);
14458                     opcode = CEE_STIND_REF;
14459                     lclTyp = TYP_REF;
14460                     goto STIND_POST_VERIFY;
14461                 }
14462
14463                 op2 = impPopStack().val; // Src
14464                 op1 = impPopStack().val; // Dest
14465                 op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0));
14466                 goto SPILL_APPEND;
14467
14468             case CEE_STOBJ:
14469             {
14470                 assertImp(sz == sizeof(unsigned));
14471
14472                 _impResolveToken(CORINFO_TOKENKIND_Class);
14473
14474                 JITDUMP(" %08X", resolvedToken.token);
14475
14476                 if (eeIsValueClass(resolvedToken.hClass))
14477                 {
14478                     lclTyp = TYP_STRUCT;
14479                 }
14480                 else
14481                 {
14482                     lclTyp = TYP_REF;
14483                 }
14484
14485                 if (tiVerificationNeeded)
14486                 {
14487
14488                     typeInfo tiPtr = impStackTop(1).seTypeInfo;
14489
14490                     // Make sure we have a good looking byref
14491                     Verify(tiPtr.IsByRef(), "pointer not byref");
14492                     Verify(!tiPtr.IsReadonlyByRef(), "write to readonly byref");
14493                     if (!tiPtr.IsByRef() || tiPtr.IsReadonlyByRef())
14494                     {
14495                         compUnsafeCastUsed = true;
14496                     }
14497
14498                     typeInfo ptrVal = DereferenceByRef(tiPtr);
14499                     typeInfo argVal = verMakeTypeInfo(resolvedToken.hClass);
14500
14501                     if (!tiCompatibleWith(impStackTop(0).seTypeInfo, NormaliseForStack(argVal), true))
14502                     {
14503                         Verify(false, "type of value incompatible with type operand");
14504                         compUnsafeCastUsed = true;
14505                     }
14506
14507                     if (!tiCompatibleWith(argVal, ptrVal, false))
14508                     {
14509                         Verify(false, "type operand incompatible with type of address");
14510                         compUnsafeCastUsed = true;
14511                     }
14512                 }
14513                 else
14514                 {
14515                     compUnsafeCastUsed = true;
14516                 }
14517
14518                 if (lclTyp == TYP_REF)
14519                 {
14520                     opcode = CEE_STIND_REF;
14521                     goto STIND_POST_VERIFY;
14522                 }
14523
14524                 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
14525                 if (impIsPrimitive(jitTyp))
14526                 {
14527                     lclTyp = JITtype2varType(jitTyp);
14528                     goto STIND_POST_VERIFY;
14529                 }
14530
14531                 op2 = impPopStack().val; // Value
14532                 op1 = impPopStack().val; // Ptr
14533
14534                 assertImp(varTypeIsStruct(op2));
14535
14536                 op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14537                 goto SPILL_APPEND;
14538             }
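            // Note on the stobj expansion above: object references are rerouted to the stind.ref
            // path, value classes whose layout is a single primitive are stored through an ordinary
            // indirection of that primitive type, and only genuine struct types fall through to
            // impAssignStructPtr.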
14539
14540             case CEE_MKREFANY:
14541
14542                 assert(!compIsForInlining());
14543
14544                 // Being lazy here. Refanys are tricky in terms of gc tracking.
14545                 // Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany.
14546
14547                 JITDUMP("disabling struct promotion because of mkrefany\n");
14548                 fgNoStructPromotion = true;
14549
14550                 oper = GT_MKREFANY;
14551                 assertImp(sz == sizeof(unsigned));
14552
14553                 _impResolveToken(CORINFO_TOKENKIND_Class);
14554
14555                 JITDUMP(" %08X", resolvedToken.token);
14556
14557                 op2 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
14558                 if (op2 == nullptr)
14559                 { // compDonotInline()
14560                     return;
14561                 }
14562
14563                 if (tiVerificationNeeded)
14564                 {
14565                     typeInfo tiPtr   = impStackTop().seTypeInfo;
14566                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14567
14568                     Verify(!verIsByRefLike(tiInstr), "mkrefany of byref-like class");
14569                     Verify(!tiPtr.IsReadonlyByRef(), "readonly byref used with mkrefany");
14570                     Verify(typeInfo::AreEquivalent(tiPtr.DereferenceByRef(), tiInstr), "type mismatch");
14571                 }
14572
14573                 accessAllowedResult =
14574                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14575                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14576
14577                 op1 = impPopStack().val;
14578
14579                 // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec.
14580                 // But JIT32 allowed it, so we continue to allow it.
14581                 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT);
14582
14583                 // MKREFANY returns a struct.  op2 is the class token.
14584                 op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2);
14585
14586                 impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass()));
14587                 break;
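                // The GT_MKREFANY node built above keeps both operands: op1 is the captured address
                // and op2 is the class token handle. The pushed value is typed as the refany struct
                // (System.TypedReference) whose handle impGetRefAnyClass() returns.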
14588
14589             case CEE_LDOBJ:
14590             {
14591                 oper = GT_OBJ;
14592                 assertImp(sz == sizeof(unsigned));
14593
14594                 _impResolveToken(CORINFO_TOKENKIND_Class);
14595
14596                 JITDUMP(" %08X", resolvedToken.token);
14597
14598             OBJ:
14599
14600                 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14601
14602                 if (tiVerificationNeeded)
14603                 {
14604                     typeInfo tiPtr = impStackTop().seTypeInfo;
14605
14606                     // Make sure we have a byref
14607                     if (!tiPtr.IsByRef())
14608                     {
14609                         Verify(false, "pointer not byref");
14610                         compUnsafeCastUsed = true;
14611                     }
14612                     typeInfo tiPtrVal = DereferenceByRef(tiPtr);
14613
14614                     if (!tiCompatibleWith(tiPtrVal, tiRetVal, false))
14615                     {
14616                         Verify(false, "type of address incompatible with type operand");
14617                         compUnsafeCastUsed = true;
14618                     }
14619                     tiRetVal.NormaliseForStack();
14620                 }
14621                 else
14622                 {
14623                     compUnsafeCastUsed = true;
14624                 }
14625
14626                 if (eeIsValueClass(resolvedToken.hClass))
14627                 {
14628                     lclTyp = TYP_STRUCT;
14629                 }
14630                 else
14631                 {
14632                     lclTyp = TYP_REF;
14633                     opcode = CEE_LDIND_REF;
14634                     goto LDIND_POST_VERIFY;
14635                 }
14636
14637                 op1 = impPopStack().val;
14638
14639                 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL);
14640
14641                 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
14642                 if (impIsPrimitive(jitTyp))
14643                 {
14644                     op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1);
14645
14646                     // Could point anywhere, for example a boxed class static int
14647                     op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF;
14648                     assertImp(varTypeIsArithmetic(op1->gtType));
14649                 }
14650                 else
14651                 {
14652                     // OBJ returns a struct
14653                     // and an inline argument which is the class token of the loaded obj
14654                     op1 = gtNewObjNode(resolvedToken.hClass, op1);
14655                 }
14656                 op1->gtFlags |= GTF_EXCEPT;
14657
14658                 impPushOnStack(op1, tiRetVal);
14659                 break;
14660             }
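            // Note on the ldobj expansion above: object references take the ldind.ref path, value
            // classes whose layout is a single primitive become a plain GT_IND of that primitive type
            // (marked GTF_IND_TGTANYWHERE | GTF_GLOB_REF since the address could point anywhere), and
            // all other value classes become a GT_OBJ node carrying the class handle.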
14661
14662             case CEE_LDLEN:
14663                 if (tiVerificationNeeded)
14664                 {
14665                     typeInfo tiArray = impStackTop().seTypeInfo;
14666                     Verify(verIsSDArray(tiArray), "bad array");
14667                     tiRetVal = typeInfo(TI_INT);
14668                 }
14669
14670                 op1 = impPopStack().val;
14671                 if (!opts.MinOpts() && !opts.compDbgCode)
14672                 {
14673                     /* Use GT_ARR_LENGTH operator so rng check opts see this */
14674                     GenTreeArrLen* arrLen =
14675                         new (this, GT_ARR_LENGTH) GenTreeArrLen(TYP_INT, op1, offsetof(CORINFO_Array, length));
14676
14677                     /* Mark the block as containing a length expression */
14678
14679                     if (op1->gtOper == GT_LCL_VAR)
14680                     {
14681                         block->bbFlags |= BBF_HAS_IDX_LEN;
14682                     }
14683
14684                     op1 = arrLen;
14685                 }
14686                 else
14687                 {
14688                     /* Create the expression "*(array_addr + ArrLenOffs)" */
14689                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
14690                                         gtNewIconNode(offsetof(CORINFO_Array, length), TYP_I_IMPL));
14691                     op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
14692                     op1->gtFlags |= GTF_IND_ARR_LEN;
14693                 }
14694
14695                 /* An indirection will cause a GPF if the address is null */
14696                 op1->gtFlags |= GTF_EXCEPT;
14697
14698                 /* Push the result back on the stack */
14699                 impPushOnStack(op1, tiRetVal);
14700                 break;
14701
14702             case CEE_BREAK:
14703                 op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID);
14704                 goto SPILL_APPEND;
14705
14706             case CEE_NOP:
14707                 if (opts.compDbgCode)
14708                 {
14709                     op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
14710                     goto SPILL_APPEND;
14711                 }
14712                 break;
14713
14714             /******************************** NYI *******************************/
14715
14716             case 0xCC:
14717                 OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n");
14718
14719             case CEE_ILLEGAL:
14720             case CEE_MACRO_END:
14721
14722             default:
14723                 BADCODE3("unknown opcode", ": %02X", (int)opcode);
14724         }
14725
14726         codeAddr += sz;
14727         prevOpcode = opcode;
14728
14729         prefixFlags = 0;
14730         assert(!insertLdloc || opcode == CEE_DUP);
14731     }
14732
14733     assert(!insertLdloc);
14734
14735     return;
14736 #undef _impResolveToken
14737 }
14738 #ifdef _PREFAST_
14739 #pragma warning(pop)
14740 #endif
14741
14742 // Push a local/argument tree on the operand stack
14743 void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal)
14744 {
14745     tiRetVal.NormaliseForStack();
14746
14747     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr())
14748     {
14749         tiRetVal.SetUninitialisedObjRef();
14750     }
14751
14752     impPushOnStack(op, tiRetVal);
14753 }
14754
14755 // Load a local/argument on the operand stack
14756 // lclNum is an index into lvaTable *NOT* the arg/lcl index in the IL
14757 void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, typeInfo tiRetVal)
14758 {
14759     var_types lclTyp;
14760
14761     if (lvaTable[lclNum].lvNormalizeOnLoad())
14762     {
14763         lclTyp = lvaGetRealType(lclNum);
14764     }
14765     else
14766     {
14767         lclTyp = lvaGetActualType(lclNum);
14768     }
14769
14770     impPushVar(gtNewLclvNode(lclNum, lclTyp, offset), tiRetVal);
14771 }
14772
14773 // Load an argument on the operand stack
14774 // Shared by the various CEE_LDARG opcodes
14775 // ilArgNum is the argument index as specified in IL.
14776 // It will be mapped to the correct lvaTable index
14777 void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset)
14778 {
14779     Verify(ilArgNum < info.compILargsCount, "bad arg num");
14780
14781     if (compIsForInlining())
14782     {
14783         if (ilArgNum >= info.compArgsCount)
14784         {
14785             compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER);
14786             return;
14787         }
14788
14789         impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo),
14790                    impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo);
14791     }
14792     else
14793     {
14794         if (ilArgNum >= info.compArgsCount)
14795         {
14796             BADCODE("Bad IL");
14797         }
14798
14799         unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param
14800
14801         if (lclNum == info.compThisArg)
14802         {
14803             lclNum = lvaArg0Var;
14804         }
14805
14806         impLoadVar(lclNum, offset);
14807     }
14808 }
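// For illustration (the exact layout depends on the signature): when the method has a hidden
// parameter such as a return buffer or a generic instantiation argument, the IL argument index and
// the lvaTable index can differ, which is why compMapILargNum() is consulted above. The 'this'
// argument is additionally redirected to lvaArg0Var, which may be a copy of the incoming 'this'
// when the JIT needs to track or modify it separately.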
14809
14810 // Load a local on the operand stack
14811 // Shared by the various CEE_LDLOC opcodes
14812 // ilLclNum is the local index as specified in IL.
14813 // It will be mapped to the correct lvaTable index
14814 void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset)
14815 {
14816     if (tiVerificationNeeded)
14817     {
14818         Verify(ilLclNum < info.compMethodInfo->locals.numArgs, "bad loc num");
14819         Verify(info.compInitMem, "initLocals not set");
14820     }
14821
14822     if (compIsForInlining())
14823     {
14824         if (ilLclNum >= info.compMethodInfo->locals.numArgs)
14825         {
14826             compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER);
14827             return;
14828         }
14829
14830         // Get the local type
14831         var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo;
14832
14833         typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo;
14834
14835         /* Have we allocated a temp for this local? */
14836
14837         unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp"));
14838
14839         // All vars of inlined methods should be !lvNormalizeOnLoad()
14840
14841         assert(!lvaTable[lclNum].lvNormalizeOnLoad());
14842         lclTyp = genActualType(lclTyp);
14843
14844         impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal);
14845     }
14846     else
14847     {
14848         if (ilLclNum >= info.compMethodInfo->locals.numArgs)
14849         {
14850             BADCODE("Bad IL");
14851         }
14852
14853         unsigned lclNum = info.compArgsCount + ilLclNum;
14854
14855         impLoadVar(lclNum, offset);
14856     }
14857 }
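// For illustration: in the non-inline case IL locals simply follow the arguments in lvaTable, so for
// a method whose info.compArgsCount is N (including any hidden arguments), IL local i maps to
// lvaTable index N + i, exactly as computed above.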
14858
14859 #ifdef _TARGET_ARM_
14860 /**************************************************************************************
14861  *
14862  *  When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the
14863  *  dst struct, because struct promotion will turn it into a float/double variable while
14864  *  the rhs will be an int/long variable. We don't code generate assignment of int into
14865  *  a float, but there is nothing that might prevent us from doing so. The tree however
14866  *  would like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int))
14867  *
14868  *  tmpNum - the lcl dst variable num that is a struct.
14869  *  src    - the src tree assigned to the dest that is a struct/int (when varargs call.)
14870  *  hClass - the type handle for the struct variable.
14871  *
14872  *  TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play,
14873  *        however, we could do a codegen of transferring from int to float registers
14874  *        (transfer, not a cast.)
14875  *
14876  */
14877 void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTreePtr src, CORINFO_CLASS_HANDLE hClass)
14878 {
14879     if (src->gtOper == GT_CALL && src->gtCall.IsVarargs() && IsHfa(hClass))
14880     {
14881         int       hfaSlots = GetHfaCount(hClass);
14882         var_types hfaType  = GetHfaType(hClass);
14883
14884         // If we have varargs, the importer morphs the method's return type to "int" irrespective of its
14885         // original struct/float type, because the ABI specifies that the return comes back in integer registers.
14886         // We don't want struct promotion to replace an expression like this:
14887         //   lclFld_int = callvar_int() into lclFld_float = callvar_int();
14888         // This means an int is getting assigned to a float without a cast. Prevent the promotion.
14889         if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) ||
14890             (hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES))
14891         {
14892             // Make sure this struct type stays as struct so we can receive the call in a struct.
14893             lvaTable[tmpNum].lvIsMultiRegRet = true;
14894         }
14895     }
14896 }
14897 #endif // _TARGET_ARM_
14898
14899 #if FEATURE_MULTIREG_RET
14900 GenTreePtr Compiler::impAssignMultiRegTypeToVar(GenTreePtr op, CORINFO_CLASS_HANDLE hClass)
14901 {
14902     unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return."));
14903     impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_NONE);
14904     GenTreePtr ret = gtNewLclvNode(tmpNum, op->gtType);
14905
14906     // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
14907     ret->gtFlags |= GTF_DONT_CSE;
14908
14909     assert(IsMultiRegReturnedType(hClass));
14910
14911     // Mark the var so that fields are not promoted and stay together.
14912     lvaTable[tmpNum].lvIsMultiRegRet = true;
14913
14914     return ret;
14915 }
14916 #endif // FEATURE_MULTIREG_RET
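// A minimal usage sketch (hypothetical call site; names are illustrative): a call that returns a
// struct in multiple registers is spilled through impAssignMultiRegTypeToVar so that later phases
// see a single local, e.g.
//
//     GenTreePtr call = ...; // a GT_CALL whose return type is a multi-reg struct
//     GenTreePtr val  = impAssignMultiRegTypeToVar(call, retClsHnd);
//     impPushOnStack(val, verMakeTypeInfo(retClsHnd));
//
// The temp is marked lvIsMultiRegRet so struct promotion keeps its fields together, and the returned
// lclVar carries GTF_DONT_CSE so the multi-reg value is not CSE'd away.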
14917
14918 // Do the import for a return instruction.
14919 // Returns false if inlining was aborted.
14920 // opcode can be CEE_RET, or a call opcode in the case of a tail.call.
14921 bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE& opcode)
14922 {
14923     if (tiVerificationNeeded)
14924     {
14925         verVerifyThisPtrInitialised();
14926
14927         unsigned expectedStack = 0;
14928         if (info.compRetType != TYP_VOID)
14929         {
14930             typeInfo tiVal = impStackTop().seTypeInfo;
14931             typeInfo tiDeclared =
14932                 verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
14933
14934             Verify(!verIsByRefLike(tiDeclared) || verIsSafeToReturnByRef(tiVal), "byref return");
14935
14936             Verify(tiCompatibleWith(tiVal, tiDeclared.NormaliseForStack(), true), "type mismatch");
14937             expectedStack = 1;
14938         }
14939         Verify(verCurrentState.esStackDepth == expectedStack, "stack non-empty on return");
14940     }
14941
14942     GenTree*             op2       = nullptr;
14943     GenTree*             op1       = nullptr;
14944     CORINFO_CLASS_HANDLE retClsHnd = nullptr;
14945
14946     if (info.compRetType != TYP_VOID)
14947     {
14948         StackEntry se = impPopStack(retClsHnd);
14949         op2           = se.val;
14950
14951         if (!compIsForInlining())
14952         {
14953             impBashVarAddrsToI(op2);
14954             op2 = impImplicitIorI4Cast(op2, info.compRetType);
14955             op2 = impImplicitR4orR8Cast(op2, info.compRetType);
14956             assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) ||
14957                       ((op2->TypeGet() == TYP_I_IMPL) && (info.compRetType == TYP_BYREF)) ||
14958                       ((op2->TypeGet() == TYP_BYREF) && (info.compRetType == TYP_I_IMPL)) ||
14959                       (varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) ||
14960                       (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType)));
14961
14962 #ifdef DEBUG
14963             if (opts.compGcChecks && info.compRetType == TYP_REF)
14964             {
14965                 // DDB 3483  : JIT Stress: early termination of GC ref's life time in exception code path
14966                 // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with
14967                 // one-return BB.
14968
14969                 assert(op2->gtType == TYP_REF);
14970
14971                 // confirm that the argument is a GC pointer (for debugging (GC stress))
14972                 GenTreeArgList* args = gtNewArgList(op2);
14973                 op2                  = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, 0, args);
14974
14975                 if (verbose)
14976                 {
14977                     printf("\ncompGcChecks tree:\n");
14978                     gtDispTree(op2);
14979                 }
14980             }
14981 #endif
14982         }
14983         else
14984         {
14985             // inlinee's stack should be empty now.
14986             assert(verCurrentState.esStackDepth == 0);
14987
14988 #ifdef DEBUG
14989             if (verbose)
14990             {
14991                 printf("\n\n    Inlinee Return expression (before normalization)  =>\n");
14992                 gtDispTree(op2);
14993             }
14994 #endif
14995
14996             // Make sure the type matches the original call.
14997
14998             var_types returnType       = genActualType(op2->gtType);
14999             var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType;
15000             if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT))
15001             {
15002                 originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass);
15003             }
15004
15005             if (returnType != originalCallType)
15006             {
15007                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH);
15008                 return false;
15009             }
15010
15011             // Below, we are going to set impInlineInfo->retExpr to the tree with the return
15012             // expression. At this point, retExpr could already be set if there are multiple
15013             // return blocks (meaning lvaInlineeReturnSpillTemp != BAD_VAR_NUM) and one of
15014             // the other blocks already set it. If there is only a single return block,
15015             // retExpr shouldn't be set. However, this is not true if we reimport a block
15016             // with a return. In that case, retExpr will be set, then the block will be
15017             // reimported, but retExpr won't get cleared as part of setting the block to
15018             // be reimported. The reimported retExpr value should be the same, so even if
15019             // we don't unconditionally overwrite it, it shouldn't matter.
15020             if (info.compRetNativeType != TYP_STRUCT)
15021             {
15022                 // compRetNativeType is not TYP_STRUCT.
15023                 // This implies it could be either a scalar type or SIMD vector type or
15024                 // a struct type that can be normalized to a scalar type.
15025
15026                 if (varTypeIsStruct(info.compRetType))
15027                 {
15028                     noway_assert(info.compRetBuffArg == BAD_VAR_NUM);
15029                     // adjust the type away from struct to integral
15030                     // and no normalizing
15031                     op2 = impFixupStructReturnType(op2, retClsHnd);
15032                 }
15033                 else
15034                 {
15035                     // Do we have to normalize?
15036                     var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType);
15037                     if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) &&
15038                         fgCastNeeded(op2, fncRealRetType))
15039                     {
15040                         // Small-typed return values are normalized by the callee
15041                         op2 = gtNewCastNode(TYP_INT, op2, fncRealRetType);
15042                     }
15043                 }
15044
15045                 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15046                 {
15047                     assert(info.compRetNativeType != TYP_VOID &&
15048                            (fgMoreThanOneReturnBlock() || impInlineInfo->hasPinnedLocals));
15049
15050                     // This is a bit of a workaround...
15051                     // If we are inlining a call that returns a struct, where the actual "native" return type is
15052                     // not a struct (for example, the struct is composed of exactly one int, and the native
15053                     // return type is thus an int), and the inlinee has multiple return blocks (thus,
15054                     // lvaInlineeReturnSpillTemp is != BAD_VAR_NUM, and is the index of a local var that is set
15055                     // to the *native* return type), and at least one of the return blocks is the result of
15056                     // a call, then we have a problem. The situation is like this (from a failed test case):
15057                     //
15058                     // inliner:
15059                     //      // Note: valuetype plinq_devtests.LazyTests/LIX is a struct with only a single int
15060                     //      call !!0 [mscorlib]System.Threading.LazyInitializer::EnsureInitialized<valuetype
15061                     //      plinq_devtests.LazyTests/LIX>(!!0&, bool&, object&, class [mscorlib]System.Func`1<!!0>)
15062                     //
15063                     // inlinee:
15064                     //      ...
15065                     //      ldobj      !!T                 // this gets bashed to a GT_LCL_FLD, type TYP_INT
15066                     //      ret
15067                     //      ...
15068                     //      call       !!0 System.Threading.LazyInitializer::EnsureInitializedCore<!!0>(!!0&, bool&,
15069                     //      object&, class System.Func`1<!!0>)
15070                     //      ret
15071                     //
15072                     // In the code above, when we call impFixupStructReturnType(), we will change the op2 return type
15073                     // of the inlinee return node, but we don't do that for GT_CALL nodes, which we delay until
15074                     // morphing when we call fgFixupStructReturn(). We do this, apparently, to handle nested
15075                     // inlining properly by leaving the correct type on the GT_CALL node through importing.
15076                     //
15077                     // To fix this, for this case, we temporarily change the GT_CALL node type to the
15078                     // native return type, which is what it will be set to eventually. We generate the
15079                     // assignment to the return temp, using the correct type, and then restore the GT_CALL
15080                     // node type. During morphing, the GT_CALL will get the correct, final, native return type.
15081
15082                     bool restoreType = false;
15083                     if ((op2->OperGet() == GT_CALL) && (info.compRetType == TYP_STRUCT))
15084                     {
15085                         noway_assert(op2->TypeGet() == TYP_STRUCT);
15086                         op2->gtType = info.compRetNativeType;
15087                         restoreType = true;
15088                     }
15089
15090                     impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15091                                      (unsigned)CHECK_SPILL_ALL);
15092
15093                     GenTreePtr tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, op2->TypeGet());
15094
15095                     if (restoreType)
15096                     {
15097                         op2->gtType = TYP_STRUCT; // restore it to what it was
15098                     }
15099
15100                     op2 = tmpOp2;
15101
15102 #ifdef DEBUG
15103                     if (impInlineInfo->retExpr)
15104                     {
15105                         // Some other block(s) have seen the CEE_RET first.
15106                         // Better they spilled to the same temp.
15107                         assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR);
15108                         assert(impInlineInfo->retExpr->gtLclVarCommon.gtLclNum == op2->gtLclVarCommon.gtLclNum);
15109                     }
15110 #endif
15111                 }
15112
15113 #ifdef DEBUG
15114                 if (verbose)
15115                 {
15116                     printf("\n\n    Inlinee Return expression (after normalization) =>\n");
15117                     gtDispTree(op2);
15118                 }
15119 #endif
15120
15121                 // Report the return expression
15122                 impInlineInfo->retExpr = op2;
15123             }
15124             else
15125             {
15126                 // compRetNativeType is TYP_STRUCT.
15127                 // This implies the struct is returned via a RetBuf arg or as a multi-reg struct return.
15128
15129                 GenTreePtr iciCall = impInlineInfo->iciCall;
15130                 assert(iciCall->gtOper == GT_CALL);
15131
15132                 // Assign the inlinee return into a spill temp.
15133                 // spill temp only exists if there are multiple return points
15134                 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15135                 {
15136                     // in this case we have to insert multiple struct copies to the temp
15137                     // and the retexpr is just the temp.
15138                     assert(info.compRetNativeType != TYP_VOID);
15139                     assert(fgMoreThanOneReturnBlock() || impInlineInfo->hasPinnedLocals);
15140
15141                     impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15142                                      (unsigned)CHECK_SPILL_ALL);
15143                 }
15144
15145 #if defined(_TARGET_ARM_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15146 #if defined(_TARGET_ARM_)
15147                 // TODO-ARM64-NYI: HFA
15148                 // TODO-AMD64-Unix and TODO-ARM once the ARM64 functionality is implemented the
15149                 // next ifdefs could be refactored in a single method with the ifdef inside.
15150                 if (IsHfa(retClsHnd))
15151                 {
15152 // Same as !IsHfa but just don't bother with impAssignStructPtr.
15153 #else  // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15154                 ReturnTypeDesc retTypeDesc;
15155                 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15156                 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
15157
15158                 if (retRegCount != 0)
15159                 {
15160                     // If single eightbyte, the return type would have been normalized and there won't be a temp var.
15161                     // This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes -
15162                     // max allowed.)
15163                     assert(retRegCount == MAX_RET_REG_COUNT);
15164                     // Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr.
15165                     CLANG_FORMAT_COMMENT_ANCHOR;
15166 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15167
15168                     if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15169                     {
15170                         if (!impInlineInfo->retExpr)
15171                         {
15172 #if defined(_TARGET_ARM_)
15173                             impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType);
15174 #else  // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15175                             // The inlinee compiler has figured out the type of the temp already. Use it here.
15176                             impInlineInfo->retExpr =
15177                                 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
15178 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15179                         }
15180                     }
15181                     else
15182                     {
15183                         impInlineInfo->retExpr = op2;
15184                     }
15185                 }
15186                 else
15187 #elif defined(_TARGET_ARM64_)
15188                 ReturnTypeDesc retTypeDesc;
15189                 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15190                 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
15191
15192                 if (retRegCount != 0)
15193                 {
15194                     assert(!iciCall->AsCall()->HasRetBufArg());
15195                     assert(retRegCount >= 2);
15196                     if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15197                     {
15198                         if (!impInlineInfo->retExpr)
15199                         {
15200                             // The inlinee compiler has figured out the type of the temp already. Use it here.
15201                             impInlineInfo->retExpr =
15202                                 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
15203                         }
15204                     }
15205                     else
15206                     {
15207                         impInlineInfo->retExpr = op2;
15208                     }
15209                 }
15210                 else
15211 #endif // defined(_TARGET_ARM64_)
15212                 {
15213                     assert(iciCall->AsCall()->HasRetBufArg());
15214                     GenTreePtr dest = gtCloneExpr(iciCall->gtCall.gtCallArgs->gtOp.gtOp1);
15215                     // spill temp only exists if there are multiple return points
15216                     if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15217                     {
15218                         // if this is the first return we have seen set the retExpr
15219                         if (!impInlineInfo->retExpr)
15220                         {
15221                             impInlineInfo->retExpr =
15222                                 impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType),
15223                                                    retClsHnd, (unsigned)CHECK_SPILL_ALL);
15224                         }
15225                     }
15226                     else
15227                     {
15228                         impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
15229                     }
15230                 }
15231             }
15232         }
15233     }
15234
15235     if (compIsForInlining())
15236     {
15237         return true;
15238     }
15239
15240     if (info.compRetType == TYP_VOID)
15241     {
15242         // return void
15243         op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
15244     }
15245     else if (info.compRetBuffArg != BAD_VAR_NUM)
15246     {
15247         // Assign value to return buff (first param)
15248         GenTreePtr retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF, impCurStmtOffs);
15249
15250         op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
15251         impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
15252
15253         // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX).
15254         CLANG_FORMAT_COMMENT_ANCHOR;
15255
15256 #if defined(_TARGET_AMD64_)
15257
15258         // The x64 (System V and Win64) calling conventions require the implicit
15259         // return buffer to be returned explicitly (in RAX).
15260         // Change the return type to be BYREF.
15261         op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
15262 #else  // !defined(_TARGET_AMD64_)
15263         // On non-AMD64 targets the profiler hook requires the implicit RetBuf to be returned explicitly.
15264         // In that case the return value of the function is changed to BYREF.
15265         // If the profiler hook is not needed, the return type of the function is TYP_VOID.
15266         if (compIsProfilerHookNeeded())
15267         {
15268             op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
15269         }
15270         else
15271         {
15272             // return void
15273             op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
15274         }
15275 #endif // !defined(_TARGET_AMD64_)
15276     }
15277     else if (varTypeIsStruct(info.compRetType))
15278     {
15279 #if !FEATURE_MULTIREG_RET
15280         // For both ARM architectures the HFA native types are maintained as structs.
15281         // On System V AMD64, multireg struct returns are likewise left as structs.
15282         noway_assert(info.compRetNativeType != TYP_STRUCT);
15283 #endif
15284         op2 = impFixupStructReturnType(op2, retClsHnd);
15285         // return op2
15286         op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetNativeType), op2);
15287     }
15288     else
15289     {
15290         // return op2
15291         op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2);
15292     }
15293
15294     // We must have imported a tailcall and jumped to RET
15295     if (prefixFlags & PREFIX_TAILCALL)
15296     {
15297 #ifndef _TARGET_AMD64_
15298         // Jit64 compat:
15299         // This cannot be asserted on Amd64 since we permit the following IL pattern:
15300         //      tail.call
15301         //      pop
15302         //      ret
15303         assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode));
15304 #endif
15305
15306         opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES
15307
15308         // impImportCall() would have already appended TYP_VOID calls
15309         if (info.compRetType == TYP_VOID)
15310         {
15311             return true;
15312         }
15313     }
15314
15315     impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
15316 #ifdef DEBUG
15317     // Remember at which BC offset the tree was finished
15318     impNoteLastILoffs();
15319 #endif
15320     return true;
15321 }
15322
15323 /*****************************************************************************
15324  *  Mark the block as unimported.
15325  *  Note that the caller is responsible for calling impImportBlockPending(),
15326  *  with the appropriate stack-state
15327  */
15328
15329 inline void Compiler::impReimportMarkBlock(BasicBlock* block)
15330 {
15331 #ifdef DEBUG
15332     if (verbose && (block->bbFlags & BBF_IMPORTED))
15333     {
15334         printf("\nBB%02u will be reimported\n", block->bbNum);
15335     }
15336 #endif
15337
15338     block->bbFlags &= ~BBF_IMPORTED;
15339 }
15340
15341 /*****************************************************************************
15342  *  Mark the successors of the given block as unimported.
15343  *  Note that the caller is responsible for calling impImportBlockPending()
15344  *  for all the successors, with the appropriate stack-state.
15345  */
15346
15347 void Compiler::impReimportMarkSuccessors(BasicBlock* block)
15348 {
15349     for (unsigned i = 0; i < block->NumSucc(); i++)
15350     {
15351         impReimportMarkBlock(block->GetSucc(i));
15352     }
15353 }
15354
15355 /*****************************************************************************
15356  *
15357  *  Filter wrapper that handles only the verification exception code
15358  *  (SEH_VERIFICATION_EXCEPTION); all other exceptions continue the search.
15359  */
15360
15361 LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
15362 {
15363     if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION)
15364     {
15365         return EXCEPTION_EXECUTE_HANDLER;
15366     }
15367
15368     return EXCEPTION_CONTINUE_SEARCH;
15369 }
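// This filter is paired with the PAL_TRY/PAL_EXCEPT_FILTER region in impImportBlock below: the IL of
// a block is imported inside the guarded region, and a raised SEH_VERIFICATION_EXCEPTION transfers
// control to verHandleVerificationFailure for that block rather than propagating further.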
15370
15371 void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
15372 {
15373     assert(block->hasTryIndex());
15374     assert(!compIsForInlining());
15375
15376     unsigned  tryIndex = block->getTryIndex();
15377     EHblkDsc* HBtab    = ehGetDsc(tryIndex);
15378
15379     if (isTryStart)
15380     {
15381         assert(block->bbFlags & BBF_TRY_BEG);
15382
15383         // The Stack must be empty
15384         //
15385         if (block->bbStkDepth != 0)
15386         {
15387             BADCODE("Evaluation stack must be empty on entry into a try block");
15388         }
15389     }
15390
15391     // Save the stack contents, we'll need to restore it later
15392     //
15393     SavedStack blockState;
15394     impSaveStackState(&blockState, false);
15395
15396     while (HBtab != nullptr)
15397     {
15398         if (isTryStart)
15399         {
15400             // Are we verifying that an instance constructor properly initializes its 'this' pointer once?
15401             //  We do not allow the 'this' pointer to be uninitialized when entering most kinds of try regions
15402             //
15403             if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
15404             {
15405                 // We trigger an invalid program exception here unless we have a try/fault region.
15406                 //
15407                 if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter())
15408                 {
15409                     BADCODE(
15410                         "The 'this' pointer of an instance constructor is not initialized upon entry to a try region");
15411                 }
15412                 else
15413                 {
15414                     // Allow a try/fault region to proceed.
15415                     assert(HBtab->HasFaultHandler());
15416                 }
15417             }
15418
15419             /* Recursively process the handler block */
15420             BasicBlock* hndBegBB = HBtab->ebdHndBeg;
15421
15422             //  Construct the proper verification stack state
15423             //   either empty or one that contains just
15424             //   the Exception Object that we are dealing with
15425             //
15426             verCurrentState.esStackDepth = 0;
15427
15428             if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp))
15429             {
15430                 CORINFO_CLASS_HANDLE clsHnd;
15431
15432                 if (HBtab->HasFilter())
15433                 {
15434                     clsHnd = impGetObjectClass();
15435                 }
15436                 else
15437                 {
15438                     CORINFO_RESOLVED_TOKEN resolvedToken;
15439
15440                     resolvedToken.tokenContext = impTokenLookupContextHandle;
15441                     resolvedToken.tokenScope   = info.compScopeHnd;
15442                     resolvedToken.token        = HBtab->ebdTyp;
15443                     resolvedToken.tokenType    = CORINFO_TOKENKIND_Class;
15444                     info.compCompHnd->resolveToken(&resolvedToken);
15445
15446                     clsHnd = resolvedToken.hClass;
15447                 }
15448
15449                 // push the catch arg on the stack, spilling to a temp if necessary
15450                 // Note: can update HBtab->ebdHndBeg!
15451                 hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd);
15452             }
15453
15454             // Queue up the handler for importing
15455             //
15456             impImportBlockPending(hndBegBB);
15457
15458             if (HBtab->HasFilter())
15459             {
15460                 /* @VERIFICATION : Ideally the end of filter state should get
15461                    propagated to the catch handler, this is an incompleteness,
15462                    but is not a security/compliance issue, since the only
15463                    interesting state is the 'thisInit' state.
15464                    */
15465
15466                 verCurrentState.esStackDepth = 0;
15467
15468                 BasicBlock* filterBB = HBtab->ebdFilter;
15469
15470                 // push the catch arg on the stack, spilling to a temp if necessary
15471                 // Note: can update HBtab->ebdFilter!
15472                 filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass());
15473
15474                 impImportBlockPending(filterBB);
15475             }
15476         }
15477         else if (verTrackObjCtorInitState && HBtab->HasFaultHandler())
15478         {
15479             /* Recursively process the handler block */
15480
15481             verCurrentState.esStackDepth = 0;
15482
15483             // Queue up the fault handler for importing
15484             //
15485             impImportBlockPending(HBtab->ebdHndBeg);
15486         }
15487
15488         // Now process our enclosing try index (if any)
15489         //
15490         tryIndex = HBtab->ebdEnclosingTryIndex;
15491         if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
15492         {
15493             HBtab = nullptr;
15494         }
15495         else
15496         {
15497             HBtab = ehGetDsc(tryIndex);
15498         }
15499     }
15500
15501     // Restore the stack contents
15502     impRestoreStackState(&blockState);
15503 }
15504
15505 //***************************************************************
15506 // Import the instructions for the given basic block.  Perform
15507 // verification, throwing an exception on failure.  Push any successor blocks that are enabled for the first
15508 // time, or whose verification pre-state is changed.
15509
15510 #ifdef _PREFAST_
15511 #pragma warning(push)
15512 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
15513 #endif
15514 void Compiler::impImportBlock(BasicBlock* block)
15515 {
15516     // BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to
15517     // handle them specially. In particular, there is no IL to import for them, but we do need
15518     // to mark them as imported and put their successors on the pending import list.
15519     if (block->bbFlags & BBF_INTERNAL)
15520     {
15521         JITDUMP("Marking BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", block->bbNum);
15522         block->bbFlags |= BBF_IMPORTED;
15523
15524         for (unsigned i = 0; i < block->NumSucc(); i++)
15525         {
15526             impImportBlockPending(block->GetSucc(i));
15527         }
15528
15529         return;
15530     }
15531
15532     bool markImport;
15533
15534     assert(block);
15535
15536     /* Make the block globally available */
15537
15538     compCurBB = block;
15539
15540 #ifdef DEBUG
15541     /* Initialize the debug variables */
15542     impCurOpcName = "unknown";
15543     impCurOpcOffs = block->bbCodeOffs;
15544 #endif
15545
15546     /* Set the current stack state to the merged result */
15547     verResetCurrentState(block, &verCurrentState);
15548
15549     /* Now walk the code and import the IL into GenTrees */
15550
15551     struct FilterVerificationExceptionsParam
15552     {
15553         Compiler*   pThis;
15554         BasicBlock* block;
15555     };
15556     FilterVerificationExceptionsParam param;
15557
15558     param.pThis = this;
15559     param.block = block;
15560
15561     PAL_TRY(FilterVerificationExceptionsParam*, pParam, &param)
15562     {
15563         /* @VERIFICATION : For now, the only state propagation from try
15564            to its handler is the "thisInit" state (the stack is empty at the start of a try).
15565            In general, for state that we track in verification, we need to
15566            model the possibility that an exception might happen at any IL
15567            instruction, so we really need to merge all states that obtain
15568            between IL instructions in a try block into the start states of
15569            all handlers.
15570
15571            However we do not allow the 'this' pointer to be uninitialized when
15572            entering most kinds of try regions (only try/fault are allowed to have
15573            an uninitialized this pointer on entry to the try)
15574
15575            Fortunately, the stack is thrown away when an exception
15576            leads to a handler, so we don't have to worry about that.
15577            We DO, however, have to worry about the "thisInit" state.
15578            But only for the try/fault case.
15579
15580            The only allowed transition is from TIS_Uninit to TIS_Init.
15581
15582            So for a try/fault region for the fault handler block
15583            we will merge the start state of the try begin
15584            and the post-state of each block that is part of this try region
15585         */
15586
15587         // merge the start state of the try begin
15588         //
15589         if (pParam->block->bbFlags & BBF_TRY_BEG)
15590         {
15591             pParam->pThis->impVerifyEHBlock(pParam->block, true);
15592         }
15593
15594         pParam->pThis->impImportBlockCode(pParam->block);
15595
15596         // As discussed above:
15597         // merge the post-state of each block that is part of this try region
15598         //
15599         if (pParam->block->hasTryIndex())
15600         {
15601             pParam->pThis->impVerifyEHBlock(pParam->block, false);
15602         }
15603     }
15604     PAL_EXCEPT_FILTER(FilterVerificationExceptions)
15605     {
15606         verHandleVerificationFailure(block DEBUGARG(false));
15607     }
15608     PAL_ENDTRY
15609
15610     if (compDonotInline())
15611     {
15612         return;
15613     }
15614
15615     assert(!compDonotInline());
15616
15617     markImport = false;
15618
15619 SPILLSTACK:
15620
15621     unsigned    baseTmp             = NO_BASE_TMP; // input temps assigned to successor blocks
15622     bool        reimportSpillClique = false;
15623     BasicBlock* tgtBlock            = nullptr;
15624
15625     /* If the stack is non-empty, we might have to spill its contents */
15626
15627     if (verCurrentState.esStackDepth != 0)
15628     {
15629         impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something
15630                                   // on the stack, its lifetime is hard to determine, simply
15631                                   // don't reuse such temps.
15632
15633         GenTreePtr addStmt = nullptr;
15634
15635         /* Do the successors of 'block' have any other predecessors ?
15636            We do not want to do some of the optimizations related to multiRef
15637            if we can reimport blocks */
15638
15639         unsigned multRef = impCanReimport ? unsigned(~0) : 0;
15640
15641         switch (block->bbJumpKind)
15642         {
15643             case BBJ_COND:
15644
15645                 /* Temporarily remove the 'jtrue' from the end of the tree list */
15646
15647                 assert(impTreeLast);
15648                 assert(impTreeLast->gtOper == GT_STMT);
15649                 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_JTRUE);
15650
15651                 addStmt     = impTreeLast;
15652                 impTreeLast = impTreeLast->gtPrev;
15653
15654                 /* Note if the next block has more than one ancestor */
15655
15656                 multRef |= block->bbNext->bbRefs;
15657
15658                 /* Does the next block have temps assigned? */
15659
15660                 baseTmp  = block->bbNext->bbStkTempsIn;
15661                 tgtBlock = block->bbNext;
15662
15663                 if (baseTmp != NO_BASE_TMP)
15664                 {
15665                     break;
15666                 }
15667
15668                 /* Try the target of the jump then */
15669
15670                 multRef |= block->bbJumpDest->bbRefs;
15671                 baseTmp  = block->bbJumpDest->bbStkTempsIn;
15672                 tgtBlock = block->bbJumpDest;
15673                 break;
15674
15675             case BBJ_ALWAYS:
15676                 multRef |= block->bbJumpDest->bbRefs;
15677                 baseTmp  = block->bbJumpDest->bbStkTempsIn;
15678                 tgtBlock = block->bbJumpDest;
15679                 break;
15680
15681             case BBJ_NONE:
15682                 multRef |= block->bbNext->bbRefs;
15683                 baseTmp  = block->bbNext->bbStkTempsIn;
15684                 tgtBlock = block->bbNext;
15685                 break;
15686
15687             case BBJ_SWITCH:
15688
15689                 BasicBlock** jmpTab;
15690                 unsigned     jmpCnt;
15691
15692                 /* Temporarily remove the GT_SWITCH from the end of the tree list */
15693
15694                 assert(impTreeLast);
15695                 assert(impTreeLast->gtOper == GT_STMT);
15696                 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_SWITCH);
15697
15698                 addStmt     = impTreeLast;
15699                 impTreeLast = impTreeLast->gtPrev;
15700
15701                 jmpCnt = block->bbJumpSwt->bbsCount;
15702                 jmpTab = block->bbJumpSwt->bbsDstTab;
15703
15704                 do
15705                 {
15706                     tgtBlock = (*jmpTab);
15707
15708                     multRef |= tgtBlock->bbRefs;
15709
15710                     // Thanks to spill cliques, we should have assigned all or none
15711                     assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn));
15712                     baseTmp = tgtBlock->bbStkTempsIn;
15713                     if (multRef > 1)
15714                     {
15715                         break;
15716                     }
15717                 } while (++jmpTab, --jmpCnt);
15718
15719                 break;
15720
15721             case BBJ_CALLFINALLY:
15722             case BBJ_EHCATCHRET:
15723             case BBJ_RETURN:
15724             case BBJ_EHFINALLYRET:
15725             case BBJ_EHFILTERRET:
15726             case BBJ_THROW:
15727                 NO_WAY("can't have 'unreached' end of BB with non-empty stack");
15728                 break;
15729
15730             default:
15731                 noway_assert(!"Unexpected bbJumpKind");
15732                 break;
15733         }
15734
15735         assert(multRef >= 1);
15736
15737         /* Do we have a base temp number? */
15738
15739         bool newTemps = (baseTmp == NO_BASE_TMP);
15740
15741         if (newTemps)
15742         {
15743             /* Grab enough temps for the whole stack */
15744             baseTmp = impGetSpillTmpBase(block);
15745         }
15746
15747         /* Spill all stack entries into temps */
15748         unsigned level, tempNum;
15749
15750         JITDUMP("\nSpilling stack entries into temps\n");
15751         for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++)
15752         {
15753             GenTreePtr tree = verCurrentState.esStack[level].val;
15754
15755             /* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from
15756                the other. This should merge to a byref in unverifiable code.
15757                However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the
15758                successor would be imported assuming there was a TYP_I_IMPL on
15759                the stack. Thus the value would not get GC-tracked. Hence,
15760                change the temp to TYP_BYREF and reimport the successors.
15761                Note: We should only allow this in unverifiable code.
15762             */
15763             if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL && !verNeedsVerification())
15764             {
15765                 lvaTable[tempNum].lvType = TYP_BYREF;
15766                 impReimportMarkSuccessors(block);
15767                 markImport = true;
15768             }
15769
15770 #ifdef _TARGET_64BIT_
15771             if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT)
15772             {
15773                 if (tiVerificationNeeded && tgtBlock->bbEntryState != nullptr &&
15774                     (tgtBlock->bbFlags & BBF_FAILED_VERIFICATION) == 0)
15775                 {
15776                     // Merge the current state into the entry state of block;
15777                     // the call to verMergeEntryStates must have changed
15778                     // the entry state of the block by merging the int local var
15779                     // and the native-int stack entry.
15780                     bool changed = false;
15781                     if (verMergeEntryStates(tgtBlock, &changed))
15782                     {
15783                         impRetypeEntryStateTemps(tgtBlock);
15784                         impReimportBlockPending(tgtBlock);
15785                         assert(changed);
15786                     }
15787                     else
15788                     {
15789                         tgtBlock->bbFlags |= BBF_FAILED_VERIFICATION;
15790                         break;
15791                     }
15792                 }
15793
15794                 // Some other block in the spill clique set this to "int", but now we have "native int".
15795                 // Change the type and go back to re-import any blocks that used the wrong type.
15796                 lvaTable[tempNum].lvType = TYP_I_IMPL;
15797                 reimportSpillClique      = true;
15798             }
15799             else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL)
15800             {
15801                 // Spill clique has decided this should be "native int", but this block only pushes an "int".
15802                 // Insert a sign-extension to "native int" so we match the clique.
15803                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
15804             }
15805
15806             // Consider the case where one branch left a 'byref' on the stack and the other leaves
15807             // an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same
15808             // size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64
15809             // behavior instead of asserting and then generating bad code (where we save/restore the
15810             // low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been
15811             // imported already, we need to change the type of the local and reimport the spill clique.
15812             // If the 'byref' side has been imported, we insert a cast from int to 'native int' to match
15813             // the 'byref' size.
15814             if (!tiVerificationNeeded)
15815             {
15816                 if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT)
15817                 {
15818                     // Some other block in the spill clique set this to "int", but now we have "byref".
15819                     // Change the type and go back to re-import any blocks that used the wrong type.
15820                     lvaTable[tempNum].lvType = TYP_BYREF;
15821                     reimportSpillClique      = true;
15822                 }
15823                 else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF)
15824                 {
15825                     // Spill clique has decided this should be "byref", but this block only pushes an "int".
15826                     // Insert a sign-extension to "native int" so we match the clique size.
15827                     verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
15828                 }
15829             }
15830 #endif // _TARGET_64BIT_
15831
15832 #if FEATURE_X87_DOUBLES
15833             // X87 stack doesn't differentiate between float/double
15834             // so promoting is no big deal.
15835             // For everybody else keep it as float until we have a collision and then promote
15836             // Just like for x64's TYP_INT<->TYP_I_IMPL
15837
15838             if (multRef > 1 && tree->gtType == TYP_FLOAT)
15839             {
15840                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
15841             }
15842
15843 #else // !FEATURE_X87_DOUBLES
15844
15845             if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT)
15846             {
15847                 // Some other block in the spill clique set this to "float", but now we have "double".
15848                 // Change the type and go back to re-import any blocks that used the wrong type.
15849                 lvaTable[tempNum].lvType = TYP_DOUBLE;
15850                 reimportSpillClique      = true;
15851             }
15852             else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE)
15853             {
15854                 // Spill clique has decided this should be "double", but this block only pushes a "float".
15855                 // Insert a cast to "double" so we match the clique.
15856                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
15857             }
15858
15859 #endif // FEATURE_X87_DOUBLES
15860
15861             /* If addStmt has a reference to tempNum (can only happen if we
15862                are spilling to the temps already used by a previous block),
15863                we need to spill addStmt */
15864
15865             if (addStmt && !newTemps && gtHasRef(addStmt->gtStmt.gtStmtExpr, tempNum, false))
15866             {
15867                 GenTreePtr addTree = addStmt->gtStmt.gtStmtExpr;
15868
15869                 if (addTree->gtOper == GT_JTRUE)
15870                 {
15871                     GenTreePtr relOp = addTree->gtOp.gtOp1;
15872                     assert(relOp->OperIsCompare());
15873
15874                     var_types type = genActualType(relOp->gtOp.gtOp1->TypeGet());
15875
15876                     if (gtHasRef(relOp->gtOp.gtOp1, tempNum, false))
15877                     {
15878                         unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1"));
15879                         impAssignTempGen(temp, relOp->gtOp.gtOp1, level);
15880                         type              = genActualType(lvaTable[temp].TypeGet());
15881                         relOp->gtOp.gtOp1 = gtNewLclvNode(temp, type);
15882                     }
15883
15884                     if (gtHasRef(relOp->gtOp.gtOp2, tempNum, false))
15885                     {
15886                         unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2"));
15887                         impAssignTempGen(temp, relOp->gtOp.gtOp2, level);
15888                         type              = genActualType(lvaTable[temp].TypeGet());
15889                         relOp->gtOp.gtOp2 = gtNewLclvNode(temp, type);
15890                     }
15891                 }
15892                 else
15893                 {
15894                     assert(addTree->gtOper == GT_SWITCH && genActualType(addTree->gtOp.gtOp1->gtType) == TYP_I_IMPL);
15895
15896                     unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH"));
15897                     impAssignTempGen(temp, addTree->gtOp.gtOp1, level);
15898                     addTree->gtOp.gtOp1 = gtNewLclvNode(temp, TYP_I_IMPL);
15899                 }
15900             }
15901
15902             /* Spill the stack entry, and replace with the temp */
15903
15904             if (!impSpillStackEntry(level, tempNum
15905 #ifdef DEBUG
15906                                     ,
15907                                     true, "Spill Stack Entry"
15908 #endif
15909                                     ))
15910             {
15911                 if (markImport)
15912                 {
15913                     BADCODE("bad stack state");
15914                 }
15915
15916                 // Oops. Something went wrong when spilling. Bad code.
15917                 verHandleVerificationFailure(block DEBUGARG(true));
15918
15919                 goto SPILLSTACK;
15920             }
15921         }
15922
15923         /* Put back the 'jtrue'/'switch' if we removed it earlier */
15924
15925         if (addStmt)
15926         {
15927             impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE);
15928         }
15929     }
15930
15931     // Some of the append/spill logic works on compCurBB
15932
15933     assert(compCurBB == block);
15934
15935     /* Save the tree list in the block */
15936     impEndTreeList(block);
15937
15938     // impEndTreeList sets BBF_IMPORTED on the block
15939     // We do *NOT* want to set it later than this because
15940     // impReimportSpillClique might clear it if this block is both a
15941     // predecessor and successor in the current spill clique
15942     assert(block->bbFlags & BBF_IMPORTED);
15943
15944     // If we had an int/native int or float/double collision, we need to re-import
15945     if (reimportSpillClique)
15946     {
15947         // This will re-import all the successors of block (as well as each of their predecessors)
15948         impReimportSpillClique(block);
15949
15950         // For blocks that haven't been imported yet, we still need to mark them as pending import.
15951         for (unsigned i = 0; i < block->NumSucc(); i++)
15952         {
15953             BasicBlock* succ = block->GetSucc(i);
15954             if ((succ->bbFlags & BBF_IMPORTED) == 0)
15955             {
15956                 impImportBlockPending(succ);
15957             }
15958         }
15959     }
15960     else // the normal case
15961     {
15962         // otherwise just import the successors of block
15963
15964         /* Does this block jump to any other blocks? */
15965         for (unsigned i = 0; i < block->NumSucc(); i++)
15966         {
15967             impImportBlockPending(block->GetSucc(i));
15968         }
15969     }
15970 }
15971 #ifdef _PREFAST_
15972 #pragma warning(pop)
15973 #endif
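
// A rough example of the 64-bit int/native-int collision handling above
// (V10 is a hypothetical shared spill temp; the block names are made up):
//
//   B1: ldc.i4 0   -> leaves a TYP_INT on the stack
//   B2: conv.i ... -> leaves a TYP_I_IMPL on the stack
//   B1 and B2 both fall into B3, so they spill to the same temp V10.
//
// If B1 is imported first, V10 is typed TYP_INT; when B2 later spills a
// TYP_I_IMPL into it, the code above widens V10 to TYP_I_IMPL and sets
// reimportSpillClique so blocks that used the narrow type get imported again.
// In the opposite order, B1 simply casts its TYP_INT value up to TYP_I_IMPL
// before spilling and no reimport is needed.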
15974
15975 /*****************************************************************************/
15976 //
15977 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
15978 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
15979 // impPendingBlockMembers).  Merges the current verification state into the verification state of "block"
15980 // (its "pre-state").
15981
15982 void Compiler::impImportBlockPending(BasicBlock* block)
15983 {
15984 #ifdef DEBUG
15985     if (verbose)
15986     {
15987         printf("\nimpImportBlockPending for BB%02u\n", block->bbNum);
15988     }
15989 #endif
15990
15991     // We will add a block to the pending set if it has not already been imported (or needs to be re-imported),
15992     // or if it has, but merging in a predecessor's post-state changes the block's pre-state.
15993     // (When we're doing verification, we always attempt the merge to detect verification errors.)
15994
15995     // If the block has not been imported, add to pending set.
15996     bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0);
15997
15998     // Initialize bbEntryState just the first time we try to add this block to the pending list
15999     // Just because bbEntryState is NULL doesn't mean the pre-state wasn't previously set
16000     // We use NULL to indicate the 'common' state to avoid memory allocation
16001     if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) &&
16002         (impGetPendingBlockMember(block) == 0))
16003     {
16004         verInitBBEntryState(block, &verCurrentState);
16005         assert(block->bbStkDepth == 0);
16006         block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth);
16007         assert(addToPending);
16008         assert(impGetPendingBlockMember(block) == 0);
16009     }
16010     else
16011     {
16012         // The stack should have the same height on entry to the block from all its predecessors.
16013         if (block->bbStkDepth != verCurrentState.esStackDepth)
16014         {
16015 #ifdef DEBUG
16016             char buffer[400];
16017             sprintf_s(buffer, sizeof(buffer),
16018                       "Block at offset %4.4x to %4.4x in %s entered with different stack depths.\n"
16019                       "Previous depth was %d, current depth is %d",
16020                       block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth,
16021                       verCurrentState.esStackDepth);
16022             buffer[400 - 1] = 0;
16023             NO_WAY(buffer);
16024 #else
16025             NO_WAY("Block entered with different stack depths");
16026 #endif
16027         }
16028
16029         // Additionally, if we need to verify, merge the verification state.
16030         if (tiVerificationNeeded)
16031         {
16032             // Merge the current state into the entry state of block; if this does not change the entry state
16033             // by merging, do not add the block to the pending-list.
16034             bool changed = false;
16035             if (!verMergeEntryStates(block, &changed))
16036             {
16037                 block->bbFlags |= BBF_FAILED_VERIFICATION;
16038                 addToPending = true; // We will pop it off, and check the flag set above.
16039             }
16040             else if (changed)
16041             {
16042                 addToPending = true;
16043
16044                 JITDUMP("Adding BB%02u to pending set due to new merge result\n", block->bbNum);
16045             }
16046         }
16047
16048         if (!addToPending)
16049         {
16050             return;
16051         }
16052
16053         if (block->bbStkDepth > 0)
16054         {
16055             // We need to fix the types of any spill temps that might have changed:
16056             //   int->native int, float->double, int->byref, etc.
16057             impRetypeEntryStateTemps(block);
16058         }
16059
16060         // OK, we must add to the pending list, if it's not already in it.
16061         if (impGetPendingBlockMember(block) != 0)
16062         {
16063             return;
16064         }
16065     }
16066
16067     // Get an entry to add to the pending list
16068
16069     PendingDsc* dsc;
16070
16071     if (impPendingFree)
16072     {
16073         // We can reuse one of the freed up dscs.
16074         dsc            = impPendingFree;
16075         impPendingFree = dsc->pdNext;
16076     }
16077     else
16078     {
16079         // We have to create a new dsc
16080         dsc = new (this, CMK_Unknown) PendingDsc;
16081     }
16082
16083     dsc->pdBB                 = block;
16084     dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth;
16085     dsc->pdThisPtrInit        = verCurrentState.thisInitialized;
16086
16087     // Save the stack trees for later
16088
16089     if (verCurrentState.esStackDepth)
16090     {
16091         impSaveStackState(&dsc->pdSavedStack, false);
16092     }
16093
16094     // Add the entry to the pending list
16095
16096     dsc->pdNext    = impPendingList;
16097     impPendingList = dsc;
16098     impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16099
16100     // Various assertions require us now to consider the block as not imported (at least for
16101     // the final time...)
16102     block->bbFlags &= ~BBF_IMPORTED;
16103
16104 #ifdef DEBUG
16105     if (verbose && 0)
16106     {
16107         printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16108     }
16109 #endif
16110 }
16111
16112 /*****************************************************************************/
16113 //
16114 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16115 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
16116 // impPendingBlockMembers).  Does *NOT* change the existing "pre-state" of the block.
16117
16118 void Compiler::impReimportBlockPending(BasicBlock* block)
16119 {
16120     JITDUMP("\nimpReimportBlockPending for BB%02u", block->bbNum);
16121
16122     assert(block->bbFlags & BBF_IMPORTED);
16123
16124     // OK, we must add to the pending list, if it's not already in it.
16125     if (impGetPendingBlockMember(block) != 0)
16126     {
16127         return;
16128     }
16129
16130     // Get an entry to add to the pending list
16131
16132     PendingDsc* dsc;
16133
16134     if (impPendingFree)
16135     {
16136         // We can reuse one of the freed up dscs.
16137         dsc            = impPendingFree;
16138         impPendingFree = dsc->pdNext;
16139     }
16140     else
16141     {
16142         // We have to create a new dsc
16143         dsc = new (this, CMK_ImpStack) PendingDsc;
16144     }
16145
16146     dsc->pdBB = block;
16147
16148     if (block->bbEntryState)
16149     {
16150         dsc->pdThisPtrInit        = block->bbEntryState->thisInitialized;
16151         dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth;
16152         dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack;
16153     }
16154     else
16155     {
16156         dsc->pdThisPtrInit        = TIS_Bottom;
16157         dsc->pdSavedStack.ssDepth = 0;
16158         dsc->pdSavedStack.ssTrees = nullptr;
16159     }
16160
16161     // Add the entry to the pending list
16162
16163     dsc->pdNext    = impPendingList;
16164     impPendingList = dsc;
16165     impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16166
16167     // Various assertions require us now to consider the block as not imported (at least for
16168     // the final time...)
16169     block->bbFlags &= ~BBF_IMPORTED;
16170
16171 #ifdef DEBUG
16172     if (verbose && 0)
16173     {
16174         printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16175     }
16176 #endif
16177 }
16178
16179 void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp)
16180 {
16181     if (comp->impBlockListNodeFreeList == nullptr)
16182     {
16183         return (BlockListNode*)comp->compGetMem(sizeof(BlockListNode), CMK_BasicBlock);
16184     }
16185     else
16186     {
16187         BlockListNode* res             = comp->impBlockListNodeFreeList;
16188         comp->impBlockListNodeFreeList = res->m_next;
16189         return res;
16190     }
16191 }
16192
16193 void Compiler::FreeBlockListNode(Compiler::BlockListNode* node)
16194 {
16195     node->m_next             = impBlockListNodeFreeList;
16196     impBlockListNodeFreeList = node;
16197 }
16198
16199 void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback)
16200 {
16201     bool toDo = true;
16202
16203     noway_assert(!fgComputePredsDone);
16204     if (!fgCheapPredsValid)
16205     {
16206         fgComputeCheapPreds();
16207     }
16208
16209     BlockListNode* succCliqueToDo = nullptr;
16210     BlockListNode* predCliqueToDo = new (this) BlockListNode(block);
16211     while (toDo)
16212     {
16213         toDo = false;
16214         // Look at the successors of every member of the predecessor to-do list.
16215         while (predCliqueToDo != nullptr)
16216         {
16217             BlockListNode* node = predCliqueToDo;
16218             predCliqueToDo      = node->m_next;
16219             BasicBlock* blk     = node->m_blk;
16220             FreeBlockListNode(node);
16221
16222             for (unsigned succNum = 0; succNum < blk->NumSucc(); succNum++)
16223             {
16224                 BasicBlock* succ = blk->GetSucc(succNum);
16225                 // If it's not already in the clique, add it, and also add it
16226                 // as a member of the successor "toDo" set.
16227                 if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0)
16228                 {
16229                     callback->Visit(SpillCliqueSucc, succ);
16230                     impSpillCliqueSetMember(SpillCliqueSucc, succ, 1);
16231                     succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo);
16232                     toDo           = true;
16233                 }
16234             }
16235         }
16236         // Look at the predecessors of every member of the successor to-do list.
16237         while (succCliqueToDo != nullptr)
16238         {
16239             BlockListNode* node = succCliqueToDo;
16240             succCliqueToDo      = node->m_next;
16241             BasicBlock* blk     = node->m_blk;
16242             FreeBlockListNode(node);
16243
16244             for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next)
16245             {
16246                 BasicBlock* predBlock = pred->block;
16247                 // If it's not already in the clique, add it, and also add it
16248                 // as a member of the predecessor "toDo" set.
16249                 if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0)
16250                 {
16251                     callback->Visit(SpillCliquePred, predBlock);
16252                     impSpillCliqueSetMember(SpillCliquePred, predBlock, 1);
16253                     predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo);
16254                     toDo           = true;
16255                 }
16256             }
16257         }
16258     }
16259
16260     // If this fails, it means we didn't walk the spill clique properly and somehow managed
16261     // to miss walking back to include the predecessor we started from.
16262     // The most likely cause: missing or out-of-date bbPreds.
16263     assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0);
16264 }
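
// A small worked example of the walk above (block names are hypothetical):
// suppose B1 and B2 both fall into B3 with one value left on the stack, and
// the walk is started from the predecessor B1.
//
//   succ pass: successors of {B1}      -> visit B3 as SpillCliqueSucc
//   pred pass: predecessors of {B3}    -> visit B1 and B2 as SpillCliquePred
//   succ pass: successors of {B1, B2}  -> B3 is already a member, so we stop
//
// B1, B2 and B3 thus end up in one spill clique and share the same base temps.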
16265
16266 void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
16267 {
16268     if (predOrSucc == SpillCliqueSucc)
16269     {
16270         assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor.
16271         blk->bbStkTempsIn = m_baseTmp;
16272     }
16273     else
16274     {
16275         assert(predOrSucc == SpillCliquePred);
16276         assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor.
16277         blk->bbStkTempsOut = m_baseTmp;
16278     }
16279 }
16280
16281 void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
16282 {
16283     // For Preds we could be a little smarter and just find the existing store
16284     // and re-type it/add a cast, but that is complicated and hopefully very rare, so
16285     // just re-import the whole block (just like we do for successors)
16286
16287     if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0))
16288     {
16289         // If we haven't imported this block and we're not going to (because it isn't on
16290         // the pending list) then just ignore it for now.
16291
16292         // This block has either never been imported (EntryState == NULL) or it failed
16293         // verification. Neither state requires us to force it to be imported now.
16294         assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION));
16295         return;
16296     }
16297
16298     // For successors we have a valid verCurrentState, so just mark them for reimport
16299     // the 'normal' way
16300     // Unlike predecessors, we *DO* need to reimport the current block because the
16301     // initial import had the wrong entry state types.
16302     // Similarly, blocks that are currently on the pending list still need to call
16303     // impImportBlockPending to fix up their entry state.
16304     if (predOrSucc == SpillCliqueSucc)
16305     {
16306         m_pComp->impReimportMarkBlock(blk);
16307
16308         // Set the current stack state to that of the blk->bbEntryState
16309         m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState);
16310         assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry());
16311
16312         m_pComp->impImportBlockPending(blk);
16313     }
16314     else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0))
16315     {
16316         // As described above, we are only visiting predecessors so they can
16317         // add the appropriate casts. Since we have already done that for the current
16318         // block, it does not need to be reimported.
16319         // Nor do we need to reimport blocks that are still pending, but not yet
16320         // imported.
16321         //
16322         // For predecessors, we have no state to seed the EntryState, so we just have
16323         // to assume the existing one is correct.
16324         // If the block is also a successor, it will get the EntryState properly
16325         // updated when it is visited as a successor in the above "if" block.
16326         assert(predOrSucc == SpillCliquePred);
16327         m_pComp->impReimportBlockPending(blk);
16328     }
16329 }
16330
16331 // Re-type the incoming lclVar nodes to match the varDsc.
16332 void Compiler::impRetypeEntryStateTemps(BasicBlock* blk)
16333 {
16334     if (blk->bbEntryState != nullptr)
16335     {
16336         EntryState* es = blk->bbEntryState;
16337         for (unsigned level = 0; level < es->esStackDepth; level++)
16338         {
16339             GenTreePtr tree = es->esStack[level].val;
16340             if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD))
16341             {
16342                 unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
16343                 noway_assert(lclNum < lvaCount);
16344                 LclVarDsc* varDsc              = lvaTable + lclNum;
16345                 es->esStack[level].val->gtType = varDsc->TypeGet();
16346             }
16347         }
16348     }
16349 }
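
// For example (V10 is a hypothetical temp): if V10 was originally spilled as
// TYP_INT but a later member of the spill clique widened it to TYP_I_IMPL,
// the lclVar nodes in the block's saved entry stack (bbEntryState) still
// carry the stale TYP_INT; the loop above refreshes their gtType from the
// lvaTable entry before the block is (re)imported.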
16350
16351 unsigned Compiler::impGetSpillTmpBase(BasicBlock* block)
16352 {
16353     if (block->bbStkTempsOut != NO_BASE_TMP)
16354     {
16355         return block->bbStkTempsOut;
16356     }
16357
16358 #ifdef DEBUG
16359     if (verbose)
16360     {
16361         printf("\n*************** In impGetSpillTmpBase(BB%02u)\n", block->bbNum);
16362     }
16363 #endif // DEBUG
16364
16365     // Otherwise, choose one, and propagate to all members of the spill clique.
16366     // Grab enough temps for the whole stack.
16367     unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries"));
16368     SetSpillTempsBase callback(baseTmp);
16369
16370     // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor
16371     // to one spill clique, and similarly can only be the successor to one spill clique.
16372     impWalkSpillCliqueFromPred(block, &callback);
16373
16374     return baseTmp;
16375 }
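
// For example, with a stack depth of 2 the lvaGrabTemps call above reserves
// two consecutive temps, say V07 and V08 (numbers hypothetical); every block
// in the clique then spills stack slot 0 to V07 and slot 1 to V08, so a
// successor can reload the same values no matter which predecessor ran.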
16376
16377 void Compiler::impReimportSpillClique(BasicBlock* block)
16378 {
16379 #ifdef DEBUG
16380     if (verbose)
16381     {
16382         printf("\n*************** In impReimportSpillClique(BB%02u)\n", block->bbNum);
16383     }
16384 #endif // DEBUG
16385
16386     // If we get here, it is because this block is already part of a spill clique
16387     // and one predecessor had an outgoing live stack slot of type int, and this
16388     // block has an outgoing live stack slot of type native int.
16389     // We need to reset these before traversal because they have already been set
16390     // by the previous walk to determine all the members of the spill clique.
16391     impInlineRoot()->impSpillCliquePredMembers.Reset();
16392     impInlineRoot()->impSpillCliqueSuccMembers.Reset();
16393
16394     ReimportSpillClique callback(this);
16395
16396     impWalkSpillCliqueFromPred(block, &callback);
16397 }
16398
16399 // Set the pre-state of "block" (which should not have a pre-state allocated) to
16400 // a copy of "srcState", cloning tree pointers as required.
16401 void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState)
16402 {
16403     if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom)
16404     {
16405         block->bbEntryState = nullptr;
16406         return;
16407     }
16408
16409     block->bbEntryState = (EntryState*)compGetMemA(sizeof(EntryState));
16410
16411     // block->bbEntryState.esRefcount = 1;
16412
16413     block->bbEntryState->esStackDepth    = srcState->esStackDepth;
16414     block->bbEntryState->thisInitialized = TIS_Bottom;
16415
16416     if (srcState->esStackDepth > 0)
16417     {
16418         block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]);
16419         unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry);
16420
16421         memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize);
16422         for (unsigned level = 0; level < srcState->esStackDepth; level++)
16423         {
16424             GenTreePtr tree                         = srcState->esStack[level].val;
16425             block->bbEntryState->esStack[level].val = gtCloneExpr(tree);
16426         }
16427     }
16428
16429     if (verTrackObjCtorInitState)
16430     {
16431         verSetThisInit(block, srcState->thisInitialized);
16432     }
16433
16434     return;
16435 }
16436
16437 void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis)
16438 {
16439     assert(tis != TIS_Bottom); // Precondition.
16440     if (block->bbEntryState == nullptr)
16441     {
16442         block->bbEntryState = new (this, CMK_Unknown) EntryState();
16443     }
16444
16445     block->bbEntryState->thisInitialized = tis;
16446 }
16447
16448 /*
16449  * Resets the current state to the state at the start of the basic block
16450  */
16451 void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState)
16452 {
16453
16454     if (block->bbEntryState == nullptr)
16455     {
16456         destState->esStackDepth    = 0;
16457         destState->thisInitialized = TIS_Bottom;
16458         return;
16459     }
16460
16461     destState->esStackDepth = block->bbEntryState->esStackDepth;
16462
16463     if (destState->esStackDepth > 0)
16464     {
16465         unsigned stackSize = destState->esStackDepth * sizeof(StackEntry);
16466
16467         memcpy(destState->esStack, block->bbStackOnEntry(), stackSize);
16468     }
16469
16470     destState->thisInitialized = block->bbThisOnEntry();
16471
16472     return;
16473 }
16474
16475 ThisInitState BasicBlock::bbThisOnEntry()
16476 {
16477     return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom;
16478 }
16479
16480 unsigned BasicBlock::bbStackDepthOnEntry()
16481 {
16482     return (bbEntryState ? bbEntryState->esStackDepth : 0);
16483 }
16484
16485 void BasicBlock::bbSetStack(void* stackBuffer)
16486 {
16487     assert(bbEntryState);
16488     assert(stackBuffer);
16489     bbEntryState->esStack = (StackEntry*)stackBuffer;
16490 }
16491
16492 StackEntry* BasicBlock::bbStackOnEntry()
16493 {
16494     assert(bbEntryState);
16495     return bbEntryState->esStack;
16496 }
16497
16498 void Compiler::verInitCurrentState()
16499 {
16500     verTrackObjCtorInitState        = FALSE;
16501     verCurrentState.thisInitialized = TIS_Bottom;
16502
16503     if (tiVerificationNeeded)
16504     {
16505         // Track this ptr initialization
16506         if (!info.compIsStatic && (info.compFlags & CORINFO_FLG_CONSTRUCTOR) && lvaTable[0].lvVerTypeInfo.IsObjRef())
16507         {
16508             verTrackObjCtorInitState        = TRUE;
16509             verCurrentState.thisInitialized = TIS_Uninit;
16510         }
16511     }
16512
16513     // initialize stack info
16514
16515     verCurrentState.esStackDepth = 0;
16516     assert(verCurrentState.esStack != nullptr);
16517
16518     // copy current state to entry state of first BB
16519     verInitBBEntryState(fgFirstBB, &verCurrentState);
16520 }
16521
16522 Compiler* Compiler::impInlineRoot()
16523 {
16524     if (impInlineInfo == nullptr)
16525     {
16526         return this;
16527     }
16528     else
16529     {
16530         return impInlineInfo->InlineRoot;
16531     }
16532 }
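
// Note: the pending-block and spill-clique membership tables used below
// (impPendingBlockMembers, impSpillCliquePredMembers, impSpillCliqueSuccMembers)
// are only sized on the root compiler instance; inlinee compilers share the
// root's copies, which is why the accessors go through impInlineRoot().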
16533
16534 BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk)
16535 {
16536     if (predOrSucc == SpillCliquePred)
16537     {
16538         return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd());
16539     }
16540     else
16541     {
16542         assert(predOrSucc == SpillCliqueSucc);
16543         return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd());
16544     }
16545 }
16546
16547 void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val)
16548 {
16549     if (predOrSucc == SpillCliquePred)
16550     {
16551         impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val);
16552     }
16553     else
16554     {
16555         assert(predOrSucc == SpillCliqueSucc);
16556         impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val);
16557     }
16558 }
16559
16560 /*****************************************************************************
16561  *
16562  *  Convert the instrs ("import") into our internal format (trees). The
16563  *  basic flowgraph has already been constructed and is passed in.
16564  */
16565
16566 void Compiler::impImport(BasicBlock* method)
16567 {
16568 #ifdef DEBUG
16569     if (verbose)
16570     {
16571         printf("*************** In impImport() for %s\n", info.compFullName);
16572     }
16573 #endif
16574
16575     /* Allocate the stack contents */
16576
16577     if (info.compMaxStack <= sizeof(impSmallStack) / sizeof(impSmallStack[0]))
16578     {
16579         /* Use local variable, don't waste time allocating on the heap */
16580
16581         impStkSize              = sizeof(impSmallStack) / sizeof(impSmallStack[0]);
16582         verCurrentState.esStack = impSmallStack;
16583     }
16584     else
16585     {
16586         impStkSize              = info.compMaxStack;
16587         verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
16588     }
16589
16590     // initialize the entry state at start of method
16591     verInitCurrentState();
16592
16593     // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase).
16594     Compiler* inlineRoot = impInlineRoot();
16595     if (this == inlineRoot) // These are only used on the root of the inlining tree.
16596     {
16597         // We have initialized these previously, but to size 0.  Make them larger.
16598         impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2);
16599         impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2);
16600         impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2);
16601     }
16602     inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2);
16603     inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2);
16604     inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2);
16605     impBlockListNodeFreeList = nullptr;
16606
16607 #ifdef DEBUG
16608     impLastILoffsStmt   = nullptr;
16609     impNestedStackSpill = false;
16610 #endif
16611     impBoxTemp = BAD_VAR_NUM;
16612
16613     impPendingList = impPendingFree = nullptr;
16614
16615     /* Add the entry-point to the worker-list */
16616
16617     // Skip leading internal blocks. There can be one as a leading scratch BB, and more
16618     // from EH normalization.
16619     // NOTE: It might be possible to always just put fgFirstBB on the pending list, and let everything else just fall
16620     // out.
16621     for (; method->bbFlags & BBF_INTERNAL; method = method->bbNext)
16622     {
16623         // Treat these as imported.
16624         assert(method->bbJumpKind == BBJ_NONE); // We assume all the leading ones are fallthrough.
16625         JITDUMP("Marking leading BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", method->bbNum);
16626         method->bbFlags |= BBF_IMPORTED;
16627     }
16628
16629     impImportBlockPending(method);
16630
16631     /* Import blocks in the worker-list until there are no more */
16632
16633     while (impPendingList)
16634     {
16635         /* Remove the entry at the front of the list */
16636
16637         PendingDsc* dsc = impPendingList;
16638         impPendingList  = impPendingList->pdNext;
16639         impSetPendingBlockMember(dsc->pdBB, 0);
16640
16641         /* Restore the stack state */
16642
16643         verCurrentState.thisInitialized = dsc->pdThisPtrInit;
16644         verCurrentState.esStackDepth    = dsc->pdSavedStack.ssDepth;
16645         if (verCurrentState.esStackDepth)
16646         {
16647             impRestoreStackState(&dsc->pdSavedStack);
16648         }
16649
16650         /* Add the entry to the free list for reuse */
16651
16652         dsc->pdNext    = impPendingFree;
16653         impPendingFree = dsc;
16654
16655         /* Now import the block */
16656
16657         if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION)
16658         {
16659
16660 #ifdef _TARGET_64BIT_
16661             // On AMD64, during verification we have to match JIT64 behavior since the VM is very tightly
16662             // coupled with the JIT64 IL Verification logic.  Look inside verHandleVerificationFailure
16663             // method for further explanation on why we raise this exception instead of making the jitted
16664             // code throw the verification exception during execution.
16665             if (tiVerificationNeeded && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
16666             {
16667                 BADCODE("Basic block marked as not verifiable");
16668             }
16669             else
16670 #endif // _TARGET_64BIT_
16671             {
16672                 verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true));
16673                 impEndTreeList(dsc->pdBB);
16674             }
16675         }
16676         else
16677         {
16678             impImportBlock(dsc->pdBB);
16679
16680             if (compDonotInline())
16681             {
16682                 return;
16683             }
16684             if (compIsForImportOnly() && !tiVerificationNeeded)
16685             {
16686                 return;
16687             }
16688         }
16689     }
16690
16691 #ifdef DEBUG
16692     if (verbose && info.compXcptnsCount)
16693     {
16694         printf("\nAfter impImport() added block for try,catch,finally");
16695         fgDispBasicBlocks();
16696         printf("\n");
16697     }
16698
16699     // Used in impImportBlockPending() for STRESS_CHK_REIMPORT
16700     for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
16701     {
16702         block->bbFlags &= ~BBF_VISITED;
16703     }
16704 #endif
16705
16706     assert(!compIsForInlining() || !tiVerificationNeeded);
16707 }
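
// Note on the worker loop above: a block can be added to the pending list
// with a non-empty entry stack (a predecessor left values behind), so each
// PendingDsc snapshots the stack trees and the saved state is restored just
// before the block is imported; depth-0 entries skip the save/restore.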
16708
16709 // Checks if a typeinfo (usually stored in the type stack) is a struct.
16710 // The invariant here is that if it's not a ref or a method and has a class handle
16711 // it's a valuetype
16712 bool Compiler::impIsValueType(typeInfo* pTypeInfo)
16713 {
16714     if (pTypeInfo && pTypeInfo->IsValueClassWithClsHnd())
16715     {
16716         return true;
16717     }
16718     else
16719     {
16720         return false;
16721     }
16722 }
16723
16724 /*****************************************************************************
16725  *  Check to see if the tree is the address of a local or
16726  *  the address of a field in a local.
16727  *
16728  *  *lclVarTreeOut will contain the GT_LCL_VAR tree when it returns TRUE.
16729  *
16730  */
16731
16732 BOOL Compiler::impIsAddressInLocal(GenTreePtr tree, GenTreePtr* lclVarTreeOut)
16733 {
16734     if (tree->gtOper != GT_ADDR)
16735     {
16736         return FALSE;
16737     }
16738
16739     GenTreePtr op = tree->gtOp.gtOp1;
16740     while (op->gtOper == GT_FIELD)
16741     {
16742         op = op->gtField.gtFldObj;
16743         if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL.
16744         {
16745             op = op->gtOp.gtOp1;
16746         }
16747         else
16748         {
16749             return false;
16750         }
16751     }
16752
16753     if (op->gtOper == GT_LCL_VAR)
16754     {
16755         *lclVarTreeOut = op;
16756         return TRUE;
16757     }
16758     else
16759     {
16760         return FALSE;
16761     }
16762 }
16763
16764 //------------------------------------------------------------------------
16765 // impMakeDiscretionaryInlineObservations: make observations that help
16766 // determine the profitability of a discretionary inline
16767 //
16768 // Arguments:
16769 //    pInlineInfo -- InlineInfo for the inline, or null for the prejit root
16770 //    inlineResult -- InlineResult accumulating information about this inline
16771 //
16772 // Notes:
16773 //    If inlining or prejitting the root, this method also makes
16774 //    various observations about the method that factor into inline
16775 //    decisions. It sets `compNativeSizeEstimate` as a side effect.
16776
16777 void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult)
16778 {
16779     assert(pInlineInfo != nullptr && compIsForInlining() || // Perform the actual inlining.
16780            pInlineInfo == nullptr && !compIsForInlining()   // Calculate the static inlining hint for ngen.
16781            );
16782
16783     // If we're really inlining, we should just have one result in play.
16784     assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult));
16785
16786     // If this is a "forceinline" method, the JIT probably shouldn't have gone
16787     // to the trouble of estimating the native code size. Even if it did, it
16788     // shouldn't be relying on the result of this method.
16789     assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE);
16790
16791     // Note if the caller contains NEWOBJ or NEWARR.
16792     Compiler* rootCompiler = impInlineRoot();
16793
16794     if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0)
16795     {
16796         inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY);
16797     }
16798
16799     if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0)
16800     {
16801         inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ);
16802     }
16803
16804     bool calleeIsStatic  = (info.compFlags & CORINFO_FLG_STATIC) != 0;
16805     bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0;
16806
16807     if (isSpecialMethod)
16808     {
16809         if (calleeIsStatic)
16810         {
16811             inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR);
16812         }
16813         else
16814         {
16815             inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR);
16816         }
16817     }
16818     else if (!calleeIsStatic)
16819     {
16820         // Callee is an instance method.
16821         //
16822         // Check if the callee has the same 'this' as the root.
16823         if (pInlineInfo != nullptr)
16824         {
16825             GenTreePtr thisArg = pInlineInfo->iciCall->gtCall.gtCallObjp;
16826             assert(thisArg);
16827             bool isSameThis = impIsThis(thisArg);
16828             inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis);
16829         }
16830     }
16831
16832     // Note if the callee's class is a promotable struct
16833     if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0)
16834     {
16835         lvaStructPromotionInfo structPromotionInfo;
16836         lvaCanPromoteStructType(info.compClassHnd, &structPromotionInfo, false);
16837         if (structPromotionInfo.canPromote)
16838         {
16839             inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE);
16840         }
16841     }
16842
16843 #ifdef FEATURE_SIMD
16844
16845     // Note if this method has SIMD args or a return value
16846     if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn)
16847     {
16848         inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD);
16849     }
16850
16851 #endif // FEATURE_SIMD
16852
16853     // Roughly classify callsite frequency.
16854     InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED;
16855
16856     // If this is a prejit root, or a maximally hot block...
16857     if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->bbWeight >= BB_MAX_WEIGHT))
16858     {
16859         frequency = InlineCallsiteFrequency::HOT;
16860     }
16861     // No training data.  Look for loop-like things.
16862     // We consider a recursive call loop-like.  Do not give the inlining boost to the method itself.
16863     // However, give it to things nearby.
16864     else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) &&
16865              (pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle))
16866     {
16867         frequency = InlineCallsiteFrequency::LOOP;
16868     }
16869     else if ((pInlineInfo->iciBlock->bbFlags & BBF_PROF_WEIGHT) && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT))
16870     {
16871         frequency = InlineCallsiteFrequency::WARM;
16872     }
16873     // Now modify the multiplier based on where we're called from.
16874     // Otherwise, classify based on where we're called from.
16875     {
16876         frequency = InlineCallsiteFrequency::RARE;
16877     }
16878     else
16879     {
16880         frequency = InlineCallsiteFrequency::BORING;
16881     }
16882
16883     // Also capture the block weight of the call site.  In the prejit
16884     // root case, assume there's some hot call site for this method.
16885     unsigned weight = 0;
16886
16887     if (pInlineInfo != nullptr)
16888     {
16889         weight = pInlineInfo->iciBlock->bbWeight;
16890     }
16891     else
16892     {
16893         weight = BB_MAX_WEIGHT;
16894     }
16895
16896     inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency));
16897     inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, static_cast<int>(weight));
16898 }
16899
16900 /*****************************************************************************
16901  This method makes a STATIC inlining decision based on the IL code.
16902  It should not make any inlining decision based on the context.
16903  If forceInline is true, then the inlining decision should not depend on
16904  performance heuristics (code size, etc.).
16905  */
16906
16907 void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
16908                               CORINFO_METHOD_INFO*  methInfo,
16909                               bool                  forceInline,
16910                               InlineResult*         inlineResult)
16911 {
16912     unsigned codeSize = methInfo->ILCodeSize;
16913
16914     // We shouldn't have made up our minds yet...
16915     assert(!inlineResult->IsDecided());
16916
16917     if (methInfo->EHcount)
16918     {
16919         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH);
16920         return;
16921     }
16922
16923     if ((methInfo->ILCode == nullptr) || (codeSize == 0))
16924     {
16925         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY);
16926         return;
16927     }
16928
16929     // For now we don't inline varargs (import code can't handle it)
16930
16931     if (methInfo->args.isVarArg())
16932     {
16933         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
16934         return;
16935     }
16936
16937     // Reject if it has too many locals.
16938     // This is currently an implementation limit due to fixed-size arrays in the
16939     // inline info, rather than a performance heuristic.
16940
16941     inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs);
16942
16943     if (methInfo->locals.numArgs > MAX_INL_LCLS)
16944     {
16945         inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS);
16946         return;
16947     }
16948
16949     // Make sure there aren't too many arguments.
16950     // This is currently an implementation limit due to fixed-size arrays in the
16951     // inline info, rather than a performance heuristic.
16952
16953     inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs);
16954
16955     if (methInfo->args.numArgs > MAX_INL_ARGS)
16956     {
16957         inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS);
16958         return;
16959     }
16960
16961     // Note force inline state
16962
16963     inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline);
16964
16965     // Note IL code size
16966
16967     inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize);
16968
16969     if (inlineResult->IsFailure())
16970     {
16971         return;
16972     }
16973
16974     // Make sure maxstack is not too big
16975
16976     inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack);
16977
16978     if (inlineResult->IsFailure())
16979     {
16980         return;
16981     }
16982 }
16983
16984 /*****************************************************************************
16985  */
16986
16987 void Compiler::impCheckCanInline(GenTreePtr             call,
16988                                  CORINFO_METHOD_HANDLE  fncHandle,
16989                                  unsigned               methAttr,
16990                                  CORINFO_CONTEXT_HANDLE exactContextHnd,
16991                                  InlineCandidateInfo**  ppInlineCandidateInfo,
16992                                  InlineResult*          inlineResult)
16993 {
16994     // Either EE or JIT might throw exceptions below.
16995     // If that happens, just don't inline the method.
16996
16997     struct Param
16998     {
16999         Compiler*              pThis;
17000         GenTreePtr             call;
17001         CORINFO_METHOD_HANDLE  fncHandle;
17002         unsigned               methAttr;
17003         CORINFO_CONTEXT_HANDLE exactContextHnd;
17004         InlineResult*          result;
17005         InlineCandidateInfo**  ppInlineCandidateInfo;
17006     } param = {nullptr};
17007
17008     param.pThis                 = this;
17009     param.call                  = call;
17010     param.fncHandle             = fncHandle;
17011     param.methAttr              = methAttr;
17012     param.exactContextHnd       = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle);
17013     param.result                = inlineResult;
17014     param.ppInlineCandidateInfo = ppInlineCandidateInfo;
17015
17016     bool success = eeRunWithErrorTrap<Param>(
17017         [](Param* pParam) {
17018             DWORD                  dwRestrictions = 0;
17019             CorInfoInitClassResult initClassResult;
17020
17021 #ifdef DEBUG
17022             const char* methodName;
17023             const char* className;
17024             methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className);
17025
17026             if (JitConfig.JitNoInline())
17027             {
17028                 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE);
17029                 goto _exit;
17030             }
17031 #endif
17032
17033             /* Try to get the code address/size for the method */
17034
17035             CORINFO_METHOD_INFO methInfo;
17036             if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo))
17037             {
17038                 pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO);
17039                 goto _exit;
17040             }
17041
17042             bool forceInline;
17043             forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE);
17044
17045             pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result);
17046
17047             if (pParam->result->IsFailure())
17048             {
17049                 assert(pParam->result->IsNever());
17050                 goto _exit;
17051             }
17052
17053             // Speculatively check if initClass() can be done.
17054             // If it can be done, we will try to inline the method. If inlining
17055             // succeeds, then we will do the non-speculative initClass() and commit it.
17056             // If this speculative call to initClass() fails, there is no point
17057             // trying to inline this method.
17058             initClassResult =
17059                 pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */,
17060                                                            pParam->exactContextHnd /* context */,
17061                                                            TRUE /* speculative */);
17062
17063             if (initClassResult & CORINFO_INITCLASS_DONT_INLINE)
17064             {
17065                 pParam->result->NoteFatal(InlineObservation::CALLSITE_CLASS_INIT_FAILURE_SPEC);
17066                 goto _exit;
17067             }
17068
17069             // Give the EE the final say in whether to inline or not.
17070             // This should be last since for verifiable code, this can be expensive
17071
17072             /* VM Inline check also ensures that the method is verifiable if needed */
17073             CorInfoInline vmResult;
17074             vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle,
17075                                                                   &dwRestrictions);
17076
17077             if (vmResult == INLINE_FAIL)
17078             {
17079                 pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE);
17080             }
17081             else if (vmResult == INLINE_NEVER)
17082             {
17083                 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE);
17084             }
17085
17086             if (pParam->result->IsFailure())
17087             {
17088                 // Make sure not to report this one.  It was already reported by the VM.
17089                 pParam->result->SetReported();
17090                 goto _exit;
17091             }
17092
17093             // check for unsupported inlining restrictions
17094             assert((dwRestrictions & ~(INLINE_RESPECT_BOUNDARY | INLINE_NO_CALLEE_LDSTR | INLINE_SAME_THIS)) == 0);
17095
17096             if (dwRestrictions & INLINE_SAME_THIS)
17097             {
17098                 GenTreePtr thisArg = pParam->call->gtCall.gtCallObjp;
17099                 assert(thisArg);
17100
17101                 if (!pParam->pThis->impIsThis(thisArg))
17102                 {
17103                     pParam->result->NoteFatal(InlineObservation::CALLSITE_REQUIRES_SAME_THIS);
17104                     goto _exit;
17105                 }
17106             }
17107
17108             /* Get the method properties */
17109
17110             CORINFO_CLASS_HANDLE clsHandle;
17111             clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle);
17112             unsigned clsAttr;
17113             clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle);
17114
17115             /* Get the return type */
17116
17117             var_types fncRetType;
17118             fncRetType = pParam->call->TypeGet();
17119
17120 #ifdef DEBUG
17121             var_types fncRealRetType;
17122             fncRealRetType = JITtype2varType(methInfo.args.retType);
17123
17124             assert((genActualType(fncRealRetType) == genActualType(fncRetType)) ||
17125                    // <BUGNUM> VSW 288602 </BUGNUM>
17126                    // In case of IJW, we allow to assign a native pointer to a BYREF.
17127                    (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) ||
17128                    (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT)));
17129 #endif
17130
17131             //
17132             // Allocate an InlineCandidateInfo structure
17133             //
17134             InlineCandidateInfo* pInfo;
17135             pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo;
17136
17137             pInfo->dwRestrictions  = dwRestrictions;
17138             pInfo->methInfo        = methInfo;
17139             pInfo->methAttr        = pParam->methAttr;
17140             pInfo->clsHandle       = clsHandle;
17141             pInfo->clsAttr         = clsAttr;
17142             pInfo->fncRetType      = fncRetType;
17143             pInfo->exactContextHnd = pParam->exactContextHnd;
17144             pInfo->ilCallerHandle  = pParam->pThis->info.compMethodHnd;
17145             pInfo->initClassResult = initClassResult;
17146
17147             *(pParam->ppInlineCandidateInfo) = pInfo;
17148
17149         _exit:;
17150         },
17151         &param);
17152     if (!success)
17153     {
17154         param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR);
17155     }
17156 }
17157
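//------------------------------------------------------------------------
// impInlineRecordArgInfo: record information about one argument to an
//    inline candidate call.
//
// Arguments:
//    pInlineInfo  - inline info for the inline candidate
//    curArgVal    - tree for the argument value at the call site
//    argNum       - ordinal of the argument (0 is 'this', when present)
//    inlineResult - result object; noted fatal if this argument makes the
//                   call ineligible for inlining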
17158 void Compiler::impInlineRecordArgInfo(InlineInfo*   pInlineInfo,
17159                                       GenTreePtr    curArgVal,
17160                                       unsigned      argNum,
17161                                       InlineResult* inlineResult)
17162 {
17163     InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum];
17164
17165     if (curArgVal->gtOper == GT_MKREFANY)
17166     {
17167         inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY);
17168         return;
17169     }
17170
17171     inlCurArgInfo->argNode = curArgVal;
17172
17173     GenTreePtr lclVarTree;
17174     if (impIsAddressInLocal(curArgVal, &lclVarTree) && varTypeIsStruct(lclVarTree))
17175     {
17176         inlCurArgInfo->argIsByRefToStructLocal = true;
17177 #ifdef FEATURE_SIMD
17178         if (lvaTable[lclVarTree->AsLclVarCommon()->gtLclNum].lvSIMDType)
17179         {
17180             pInlineInfo->hasSIMDTypeArgLocalOrReturn = true;
17181         }
17182 #endif // FEATURE_SIMD
17183     }
17184
17185     if (curArgVal->gtFlags & GTF_ALL_EFFECT)
17186     {
17187         inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0;
17188         inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0;
17189     }
17190
17191     if (curArgVal->gtOper == GT_LCL_VAR)
17192     {
17193         inlCurArgInfo->argIsLclVar = true;
17194
17195         /* Remember the "original" argument number */
17196         curArgVal->gtLclVar.gtLclILoffs = argNum;
17197     }
17198
17199     if ((curArgVal->OperKind() & GTK_CONST) ||
17200         ((curArgVal->gtOper == GT_ADDR) && (curArgVal->gtOp.gtOp1->gtOper == GT_LCL_VAR)))
17201     {
17202         inlCurArgInfo->argIsInvariant = true;
17203         if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->gtIntCon.gtIconVal == 0))
17204         {
17205             /* Abort, but do not mark as not inlinable */
17206             inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS);
17207             return;
17208         }
17209     }
17210
17211     if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal))
17212     {
17213         inlCurArgInfo->argHasLdargaOp = true;
17214     }
17215
17216 #ifdef DEBUG
17217     if (verbose)
17218     {
17219         if (inlCurArgInfo->argIsThis)
17220         {
17221             printf("thisArg:");
17222         }
17223         else
17224         {
17225             printf("\nArgument #%u:", argNum);
17226         }
17227         if (inlCurArgInfo->argIsLclVar)
17228         {
17229             printf(" is a local var");
17230         }
17231         if (inlCurArgInfo->argIsInvariant)
17232         {
17233             printf(" is a constant");
17234         }
17235         if (inlCurArgInfo->argHasGlobRef)
17236         {
17237             printf(" has global refs");
17238         }
17239         if (inlCurArgInfo->argHasSideEff)
17240         {
17241             printf(" has side effects");
17242         }
17243         if (inlCurArgInfo->argHasLdargaOp)
17244         {
17245             printf(" has ldarga effect");
17246         }
17247         if (inlCurArgInfo->argHasStargOp)
17248         {
17249             printf(" has starg effect");
17250         }
17251         if (inlCurArgInfo->argIsByRefToStructLocal)
17252         {
17253             printf(" is byref to a struct local");
17254         }
17255
17256         printf("\n");
17257         gtDispTree(curArgVal);
17258         printf("\n");
17259     }
17260 #endif
17261 }
17262
17263 /*****************************************************************************
17264  *  Collect type and usage information for the arguments and locals of an inline candidate.
17265  */
17266
17267 void Compiler::impInlineInitVars(InlineInfo* pInlineInfo)
17268 {
17269     assert(!compIsForInlining());
17270
17271     GenTreePtr           call         = pInlineInfo->iciCall;
17272     CORINFO_METHOD_INFO* methInfo     = &pInlineInfo->inlineCandidateInfo->methInfo;
17273     unsigned             clsAttr      = pInlineInfo->inlineCandidateInfo->clsAttr;
17274     InlArgInfo*          inlArgInfo   = pInlineInfo->inlArgInfo;
17275     InlLclVarInfo*       lclVarInfo   = pInlineInfo->lclVarInfo;
17276     InlineResult*        inlineResult = pInlineInfo->inlineResult;
17277
17278     const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo);
17279
17280     /* Init the argument struct */
17281
17282     memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0]));
17283
17284     /* Get hold of the 'this' pointer and the argument list proper */
17285
17286     GenTreePtr thisArg = call->gtCall.gtCallObjp;
17287     GenTreePtr argList = call->gtCall.gtCallArgs;
17288     unsigned   argCnt  = 0; // Count of the arguments
17289
17290     assert((methInfo->args.hasThis()) == (thisArg != nullptr));
17291
17292     if (thisArg)
17293     {
17294         inlArgInfo[0].argIsThis = true;
17295
17296         impInlineRecordArgInfo(pInlineInfo, thisArg, argCnt, inlineResult);
17297
17298         if (inlineResult->IsFailure())
17299         {
17300             return;
17301         }
17302
17303         /* Increment the argument count */
17304         argCnt++;
17305     }
17306
17307     /* Record some information about each of the arguments */
17308     bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0;
17309
17310 #if USER_ARGS_COME_LAST
17311     unsigned typeCtxtArg = thisArg ? 1 : 0;
17312 #else  // USER_ARGS_COME_LAST
17313     unsigned typeCtxtArg = methInfo->args.totalILArgs();
17314 #endif // USER_ARGS_COME_LAST
17315
17316     for (GenTreePtr argTmp = argList; argTmp; argTmp = argTmp->gtOp.gtOp2)
17317     {
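        // The first entry in the argument list is the return buffer when one is
        // present; it is not an IL argument, so skip it.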
17318         if (argTmp == argList && hasRetBuffArg)
17319         {
17320             continue;
17321         }
17322
17323         // Ignore the type context argument
17324         if (hasTypeCtxtArg && (argCnt == typeCtxtArg))
17325         {
17326             typeCtxtArg = 0xFFFFFFFF;
17327             continue;
17328         }
17329
17330         assert(argTmp->gtOper == GT_LIST);
17331         GenTreePtr argVal = argTmp->gtOp.gtOp1;
17332
17333         impInlineRecordArgInfo(pInlineInfo, argVal, argCnt, inlineResult);
17334
17335         if (inlineResult->IsFailure())
17336         {
17337             return;
17338         }
17339
17340         /* Increment the argument count */
17341         argCnt++;
17342     }
17343
17344     /* Make sure we got the arg number right */
17345     assert(argCnt == methInfo->args.totalILArgs());
17346
17347 #ifdef FEATURE_SIMD
17348     bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn;
17349 #endif // FEATURE_SIMD
17350
17351     /* We have typeless opcodes, get type information from the signature */
17352
17353     if (thisArg)
17354     {
17355         var_types sigType;
17356
17357         if (clsAttr & CORINFO_FLG_VALUECLASS)
17358         {
17359             sigType = TYP_BYREF;
17360         }
17361         else
17362         {
17363             sigType = TYP_REF;
17364         }
17365
17366         lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle);
17367         lclVarInfo[0].lclHasLdlocaOp = false;
17368
17369 #ifdef FEATURE_SIMD
17370         // We always want to check isSIMDClass, since we want to set foundSIMDType (to increase
17371         // the inlining multiplier) for anything in that assembly.
17372         // But we only need to normalize it if it is a TYP_STRUCT
17373         // (which we need to do even if we have already set foundSIMDType).
17374         if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
17375         {
17376             if (sigType == TYP_STRUCT)
17377             {
17378                 sigType = impNormStructType(lclVarInfo[0].lclVerTypeInfo.GetClassHandle());
17379             }
17380             foundSIMDType = true;
17381         }
17382 #endif // FEATURE_SIMD
17383         lclVarInfo[0].lclTypeInfo = sigType;
17384
17385         assert(varTypeIsGC(thisArg->gtType) ||   // "this" is managed
17386                (thisArg->gtType == TYP_I_IMPL && // "this" is unmanaged but the method's class doesn't care
17387                 (clsAttr & CORINFO_FLG_VALUECLASS)));
17388
17389         if (genActualType(thisArg->gtType) != genActualType(sigType))
17390         {
17391             if (sigType == TYP_REF)
17392             {
17393                 /* The argument cannot be bashed into a ref (see bug 750871) */
17394                 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF);
17395                 return;
17396             }
17397
17398             /* This can only happen with byrefs <-> ints/shorts */
17399
17400             assert(genActualType(sigType) == TYP_I_IMPL || sigType == TYP_BYREF);
17401             assert(genActualType(thisArg->gtType) == TYP_I_IMPL || thisArg->gtType == TYP_BYREF);
17402
17403             if (sigType == TYP_BYREF)
17404             {
17405                 lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17406             }
17407             else if (thisArg->gtType == TYP_BYREF)
17408             {
17409                 assert(sigType == TYP_I_IMPL);
17410
17411                 /* If possible change the BYREF to an int */
17412                 if (thisArg->IsVarAddr())
17413                 {
17414                     thisArg->gtType              = TYP_I_IMPL;
17415                     lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17416                 }
17417                 else
17418                 {
17419                     /* Arguments 'int <- byref' cannot be bashed */
17420                     inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
17421                     return;
17422                 }
17423             }
17424         }
17425     }
17426
17427     /* Init the types of the arguments and make sure the types
17428      * from the trees match the types in the signature */
17429
17430     CORINFO_ARG_LIST_HANDLE argLst;
17431     argLst = methInfo->args.args;
17432
17433     unsigned i;
17434     for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst))
17435     {
17436         var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args);
17437
17438         lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst);
17439
17440 #ifdef FEATURE_SIMD
17441         if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
17442         {
17443             // If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've
17444             // found a SIMD type, even if this may not be a type we recognize (the assumption is that
17445             // it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier).
17446             foundSIMDType = true;
17447             if (sigType == TYP_STRUCT)
17448             {
17449                 var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle());
17450                 sigType              = structType;
17451             }
17452         }
17453 #endif // FEATURE_SIMD
17454
17455         lclVarInfo[i].lclTypeInfo    = sigType;
17456         lclVarInfo[i].lclHasLdlocaOp = false;
17457
17458         /* Does the tree type match the signature type? */
17459
17460         GenTreePtr inlArgNode = inlArgInfo[i].argNode;
17461
17462         if (sigType != inlArgNode->gtType)
17463         {
17464             /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints,
17465                but in bad IL cases with caller-callee signature mismatches we can see other types.
17466                Intentionally reject the inline for such mismatches (rather than asserting) so the
17467                jit remains tolerant when encountering bad IL. */
17468
17469             bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) ||
17470                                         (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) ||
17471                                         (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType));
17472
17473             if (!isPlausibleTypeMatch)
17474             {
17475                 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE);
17476                 return;
17477             }
17478
17479             /* Is it a narrowing or widening cast?
17480              * Widening casts are ok since the value computed is already
17481              * normalized to an int (on the IL stack) */
17482
17483             if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType))
17484             {
17485                 if (sigType == TYP_BYREF)
17486                 {
17487                     lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17488                 }
17489                 else if (inlArgNode->gtType == TYP_BYREF)
17490                 {
17491                     assert(varTypeIsIntOrI(sigType));
17492
17493                     /* If possible bash the BYREF to an int */
17494                     if (inlArgNode->IsVarAddr())
17495                     {
17496                         inlArgNode->gtType           = TYP_I_IMPL;
17497                         lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17498                     }
17499                     else
17500                     {
17501                         /* Arguments 'int <- byref' cannot be changed */
17502                         inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
17503                         return;
17504                     }
17505                 }
17506                 else if (genTypeSize(sigType) < EA_PTRSIZE)
17507                 {
17508                     /* Narrowing cast */
17509
17510                     if (inlArgNode->gtOper == GT_LCL_VAR &&
17511                         !lvaTable[inlArgNode->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad() &&
17512                         sigType == lvaGetRealType(inlArgNode->gtLclVarCommon.gtLclNum))
17513                     {
17514                         /* We don't need to insert a cast here as the variable
17515                            was assigned a normalized value of the right type */
17516
17517                         continue;
17518                     }
17519
17520                     inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(TYP_INT, inlArgNode, sigType);
17521
17522                     inlArgInfo[i].argIsLclVar = false;
17523
17524                     /* Try to fold the node in case we have constant arguments */
17525
17526                     if (inlArgInfo[i].argIsInvariant)
17527                     {
17528                         inlArgNode            = gtFoldExprConst(inlArgNode);
17529                         inlArgInfo[i].argNode = inlArgNode;
17530                         assert(inlArgNode->OperIsConst());
17531                     }
17532                 }
17533 #ifdef _TARGET_64BIT_
17534                 else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType))
17535                 {
17536                     // This should only happen for int -> native int widening
17537                     inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(genActualType(sigType), inlArgNode, sigType);
17538
17539                     inlArgInfo[i].argIsLclVar = false;
17540
17541                     /* Try to fold the node in case we have constant arguments */
17542
17543                     if (inlArgInfo[i].argIsInvariant)
17544                     {
17545                         inlArgNode            = gtFoldExprConst(inlArgNode);
17546                         inlArgInfo[i].argNode = inlArgNode;
17547                         assert(inlArgNode->OperIsConst());
17548                     }
17549                 }
17550 #endif // _TARGET_64BIT_
17551             }
17552         }
17553     }
17554
17555     /* Init the types of the local variables */
17556
17557     CORINFO_ARG_LIST_HANDLE localsSig;
17558     localsSig = methInfo->locals.args;
17559
17560     for (i = 0; i < methInfo->locals.numArgs; i++)
17561     {
17562         bool      isPinned;
17563         var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned);
17564
17565         lclVarInfo[i + argCnt].lclHasLdlocaOp = false;
17566         lclVarInfo[i + argCnt].lclIsPinned    = isPinned;
17567         lclVarInfo[i + argCnt].lclTypeInfo    = type;
17568
17569         if (isPinned)
17570         {
17571             // Pinned locals may cause inlines to fail.
17572             inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS);
17573             if (inlineResult->IsFailure())
17574             {
17575                 return;
17576             }
17577         }
17578
17579         lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig);
17580
17581         // If this local is a struct type with GC fields, inform the inliner. It may choose to bail
17582         // out on the inline.
17583         if (type == TYP_STRUCT)
17584         {
17585             CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle();
17586             DWORD                typeFlags = info.compCompHnd->getClassAttribs(lclHandle);
17587             if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
17588             {
17589                 inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
17590                 if (inlineResult->IsFailure())
17591                 {
17592                     return;
17593                 }
17594
17595                 // Do further notification in the case where the call site is rare; some policies do
17596                 // not track the relative hotness of call sites for "always" inline cases.
17597                 if (pInlineInfo->iciBlock->isRunRarely())
17598                 {
17599                     inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
17600                     if (inlineResult->IsFailure())
17601                     {
17602
17603                         return;
17604                     }
17605                 }
17606             }
17607         }
17608
17609         localsSig = info.compCompHnd->getArgNext(localsSig);
17610
17611 #ifdef FEATURE_SIMD
17612         if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo)))
17613         {
17614             foundSIMDType = true;
17615             if (featureSIMD && type == TYP_STRUCT)
17616             {
17617                 var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle());
17618                 lclVarInfo[i + argCnt].lclTypeInfo = structType;
17619             }
17620         }
17621 #endif // FEATURE_SIMD
17622     }
17623
17624 #ifdef FEATURE_SIMD
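    // A SIMD return type also counts toward the SIMD heuristic, even if no
    // argument or local was a SIMD type.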
17625     if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDClass(call->AsCall()->gtRetClsHnd))
17626     {
17627         foundSIMDType = true;
17628     }
17629     pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType;
17630 #endif // FEATURE_SIMD
17631 }
17632
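//------------------------------------------------------------------------
// impInlineFetchLocal: get the caller-side temp that stands in for an
//    inlinee local, allocating and typing the temp on first use.
//
// Arguments:
//    lclNum - the inlinee's local number
//    reason - debug-only description used when grabbing the temp
//
// Return Value:
//    The caller-side local number for the inlinee local.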
17633 unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason))
17634 {
17635     assert(compIsForInlining());
17636
17637     unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum];
17638
17639     if (tmpNum == BAD_VAR_NUM)
17640     {
17641         var_types lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
17642
17643         // The lifetime of this local might span multiple BBs.
17644         // So it is a long lifetime local.
17645         impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason));
17646
17647         lvaTable[tmpNum].lvType = lclTyp;
17648         if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclHasLdlocaOp)
17649         {
17650             lvaTable[tmpNum].lvHasLdAddrOp = 1;
17651         }
17652
17653         if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclIsPinned)
17654         {
17655             lvaTable[tmpNum].lvPinned = 1;
17656
17657             if (!impInlineInfo->hasPinnedLocals)
17658             {
17659                 // If the inlinee returns a value, use a spill temp
17660                 // for the return value to ensure that even in case
17661                 // where the return expression refers to one of the
17662                 // pinned locals, we can unpin the local right after
17663                 // the inlined method body.
17664                 if ((info.compRetNativeType != TYP_VOID) && (lvaInlineeReturnSpillTemp == BAD_VAR_NUM))
17665                 {
17666                     lvaInlineeReturnSpillTemp =
17667                         lvaGrabTemp(false DEBUGARG("Inline candidate pinned local return spill temp"));
17668                     lvaTable[lvaInlineeReturnSpillTemp].lvType = info.compRetNativeType;
17669                 }
17670             }
17671
17672             impInlineInfo->hasPinnedLocals = true;
17673         }
17674
17675         if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo.IsStruct())
17676         {
17677             if (varTypeIsStruct(lclTyp))
17678             {
17679                 lvaSetStruct(tmpNum,
17680                              impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo.GetClassHandle(),
17681                              true /* unsafe value cls check */);
17682             }
17683             else
17684             {
17685                 // This is a wrapped primitive.  Make sure the verstate knows that
17686                 lvaTable[tmpNum].lvVerTypeInfo =
17687                     impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo;
17688             }
17689         }
17690     }
17691
17692     return tmpNum;
17693 }
17694
17695 // A method used to return the GenTree (usually a GT_LCL_VAR) representing an argument of the inlined method.
17696 // Only use this method for the arguments of the inlinee method.
17697 // !!! Do not use it for the locals of the inlinee method; use impInlineFetchLocal for those. !!!
17698
17699 GenTreePtr Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo)
17700 {
17701     /* Get the argument type */
17702     var_types lclTyp = lclVarInfo[lclNum].lclTypeInfo;
17703
17704     GenTreePtr op1 = nullptr;
17705
17706     // constant or address of local
17707     if (inlArgInfo[lclNum].argIsInvariant && !inlArgInfo[lclNum].argHasLdargaOp && !inlArgInfo[lclNum].argHasStargOp)
17708     {
17709         /* Clone the constant. Note that we cannot directly use argNode
17710         in the trees even if inlArgInfo[lclNum].argIsUsed==false as this
17711         would introduce aliasing between inlArgInfo[].argNode and
17712         impInlineExpr. Then gtFoldExpr() could change it, causing further
17713         references to the argument working off of the bashed copy. */
17714
17715         op1 = gtCloneExpr(inlArgInfo[lclNum].argNode);
17716         PREFIX_ASSUME(op1 != nullptr);
17717         inlArgInfo[lclNum].argTmpNum = (unsigned)-1; // illegal temp
17718     }
17719     else if (inlArgInfo[lclNum].argIsLclVar && !inlArgInfo[lclNum].argHasLdargaOp && !inlArgInfo[lclNum].argHasStargOp)
17720     {
17721         /* Argument is a local variable (of the caller)
17722          * Can we re-use the passed argument node? */
17723
17724         op1                          = inlArgInfo[lclNum].argNode;
17725         inlArgInfo[lclNum].argTmpNum = op1->gtLclVarCommon.gtLclNum;
17726
17727         if (inlArgInfo[lclNum].argIsUsed)
17728         {
17729             assert(op1->gtOper == GT_LCL_VAR);
17730             assert(lclNum == op1->gtLclVar.gtLclILoffs);
17731
17732             if (!lvaTable[op1->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad())
17733             {
17734                 lclTyp = genActualType(lclTyp);
17735             }
17736
17737             /* Create a new lcl var node - remember the argument lclNum */
17738             op1 = gtNewLclvNode(op1->gtLclVarCommon.gtLclNum, lclTyp, op1->gtLclVar.gtLclILoffs);
17739         }
17740     }
17741     else if (inlArgInfo[lclNum].argIsByRefToStructLocal && !inlArgInfo[lclNum].argHasStargOp)
17742     {
17743         /* Argument is a by-ref address to a struct, a normed struct, or its field.
17744            In these cases, don't spill the byref to a local, simply clone the tree and use it.
17745            This way we will increase the chance for this byref to be optimized away by
17746            a subsequent "dereference" operation.
17747
17748            From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree
17749            (in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal.
17750            For example, if the caller is:
17751                 ldloca.s   V_1  // V_1 is a local struct
17752                 call       void Test.ILPart::RunLdargaOnPointerArg(int32*)
17753            and the callee being inlined has:
17754                 .method public static void  RunLdargaOnPointerArg(int32* ptrToInts) cil managed
17755                     ldarga.s   ptrToInts
17756                     call       void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**)
17757            then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll
17758            soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR.
17759         */
17760         assert(inlArgInfo[lclNum].argNode->TypeGet() == TYP_BYREF ||
17761                inlArgInfo[lclNum].argNode->TypeGet() == TYP_I_IMPL);
17762         op1 = gtCloneExpr(inlArgInfo[lclNum].argNode);
17763     }
17764     else
17765     {
17766         /* Argument is a complex expression - it must be evaluated into a temp */
17767
17768         if (inlArgInfo[lclNum].argHasTmp)
17769         {
17770             assert(inlArgInfo[lclNum].argIsUsed);
17771             assert(inlArgInfo[lclNum].argTmpNum < lvaCount);
17772
17773             /* Create a new lcl var node - remember the argument lclNum */
17774             op1 = gtNewLclvNode(inlArgInfo[lclNum].argTmpNum, genActualType(lclTyp));
17775
17776             /* This is the second or later use of the this argument,
17777             so we have to use the temp (instead of the actual arg) */
17778             inlArgInfo[lclNum].argBashTmpNode = nullptr;
17779         }
17780         else
17781         {
17782             /* First time use */
17783             assert(inlArgInfo[lclNum].argIsUsed == false);
17784
17785             /* Reserve a temp for the expression.
17786             * Use a large size node as we may change it later */
17787
17788             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg"));
17789
17790             lvaTable[tmpNum].lvType = lclTyp;
17791             assert(lvaTable[tmpNum].lvAddrExposed == 0);
17792             if (inlArgInfo[lclNum].argHasLdargaOp)
17793             {
17794                 lvaTable[tmpNum].lvHasLdAddrOp = 1;
17795             }
17796
17797             if (lclVarInfo[lclNum].lclVerTypeInfo.IsStruct())
17798             {
17799                 if (varTypeIsStruct(lclTyp))
17800                 {
17801                     lvaSetStruct(tmpNum, impInlineInfo->lclVarInfo[lclNum].lclVerTypeInfo.GetClassHandle(),
17802                                  true /* unsafe value cls check */);
17803                 }
17804                 else
17805                 {
17806                     // This is a wrapped primitive.  Make sure the verstate knows that
17807                     lvaTable[tmpNum].lvVerTypeInfo = impInlineInfo->lclVarInfo[lclNum].lclVerTypeInfo;
17808                 }
17809             }
17810
17811             inlArgInfo[lclNum].argHasTmp = true;
17812             inlArgInfo[lclNum].argTmpNum = tmpNum;
17813
17814             // If we require strict exception order, then arguments must
17815             // be evaluated in sequence before the body of the inlined method.
17816             // So we need to evaluate them to a temp.
17817             // Also, if arguments have global references, we need to
17818             // evaluate them to a temp before the inlined body as the
17819             // inlined body may be modifying the global ref.
17820             // TODO-1stClassStructs: We currently do not reuse an existing lclVar
17821             // if it is a struct, because it requires some additional handling.
17822
17823             if (!varTypeIsStruct(lclTyp) && (!inlArgInfo[lclNum].argHasSideEff) && (!inlArgInfo[lclNum].argHasGlobRef))
17824             {
17825                 /* Get a *LARGE* LCL_VAR node */
17826                 op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp), lclNum);
17827
17828                 /* Record op1 as the very first use of this argument.
17829                 If there are no further uses of the arg, we may be
17830                 able to use the actual arg node instead of the temp.
17831                 If we do see any further uses, we will clear this. */
17832                 inlArgInfo[lclNum].argBashTmpNode = op1;
17833             }
17834             else
17835             {
17836                 /* Get a small LCL_VAR node */
17837                 op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp));
17838                 /* No bashing of this argument */
17839                 inlArgInfo[lclNum].argBashTmpNode = nullptr;
17840             }
17841         }
17842     }
17843
17844     /* Mark the argument as used */
17845
17846     inlArgInfo[lclNum].argIsUsed = true;
17847
17848     return op1;
17849 }
17850
17851 /******************************************************************************
17852  Is this the original "this" argument to the call being inlined?
17853
17854  Note that we do not inline methods with "starg 0", and so we do not need to
17855  worry about it.
17856 */
17857
17858 BOOL Compiler::impInlineIsThis(GenTreePtr tree, InlArgInfo* inlArgInfo)
17859 {
17860     assert(compIsForInlining());
17861     return (tree->gtOper == GT_LCL_VAR && tree->gtLclVarCommon.gtLclNum == inlArgInfo[0].argTmpNum);
17862 }
17863
17864 //-----------------------------------------------------------------------------
17865 // This function checks if a dereference in the inlinee can guarantee that
17866 // the "this" is non-NULL.
17867 // If we haven't hit a branch or a side effect, and we are dereferencing
17868 // from 'this' to access a field or make a GTF_CALL_NULLCHECK call,
17869 // then we can avoid a separate null pointer check.
17870 //
17871 // "additionalTreesToBeEvaluatedBefore"
17872 // is the set of pending trees that have not yet been added to the statement list,
17873 // and which have been removed from verCurrentState.esStack[]
17874
17875 BOOL Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTreePtr  additionalTreesToBeEvaluatedBefore,
17876                                                                   GenTreePtr  variableBeingDereferenced,
17877                                                                   InlArgInfo* inlArgInfo)
17878 {
17879     assert(compIsForInlining());
17880     assert(opts.OptEnabled(CLFLG_INLINING));
17881
17882     BasicBlock* block = compCurBB;
17883
17884     GenTreePtr stmt;
17885     GenTreePtr expr;
17886
17887     if (block != fgFirstBB)
17888     {
17889         return FALSE;
17890     }
17891
17892     if (!impInlineIsThis(variableBeingDereferenced, inlArgInfo))
17893     {
17894         return FALSE;
17895     }
17896
17897     if (additionalTreesToBeEvaluatedBefore &&
17898         GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTreesToBeEvaluatedBefore->gtFlags))
17899     {
17900         return FALSE;
17901     }
17902
17903     for (stmt = impTreeList->gtNext; stmt; stmt = stmt->gtNext)
17904     {
17905         expr = stmt->gtStmt.gtStmtExpr;
17906
17907         if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags))
17908         {
17909             return FALSE;
17910         }
17911     }
17912
17913     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
17914     {
17915         unsigned stackTreeFlags = verCurrentState.esStack[level].val->gtFlags;
17916         if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags))
17917         {
17918             return FALSE;
17919         }
17920     }
17921
17922     return TRUE;
17923 }
17924
17925 /******************************************************************************/
17926 // Check the inlining eligibility of this GT_CALL node.
17927 // Mark GTF_CALL_INLINE_CANDIDATE on the GT_CALL node if it is eligible.
17928
17929 // TODO: find a way to record the failure reasons in the IR (or
17930 // otherwise build tree context) so that when we do the inlining pass we
17931 // can capture these reasons.
17932
17933 void Compiler::impMarkInlineCandidate(GenTreePtr             callNode,
17934                                       CORINFO_CONTEXT_HANDLE exactContextHnd,
17935                                       CORINFO_CALL_INFO*     callInfo)
17936 {
17937     // Let the strategy know there's another call
17938     impInlineRoot()->m_inlineStrategy->NoteCall();
17939
17940     if (!opts.OptEnabled(CLFLG_INLINING))
17941     {
17942         /* XXX Mon 8/18/2008
17943          * This assert is misleading.  The caller does not ensure that we have CLFLG_INLINING set before
17944          * calling impMarkInlineCandidate.  However, if this assert trips it means that we're an inlinee and
17945          * CLFLG_MINOPT is set.  That doesn't make a lot of sense.  If you hit this assert, work back and
17946          * figure out why we did not set MAXOPT for this compile.
17947          */
17948         assert(!compIsForInlining());
17949         return;
17950     }
17951
17952     if (compIsForImportOnly())
17953     {
17954         // Don't bother creating the inline candidate during verification.
17955         // Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification
17956         // that leads to the creation of multiple instances of Compiler.
17957         return;
17958     }
17959
17960     GenTreeCall* call = callNode->AsCall();
17961     InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate");
17962
17963     // Don't inline if not optimizing root method
17964     if (opts.compDbgCode)
17965     {
17966         inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN);
17967         return;
17968     }
17969
17970     // Don't inline if inlining into root method is disabled.
17971     if (InlineStrategy::IsNoInline(info.compCompHnd, info.compMethodHnd))
17972     {
17973         inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE);
17974         return;
17975     }
17976
17977     // Inlining candidate determination needs to honor only IL tail prefix.
17978     // Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive).
17979     if (call->IsTailPrefixedCall())
17980     {
17981         inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX);
17982         return;
17983     }
17984
17985     // Tail recursion elimination takes precedence over inlining.
17986     // TODO: We may want to do some of the additional checks from fgMorphCall
17987     // here to reduce the chance we don't inline a call that won't be optimized
17988     // as a fast tail call or turned into a loop.
17989     if (gtIsRecursiveCall(call) && call->IsImplicitTailCall())
17990     {
17991         inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL);
17992         return;
17993     }
17994
17995     if ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT)
17996     {
17997         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT);
17998         return;
17999     }
18000
18001     /* Ignore helper calls */
18002
18003     if (call->gtCallType == CT_HELPER)
18004     {
18005         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER);
18006         return;
18007     }
18008
18009     /* Ignore indirect calls */
18010     if (call->gtCallType == CT_INDIRECT)
18011     {
18012         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED);
18013         return;
18014     }
18015
18016     /* I removed the check for BBJ_THROW.  BBJ_THROW is usually marked as rarely run.  This more or less
18017      * restricts the inliner to non-expanding inlines.  I removed the check to allow for non-expanding
18018      * inlining in throw blocks.  I should consider the same thing for catch and filter regions. */
18019
18020     CORINFO_METHOD_HANDLE fncHandle = call->gtCallMethHnd;
18021     unsigned              methAttr;
18022
18023     // Reuse method flags from the original callInfo if possible
18024     if (fncHandle == callInfo->hMethod)
18025     {
18026         methAttr = callInfo->methodFlags;
18027     }
18028     else
18029     {
18030         methAttr = info.compCompHnd->getMethodAttribs(fncHandle);
18031     }
18032
18033 #ifdef DEBUG
18034     if (compStressCompile(STRESS_FORCE_INLINE, 0))
18035     {
18036         methAttr |= CORINFO_FLG_FORCEINLINE;
18037     }
18038 #endif
18039
18040     // Check for COMPlus_AggressiveInlining
18041     if (compDoAggressiveInlining)
18042     {
18043         methAttr |= CORINFO_FLG_FORCEINLINE;
18044     }
18045
18046     if (!(methAttr & CORINFO_FLG_FORCEINLINE))
18047     {
18048         /* Don't bother inlining blocks that are in the catch handler or filter regions */
18049         if (bbInCatchHandlerILRange(compCurBB))
18050         {
18051 #ifdef DEBUG
18052             if (verbose)
18053             {
18054                 printf("\nWill not inline blocks that are in the catch handler region\n");
18055             }
18056
18057 #endif
18058
18059             inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH);
18060             return;
18061         }
18062
18063         if (bbInFilterILRange(compCurBB))
18064         {
18065 #ifdef DEBUG
18066             if (verbose)
18067             {
18068                 printf("\nWill not inline blocks that are in the filter region\n");
18069             }
18070 #endif
18071
18072             inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER);
18073             return;
18074         }
18075     }
18076
18077     /* If the caller's stack frame is marked, then we can't do any inlining. Period. */
18078
18079     if (opts.compNeedSecurityCheck)
18080     {
18081         inlineResult.NoteFatal(InlineObservation::CALLER_NEEDS_SECURITY_CHECK);
18082         return;
18083     }
18084
18085     /* Check if we tried to inline this method before */
18086
18087     if (methAttr & CORINFO_FLG_DONT_INLINE)
18088     {
18089         inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE);
18090         return;
18091     }
18092
18093     /* Cannot inline synchronized methods */
18094
18095     if (methAttr & CORINFO_FLG_SYNCH)
18096     {
18097         inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED);
18098         return;
18099     }
18100
18101     /* Do not inline if callee needs security checks (since they would then mark the wrong frame) */
18102
18103     if (methAttr & CORINFO_FLG_SECURITYCHECK)
18104     {
18105         inlineResult.NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
18106         return;
18107     }
18108
18109     InlineCandidateInfo* inlineCandidateInfo = nullptr;
18110     impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult);
18111
18112     if (inlineResult.IsFailure())
18113     {
18114         return;
18115     }
18116
18117     // The old value should be NULL
18118     assert(call->gtInlineCandidateInfo == nullptr);
18119
18120     call->gtInlineCandidateInfo = inlineCandidateInfo;
18121
18122     // Mark the call node as inline candidate.
18123     call->gtFlags |= GTF_CALL_INLINE_CANDIDATE;
18124
18125     // Let the strategy know there's another candidate.
18126     impInlineRoot()->m_inlineStrategy->NoteCandidate();
18127
18128     // Since we're not actually inlining yet, and this call site is
18129     // still just an inline candidate, there's nothing to report.
18130     inlineResult.SetReported();
18131 }
18132
18133 /******************************************************************************/
18134 // Returns true if the given intrinsic will be implemented by target-specific
18135 // instructions
18136
18137 bool Compiler::IsTargetIntrinsic(CorInfoIntrinsics intrinsicId)
18138 {
18139 #if defined(_TARGET_AMD64_) || (defined(_TARGET_X86_) && !defined(LEGACY_BACKEND))
18140     switch (intrinsicId)
18141     {
18142         // Amd64 only has SSE2 instructions to directly compute sqrt/abs.
18143         //
18144         // TODO: Because the x86 backend only targets SSE for floating-point code,
18145         //       it does not treat Sine, Cosine, or Round as intrinsics (JIT32
18146         //       implemented those intrinsics as x87 instructions). If this poses
18147         //       a CQ problem, it may be necessary to change the implementation of
18148         //       the helper calls to decrease call overhead or switch back to the
18149         //       x87 instructions. This is tracked by #7097.
18150         case CORINFO_INTRINSIC_Sqrt:
18151         case CORINFO_INTRINSIC_Abs:
18152             return true;
18153
18154         default:
18155             return false;
18156     }
18157 #elif defined(_TARGET_ARM64_)
18158     switch (intrinsicId)
18159     {
18160         case CORINFO_INTRINSIC_Sqrt:
18161         case CORINFO_INTRINSIC_Abs:
18162         case CORINFO_INTRINSIC_Round:
18163             return true;
18164
18165         default:
18166             return false;
18167     }
18168 #elif defined(_TARGET_ARM_)
18169     switch (intrinsicId)
18170     {
18171         case CORINFO_INTRINSIC_Sqrt:
18172         case CORINFO_INTRINSIC_Abs:
18173         case CORINFO_INTRINSIC_Round:
18174             return true;
18175
18176         default:
18177             return false;
18178     }
18179 #elif defined(_TARGET_X86_)
18180     switch (intrinsicId)
18181     {
18182         case CORINFO_INTRINSIC_Sin:
18183         case CORINFO_INTRINSIC_Cos:
18184         case CORINFO_INTRINSIC_Sqrt:
18185         case CORINFO_INTRINSIC_Abs:
18186         case CORINFO_INTRINSIC_Round:
18187             return true;
18188
18189         default:
18190             return false;
18191     }
18192 #else
18193     // TODO: This portion of logic is not implemented for other architectures.
18194     // The reason for returning true is that on all other architectures the only
18195     // intrinsics enabled are target intrinsics.
18196     return true;
18197 #endif //_TARGET_AMD64_
18198 }
18199
18200 /******************************************************************************/
18201 // Returns true if the given intrinsic will be implemented by calling System.Math
18202 // methods.
18203
18204 bool Compiler::IsIntrinsicImplementedByUserCall(CorInfoIntrinsics intrinsicId)
18205 {
18206     // Currently, if a math intrinsic is not implemented by target-specific
18207     // instructions, it will be implemented by a System.Math call. In the
18208     // future, if we turn to implementing some of them with helper calls,
18209     // this predicate needs to be revisited.
18210     return !IsTargetIntrinsic(intrinsicId);
18211 }
18212
18213 bool Compiler::IsMathIntrinsic(CorInfoIntrinsics intrinsicId)
18214 {
18215     switch (intrinsicId)
18216     {
18217         case CORINFO_INTRINSIC_Sin:
18218         case CORINFO_INTRINSIC_Sqrt:
18219         case CORINFO_INTRINSIC_Abs:
18220         case CORINFO_INTRINSIC_Cos:
18221         case CORINFO_INTRINSIC_Round:
18222         case CORINFO_INTRINSIC_Cosh:
18223         case CORINFO_INTRINSIC_Sinh:
18224         case CORINFO_INTRINSIC_Tan:
18225         case CORINFO_INTRINSIC_Tanh:
18226         case CORINFO_INTRINSIC_Asin:
18227         case CORINFO_INTRINSIC_Acos:
18228         case CORINFO_INTRINSIC_Atan:
18229         case CORINFO_INTRINSIC_Atan2:
18230         case CORINFO_INTRINSIC_Log10:
18231         case CORINFO_INTRINSIC_Pow:
18232         case CORINFO_INTRINSIC_Exp:
18233         case CORINFO_INTRINSIC_Ceiling:
18234         case CORINFO_INTRINSIC_Floor:
18235             return true;
18236         default:
18237             return false;
18238     }
18239 }
18240
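// Returns true if the given tree is an intrinsic node for one of the math
// intrinsics listed above.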
18241 bool Compiler::IsMathIntrinsic(GenTreePtr tree)
18242 {
18243     return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->gtIntrinsic.gtIntrinsicId);
18244 }
18245 /*****************************************************************************/