1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
4
5 /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
6 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
7 XX                                                                           XX
8 XX                           Importer                                        XX
9 XX                                                                           XX
10 XX   Imports the given method and converts it to semantic trees              XX
11 XX                                                                           XX
12 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
13 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
14 */
15
16 #include "jitpch.h"
17 #ifdef _MSC_VER
18 #pragma hdrstop
19 #endif
20
21 #include "corexcep.h"
22
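// The following macros are used by the verification logic below: when 'cond' is
// false they record/raise a verification failure via verRaiseVerifyExceptionIfNeeded.
// The "OrReturn" variant also returns from the caller, and the "Speculative" variant
// simply returns false (without raising) when the check is being made speculatively.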
23 #define Verify(cond, msg)                                                                                              \
24     do                                                                                                                 \
25     {                                                                                                                  \
26         if (!(cond))                                                                                                   \
27         {                                                                                                              \
28             verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
29         }                                                                                                              \
30     } while (0)
31
32 #define VerifyOrReturn(cond, msg)                                                                                      \
33     do                                                                                                                 \
34     {                                                                                                                  \
35         if (!(cond))                                                                                                   \
36         {                                                                                                              \
37             verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
38             return;                                                                                                    \
39         }                                                                                                              \
40     } while (0)
41
42 #define VerifyOrReturnSpeculative(cond, msg, speculative)                                                              \
43     do                                                                                                                 \
44     {                                                                                                                  \
45         if (speculative)                                                                                               \
46         {                                                                                                              \
47             if (!(cond))                                                                                               \
48             {                                                                                                          \
49                 return false;                                                                                          \
50             }                                                                                                          \
51         }                                                                                                              \
52         else                                                                                                           \
53         {                                                                                                              \
54             if (!(cond))                                                                                               \
55             {                                                                                                          \
56                 verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                   \
57                 return false;                                                                                          \
58             }                                                                                                          \
59         }                                                                                                              \
60     } while (0)
61
62 /*****************************************************************************/
63
64 void Compiler::impInit()
65 {
66
67 #ifdef DEBUG
68     impTreeList        = nullptr;
69     impTreeLast        = nullptr;
70     impInlinedCodeSize = 0;
71 #endif
72 }
73
74 /*****************************************************************************
75  *
76  *  Pushes the given tree on the stack.
77  */
78
79 void Compiler::impPushOnStack(GenTreePtr tree, typeInfo ti)
80 {
81     /* Check for overflow. If inlining, we may be using a bigger stack */
82
83     if ((verCurrentState.esStackDepth >= info.compMaxStack) &&
84         (verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0)))
85     {
86         BADCODE("stack overflow");
87     }
88
89 #ifdef DEBUG
90     // If we are pushing a struct, make certain we know the precise type!
91     if (tree->TypeGet() == TYP_STRUCT)
92     {
93         assert(ti.IsType(TI_STRUCT));
94         CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle();
95         assert(clsHnd != NO_CLASS_HANDLE);
96     }
97
98     if (tiVerificationNeeded && !ti.IsDead())
99     {
100         assert(typeInfo::AreEquivalent(NormaliseForStack(ti), ti)); // types are normalized
101
102         // The ti type is consistent with the tree type.
103         //
104
105         // On 64-bit systems, nodes whose "proper" type is "native int" get labeled TYP_LONG.
106         // In the verification type system, we always transform "native int" to "TI_INT".
107         // Ideally, we would keep track of which nodes labeled "TYP_LONG" are really "native int", but
108         // attempts to do that have proved too difficult.  Instead, we'll assume that in checks like this,
109         // when there's a mismatch, it's because of this reason -- the typeInfo::AreEquivalentModuloNativeInt
110         // method used in the last disjunct allows exactly this mismatch.
111         assert(ti.IsDead() || ti.IsByRef() && (tree->TypeGet() == TYP_I_IMPL || tree->TypeGet() == TYP_BYREF) ||
112                ti.IsUnboxedGenericTypeVar() && tree->TypeGet() == TYP_REF ||
113                ti.IsObjRef() && tree->TypeGet() == TYP_REF || ti.IsMethod() && tree->TypeGet() == TYP_I_IMPL ||
114                ti.IsType(TI_STRUCT) && tree->TypeGet() != TYP_REF ||
115                typeInfo::AreEquivalentModuloNativeInt(NormaliseForStack(ti),
116                                                       NormaliseForStack(typeInfo(tree->TypeGet()))));
117
118         // If it is a struct type, make certain we normalized the primitive types
119         assert(!ti.IsType(TI_STRUCT) ||
120                info.compCompHnd->getTypeForPrimitiveValueClass(ti.GetClassHandle()) == CORINFO_TYPE_UNDEF);
121     }
122
123 #if VERBOSE_VERIFY
124     if (VERBOSE && tiVerificationNeeded)
125     {
126         printf("\n");
127         printf(TI_DUMP_PADDING);
128         printf("About to push to stack: ");
129         ti.Dump();
130     }
131 #endif // VERBOSE_VERIFY
132
133 #endif // DEBUG
134
135     verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti;
136     verCurrentState.esStack[verCurrentState.esStackDepth++].val      = tree;
137
138     if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
139     {
140         compLongUsed = true;
141     }
142     else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
143     {
144         compFloatingPointUsed = true;
145     }
146 }
147
148 /******************************************************************************/
149 // Used in the inliner, where we can assume typesafe code. Please don't use in the importer!!
150 inline void Compiler::impPushOnStackNoType(GenTreePtr tree)
151 {
152     assert(verCurrentState.esStackDepth < impStkSize);
153     INDEBUG(verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = typeInfo());
154     verCurrentState.esStack[verCurrentState.esStackDepth++].val              = tree;
155
156     if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
157     {
158         compLongUsed = true;
159     }
160     else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
161     {
162         compFloatingPointUsed = true;
163     }
164 }
165
166 inline void Compiler::impPushNullObjRefOnStack()
167 {
168     impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL));
169 }
170
171 // This method gets called when we run into unverifiable code
172 // (and we are verifying the method)
173
174 inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file)
175                                                           DEBUGARG(unsigned line))
176 {
177     // Remember that the code is not verifiable
178     // Note that the method may yet pass canSkipMethodVerification(),
179     // and so the presence of unverifiable code may not be an issue.
180     tiIsVerifiableCode = FALSE;
181
182 #ifdef DEBUG
183     const char* tail = strrchr(file, '\\');
184     if (tail)
185     {
186         file = tail + 1;
187     }
188
189     if (JitConfig.JitBreakOnUnsafeCode())
190     {
191         assert(!"Unsafe code detected");
192     }
193 #endif
194
195     JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
196             msg, info.compFullName, impCurOpcName, impCurOpcOffs));
197
198     if (verNeedsVerification() || compIsForImportOnly())
199     {
200         JITLOG((LL_ERROR, "Verification failure:  %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
201                 msg, info.compFullName, impCurOpcName, impCurOpcOffs));
202         verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line));
203     }
204 }
205
206 inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file)
207                                                                     DEBUGARG(unsigned line))
208 {
209     JITLOG((LL_ERROR, "Verification failure:  %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
210             msg, info.compFullName, impCurOpcName, impCurOpcOffs));
211
212 #ifdef DEBUG
213     //    BreakIfDebuggerPresent();
214     if (getBreakOnBadCode())
215     {
216         assert(!"Typechecking error");
217     }
218 #endif
219
220     RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr);
221     UNREACHABLE();
222 }
223
224 // Helper function that tells us whether the IL instruction at the given code
225 // address consumes an address at the top of the stack. We use it to avoid
226 // unnecessarily marking locals as address-taken (lvAddrTaken).
227 bool Compiler::impILConsumesAddr(const BYTE* codeAddr, CORINFO_METHOD_HANDLE fncHandle, CORINFO_MODULE_HANDLE scpHandle)
228 {
229     assert(!compIsForInlining());
230
231     OPCODE opcode;
232
233     opcode = (OPCODE)getU1LittleEndian(codeAddr);
234
235     switch (opcode)
236     {
237         // case CEE_LDFLDA: We're taking this one out because if you have a sequence
238         // like
239         //
240         //          ldloca.0
241         //          ldflda whatever
242         //
243         // of a primitive-like struct, you end up after morphing with the address of a
244         // local that's not marked as address-taken, which is wrong. Also ldflda is usually
245         // used for structs that contain other structs, which isn't a case we handle very
246         // well now for other reasons.
247
248         case CEE_LDFLD:
249         {
250             // We won't collapse small fields. This is probably not the right place to have this
251             // check, but we're only using the function for this purpose, and it is easy to factor
252             // it out if we need to do so.
253
254             CORINFO_RESOLVED_TOKEN resolvedToken;
255             impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field);
256
257             CORINFO_CLASS_HANDLE clsHnd;
258             var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField, &clsHnd));
259
260             // Preserve 'small' int types
261             if (lclTyp > TYP_INT)
262             {
263                 lclTyp = genActualType(lclTyp);
264             }
265
266             if (varTypeIsSmall(lclTyp))
267             {
268                 return false;
269             }
270
271             return true;
272         }
273         default:
274             break;
275     }
276
277     return false;
278 }
279
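// Read the metadata token at 'addr' and resolve it into '*pResolvedToken' for the
// current scope and context. When verification is enabled we use eeTryResolveToken
// and flag a verification failure if the token does not resolve.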
280 void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind)
281 {
282     pResolvedToken->tokenContext = impTokenLookupContextHandle;
283     pResolvedToken->tokenScope   = info.compScopeHnd;
284     pResolvedToken->token        = getU4LittleEndian(addr);
285     pResolvedToken->tokenType    = kind;
286
287     if (!tiVerificationNeeded)
288     {
289         info.compCompHnd->resolveToken(pResolvedToken);
290     }
291     else
292     {
293         Verify(eeTryResolveToken(pResolvedToken), "Token resolution failed");
294     }
295 }
296
297 /*****************************************************************************
298  *
299  *  Pop one tree from the stack.
300  */
301
302 StackEntry Compiler::impPopStack()
303 {
304     if (verCurrentState.esStackDepth == 0)
305     {
306         BADCODE("stack underflow");
307     }
308
309 #ifdef DEBUG
310 #if VERBOSE_VERIFY
311     if (VERBOSE && tiVerificationNeeded)
312     {
313         JITDUMP("\n");
314         printf(TI_DUMP_PADDING);
315         printf("About to pop from the stack: ");
316         const typeInfo& ti = verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo;
317         ti.Dump();
318     }
319 #endif // VERBOSE_VERIFY
320 #endif // DEBUG
321
322     return verCurrentState.esStack[--verCurrentState.esStackDepth];
323 }
324
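// Pop the top entry and also return the struct class handle recorded for it.
// Note that the handle is read from the just-popped slot (esStackDepth has
// already been decremented by impPopStack).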
325 StackEntry Compiler::impPopStack(CORINFO_CLASS_HANDLE& structType)
326 {
327     StackEntry ret = impPopStack();
328     structType     = verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo.GetClassHandle();
329     return (ret);
330 }
331
332 GenTreePtr Compiler::impPopStack(typeInfo& ti)
333 {
334     StackEntry ret = impPopStack();
335     ti             = ret.seTypeInfo;
336     return (ret.val);
337 }
338
339 /*****************************************************************************
340  *
341  *  Peek at the n'th (0-based) tree on the top of the stack.
342  */
343
344 StackEntry& Compiler::impStackTop(unsigned n)
345 {
346     if (verCurrentState.esStackDepth <= n)
347     {
348         BADCODE("stack underflow");
349     }
350
351     return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1];
352 }
353 /*****************************************************************************
354  *  Some of the trees on the stack are spilled in a special way. When unspilling
355  *  them, or making a copy, they need special handling. The following function
356  *  enumerates the operators that are valid for a spilled stack entry.
357  */
358
359 #ifdef DEBUG // only used in asserts
360 static bool impValidSpilledStackEntry(GenTreePtr tree)
361 {
362     if (tree->gtOper == GT_LCL_VAR)
363     {
364         return true;
365     }
366
367     if (tree->OperIsConst())
368     {
369         return true;
370     }
371
372     return false;
373 }
374 #endif
375
376 /*****************************************************************************
377  *
378  *  The following logic is used to save/restore stack contents.
379  *  If 'copy' is true, then we make a copy of the trees on the stack. These
380  *  have to all be cloneable/spilled values.
381  */
382
383 void Compiler::impSaveStackState(SavedStack* savePtr, bool copy)
384 {
385     savePtr->ssDepth = verCurrentState.esStackDepth;
386
387     if (verCurrentState.esStackDepth)
388     {
389         savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth];
390         size_t saveSize  = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees);
391
392         if (copy)
393         {
394             StackEntry* table = savePtr->ssTrees;
395
396             /* Make a fresh copy of all the stack entries */
397
398             for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++)
399             {
400                 table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo;
401                 GenTreePtr tree   = verCurrentState.esStack[level].val;
402
403                 assert(impValidSpilledStackEntry(tree));
404
405                 switch (tree->gtOper)
406                 {
407                     case GT_CNS_INT:
408                     case GT_CNS_LNG:
409                     case GT_CNS_DBL:
410                     case GT_CNS_STR:
411                     case GT_LCL_VAR:
412                         table->val = gtCloneExpr(tree);
413                         break;
414
415                     default:
416                         assert(!"Bad oper - Not covered by impValidSpilledStackEntry()");
417                         break;
418                 }
419             }
420         }
421         else
422         {
423             memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize);
424         }
425     }
426 }
427
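// Restore the stack contents previously captured by impSaveStackState.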
428 void Compiler::impRestoreStackState(SavedStack* savePtr)
429 {
430     verCurrentState.esStackDepth = savePtr->ssDepth;
431
432     if (verCurrentState.esStackDepth)
433     {
434         memcpy(verCurrentState.esStack, savePtr->ssTrees,
435                verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack));
436     }
437 }
438
439 /*****************************************************************************
440  *
441  *  Get the tree list started for a new basic block.
442  */
443 inline void Compiler::impBeginTreeList()
444 {
445     assert(impTreeList == nullptr && impTreeLast == nullptr);
446
447     impTreeList = impTreeLast = new (this, GT_BEG_STMTS) GenTree(GT_BEG_STMTS, TYP_VOID);
448 }
449
450 /*****************************************************************************
451  *
452  *  Store the given start and end stmt in the given basic block. This is
453  *  mostly called by impEndTreeList(BasicBlock *block). It is called
454  *  directly only for handling CEE_LEAVEs out of finally-protected try's.
455  */
456
457 inline void Compiler::impEndTreeList(BasicBlock* block, GenTreePtr firstStmt, GenTreePtr lastStmt)
458 {
459     assert(firstStmt->gtOper == GT_STMT);
460     assert(lastStmt->gtOper == GT_STMT);
461
462     /* Make the list circular, so that we can easily walk it backwards */
463
464     firstStmt->gtPrev = lastStmt;
465
466     /* Store the tree list in the basic block */
467
468     block->bbTreeList = firstStmt;
469
470     /* The block should not already be marked as imported */
471     assert((block->bbFlags & BBF_IMPORTED) == 0);
472
473     block->bbFlags |= BBF_IMPORTED;
474 }
475
476 /*****************************************************************************
477  *
478  *  Store the current tree list in the given basic block.
479  */
480
481 inline void Compiler::impEndTreeList(BasicBlock* block)
482 {
483     assert(impTreeList->gtOper == GT_BEG_STMTS);
484
485     GenTreePtr firstTree = impTreeList->gtNext;
486
487     if (!firstTree)
488     {
489         /* The block should not already be marked as imported */
490         assert((block->bbFlags & BBF_IMPORTED) == 0);
491
492         // Empty block. Just mark it as imported
493         block->bbFlags |= BBF_IMPORTED;
494     }
495     else
496     {
497         // Ignore the GT_BEG_STMTS
498         assert(firstTree->gtPrev == impTreeList);
499
500         impEndTreeList(block, firstTree, impTreeLast);
501     }
502
503 #ifdef DEBUG
504     if (impLastILoffsStmt != nullptr)
505     {
506         impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
507         impLastILoffsStmt                          = nullptr;
508     }
509
510     impTreeList = impTreeLast = nullptr;
511 #endif
512 }
513
514 /*****************************************************************************
515  *
516  *  Check that appending the given statement doesn't mess up the semantic order. Note
517  *  that this has only limited value as we can only check [0..chkLevel).
518  */
519
520 inline void Compiler::impAppendStmtCheck(GenTreePtr stmt, unsigned chkLevel)
521 {
522 #ifndef DEBUG
523     return;
524 #else
525     assert(stmt->gtOper == GT_STMT);
526
527     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
528     {
529         chkLevel = verCurrentState.esStackDepth;
530     }
531
532     if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE)
533     {
534         return;
535     }
536
537     GenTreePtr tree = stmt->gtStmt.gtStmtExpr;
538
539     // Calls can only be appended if there are no trees with GTF_GLOB_EFFECT on the stack
540
541     if (tree->gtFlags & GTF_CALL)
542     {
543         for (unsigned level = 0; level < chkLevel; level++)
544         {
545             assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0);
546         }
547     }
548
549     if (tree->gtOper == GT_ASG)
550     {
551         // For an assignment to a local variable, all references of that
552         // variable have to be spilled. If it is aliased, all calls and
553         // indirect accesses have to be spilled
554
555         if (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR)
556         {
557             unsigned lclNum = tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
558             for (unsigned level = 0; level < chkLevel; level++)
559             {
560                 assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum, false));
561                 assert(!lvaTable[lclNum].lvAddrExposed ||
562                        (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0);
563             }
564         }
565
566         // If the access may be to global memory, all side effects have to be spilled.
567
568         else if (tree->gtOp.gtOp1->gtFlags & GTF_GLOB_REF)
569         {
570             for (unsigned level = 0; level < chkLevel; level++)
571             {
572                 assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0);
573             }
574         }
575     }
576 #endif
577 }
578
579 /*****************************************************************************
580  *
581  *  Append the given GT_STMT node to the current block's tree list.
582  *  [0..chkLevel) is the portion of the stack which we will check for
583  *    interference with stmt and spill if needed.
584  */
585
586 inline void Compiler::impAppendStmt(GenTreePtr stmt, unsigned chkLevel)
587 {
588     assert(stmt->gtOper == GT_STMT);
589     noway_assert(impTreeLast != nullptr);
590
591     /* If the statement being appended has any side-effects, check the stack
592        to see if anything needs to be spilled to preserve correct ordering. */
593
594     GenTreePtr expr  = stmt->gtStmt.gtStmtExpr;
595     unsigned   flags = expr->gtFlags & GTF_GLOB_EFFECT;
596
597     // Assignments to (unaliased) locals don't count as side-effects, as
598     // we handle them specially using impSpillLclRefs(). Temp locals should
599     // be fine too.
600
601     if ((expr->gtOper == GT_ASG) && (expr->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
602         !(expr->gtOp.gtOp1->gtFlags & GTF_GLOB_REF) && !gtHasLocalsWithAddrOp(expr->gtOp.gtOp2))
603     {
604         unsigned op2Flags = expr->gtOp.gtOp2->gtFlags & GTF_GLOB_EFFECT;
605         assert(flags == (op2Flags | GTF_ASG));
606         flags = op2Flags;
607     }
608
609     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
610     {
611         chkLevel = verCurrentState.esStackDepth;
612     }
613
614     if (chkLevel && chkLevel != (unsigned)CHECK_SPILL_NONE)
615     {
616         assert(chkLevel <= verCurrentState.esStackDepth);
617
618         if (flags)
619         {
620             // If there is a call, we have to spill global refs
621             bool spillGlobEffects = (flags & GTF_CALL) ? true : false;
622
623             if (expr->gtOper == GT_ASG)
624             {
625                 GenTree* lhs = expr->gtGetOp1();
626                 // If we are assigning to a global ref, we have to spill global refs on stack.
627                 // TODO-1stClassStructs: Previously, spillGlobEffects was set to true for
628                 // GT_INITBLK and GT_COPYBLK, but this is overly conservative, and should be
629                 // revisited. (Note that it was NOT set to true for GT_COPYOBJ.)
630                 if (!expr->OperIsBlkOp())
631                 {
632                     // If we are assigning to a global ref, we have to spill global refs on stack
633                     if ((lhs->gtFlags & GTF_GLOB_REF) != 0)
634                     {
635                         spillGlobEffects = true;
636                     }
637                 }
638                 else if ((lhs->OperIsBlk() && !lhs->AsBlk()->HasGCPtr()) ||
639                          ((lhs->OperGet() == GT_LCL_VAR) &&
640                           (lvaTable[lhs->AsLclVarCommon()->gtLclNum].lvStructGcCount == 0)))
641                 {
642                     spillGlobEffects = true;
643                 }
644             }
645
646             impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt"));
647         }
648         else
649         {
650             impSpillSpecialSideEff();
651         }
652     }
653
654     impAppendStmtCheck(stmt, chkLevel);
655
656     /* Point 'prev' at the previous node, so that we can walk backwards */
657
658     stmt->gtPrev = impTreeLast;
659
660     /* Append the expression statement to the list */
661
662     impTreeLast->gtNext = stmt;
663     impTreeLast         = stmt;
664
665 #ifdef FEATURE_SIMD
666     impMarkContiguousSIMDFieldAssignments(stmt);
667 #endif
668
669     /* Once we set impCurStmtOffs in an appended tree, we are ready to
670        report the following offsets. So reset impCurStmtOffs */
671
672     if (impTreeLast->gtStmt.gtStmtILoffsx == impCurStmtOffs)
673     {
674         impCurStmtOffsSet(BAD_IL_OFFSET);
675     }
676
677 #ifdef DEBUG
678     if (impLastILoffsStmt == nullptr)
679     {
680         impLastILoffsStmt = stmt;
681     }
682
683     if (verbose)
684     {
685         printf("\n\n");
686         gtDispTree(stmt);
687     }
688 #endif
689 }
690
691 /*****************************************************************************
692  *
693  *  Insert the given GT_STMT "stmt" before GT_STMT "stmtBefore"
694  */
695
696 inline void Compiler::impInsertStmtBefore(GenTreePtr stmt, GenTreePtr stmtBefore)
697 {
698     assert(stmt->gtOper == GT_STMT);
699     assert(stmtBefore->gtOper == GT_STMT);
700
701     GenTreePtr stmtPrev = stmtBefore->gtPrev;
702     stmt->gtPrev        = stmtPrev;
703     stmt->gtNext        = stmtBefore;
704     stmtPrev->gtNext    = stmt;
705     stmtBefore->gtPrev  = stmt;
706 }
707
708 /*****************************************************************************
709  *
710  *  Append the given expression tree to the current block's tree list.
711  *  Return the newly created statement.
712  */
713
714 GenTreePtr Compiler::impAppendTree(GenTreePtr tree, unsigned chkLevel, IL_OFFSETX offset)
715 {
716     assert(tree);
717
718     /* Allocate an 'expression statement' node */
719
720     GenTreePtr expr = gtNewStmt(tree, offset);
721
722     /* Append the statement to the current block's stmt list */
723
724     impAppendStmt(expr, chkLevel);
725
726     return expr;
727 }
728
729 /*****************************************************************************
730  *
731  *  Insert the given expression tree before GT_STMT "stmtBefore"
732  */
733
734 void Compiler::impInsertTreeBefore(GenTreePtr tree, IL_OFFSETX offset, GenTreePtr stmtBefore)
735 {
736     assert(stmtBefore->gtOper == GT_STMT);
737
738     /* Allocate an 'expression statement' node */
739
740     GenTreePtr expr = gtNewStmt(tree, offset);
741
742     /* Insert the statement before 'stmtBefore' in the current block's stmt list */
743
744     impInsertStmtBefore(expr, stmtBefore);
745 }
746
747 /*****************************************************************************
748  *
749  *  Append an assignment of the given value to a temp to the current tree list.
750  *  curLevel is the stack level for which the spill to the temp is being done.
751  */
752
753 void Compiler::impAssignTempGen(unsigned    tmp,
754                                 GenTreePtr  val,
755                                 unsigned    curLevel,
756                                 GenTreePtr* pAfterStmt, /* = NULL */
757                                 IL_OFFSETX  ilOffset,   /* = BAD_IL_OFFSET */
758                                 BasicBlock* block       /* = NULL */
759                                 )
760 {
761     GenTreePtr asg = gtNewTempAssign(tmp, val);
762
763     if (!asg->IsNothingNode())
764     {
765         if (pAfterStmt)
766         {
767             GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
768             *pAfterStmt        = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
769         }
770         else
771         {
772             impAppendTree(asg, curLevel, impCurStmtOffs);
773         }
774     }
775 }
776
777 /*****************************************************************************
778  * same as above, but handle the valueclass case too
779  */
780
781 void Compiler::impAssignTempGen(unsigned             tmpNum,
782                                 GenTreePtr           val,
783                                 CORINFO_CLASS_HANDLE structType,
784                                 unsigned             curLevel,
785                                 GenTreePtr*          pAfterStmt, /* = NULL */
786                                 IL_OFFSETX           ilOffset,   /* = BAD_IL_OFFSET */
787                                 BasicBlock*          block       /* = NULL */
788                                 )
789 {
790     GenTreePtr asg;
791
792     if (varTypeIsStruct(val))
793     {
794         assert(tmpNum < lvaCount);
795         assert(structType != NO_CLASS_HANDLE);
796
797         // If the method is non-verifiable, the assert may not hold, so at least
798         // ignore it when verification is turned on, since any block that tries
799         // to use the temp would have failed verification.
800         var_types varType = lvaTable[tmpNum].lvType;
801         assert(tiVerificationNeeded || varType == TYP_UNDEF || varTypeIsStruct(varType));
802         lvaSetStruct(tmpNum, structType, false);
803
804         // Now, set the type of the struct value. Note that lvaSetStruct may modify the type
805         // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType)
806         // that has been passed in for the value being assigned to the temp, in which case we
807         // need to set 'val' to that same type.
808         // Note also that if we always normalized the types of any node that might be a struct
809         // type, this would not be necessary - but that requires additional JIT/EE interface
810         // calls that may not actually be required - e.g. if we only access a field of a struct.
811
812         val->gtType = lvaTable[tmpNum].lvType;
813
814         GenTreePtr dst = gtNewLclvNode(tmpNum, val->gtType);
815         asg            = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, block);
816     }
817     else
818     {
819         asg = gtNewTempAssign(tmpNum, val);
820     }
821
822     if (!asg->IsNothingNode())
823     {
824         if (pAfterStmt)
825         {
826             GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
827             *pAfterStmt        = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
828         }
829         else
830         {
831             impAppendTree(asg, curLevel, impCurStmtOffs);
832         }
833     }
834 }
835
836 /*****************************************************************************
837  *
838  *  Pop the given number of values from the stack and return a list node with
839  *  their values.
840  *  The 'prefixTree' argument may optionally contain an argument
841  *  list that is prepended to the list returned from this function.
842  *
843  *  The notion of prepended is a bit misleading in that the list is backwards
844  *  from the way I would expect: The first element popped is at the end of
845  *  the returned list, and prefixTree is 'before' that, meaning closer to
846  *  the end of the list.  To get to prefixTree, you have to walk to the
847  *  end of the list.
848  *
849  *  For ARG_ORDER_R2L prefixTree is only used to insert extra arguments, as
850  *  such we reverse its meaning such that returnValue has a reversed
851  *  prefixTree at the head of the list.
852  */
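// Illustrative example (the argument names here are hypothetical): popping two
// entries a1, a2 (a2 on top of the stack) with prefixTree = [p1, p2] yields
//   ARG_ORDER_R2L: [p2, p1, a1, a2]   (reversed prefixTree at the head)
//   ARG_ORDER_L2R: [a1, a2, p1, p2]   (prefixTree at the tail)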
853
854 GenTreeArgList* Compiler::impPopList(unsigned          count,
855                                      unsigned*         flagsPtr,
856                                      CORINFO_SIG_INFO* sig,
857                                      GenTreeArgList*   prefixTree)
858 {
859     assert(sig == nullptr || count == sig->numArgs);
860
861     unsigned             flags = 0;
862     CORINFO_CLASS_HANDLE structType;
863     GenTreeArgList*      treeList;
864
865     if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
866     {
867         treeList = nullptr;
868     }
869     else
870     { // ARG_ORDER_L2R
871         treeList = prefixTree;
872     }
873
874     while (count--)
875     {
876         StackEntry se   = impPopStack();
877         typeInfo   ti   = se.seTypeInfo;
878         GenTreePtr temp = se.val;
879
880         if (varTypeIsStruct(temp))
881         {
882             // Morph trees that aren't already OBJs or MKREFANY to be OBJs
883             assert(ti.IsType(TI_STRUCT));
884             structType = ti.GetClassHandleForValueClass();
885             temp       = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL);
886         }
887
888         /* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */
889         flags |= temp->gtFlags;
890         treeList = gtNewListNode(temp, treeList);
891     }
892
893     *flagsPtr = flags;
894
895     if (sig != nullptr)
896     {
897         if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
898             sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR)
899         {
900             // Make sure that all valuetypes (including enums) that we push are loaded.
901             // This is to guarantee that if a GC is triggered from the prestub of this method,
902             // all valuetypes in the method signature are already loaded.
903             // We need to be able to find the size of the valuetypes, but we cannot
904             // do a class-load from within GC.
905             info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass);
906         }
907
908         CORINFO_ARG_LIST_HANDLE argLst = sig->args;
909         CORINFO_CLASS_HANDLE    argClass;
910         CORINFO_CLASS_HANDLE    argRealClass;
911         GenTreeArgList*         args;
912         unsigned                sigSize;
913
914         for (args = treeList, count = sig->numArgs; count > 0; args = args->Rest(), count--)
915         {
916             PREFIX_ASSUME(args != nullptr);
917
918             CorInfoType corType = strip(info.compCompHnd->getArgType(sig, argLst, &argClass));
919
920             // insert implied casts (from float to double or double to float)
921
922             if (corType == CORINFO_TYPE_DOUBLE && args->Current()->TypeGet() == TYP_FLOAT)
923             {
924                 args->Current() = gtNewCastNode(TYP_DOUBLE, args->Current(), TYP_DOUBLE);
925             }
926             else if (corType == CORINFO_TYPE_FLOAT && args->Current()->TypeGet() == TYP_DOUBLE)
927             {
928                 args->Current() = gtNewCastNode(TYP_FLOAT, args->Current(), TYP_FLOAT);
929             }
930
931             // insert any widening or narrowing casts for backwards compatibility
932
933             args->Current() = impImplicitIorI4Cast(args->Current(), JITtype2varType(corType));
934
935             if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR &&
936                 corType != CORINFO_TYPE_VAR && (argRealClass = info.compCompHnd->getArgClass(sig, argLst)) != nullptr)
937             {
938                 // Everett MC++ could generate IL with mismatched valuetypes. It used to work with the Everett JIT,
939                 // but it stopped working in Whidbey when we started passing simple valuetypes as underlying
940                 // primitive types.
941                 // We will try to adjust for this case here to avoid breaking customers' code (see VSW 485789 for
942                 // details).
943                 if (corType == CORINFO_TYPE_VALUECLASS && !varTypeIsStruct(args->Current()))
944                 {
945                     args->Current() = impNormStructVal(args->Current(), argRealClass, (unsigned)CHECK_SPILL_ALL, true);
946                 }
947
948                 // Make sure that all valuetypes (including enums) that we push are loaded.
949                 // This is to guarantee that if a GC is triggered from the prestub of this method,
950                 // all valuetypes in the method signature are already loaded.
951                 // We need to be able to find the size of the valuetypes, but we cannot
952                 // do a class-load from within GC.
953                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass);
954             }
955
956             argLst = info.compCompHnd->getArgNext(argLst);
957         }
958     }
959
960     if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
961     {
962         // Prepend the prefixTree
963
964         // Simple in-place reversal to place treeList
965         // at the end of a reversed prefixTree
966         while (prefixTree != nullptr)
967         {
968             GenTreeArgList* next = prefixTree->Rest();
969             prefixTree->Rest()   = treeList;
970             treeList             = prefixTree;
971             prefixTree           = next;
972         }
973     }
974     return treeList;
975 }
976
977 /*****************************************************************************
978  *
979  *  Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.)
980  *  The first "skipReverseCount" items are not reversed.
981  */
982
983 GenTreeArgList* Compiler::impPopRevList(unsigned          count,
984                                         unsigned*         flagsPtr,
985                                         CORINFO_SIG_INFO* sig,
986                                         unsigned          skipReverseCount)
987
988 {
989     assert(skipReverseCount <= count);
990
991     GenTreeArgList* list = impPopList(count, flagsPtr, sig);
992
993     // reverse the list
994     if (list == nullptr || skipReverseCount == count)
995     {
996         return list;
997     }
998
999     GenTreeArgList* ptr          = nullptr; // Initialized to the first node that needs to be reversed
1000     GenTreeArgList* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed
1001
1002     if (skipReverseCount == 0)
1003     {
1004         ptr = list;
1005     }
1006     else
1007     {
1008         lastSkipNode = list;
1009         // Get to the first node that needs to be reversed
1010         for (unsigned i = 0; i < skipReverseCount - 1; i++)
1011         {
1012             lastSkipNode = lastSkipNode->Rest();
1013         }
1014
1015         PREFIX_ASSUME(lastSkipNode != nullptr);
1016         ptr = lastSkipNode->Rest();
1017     }
1018
1019     GenTreeArgList* reversedList = nullptr;
1020
1021     do
1022     {
1023         GenTreeArgList* tmp = ptr->Rest();
1024         ptr->Rest()         = reversedList;
1025         reversedList        = ptr;
1026         ptr                 = tmp;
1027     } while (ptr != nullptr);
1028
1029     if (skipReverseCount)
1030     {
1031         lastSkipNode->Rest() = reversedList;
1032         return list;
1033     }
1034     else
1035     {
1036         return reversedList;
1037     }
1038 }
1039
1040 /*****************************************************************************
1041    Assign (copy) the structure from 'src' to 'dest'.  The structure is a value
1042    class of type 'clsHnd'.  It returns the tree that should be appended to the
1043    statement list that represents the assignment.
1044    Temp assignments may be appended to impTreeList if spilling is necessary.
1045    curLevel is the stack level for which a spill may be being done.
1046  */
1047
1048 GenTreePtr Compiler::impAssignStruct(GenTreePtr           dest,
1049                                      GenTreePtr           src,
1050                                      CORINFO_CLASS_HANDLE structHnd,
1051                                      unsigned             curLevel,
1052                                      GenTreePtr*          pAfterStmt, /* = NULL */
1053                                      BasicBlock*          block       /* = NULL */
1054                                      )
1055 {
1056     assert(varTypeIsStruct(dest));
1057
1058     while (dest->gtOper == GT_COMMA)
1059     {
1060         assert(varTypeIsStruct(dest->gtOp.gtOp2)); // Second thing is the struct
1061
1062         // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree.
1063         if (pAfterStmt)
1064         {
1065             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(dest->gtOp.gtOp1, impCurStmtOffs));
1066         }
1067         else
1068         {
1069             impAppendTree(dest->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1070         }
1071
1072         // set dest to the second thing
1073         dest = dest->gtOp.gtOp2;
1074     }
1075
1076     assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD ||
1077            dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX);
1078
1079     if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR &&
1080         src->gtLclVarCommon.gtLclNum == dest->gtLclVarCommon.gtLclNum)
1081     {
1082         // Make this a NOP
1083         return gtNewNothingNode();
1084     }
1085
1086     // TODO-1stClassStructs: Avoid creating an address if it is not needed,
1087     // or re-creating a Blk node if it is.
1088     GenTreePtr destAddr;
1089
1090     if (dest->gtOper == GT_IND || dest->OperIsBlk())
1091     {
1092         destAddr = dest->gtOp.gtOp1;
1093     }
1094     else
1095     {
1096         destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
1097     }
1098
1099     return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, block));
1100 }
1101
1102 /*****************************************************************************/
1103
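// Create the assignment of the struct value 'src' to the location addressed by 'destAddr',
// handling the special source forms (calls returning via a hidden retbuf or in registers,
// GT_RET_EXPR, GT_MKREFANY, GT_COMMA and block nodes). The returned tree should be appended
// by the caller; any side effects that must come first are appended here (or after *pAfterStmt).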
1104 GenTreePtr Compiler::impAssignStructPtr(GenTreePtr           destAddr,
1105                                         GenTreePtr           src,
1106                                         CORINFO_CLASS_HANDLE structHnd,
1107                                         unsigned             curLevel,
1108                                         GenTreePtr*          pAfterStmt, /* = NULL */
1109                                         BasicBlock*          block       /* = NULL */
1110                                         )
1111 {
1112     var_types  destType;
1113     GenTreePtr dest      = nullptr;
1114     unsigned   destFlags = 0;
1115
1116 #if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1117     assert(varTypeIsStruct(src) || (src->gtOper == GT_ADDR && src->TypeGet() == TYP_BYREF));
1118     // TODO-ARM-BUG: Does ARM need this?
1119     // TODO-ARM64-BUG: Does ARM64 need this?
1120     assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1121            src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1122            src->gtOper == GT_COMMA || src->gtOper == GT_ADDR ||
1123            (src->TypeGet() != TYP_STRUCT && (GenTree::OperIsSIMD(src->gtOper) || src->gtOper == GT_LCL_FLD)));
1124 #else  // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1125     assert(varTypeIsStruct(src));
1126
1127     assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1128            src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1129            src->gtOper == GT_COMMA ||
1130            (src->TypeGet() != TYP_STRUCT && (GenTree::OperIsSIMD(src->gtOper) || src->gtOper == GT_LCL_FLD)));
1131 #endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1132     if (destAddr->OperGet() == GT_ADDR)
1133     {
1134         GenTree* destNode = destAddr->gtGetOp1();
1135         // If the actual destination is a local (for non-LEGACY_BACKEND), or already a block node, or is a node that
1136         // will be morphed, don't insert an OBJ(ADDR).
1137         if (destNode->gtOper == GT_INDEX || destNode->OperIsBlk()
1138 #ifndef LEGACY_BACKEND
1139             || ((destNode->OperGet() == GT_LCL_VAR) && (destNode->TypeGet() == src->TypeGet()))
1140 #endif // !LEGACY_BACKEND
1141                 )
1142         {
1143             dest = destNode;
1144         }
1145         destType = destNode->TypeGet();
1146     }
1147     else
1148     {
1149         destType = src->TypeGet();
1150     }
1151
1152     var_types asgType = src->TypeGet();
1153
1154     if (src->gtOper == GT_CALL)
1155     {
1156         if (src->AsCall()->TreatAsHasRetBufArg(this))
1157         {
1158             // Case of call returning a struct via hidden retbuf arg
1159
1160             // insert the return value buffer into the argument list as first byref parameter
1161             src->gtCall.gtCallArgs = gtNewListNode(destAddr, src->gtCall.gtCallArgs);
1162
1163             // now returns void, not a struct
1164             src->gtType = TYP_VOID;
1165
1166             // return the morphed call node
1167             return src;
1168         }
1169         else
1170         {
1171             // Case of call returning a struct in one or more registers.
1172
1173             var_types returnType = (var_types)src->gtCall.gtReturnType;
1174
1175             // We won't use a return buffer, so change the type of src->gtType to 'returnType'
1176             src->gtType = genActualType(returnType);
1177
1178             // First we try to change this to "LclVar/LclFld = call"
1179             //
1180             if ((destAddr->gtOper == GT_ADDR) && (destAddr->gtOp.gtOp1->gtOper == GT_LCL_VAR))
1181             {
1182                 // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD.
1183                 // That is, the IR will be of the form lclVar = call for multi-reg return
1184                 //
1185                 GenTreePtr lcl = destAddr->gtOp.gtOp1;
1186                 if (src->AsCall()->HasMultiRegRetVal())
1187                 {
1188                     // Mark the struct LclVar as used in a MultiReg return context
1189                     //  which currently makes it non promotable.
1190                     // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1191                     // handle multireg returns.
1192                     lcl->gtFlags |= GTF_DONT_CSE;
1193                     lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1194                 }
1195                 else // The call result is not a multireg return
1196                 {
1197                     // We change this to a GT_LCL_FLD (from a GT_ADDR of a GT_LCL_VAR)
1198                     lcl->ChangeOper(GT_LCL_FLD);
1199                     fgLclFldAssign(lcl->gtLclVarCommon.gtLclNum);
1200                 }
1201
1202                 lcl->gtType = src->gtType;
1203                 asgType     = src->gtType;
1204                 dest        = lcl;
1205
1206 #if defined(_TARGET_ARM_)
1207                 // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case,
1208                 // but that method has not been updated to include ARM.
1209                 impMarkLclDstNotPromotable(lcl->gtLclVarCommon.gtLclNum, src, structHnd);
1210                 lcl->gtFlags |= GTF_DONT_CSE;
1211 #elif defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1212                 // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
1213                 assert(!src->gtCall.IsVarargs() && "varargs not allowed for System V OSs.");
1214
1215                 // Make the struct non promotable. The eightbytes could contain multiple fields.
1216                 // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1217                 // handle multireg returns.
1218                 // TODO-Cleanup: Why is this needed here? This seems that it will set this even for
1219                 // non-multireg returns.
1220                 lcl->gtFlags |= GTF_DONT_CSE;
1221                 lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1222 #endif
1223             }
1224             else // we don't have a GT_ADDR of a GT_LCL_VAR
1225             {
1226                 // !!! The destination could be on stack. !!!
1227                 // This flag will let us choose the correct write barrier.
1228                 asgType   = returnType;
1229                 destFlags = GTF_IND_TGTANYWHERE;
1230             }
1231         }
1232     }
1233     else if (src->gtOper == GT_RET_EXPR)
1234     {
1235         GenTreePtr call = src->gtRetExpr.gtInlineCandidate;
1236         noway_assert(call->gtOper == GT_CALL);
1237
1238         if (call->AsCall()->HasRetBufArg())
1239         {
1240             // insert the return value buffer into the argument list as first byref parameter
1241             call->gtCall.gtCallArgs = gtNewListNode(destAddr, call->gtCall.gtCallArgs);
1242
1243             // now returns void, not a struct
1244             src->gtType  = TYP_VOID;
1245             call->gtType = TYP_VOID;
1246
1247             // We have already appended the write to 'dest' via the GT_CALL's args,
1248             // so now we just return an empty node (pruning the GT_RET_EXPR).
1249             return src;
1250         }
1251         else
1252         {
1253             // Case of inline method returning a struct in one or more registers.
1254             //
1255             var_types returnType = (var_types)call->gtCall.gtReturnType;
1256
1257             // We won't need a return buffer
1258             asgType      = returnType;
1259             src->gtType  = genActualType(returnType);
1260             call->gtType = src->gtType;
1261
1262             // If we've changed the type, and it no longer matches a local destination,
1263             // we must use an indirection.
1264             if ((dest != nullptr) && (dest->OperGet() == GT_LCL_VAR) && (dest->TypeGet() != asgType))
1265             {
1266                 dest = nullptr;
1267             }
1268
1269             // !!! The destination could be on stack. !!!
1270             // This flag will let us choose the correct write barrier.
1271             destFlags = GTF_IND_TGTANYWHERE;
1272         }
1273     }
1274     else if (src->OperIsBlk())
1275     {
1276         asgType = impNormStructType(structHnd);
1277         if (src->gtOper == GT_OBJ)
1278         {
1279             assert(src->gtObj.gtClass == structHnd);
1280         }
1281     }
1282     else if (src->gtOper == GT_INDEX)
1283     {
1284         asgType = impNormStructType(structHnd);
1285         assert(src->gtIndex.gtStructElemClass == structHnd);
1286     }
1287     else if (src->gtOper == GT_MKREFANY)
1288     {
1289         // Since we are assigning the result of a GT_MKREFANY,
1290         // "destAddr" must point to a refany.
1291
1292         GenTreePtr destAddrClone;
1293         destAddr =
1294             impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment"));
1295
1296         assert(offsetof(CORINFO_RefAny, dataPtr) == 0);
1297         assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF);
1298         GetZeroOffsetFieldMap()->Set(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));
1299         GenTreePtr     ptrSlot         = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr);
1300         GenTreeIntCon* typeFieldOffset = gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL);
1301         typeFieldOffset->gtFieldSeq    = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField());
1302         GenTreePtr typeSlot =
1303             gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset));
1304
1305         // append the assign of the pointer value
1306         GenTreePtr asg = gtNewAssignNode(ptrSlot, src->gtOp.gtOp1);
1307         if (pAfterStmt)
1308         {
1309             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(asg, impCurStmtOffs));
1310         }
1311         else
1312         {
1313             impAppendTree(asg, curLevel, impCurStmtOffs);
1314         }
1315
1316         // return the assign of the type value, to be appended
1317         return gtNewAssignNode(typeSlot, src->gtOp.gtOp2);
1318     }
1319     else if (src->gtOper == GT_COMMA)
1320     {
1321         // The second thing is the struct or its address.
1322         assert(varTypeIsStruct(src->gtOp.gtOp2) || src->gtOp.gtOp2->gtType == TYP_BYREF);
1323         if (pAfterStmt)
1324         {
1325             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(src->gtOp.gtOp1, impCurStmtOffs));
1326         }
1327         else
1328         {
1329             impAppendTree(src->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1330         }
1331
1332         // Evaluate the second thing using recursion.
1333         return impAssignStructPtr(destAddr, src->gtOp.gtOp2, structHnd, curLevel, pAfterStmt, block);
1334     }
1335     else if (src->IsLocal())
1336     {
1337         asgType = src->TypeGet();
1338     }
1339     else if (asgType == TYP_STRUCT)
1340     {
1341         asgType     = impNormStructType(structHnd);
1342         src->gtType = asgType;
1343 #ifdef LEGACY_BACKEND
1344         if (asgType == TYP_STRUCT)
1345         {
1346             GenTree* srcAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, src);
1347             src              = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
1348         }
1349 #endif
1350     }
1351     if (dest == nullptr)
1352     {
1353         // TODO-1stClassStructs: We shouldn't really need a block node as the destination
1354         // if this is a known struct type.
1355         if (asgType == TYP_STRUCT)
1356         {
1357             dest = gtNewObjNode(structHnd, destAddr);
1358             gtSetObjGcInfo(dest->AsObj());
1359             // Although an obj as a call argument was always assumed to be a globRef
1360             // (which is itself overly conservative), that is not true of the operands
1361             // of a block assignment.
1362             dest->gtFlags &= ~GTF_GLOB_REF;
1363             dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF);
1364         }
1365         else if (varTypeIsStruct(asgType))
1366         {
1367             dest = new (this, GT_BLK) GenTreeBlk(GT_BLK, asgType, destAddr, genTypeSize(asgType));
1368         }
1369         else
1370         {
1371             dest = gtNewOperNode(GT_IND, asgType, destAddr);
1372         }
1373     }
1374     else
1375     {
1376         dest->gtType = asgType;
1377     }
1378
1379     dest->gtFlags |= destFlags;
1380     destFlags = dest->gtFlags;
1381
1382     // return an assignment node, to be appended
1383     GenTree* asgNode = gtNewAssignNode(dest, src);
1384     gtBlockOpInit(asgNode, dest, src, false);
1385
1386     // TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs
1387     // of assignments.
1388     if ((destFlags & GTF_DONT_CSE) == 0)
1389     {
1390         dest->gtFlags &= ~(GTF_DONT_CSE);
1391     }
1392     return asgNode;
1393 }
1394
1395 /*****************************************************************************
1396    Given a struct value and the class handle for that struct, return
1397    the expression for the address of that struct value.
1398
1399    willDeref - true if the caller guarantees it will dereference the returned pointer.
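
   If the value is a call, RET_EXPR, MKREFANY, or an OBJ that the caller does not
   promise to dereference, it is first assigned to a new temp and the address of
   that temp is returned.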
1400 */
1401
1402 GenTreePtr Compiler::impGetStructAddr(GenTreePtr           structVal,
1403                                       CORINFO_CLASS_HANDLE structHnd,
1404                                       unsigned             curLevel,
1405                                       bool                 willDeref)
1406 {
1407     assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd));
1408
1409     var_types type = structVal->TypeGet();
1410
1411     genTreeOps oper = structVal->gtOper;
1412
1413     if (oper == GT_OBJ && willDeref)
1414     {
1415         assert(structVal->gtObj.gtClass == structHnd);
1416         return (structVal->gtObj.Addr());
1417     }
1418     else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY)
1419     {
1420         unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1421
1422         impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1423
1424         // The 'return value' is now the temp itself
1425
1426         type            = genActualType(lvaTable[tmpNum].TypeGet());
1427         GenTreePtr temp = gtNewLclvNode(tmpNum, type);
1428         temp            = gtNewOperNode(GT_ADDR, TYP_BYREF, temp);
1429         return temp;
1430     }
1431     else if (oper == GT_COMMA)
1432     {
1433         assert(structVal->gtOp.gtOp2->gtType == type); // Second thing is the struct
1434
1435         GenTreePtr oldTreeLast = impTreeLast;
1436         structVal->gtOp.gtOp2  = impGetStructAddr(structVal->gtOp.gtOp2, structHnd, curLevel, willDeref);
1437         structVal->gtType      = TYP_BYREF;
1438
1439         if (oldTreeLast != impTreeLast)
1440         {
1441             // Some temp assignment statement was placed on the statement list
1442             // for Op2, but that would be out of order with op1, so we need to
1443             // spill op1 onto the statement list after whatever was last
1444             // before we recursed on Op2 (i.e. before whatever Op2 appended).
1445             impInsertTreeBefore(structVal->gtOp.gtOp1, impCurStmtOffs, oldTreeLast->gtNext);
1446             structVal->gtOp.gtOp1 = gtNewNothingNode();
1447         }
1448
1449         return (structVal);
1450     }
1451
1452     return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1453 }
1454
1455 //------------------------------------------------------------------------
1456 // impNormStructType: Given a (known to be) struct class handle structHnd, normalize its type,
1457 //                    and optionally determine the GC layout of the struct.
1458 //
1459 // Arguments:
1460 //    structHnd       - The class handle for the struct type of interest.
1461 //    gcLayout        - (optional, default nullptr) - a BYTE pointer, allocated by the caller,
1462 //                      into which the gcLayout will be written.
1463 //    pNumGCVars      - (optional, default nullptr) - if non-null, a pointer to an unsigned,
1464 //                      which will be set to the number of GC fields in the struct.
1465 //    pSimdBaseType   - (optional, default nullptr) - if non-null, and the struct is a SIMD
1466 //                      type, set to the SIMD base type
1467 //
1468 // Return Value:
1469 //    The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*).
1470 //    The gcLayout will be returned using the pointers provided by the caller, if non-null.
1471 //    It may also modify the compFloatingPointUsed flag if the type is a SIMD type.
1472 //
1473 // Assumptions:
1474 //    The caller must set gcLayout to nullptr OR ensure that it is large enough
1475 //    (see ICorStaticInfo::getClassGClayout in corinfo.h).
1476 //
1477 // Notes:
1478 //    Normalizing the type involves examining the struct type to determine if it should
1479 //    be modified to one that is handled specially by the JIT, possibly being a candidate
1480 //    for full enregistration, e.g. TYP_SIMD16.
1481
1482 var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
1483                                       BYTE*                gcLayout,
1484                                       unsigned*            pNumGCVars,
1485                                       var_types*           pSimdBaseType)
1486 {
1487     assert(structHnd != NO_CLASS_HANDLE);
1488
1489     const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd);
1490     var_types   structType  = TYP_STRUCT;
1491
1492     // On CoreCLR the check for GC pointers includes a "may" to account for the special
1493     // ByRef-like span structs; CORINFO_FLG_CONTAINS_STACK_PTR is the bit that marks them.
1494     // When it is set, the struct contains a ByRef that could be either a GC pointer or a
1495     // native pointer.
1496     const bool mayContainGCPtrs =
1497         ((structFlags & CORINFO_FLG_CONTAINS_STACK_PTR) != 0 || ((structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0));
1498
1499 #ifdef FEATURE_SIMD
1500     // Check to see if this is a SIMD type.
1501     if (featureSIMD && !mayContainGCPtrs)
1502     {
1503         unsigned originalSize = info.compCompHnd->getClassSize(structHnd);
1504
1505         if ((originalSize >= minSIMDStructBytes()) && (originalSize <= maxSIMDStructBytes()))
1506         {
1507             unsigned int sizeBytes;
1508             var_types    simdBaseType = getBaseTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
1509             if (simdBaseType != TYP_UNKNOWN)
1510             {
1511                 assert(sizeBytes == originalSize);
1512                 structType = getSIMDTypeForSize(sizeBytes);
1513                 if (pSimdBaseType != nullptr)
1514                 {
1515                     *pSimdBaseType = simdBaseType;
1516                 }
1517                 // Also indicate that we use floating point registers.
1518                 compFloatingPointUsed = true;
1519             }
1520         }
1521     }
1522 #endif // FEATURE_SIMD
1523
1524     // Fetch GC layout info if requested
1525     if (gcLayout != nullptr)
1526     {
1527         unsigned numGCVars = info.compCompHnd->getClassGClayout(structHnd, gcLayout);
1528
1529         // Verify that the quick test up above via the class attributes gave a
1530         // safe view of the type's GCness.
1531         //
1532         // Note there are cases where mayContainGCPtrs is true but getClassGClayout
1533         // does not report any gc fields.
1534
1535         assert(mayContainGCPtrs || (numGCVars == 0));
1536
1537         if (pNumGCVars != nullptr)
1538         {
1539             *pNumGCVars = numGCVars;
1540         }
1541     }
1542     else
1543     {
1544         // Can't safely ask for number of GC pointers without also
1545         // asking for layout.
1546         assert(pNumGCVars == nullptr);
1547     }
1548
1549     return structType;
1550 }
1551
1552 //****************************************************************************
1553 //  Given TYP_STRUCT value 'structVal', make sure it is 'canonical', that is
1554 //  it is either an OBJ or a MKREFANY node, or a node (e.g. GT_INDEX) that will be morphed.
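//  If 'forceNormalization' is true, the normalization below is applied even when
//  the node is already considered to be in canonical form.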
1555 //
1556 GenTreePtr Compiler::impNormStructVal(GenTreePtr           structVal,
1557                                       CORINFO_CLASS_HANDLE structHnd,
1558                                       unsigned             curLevel,
1559                                       bool                 forceNormalization /*=false*/)
1560 {
1561     assert(forceNormalization || varTypeIsStruct(structVal));
1562     assert(structHnd != NO_CLASS_HANDLE);
1563     var_types structType = structVal->TypeGet();
1564     bool      makeTemp   = false;
1565     if (structType == TYP_STRUCT)
1566     {
1567         structType = impNormStructType(structHnd);
1568     }
1569     bool                 alreadyNormalized = false;
1570     GenTreeLclVarCommon* structLcl         = nullptr;
1571
1572     genTreeOps oper = structVal->OperGet();
1573     switch (oper)
1574     {
1575         // GT_RETURN and GT_MKREFANY don't capture the handle.
1576         case GT_RETURN:
1577             break;
1578         case GT_MKREFANY:
1579             alreadyNormalized = true;
1580             break;
1581
1582         case GT_CALL:
1583             structVal->gtCall.gtRetClsHnd = structHnd;
1584             makeTemp                      = true;
1585             break;
1586
1587         case GT_RET_EXPR:
1588             structVal->gtRetExpr.gtRetClsHnd = structHnd;
1589             makeTemp                         = true;
1590             break;
1591
1592         case GT_ARGPLACE:
1593             structVal->gtArgPlace.gtArgPlaceClsHnd = structHnd;
1594             break;
1595
1596         case GT_INDEX:
1597             // This will be transformed to an OBJ later.
1598             alreadyNormalized                    = true;
1599             structVal->gtIndex.gtStructElemClass = structHnd;
1600             structVal->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(structHnd);
1601             break;
1602
1603         case GT_FIELD:
1604             // Wrap it in a GT_OBJ.
1605             structVal->gtType = structType;
1606             structVal         = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1607             break;
1608
1609         case GT_LCL_VAR:
1610         case GT_LCL_FLD:
1611             structLcl = structVal->AsLclVarCommon();
1612             // Wrap it in a GT_OBJ.
1613             structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1614             __fallthrough;
1615
1616         case GT_OBJ:
1617         case GT_BLK:
1618         case GT_DYN_BLK:
1619         case GT_ASG:
1620             // These should already have the appropriate type.
1621             assert(structVal->gtType == structType);
1622             alreadyNormalized = true;
1623             break;
1624
1625         case GT_IND:
1626             assert(structVal->gtType == structType);
1627             structVal         = gtNewObjNode(structHnd, structVal->gtGetOp1());
1628             alreadyNormalized = true;
1629             break;
1630
1631 #ifdef FEATURE_SIMD
1632         case GT_SIMD:
1633             assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
1634             break;
1635 #endif // FEATURE_SIMD
1636
1637         case GT_COMMA:
1638         {
1639             // The second thing could either be a block node or a GT_SIMD or a GT_COMMA node.
1640             GenTree* blockNode = structVal->gtOp.gtOp2;
1641             assert(blockNode->gtType == structType);
1642
1643             // Is this GT_COMMA(op1, GT_COMMA())?
1644             GenTree* parent = structVal;
1645             if (blockNode->OperGet() == GT_COMMA)
1646             {
1647                 // Find the last node in the comma chain.
1648                 do
1649                 {
1650                     assert(blockNode->gtType == structType);
1651                     parent    = blockNode;
1652                     blockNode = blockNode->gtOp.gtOp2;
1653                 } while (blockNode->OperGet() == GT_COMMA);
1654             }
1655
1656 #ifdef FEATURE_SIMD
1657             if (blockNode->OperGet() == GT_SIMD)
1658             {
1659                 parent->gtOp.gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization);
1660                 alreadyNormalized  = true;
1661             }
1662             else
1663 #endif
1664             {
1665                 assert(blockNode->OperIsBlk());
1666
1667                 // Sink the GT_COMMA below the blockNode addr.
1668                 // That is GT_COMMA(op1, op2=blockNode) is transformed into
1669                 // blockNode(GT_COMMA(TYP_BYREF, op1, op2's op1)).
1670                 //
1671                 // In the case of a chained GT_COMMA, we sink the last
1672                 // GT_COMMA below the blockNode addr.
1673                 GenTree* blockNodeAddr = blockNode->gtOp.gtOp1;
1674                 assert(blockNodeAddr->gtType == TYP_BYREF);
1675                 GenTree* commaNode    = parent;
1676                 commaNode->gtType     = TYP_BYREF;
1677                 commaNode->gtOp.gtOp2 = blockNodeAddr;
1678                 blockNode->gtOp.gtOp1 = commaNode;
1679                 if (parent == structVal)
1680                 {
1681                     structVal = blockNode;
1682                 }
1683                 alreadyNormalized = true;
1684             }
1685         }
1686         break;
1687
1688         default:
1689             assert(!"Unexpected node in impNormStructVal()");
1690             break;
1691     }
1692     structVal->gtType  = structType;
1693     GenTree* structObj = structVal;
1694
1695     if (!alreadyNormalized || forceNormalization)
1696     {
1697         if (makeTemp)
1698         {
1699             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1700
1701             impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1702
1703             // The structVal is now the temp itself
1704
1705             structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon();
1706             // TODO-1stClassStructs: Avoid always wrapping in GT_OBJ.
1707             structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structLcl));
1708         }
1709         else if (varTypeIsStruct(structType) && !structVal->OperIsBlk())
1710         {
1711             // Wrap it in a GT_OBJ
1712             structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1713         }
1714     }
1715
1716     if (structLcl != nullptr)
1717     {
1718         // An OBJ on an ADDR(LCL_VAR) can never raise an exception
1719         // so we don't set GTF_EXCEPT here.
1720         if (!lvaIsImplicitByRefLocal(structLcl->gtLclNum))
1721         {
1722             structObj->gtFlags &= ~GTF_GLOB_REF;
1723         }
1724     }
1725     else
1726     {
1727         // In general an OBJ is an indirection and could raise an exception.
1728         structObj->gtFlags |= GTF_EXCEPT;
1729     }
1730     return (structObj);
1731 }
1732
1733 /******************************************************************************/
1734 // Given a type token, generate code that will evaluate to the correct
1735 // handle representation of that token (type handle, field handle, or method handle)
1736 //
1737 // For most cases, the handle is determined at compile-time, and the code
1738 // generated is simply an embedded handle.
1739 //
1740 // Run-time lookup is required if the enclosing method is shared between instantiations
1741 // and the token refers to formal type parameters whose instantiation is not known
1742 // at compile-time.
1743 //
1744 GenTreePtr Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1745                                       BOOL*                   pRuntimeLookup /* = NULL */,
1746                                       BOOL                    mustRestoreHandle /* = FALSE */,
1747                                       BOOL                    importParent /* = FALSE */)
1748 {
1749     assert(!fgGlobalMorph);
1750
1751     CORINFO_GENERICHANDLE_RESULT embedInfo;
1752     info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo);
1753
1754     if (pRuntimeLookup)
1755     {
1756         *pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup;
1757     }
1758
1759     if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup)
1760     {
1761         switch (embedInfo.handleType)
1762         {
1763             case CORINFO_HANDLETYPE_CLASS:
1764                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle);
1765                 break;
1766
1767             case CORINFO_HANDLETYPE_METHOD:
1768                 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle);
1769                 break;
1770
1771             case CORINFO_HANDLETYPE_FIELD:
1772                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
1773                     info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle));
1774                 break;
1775
1776             default:
1777                 break;
1778         }
1779     }
1780
1781     return impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
1782                            embedInfo.compileTimeHandle);
1783 }
1784
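//------------------------------------------------------------------------
// impLookupToTree: build the tree for a handle described by pLookup.
//
// When no runtime lookup is needed, the handle (or an indirection through a
// fixed address) is embedded as a constant node. Otherwise a dictionary-based
// runtime lookup tree is generated; such lookups are not imported when inlining.
//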
1785 GenTreePtr Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1786                                      CORINFO_LOOKUP*         pLookup,
1787                                      unsigned                handleFlags,
1788                                      void*                   compileTimeHandle)
1789 {
1790     if (!pLookup->lookupKind.needsRuntimeLookup)
1791     {
1792         // No runtime lookup is required.
1793         // Access is a direct reference, or a memory-indirect reference through a fixed address
1794
1795         CORINFO_GENERIC_HANDLE handle       = nullptr;
1796         void*                  pIndirection = nullptr;
1797         assert(pLookup->constLookup.accessType != IAT_PPVALUE);
1798
1799         if (pLookup->constLookup.accessType == IAT_VALUE)
1800         {
1801             handle = pLookup->constLookup.handle;
1802         }
1803         else if (pLookup->constLookup.accessType == IAT_PVALUE)
1804         {
1805             pIndirection = pLookup->constLookup.addr;
1806         }
1807         return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, 0, nullptr, compileTimeHandle);
1808     }
1809     else if (compIsForInlining())
1810     {
1811         // Don't import runtime lookups when inlining
1812         // Inlining has to be aborted in such a case
1813         compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1814         return nullptr;
1815     }
1816     else
1817     {
1818         // Need to use dictionary-based access which depends on the typeContext
1819         // which is only available at runtime, not at compile-time.
1820
1821         return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle);
1822     }
1823 }
1824
1825 #ifdef FEATURE_READYTORUN_COMPILER
1826 GenTreePtr Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup,
1827                                                unsigned              handleFlags,
1828                                                void*                 compileTimeHandle)
1829 {
1830     CORINFO_GENERIC_HANDLE handle       = nullptr;
1831     void*                  pIndirection = nullptr;
1832     assert(pLookup->accessType != IAT_PPVALUE);
1833
1834     if (pLookup->accessType == IAT_VALUE)
1835     {
1836         handle = pLookup->handle;
1837     }
1838     else if (pLookup->accessType == IAT_PVALUE)
1839     {
1840         pIndirection = pLookup->addr;
1841     }
1842     return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, 0, nullptr, compileTimeHandle);
1843 }
1844
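// impReadyToRunHelperToTree: create a call to the given ReadyToRun helper and
// attach the entry point the EE provides for it. With the newer JIT-EE
// interface this returns nullptr if the EE cannot supply an entry point.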
1845 GenTreePtr Compiler::impReadyToRunHelperToTree(
1846     CORINFO_RESOLVED_TOKEN* pResolvedToken,
1847     CorInfoHelpFunc         helper,
1848     var_types               type,
1849     GenTreeArgList*         args /* =NULL*/,
1850     CORINFO_LOOKUP_KIND*    pGenericLookupKind /* =NULL. Only used with generics */)
1851 {
1852     CORINFO_CONST_LOOKUP lookup;
1853 #if COR_JIT_EE_VERSION > 460
1854     if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup))
1855     {
1856         return nullptr;
1857     }
1858 #else
1859     info.compCompHnd->getReadyToRunHelper(pResolvedToken, helper, &lookup);
1860 #endif
1861
1862     GenTreePtr op1 = gtNewHelperCallNode(helper, type, GTF_EXCEPT, args);
1863
1864     op1->gtCall.setEntryPoint(lookup);
1865
1866     return op1;
1867 }
1868 #endif
1869
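// impMethodPointer: build a tree that evaluates to the entry-point address of
// the method described by pResolvedToken/pCallInfo: a direct GT_FTN_ADDR for
// CORINFO_CALL, or a (possibly runtime) lookup tree for CORINFO_CALL_CODE_POINTER.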
1870 GenTreePtr Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
1871 {
1872     GenTreePtr op1 = nullptr;
1873
1874     switch (pCallInfo->kind)
1875     {
1876         case CORINFO_CALL:
1877             op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod);
1878
1879 #ifdef FEATURE_READYTORUN_COMPILER
1880             if (opts.IsReadyToRun())
1881             {
1882                 op1->gtFptrVal.gtEntryPoint          = pCallInfo->codePointerLookup.constLookup;
1883                 op1->gtFptrVal.gtLdftnResolvedToken  = new (this, CMK_Unknown) CORINFO_RESOLVED_TOKEN;
1884                 *op1->gtFptrVal.gtLdftnResolvedToken = *pResolvedToken;
1885             }
1886             else
1887             {
1888                 op1->gtFptrVal.gtEntryPoint.addr = nullptr;
1889             }
1890 #endif
1891             break;
1892
1893         case CORINFO_CALL_CODE_POINTER:
1894             if (compIsForInlining())
1895             {
1896                 // Don't import runtime lookups when inlining
1897                 // Inlining has to be aborted in such a case
1898                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1899                 return nullptr;
1900             }
1901
1902             op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod);
1903             break;
1904
1905         default:
1906             noway_assert(!"unknown call kind");
1907             break;
1908     }
1909
1910     return op1;
1911 }
1912
1913 //------------------------------------------------------------------------
1914 // getRuntimeContextTree: find pointer to context for runtime lookup.
1915 //
1916 // Arguments:
1917 //    kind - lookup kind.
1918 //
1919 // Return Value:
1920 //    Returns a GenTree pointer to the generic shared context.
1921 //
1922 // Notes:
1923 //    Reports that the generic context is used (sets lvaGenericsContextUsed).
1924
1925 GenTreePtr Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind)
1926 {
1927     GenTreePtr ctxTree = nullptr;
1928
1929     // Collectible types require that, for shared generic code, we report any use of the generic context
1930     // parameter. (This is a conservative approach; we could detect some cases, particularly when the
1931     // context parameter is 'this', where we don't need the eager reporting logic.)
1932     lvaGenericsContextUsed = true;
1933
1934     if (kind == CORINFO_LOOKUP_THISOBJ)
1935     {
1936         // this Object
1937         ctxTree = gtNewLclvNode(info.compThisArg, TYP_REF);
1938
1939         // Vtable pointer of this object
1940         ctxTree = gtNewOperNode(GT_IND, TYP_I_IMPL, ctxTree);
1941         ctxTree->gtFlags |= GTF_EXCEPT; // Null-pointer exception
1942         ctxTree->gtFlags |= GTF_IND_INVARIANT;
1943     }
1944     else
1945     {
1946         assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM);
1947
1948         ctxTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); // Exact method descriptor as passed in as last arg
1949     }
1950     return ctxTree;
1951 }
1952
1953 /*****************************************************************************/
1954 /* Import a dictionary lookup to access a handle in code shared between
1955    generic instantiations.
1956    The lookup depends on the typeContext which is only available at
1957    runtime, and not at compile-time.
1958    pLookup->token1 and pLookup->token2 specify the handle that is needed.
1959    The cases are:
1960
1961    1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the
1962       instantiation-specific handle, and the tokens to look up the handle.
1963    2. pLookup->indirections != CORINFO_USEHELPER :
1964       2a. pLookup->testForNull == false : Dereference the instantiation-specific handle
1965           to get the handle.
1966       2b. pLookup->testForNull == true : Dereference the instantiation-specific handle.
1967           If it is non-NULL, it is the handle required. Else, call a helper
1968           to look up the handle.
1969  */
1970
1971 GenTreePtr Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1972                                             CORINFO_LOOKUP*         pLookup,
1973                                             void*                   compileTimeHandle)
1974 {
1975
1976     // This method can only be called from the importer instance of the Compiler.
1977     // In other words, it cannot be called by the instance of the Compiler for the inlinee.
1978     assert(!compIsForInlining());
1979
1980     GenTreePtr ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);
1981
1982 #ifdef FEATURE_READYTORUN_COMPILER
1983     if (opts.IsReadyToRun())
1984     {
1985         return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
1986                                          gtNewArgList(ctxTree), &pLookup->lookupKind);
1987     }
1988 #endif
1989
1990     CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;
1991     // It's available only via the run-time helper function
1992     if (pRuntimeLookup->indirections == CORINFO_USEHELPER)
1993     {
1994         GenTreeArgList* helperArgs =
1995             gtNewArgList(ctxTree, gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, 0,
1996                                                       nullptr, compileTimeHandle));
1997
1998         return gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, GTF_EXCEPT, helperArgs);
1999     }
2000
2001     // Slot pointer
2002     GenTreePtr slotPtrTree = ctxTree;
2003
2004     if (pRuntimeLookup->testForNull)
2005     {
2006         slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2007                                    nullptr DEBUGARG("impRuntimeLookup slot"));
2008     }
2009
2010     // Apply repeated indirections
2011     for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
2012     {
2013         if (i != 0)
2014         {
2015             slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2016             slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2017             slotPtrTree->gtFlags |= GTF_IND_INVARIANT;
2018         }
2019         if (pRuntimeLookup->offsets[i] != 0)
2020         {
2021             slotPtrTree =
2022                 gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
2023         }
2024     }
2025
2026     // No null test required
2027     if (!pRuntimeLookup->testForNull)
2028     {
2029         if (pRuntimeLookup->indirections == 0)
2030         {
2031             return slotPtrTree;
2032         }
2033
2034         slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2035         slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2036
2037         if (!pRuntimeLookup->testForFixup)
2038         {
2039             return slotPtrTree;
2040         }
2041
2042         impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0"));
2043
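        // The slot may still hold a fixup rather than the final handle. Build, roughly:
        //     tmp = ((slot & 1) == 0) ? slot : *(slot - 1)
        // i.e. if the low bit is set, clear it and dereference to get the real handle.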
2044         GenTreePtr op1 = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2045                                       nullptr DEBUGARG("impRuntimeLookup test"));
2046         op1 = impImplicitIorI4Cast(op1, TYP_INT); // downcast the pointer to a TYP_INT on 64-bit targets
2047
2048         // Use a GT_AND to check for the lowest bit and indirect if it is set
2049         GenTreePtr testTree = gtNewOperNode(GT_AND, TYP_INT, op1, gtNewIconNode(1));
2050         GenTreePtr relop    = gtNewOperNode(GT_EQ, TYP_INT, testTree, gtNewIconNode(0));
2051         relop->gtFlags |= GTF_RELOP_QMARK;
2052
2053         op1 = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2054                            nullptr DEBUGARG("impRuntimeLookup indir"));
2055         op1 = gtNewOperNode(GT_ADD, TYP_I_IMPL, op1, gtNewIconNode(-1, TYP_I_IMPL)); // subtract 1 from the pointer
2056         GenTreePtr indirTree = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
2057         GenTreePtr colon     = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL, slotPtrTree, indirTree);
2058
2059         GenTreePtr qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2060
2061         unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark0"));
2062         impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2063         return gtNewLclvNode(tmp, TYP_I_IMPL);
2064     }
2065
2066     assert(pRuntimeLookup->indirections != 0);
2067
2068     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1"));
2069
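    // The handle lives in the slot but may not have been filled in yet. Roughly:
    //     handle = *slotPtr;
    //     result = (handle != null) ? handle : helper(ctx, signature);
    // with the result materialized in a temp.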
2070     // Extract the handle
2071     GenTreePtr handle = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2072     handle->gtFlags |= GTF_IND_NONFAULTING;
2073
2074     GenTreePtr handleCopy = impCloneExpr(handle, &handle, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2075                                          nullptr DEBUGARG("impRuntimeLookup typehandle"));
2076
2077     // Call to helper
2078     GenTreeArgList* helperArgs =
2079         gtNewArgList(ctxTree, gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, 0, nullptr,
2080                                                   compileTimeHandle));
2081     GenTreePtr helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, GTF_EXCEPT, helperArgs);
2082
2083     // Check for null and possibly call helper
2084     GenTreePtr relop = gtNewOperNode(GT_NE, TYP_INT, handle, gtNewIconNode(0, TYP_I_IMPL));
2085     relop->gtFlags |= GTF_RELOP_QMARK;
2086
2087     GenTreePtr colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL,
2088                                                          gtNewNothingNode(), // do nothing if nonnull
2089                                                          helperCall);
2090
2091     GenTreePtr qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2092
2093     unsigned tmp;
2094     if (handleCopy->IsLocal())
2095     {
2096         tmp = handleCopy->gtLclVarCommon.gtLclNum;
2097     }
2098     else
2099     {
2100         tmp = lvaGrabTemp(true DEBUGARG("spilling QMark1"));
2101     }
2102
2103     impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2104     return gtNewLclvNode(tmp, TYP_I_IMPL);
2105 }
2106
2107 /******************************************************************************
2108  *  Spills the stack at verCurrentState.esStack[level] and replaces it with a temp.
2109  *  If tnum!=BAD_VAR_NUM, the temp var used to replace the tree is tnum,
2110  *     else, grab a new temp.
2111  *  For structs (which can be pushed on the stack using obj, etc),
2112  *  special handling is needed
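 *  Returns true once the entry has been replaced with the temp; returns false
 *  if the requested temp cannot be used (a bad temp number or, under
 *  verification, an incompatible type).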
2113  */
2114
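// RecursiveGuard: small RAII guard that sets a bool flag for its lifetime and
// clears it again on destruction. impSpillStackEntry uses it (in DEBUG builds,
// via bAssertOnRecursion) to assert that it is not re-entered.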
2115 struct RecursiveGuard
2116 {
2117 public:
2118     RecursiveGuard()
2119     {
2120         m_pAddress = nullptr;
2121     }
2122
2123     ~RecursiveGuard()
2124     {
2125         if (m_pAddress)
2126         {
2127             *m_pAddress = false;
2128         }
2129     }
2130
2131     void Init(bool* pAddress, bool bInitialize)
2132     {
2133         assert(pAddress && *pAddress == false && "Recursive guard violation");
2134         m_pAddress = pAddress;
2135
2136         if (bInitialize)
2137         {
2138             *m_pAddress = true;
2139         }
2140     }
2141
2142 protected:
2143     bool* m_pAddress;
2144 };
2145
2146 bool Compiler::impSpillStackEntry(unsigned level,
2147                                   unsigned tnum
2148 #ifdef DEBUG
2149                                   ,
2150                                   bool        bAssertOnRecursion,
2151                                   const char* reason
2152 #endif
2153                                   )
2154 {
2155
2156 #ifdef DEBUG
2157     RecursiveGuard guard;
2158     guard.Init(&impNestedStackSpill, bAssertOnRecursion);
2159 #endif
2160
2161     GenTreePtr tree = verCurrentState.esStack[level].val;
2162
2163     /* Allocate a temp if we haven't been asked to use a particular one */
2164
2165     if (tiVerificationNeeded)
2166     {
2167         // Ignore bad temp requests (they will happen with bad code and will be
2168         // caught when importing the destblock)
2169         if ((tnum != BAD_VAR_NUM && tnum >= lvaCount) && verNeedsVerification())
2170         {
2171             return false;
2172         }
2173     }
2174     else
2175     {
2176         if (tnum != BAD_VAR_NUM && (tnum >= lvaCount))
2177         {
2178             return false;
2179         }
2180     }
2181
2182     if (tnum == BAD_VAR_NUM)
2183     {
2184         tnum = lvaGrabTemp(true DEBUGARG(reason));
2185     }
2186     else if (tiVerificationNeeded && lvaTable[tnum].TypeGet() != TYP_UNDEF)
2187     {
2188         // If verification is needed and tnum's type is incompatible with
2189         // the type on the stack, we grab a new temp. This is safe since
2190         // we will throw a verification exception in the dest block.
2191
2192         var_types valTyp = tree->TypeGet();
2193         var_types dstTyp = lvaTable[tnum].TypeGet();
2194
2195         // If the two types are different, we return. This will only happen with bad code and will
2196         // be caught when importing the destblock. We still allow int/byref and float/double differences.
2197         if ((genActualType(valTyp) != genActualType(dstTyp)) &&
2198             !(
2199 #ifndef _TARGET_64BIT_
2200                 (valTyp == TYP_I_IMPL && dstTyp == TYP_BYREF) || (valTyp == TYP_BYREF && dstTyp == TYP_I_IMPL) ||
2201 #endif // !_TARGET_64BIT_
2202                 (varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp))))
2203         {
2204             if (verNeedsVerification())
2205             {
2206                 return false;
2207             }
2208         }
2209     }
2210
2211     /* Assign the spilled entry to the temp */
2212     impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level);
2213
2214     // The tree type may be modified by impAssignTempGen, so use the type of the lclVar.
2215     var_types  type                    = genActualType(lvaTable[tnum].TypeGet());
2216     GenTreePtr temp                    = gtNewLclvNode(tnum, type);
2217     verCurrentState.esStack[level].val = temp;
2218
2219     return true;
2220 }
2221
2222 /*****************************************************************************
2223  *
2224  *  Ensure that the stack has only spilled values
2225  */
2226
2227 void Compiler::impSpillStackEnsure(bool spillLeaves)
2228 {
2229     assert(!spillLeaves || opts.compDbgCode);
2230
2231     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2232     {
2233         GenTreePtr tree = verCurrentState.esStack[level].val;
2234
2235         if (!spillLeaves && tree->OperIsLeaf())
2236         {
2237             continue;
2238         }
2239
2240         // Temps introduced by the importer itself don't need to be spilled
2241
2242         bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) && (tree->gtLclVarCommon.gtLclNum >= info.compLocalsCount);
2243
2244         if (isTempLcl)
2245         {
2246             continue;
2247         }
2248
2249         impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure"));
2250     }
2251 }
2252
2253 void Compiler::impSpillEvalStack()
2254 {
2255     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2256     {
2257         impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack"));
2258     }
2259 }
2260
2261 /*****************************************************************************
2262  *
2263  *  If the stack contains any trees with side effects in them, assign those
2264  *  trees to temps and append the assignments to the statement list.
2265  *  On return the stack is guaranteed to be empty.
2266  */
2267
2268 inline void Compiler::impEvalSideEffects()
2269 {
2270     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects"));
2271     verCurrentState.esStackDepth = 0;
2272 }
2273
2274 /*****************************************************************************
2275  *
2276  *  If the stack contains any trees with side effects in them, assign those
2277  *  trees to temps and replace them on the stack with refs to their temps.
2278  *  [0..chkLevel) is the portion of the stack which will be checked and spilled.
2279  */
2280
2281 inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason))
2282 {
2283     assert(chkLevel != (unsigned)CHECK_SPILL_NONE);
2284
2285     /* Before we make any appends to the tree list we must spill the
2286      * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */
2287
2288     impSpillSpecialSideEff();
2289
2290     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
2291     {
2292         chkLevel = verCurrentState.esStackDepth;
2293     }
2294
2295     assert(chkLevel <= verCurrentState.esStackDepth);
2296
2297     unsigned spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT;
2298
2299     for (unsigned i = 0; i < chkLevel; i++)
2300     {
2301         GenTreePtr tree = verCurrentState.esStack[i].val;
2302
2303         GenTreePtr lclVarTree;
2304
2305         if ((tree->gtFlags & spillFlags) != 0 ||
2306             (spillGlobEffects &&                        // Only consider the following when spillGlobEffects == TRUE
2307              !impIsAddressInLocal(tree, &lclVarTree) && // No need to spill the GT_ADDR node on a local.
2308              gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or
2309                                            // lvAddrTaken flag.
2310         {
2311             impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason));
2312         }
2313     }
2314 }
2315
2316 /*****************************************************************************
2317  *
2318  *  If the stack contains any trees with special side effects in them, assign
2319  *  those trees to temps and replace them on the stack with refs to their temps.
2320  */
2321
2322 inline void Compiler::impSpillSpecialSideEff()
2323 {
2324     // Only exception objects need to be carefully handled
2325
2326     if (!compCurBB->bbCatchTyp)
2327     {
2328         return;
2329     }
2330
2331     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2332     {
2333         GenTreePtr tree = verCurrentState.esStack[level].val;
2334         // If the subtree contains an exception object (GT_CATCH_ARG), spill this stack entry.
2335         if (gtHasCatchArg(tree))
2336         {
2337             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff"));
2338         }
2339     }
2340 }
2341
2342 /*****************************************************************************
2343  *
2344  *  Spill all stack references to value classes (TYP_STRUCT nodes)
2345  */
2346
2347 void Compiler::impSpillValueClasses()
2348 {
2349     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2350     {
2351         GenTreePtr tree = verCurrentState.esStack[level].val;
2352
2353         if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT)
2354         {
2355             // Tree walk was aborted, which means that we found a
2356             // value class on the stack.  Need to spill that
2357             // stack entry.
2358
2359             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses"));
2360         }
2361     }
2362 }
2363
2364 /*****************************************************************************
2365  *
2366  *  Callback that checks if a tree node is TYP_STRUCT
2367  */
2368
2369 Compiler::fgWalkResult Compiler::impFindValueClasses(GenTreePtr* pTree, fgWalkData* data)
2370 {
2371     fgWalkResult walkResult = WALK_CONTINUE;
2372
2373     if ((*pTree)->gtType == TYP_STRUCT)
2374     {
2375         // Abort the walk and indicate that we found a value class
2376
2377         walkResult = WALK_ABORT;
2378     }
2379
2380     return walkResult;
2381 }
2382
2383 /*****************************************************************************
2384  *
2385  *  If the stack contains any trees with references to local #lclNum, assign
2386  *  those trees to temps and replace them on the stack with refs to
2387  *  their temps.
2388  */
2389
2390 void Compiler::impSpillLclRefs(ssize_t lclNum)
2391 {
2392     /* Before we make any appends to the tree list we must spill the
2393      * "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */
2394
2395     impSpillSpecialSideEff();
2396
2397     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2398     {
2399         GenTreePtr tree = verCurrentState.esStack[level].val;
2400
2401         /* If the tree may throw an exception, and the block has a handler,
2402            then we need to spill assignments to the local if the local is
2403            live on entry to the handler.
2404            Just spill 'em all without considering the liveness */
2405
2406         bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT));
2407
2408         /* Skip the tree if it doesn't have an affected reference,
2409            unless xcptnCaught */
2410
2411         if (xcptnCaught || gtHasRef(tree, lclNum, false))
2412         {
2413             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs"));
2414         }
2415     }
2416 }
2417
2418 /*****************************************************************************
2419  *
2420  *  Push catch arg onto the stack.
2421  *  If there are jumps to the beginning of the handler, insert basic block
2422  *  and spill catch arg to a temp. Update the handler block if necessary.
2423  *
2424  *  Returns the basic block of the actual handler.
2425  */
2426
2427 BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd)
2428 {
2429     // Do not inject the basic block twice on reimport. This should be
2430     // hit only under JIT stress. See if the block is the one we injected.
2431     // Note that EH canonicalization can inject internal blocks here. We might
2432     // be able to re-use such a block (but we don't, right now).
2433     if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET)) ==
2434         (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET))
2435     {
2436         GenTreePtr tree = hndBlk->bbTreeList;
2437
2438         if (tree != nullptr && tree->gtOper == GT_STMT)
2439         {
2440             tree = tree->gtStmt.gtStmtExpr;
2441             assert(tree != nullptr);
2442
2443             if ((tree->gtOper == GT_ASG) && (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
2444                 (tree->gtOp.gtOp2->gtOper == GT_CATCH_ARG))
2445             {
2446                 tree = gtNewLclvNode(tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum, TYP_REF);
2447
2448                 impPushOnStack(tree, typeInfo(TI_REF, clsHnd));
2449
2450                 return hndBlk->bbNext;
2451             }
2452         }
2453
2454         // If we get here, it must have been some other kind of internal block. It's possible that
2455         // someone prepended something to our injected block, but that's unlikely.
2456     }
2457
2458     /* Push the exception address value on the stack */
2459     GenTreePtr arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF);
2460
2461     /* Mark the node as having a side-effect - i.e. cannot be
2462      * moved around since it is tied to a fixed location (EAX) */
2463     arg->gtFlags |= GTF_ORDER_SIDEEFF;
2464
2465     /* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */
2466     if (hndBlk->bbRefs > 1 || compStressCompile(STRESS_CATCH_ARG, 5))
2467     {
2468         if (hndBlk->bbRefs == 1)
2469         {
2470             hndBlk->bbRefs++;
2471         }
2472
2473         /* Create extra basic block for the spill */
2474         BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true);
2475         newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET;
2476         newBlk->setBBWeight(hndBlk->bbWeight);
2477         newBlk->bbCodeOffs = hndBlk->bbCodeOffs;
2478
2479         /* Account for the new link we are about to create */
2480         hndBlk->bbRefs++;
2481
2482         /* Spill into a temp */
2483         unsigned tempNum         = lvaGrabTemp(false DEBUGARG("SpillCatchArg"));
2484         lvaTable[tempNum].lvType = TYP_REF;
2485         arg                      = gtNewTempAssign(tempNum, arg);
2486
2487         hndBlk->bbStkTempsIn = tempNum;
2488
2489         /* Report the debug info. impImportBlockCode won't treat
2490          * the actual handler as an exception block and thus won't do it for us. */
2491         if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
2492         {
2493             impCurStmtOffs = newBlk->bbCodeOffs | IL_OFFSETX_STKBIT;
2494             arg            = gtNewStmt(arg, impCurStmtOffs);
2495         }
2496
2497         fgInsertStmtAtEnd(newBlk, arg);
2498
2499         arg = gtNewLclvNode(tempNum, TYP_REF);
2500     }
2501
2502     impPushOnStack(arg, typeInfo(TI_REF, clsHnd));
2503
2504     return hndBlk;
2505 }
2506
2507 /*****************************************************************************
2508  *
2509  *  Given a tree, clone it. *pClone is set to the cloned tree.
2510  *  Returns the original tree if the cloning was easy,
2511  *   else returns the temp to which the tree had to be spilled.
2512  *  If the tree has side-effects, it will be spilled to a temp.
2513  */
2514
2515 GenTreePtr Compiler::impCloneExpr(GenTreePtr           tree,
2516                                   GenTreePtr*          pClone,
2517                                   CORINFO_CLASS_HANDLE structHnd,
2518                                   unsigned             curLevel,
2519                                   GenTreePtr* pAfterStmt DEBUGARG(const char* reason))
2520 {
2521     if (!(tree->gtFlags & GTF_GLOB_EFFECT))
2522     {
2523         GenTreePtr clone = gtClone(tree, true);
2524
2525         if (clone)
2526         {
2527             *pClone = clone;
2528             return tree;
2529         }
2530     }
2531
2532     /* Store the operand in a temp and return the temp */
2533
2534     unsigned temp = lvaGrabTemp(true DEBUGARG(reason));
2535
2536     // impAssignTempGen() may change tree->gtType to TYP_VOID for calls which
2537     // return a struct type. It also may modify the struct type to a more
2538     // specialized type (e.g. a SIMD type).  So we will get the type from
2539     // the lclVar AFTER calling impAssignTempGen().
2540
2541     impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtOffs);
2542     var_types type = genActualType(lvaTable[temp].TypeGet());
2543
2544     *pClone = gtNewLclvNode(temp, type);
2545     return gtNewLclvNode(temp, type);
2546 }
2547
2548 /*****************************************************************************
2549  * Remember the IL offset (including stack-empty info) for the trees we will
2550  * generate now.
2551  */
2552
2553 inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs)
2554 {
2555     if (compIsForInlining())
2556     {
2557         GenTreePtr callStmt = impInlineInfo->iciStmt;
2558         assert(callStmt->gtOper == GT_STMT);
2559         impCurStmtOffs = callStmt->gtStmt.gtStmtILoffsx;
2560     }
2561     else
2562     {
2563         assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2564         IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2565         impCurStmtOffs    = offs | stkBit;
2566     }
2567 }
2568
2569 /*****************************************************************************
2570  * Returns current IL offset with stack-empty and call-instruction info incorporated
2571  */
2572 inline IL_OFFSETX Compiler::impCurILOffset(IL_OFFSET offs, bool callInstruction)
2573 {
2574     if (compIsForInlining())
2575     {
2576         return BAD_IL_OFFSET;
2577     }
2578     else
2579     {
2580         assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2581         IL_OFFSETX stkBit             = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2582         IL_OFFSETX callInstructionBit = callInstruction ? IL_OFFSETX_CALLINSTRUCTIONBIT : 0;
2583         return offs | stkBit | callInstructionBit;
2584     }
2585 }
2586
2587 /*****************************************************************************
2588  *
2589  *  Remember the instr offset for the statements
2590  *
2591  *  When we do impAppendTree(tree), we can't set tree->gtStmtLastILoffs to
2592  *  impCurOpcOffs, if the append was done because of a partial stack spill,
2593  *  as some of the trees corresponding to code up to impCurOpcOffs might
2594  *  still be sitting on the stack.
2595  *  So we delay marking of gtStmtLastILoffs until impNoteLastILoffs().
2596  *  This should be called when an opcode finally/explicitly causes
2597  *  impAppendTree(tree) to be called (as opposed to being called because of
2598  *  a spill caused by the opcode)
2599  */
2600
2601 #ifdef DEBUG
2602
2603 void Compiler::impNoteLastILoffs()
2604 {
2605     if (impLastILoffsStmt == nullptr)
2606     {
2607         // We should have added a statement for the current basic block
2608         // Is this assert correct?
2609
2610         assert(impTreeLast);
2611         assert(impTreeLast->gtOper == GT_STMT);
2612
2613         impTreeLast->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2614     }
2615     else
2616     {
2617         impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2618         impLastILoffsStmt                          = nullptr;
2619     }
2620 }
2621
2622 #endif // DEBUG
2623
2624 /*****************************************************************************
2625  * We don't create any GenTree (excluding spills) for a branch.
2626  * For debugging info, we need a placeholder so that we can note
2627  * the IL offset in gtStmt.gtStmtOffs. So append an empty statement.
2628  */
2629
2630 void Compiler::impNoteBranchOffs()
2631 {
2632     if (opts.compDbgCode)
2633     {
2634         impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
2635     }
2636 }
2637
2638 /*****************************************************************************
2639  * Locate the next stmt boundary for which we need to record info.
2640  * We will have to spill the stack at such boundaries if it is not
2641  * already empty.
2642  * Returns the next stmt boundary (after the start of the block)
2643  */
2644
2645 unsigned Compiler::impInitBlockLineInfo()
2646 {
2647     /* Assume the block does not correspond with any IL offset. This prevents
2648        us from reporting extra offsets. Extra mappings can cause confusing
2649        stepping, especially if the extra mapping is a jump-target, and the
2650        debugger does not ignore extra mappings, but instead rewinds to the
2651        nearest known offset */
2652
2653     impCurStmtOffsSet(BAD_IL_OFFSET);
2654
2655     if (compIsForInlining())
2656     {
2657         return ~0;
2658     }
2659
2660     IL_OFFSET blockOffs = compCurBB->bbCodeOffs;
2661
2662     if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES))
2663     {
2664         impCurStmtOffsSet(blockOffs);
2665     }
2666
2667     if (false && (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES))
2668     {
2669         impCurStmtOffsSet(blockOffs);
2670     }
2671
2672     /* Always report IL offset 0 or some tests get confused.
2673        Probably a good idea anyway */
2674
2675     if (blockOffs == 0)
2676     {
2677         impCurStmtOffsSet(blockOffs);
2678     }
2679
2680     if (!info.compStmtOffsetsCount)
2681     {
2682         return ~0;
2683     }
2684
2685     /* Find the lowest explicit stmt boundary within the block */
2686
2687     /* Start looking at an entry that is based on our instr offset */
2688
2689     unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize;
2690
2691     if (index >= info.compStmtOffsetsCount)
2692     {
2693         index = info.compStmtOffsetsCount - 1;
2694     }
2695
2696     /* If we've guessed too far, back up */
2697
2698     while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs)
2699     {
2700         index--;
2701     }
2702
2703     /* If we guessed short, advance ahead */
2704
2705     while (info.compStmtOffsets[index] < blockOffs)
2706     {
2707         index++;
2708
2709         if (index == info.compStmtOffsetsCount)
2710         {
2711             return info.compStmtOffsetsCount;
2712         }
2713     }
2714
2715     assert(index < info.compStmtOffsetsCount);
2716
2717     if (info.compStmtOffsets[index] == blockOffs)
2718     {
2719         /* There is an explicit boundary for the start of this basic block.
2720            So we will start with bbCodeOffs. Else we will wait until we
2721            get to the next explicit boundary */
2722
2723         impCurStmtOffsSet(blockOffs);
2724
2725         index++;
2726     }
2727
2728     return index;
2729 }
2730
2731 /*****************************************************************************/
2732
2733 static inline bool impOpcodeIsCallOpcode(OPCODE opcode)
2734 {
2735     switch (opcode)
2736     {
2737         case CEE_CALL:
2738         case CEE_CALLI:
2739         case CEE_CALLVIRT:
2740             return true;
2741
2742         default:
2743             return false;
2744     }
2745 }
2746
2747 /*****************************************************************************/
2748
2749 static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode)
2750 {
2751     switch (opcode)
2752     {
2753         case CEE_CALL:
2754         case CEE_CALLI:
2755         case CEE_CALLVIRT:
2756         case CEE_JMP:
2757         case CEE_NEWOBJ:
2758         case CEE_NEWARR:
2759             return true;
2760
2761         default:
2762             return false;
2763     }
2764 }
2765
2766 /*****************************************************************************/
2767
2768 // One might think it is worth caching these values, but results indicate
2769 // that it isn't.
2770 // In addition, caching them causes SuperPMI to be unable to completely
2771 // encapsulate an individual method context.
2772 CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass()
2773 {
2774     CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
2775     assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr);
2776     return refAnyClass;
2777 }
2778
2779 CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass()
2780 {
2781     CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE);
2782     assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr);
2783     return typeHandleClass;
2784 }
2785
2786 CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle()
2787 {
2788     CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE);
2789     assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr);
2790     return argIteratorClass;
2791 }
2792
2793 CORINFO_CLASS_HANDLE Compiler::impGetStringClass()
2794 {
2795     CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING);
2796     assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr);
2797     return stringClass;
2798 }
2799
2800 CORINFO_CLASS_HANDLE Compiler::impGetObjectClass()
2801 {
2802     CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT);
2803     assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr);
2804     return objectClass;
2805 }
2806
2807 /*****************************************************************************
2808  *  "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we
2809  *  set its type to TYP_BYREF when we create it. We only know whether it can be
2810  *  changed to TYP_I_IMPL at the point where we use it.
2811  */
2812
2813 /* static */
2814 void Compiler::impBashVarAddrsToI(GenTreePtr tree1, GenTreePtr tree2)
2815 {
2816     if (tree1->IsVarAddr())
2817     {
2818         tree1->gtType = TYP_I_IMPL;
2819     }
2820
2821     if (tree2 && tree2->IsVarAddr())
2822     {
2823         tree2->gtType = TYP_I_IMPL;
2824     }
2825 }
2826
2827 /*****************************************************************************
2828  *  TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want
2829  *  to make that an explicit cast in our trees, so any implicit casts that
2830  *  exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are
2831  *  turned into explicit casts here.
2832  *  We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0)
2833  */
2834
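// For example, on a 64-bit target the IL may store an int32 value into a native int
// local without an explicit conversion; this helper turns that implicit widening into
// an explicit GT_CAST to TYP_I_IMPL in the tree.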
2835 GenTreePtr Compiler::impImplicitIorI4Cast(GenTreePtr tree, var_types dstTyp)
2836 {
2837     var_types currType   = genActualType(tree->gtType);
2838     var_types wantedType = genActualType(dstTyp);
2839
2840     if (wantedType != currType)
2841     {
2842         // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
2843         if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp))
2844         {
2845             if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->gtIntCon.gtIconVal == 0)))
2846             {
2847                 tree->gtType = TYP_I_IMPL;
2848             }
2849         }
2850 #ifdef _TARGET_64BIT_
2851         else if (varTypeIsI(wantedType) && (currType == TYP_INT))
2852         {
2853             // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF
2854             tree = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
2855         }
2856         else if ((wantedType == TYP_INT) && varTypeIsI(currType))
2857         {
2858             // Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT
2859             tree = gtNewCastNode(TYP_INT, tree, TYP_INT);
2860         }
2861 #endif // _TARGET_64BIT_
2862     }
2863
2864     return tree;
2865 }
2866
2867 /*****************************************************************************
2868  *  TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases,
2869  *  but we want to make that an explicit cast in our trees, so any implicit casts
2870  *  that exist in the IL are turned into explicit casts here.
2871  */
2872
2873 GenTreePtr Compiler::impImplicitR4orR8Cast(GenTreePtr tree, var_types dstTyp)
2874 {
2875 #ifndef LEGACY_BACKEND
2876     if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType))
2877     {
2878         tree = gtNewCastNode(dstTyp, tree, dstTyp);
2879     }
2880 #endif // !LEGACY_BACKEND
2881
2882     return tree;
2883 }
2884
2885 //------------------------------------------------------------------------
2886 // impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray
2887 //    with a GT_COPYBLK node.
2888 //
2889 // Arguments:
2890 //    sig - The InitializeArray signature.
2891 //
2892 // Return Value:
2893 //    A pointer to the newly created GT_COPYBLK node if the replacement succeeds or
2894 //    nullptr otherwise.
2895 //
2896 // Notes:
2897 //    The function recognizes the following IL pattern:
2898 //      ldc <length> or a list of ldc <lower bound>/<length>
2899 //      newarr or newobj
2900 //      dup
2901 //      ldtoken <field handle>
2902 //      call InitializeArray
2903 //    The lower bounds need not be constant except when the array rank is 1.
2904 //    The function recognizes all kinds of arrays thus enabling a small runtime
2905 //    such as CoreRT to skip providing an implementation for InitializeArray.
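//
//    For example (illustrative C# source that typically produces this pattern):
//      static int[] data = new int[] { 1, 2, 3, 4 };
//    Roslyn usually lowers such an initializer to newarr/dup/ldtoken of a
//    <PrivateImplementationDetails> field followed by a call to InitializeArray.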
2906
2907 GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
2908 {
2909     assert(sig->numArgs == 2);
2910
2911     GenTreePtr fieldTokenNode = impStackTop(0).val;
2912     GenTreePtr arrayLocalNode = impStackTop(1).val;
2913
2914     //
2915     // Verify that the field token is known and valid.  Note that it's also
2916     // possible for the token to come from reflection, in which case we cannot do
2917     // the optimization and must therefore revert to calling the helper.  You can
2918     // see an example of this in bvt\DynIL\initarray2.exe (in Main).
2919     //
2920
2921     // Check to see if the ldtoken helper call is what we see here.
2922     if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->gtCall.gtCallType != CT_HELPER) ||
2923         (fieldTokenNode->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
2924     {
2925         return nullptr;
2926     }
2927
2928     // Strip helper call away
2929     fieldTokenNode = fieldTokenNode->gtCall.gtCallArgs->Current();
2930
2931     if (fieldTokenNode->gtOper == GT_IND)
2932     {
2933         fieldTokenNode = fieldTokenNode->gtOp.gtOp1;
2934     }
2935
2936     // Check for constant
2937     if (fieldTokenNode->gtOper != GT_CNS_INT)
2938     {
2939         return nullptr;
2940     }
2941
2942     CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->gtIntCon.gtCompileTimeHandle;
2943     if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
2944     {
2945         return nullptr;
2946     }
2947
2948     //
2949     // We need to get the number of elements in the array and the size of each element.
2950     // We verify that the newarr statement is exactly what we expect it to be.
2951     // If it's not then we just return NULL and we don't optimize this call
2952     //
2953
2954     //
2955     // It is possible that we don't have any statements in the block yet
2956     //
2957     if (impTreeLast->gtOper != GT_STMT)
2958     {
2959         assert(impTreeLast->gtOper == GT_BEG_STMTS);
2960         return nullptr;
2961     }
2962
2963     //
2964     // We start by looking at the last statement, making sure it's an assignment, and
2965     // that the target of the assignment is the array passed to InitializeArray.
2966     //
2967     GenTreePtr arrayAssignment = impTreeLast->gtStmt.gtStmtExpr;
2968     if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->gtOp.gtOp1->gtOper != GT_LCL_VAR) ||
2969         (arrayLocalNode->gtOper != GT_LCL_VAR) ||
2970         (arrayAssignment->gtOp.gtOp1->gtLclVarCommon.gtLclNum != arrayLocalNode->gtLclVarCommon.gtLclNum))
2971     {
2972         return nullptr;
2973     }
2974
2975     //
2976     // Make sure that the object being assigned is a helper call.
2977     //
2978
2979     GenTreePtr newArrayCall = arrayAssignment->gtOp.gtOp2;
2980     if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->gtCall.gtCallType != CT_HELPER))
2981     {
2982         return nullptr;
2983     }
2984
2985     //
2986     // Verify that it is one of the new array helpers.
2987     //
2988
2989     bool isMDArray = false;
2990
2991     if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) &&
2992         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) &&
2993         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) &&
2994         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8)
2995 #ifdef FEATURE_READYTORUN_COMPILER
2996         && newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)
2997 #endif
2998             )
2999     {
3000 #if COR_JIT_EE_VERSION > 460
3001         if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR_NONVARARG))
3002         {
3003             return nullptr;
3004         }
3005
3006         isMDArray = true;
3007 #endif
3008     }
3009
3010     CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->gtCall.compileTimeHelperArgumentHandle;
3011
3012     //
3013     // Make sure we found a compile time handle to the array
3014     //
3015
3016     if (!arrayClsHnd)
3017     {
3018         return nullptr;
3019     }
3020
3021     unsigned rank = 0;
3022     S_UINT32 numElements;
3023
3024     if (isMDArray)
3025     {
3026         rank = info.compCompHnd->getArrayRank(arrayClsHnd);
3027
3028         if (rank == 0)
3029         {
3030             return nullptr;
3031         }
3032
3033         GenTreeArgList* tokenArg = newArrayCall->gtCall.gtCallArgs;
3034         assert(tokenArg != nullptr);
3035         GenTreeArgList* numArgsArg = tokenArg->Rest();
3036         assert(numArgsArg != nullptr);
3037         GenTreeArgList* argsArg = numArgsArg->Rest();
3038         assert(argsArg != nullptr);
3039
3040         //
3041         // The number of arguments should be a constant between 1 and 64. The rank can't be 0
3042         // so at least one length must be present and the rank can't exceed 32 so there can
3043         // be at most 64 arguments - 32 lengths and 32 lower bounds.
3044         //
3045
3046         if ((!numArgsArg->Current()->IsCnsIntOrI()) || (numArgsArg->Current()->AsIntCon()->IconValue() < 1) ||
3047             (numArgsArg->Current()->AsIntCon()->IconValue() > 64))
3048         {
3049             return nullptr;
3050         }
3051
3052         unsigned numArgs = static_cast<unsigned>(numArgsArg->Current()->AsIntCon()->IconValue());
3053         bool     lowerBoundsSpecified;
3054
3055         if (numArgs == rank * 2)
3056         {
3057             lowerBoundsSpecified = true;
3058         }
3059         else if (numArgs == rank)
3060         {
3061             lowerBoundsSpecified = false;
3062
3063             //
3064             // If the rank is 1 and a lower bound isn't specified then the runtime creates
3065             // a SDArray. Note that even if a lower bound is specified it can be 0 and then
3066             // we get a SDArray as well, see the for loop below.
3067             //
3068
3069             if (rank == 1)
3070             {
3071                 isMDArray = false;
3072             }
3073         }
3074         else
3075         {
3076             return nullptr;
3077         }
3078
3079         //
3080         // The rank is known to be at least 1 so we can start with numElements being 1
3081         // to avoid the need to special case the first dimension.
3082         //
3083
3084         numElements = S_UINT32(1);
3085
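        //
        // Helpers that recognize the tree shape built for the MD array allocation helper
        // call: a comma chain of assignments storing each length (and, optionally, each
        // lower bound) into the lvaNewObjArrayArgs temp.
        //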
3086         struct Match
3087         {
3088             static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3089             {
3090                 return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) &&
3091                        IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3092             }
3093
3094             static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3095             {
3096                 return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) &&
3097                        (tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) &&
3098                        IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3099             }
3100
3101             static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs)
3102             {
3103                 return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) &&
3104                        (tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs);
3105             }
3106
3107             static bool IsComma(GenTree* tree)
3108             {
3109                 return (tree != nullptr) && (tree->OperGet() == GT_COMMA);
3110             }
3111         };
3112
3113         unsigned argIndex = 0;
3114         GenTree* comma;
3115
3116         for (comma = argsArg->Current(); Match::IsComma(comma); comma = comma->gtGetOp2())
3117         {
3118             if (lowerBoundsSpecified)
3119             {
3120                 //
3121                 // In general lower bounds can be ignored because they're not needed to
3122                 // calculate the total number of elements. But for single dimensional arrays
3123                 // we need to know if the lower bound is 0 because in this case the runtime
3124                 // creates a SDArray and this affects the way the array data offset is calculated.
3125                 //
3126
3127                 if (rank == 1)
3128                 {
3129                     GenTree* lowerBoundAssign = comma->gtGetOp1();
3130                     assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs));
3131                     GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2();
3132
3133                     if (lowerBoundNode->IsIntegralConst(0))
3134                     {
3135                         isMDArray = false;
3136                     }
3137                 }
3138
3139                 comma = comma->gtGetOp2();
3140                 argIndex++;
3141             }
3142
3143             GenTree* lengthNodeAssign = comma->gtGetOp1();
3144             assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs));
3145             GenTree* lengthNode = lengthNodeAssign->gtGetOp2();
3146
3147             if (!lengthNode->IsCnsIntOrI())
3148             {
3149                 return nullptr;
3150             }
3151
3152             numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue());
3153             argIndex++;
3154         }
3155
3156         assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs));
3157
3158         if (argIndex != numArgs)
3159         {
3160             return nullptr;
3161         }
3162     }
3163     else
3164     {
3165         //
3166         // Make sure there are exactly two arguments:  the array class and
3167         // the number of elements.
3168         //
3169
3170         GenTreePtr arrayLengthNode;
3171
3172         GenTreeArgList* args = newArrayCall->gtCall.gtCallArgs;
3173 #ifdef FEATURE_READYTORUN_COMPILER
3174         if (newArrayCall->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1))
3175         {
3176             // Array length is 1st argument for readytorun helper
3177             arrayLengthNode = args->Current();
3178         }
3179         else
3180 #endif
3181         {
3182             // Array length is 2nd argument for regular helper
3183             arrayLengthNode = args->Rest()->Current();
3184         }
3185
3186         //
3187         // Make sure that the number of elements looks valid.
3188         //
3189         if (arrayLengthNode->gtOper != GT_CNS_INT)
3190         {
3191             return nullptr;
3192         }
3193
3194         numElements = S_SIZE_T(arrayLengthNode->gtIntCon.gtIconVal);
3195
3196         if (!info.compCompHnd->isSDArray(arrayClsHnd))
3197         {
3198             return nullptr;
3199         }
3200     }
3201
3202     CORINFO_CLASS_HANDLE elemClsHnd;
3203     var_types            elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd));
3204
3205     //
3206     // Note that genTypeSize will return zero for non primitive types, which is exactly
3207     // what we want (size will then be 0, and we will catch this in the conditional below).
3208     // Note that we don't expect this to fail for valid binaries, so we assert in the
3209     // non-verification case (the verification case should not assert but rather correctly
3210     // handle bad binaries).  This assert is not guarding any specific invariant, but rather
3211     // saying that we don't expect this to happen, and if it is hit, we need to investigate
3212     // why.
3213     //
3214
3215     S_UINT32 elemSize(genTypeSize(elementType));
3216     S_UINT32 size = elemSize * S_UINT32(numElements);
3217
3218     if (size.IsOverflow())
3219     {
3220         return nullptr;
3221     }
3222
3223     if ((size.Value() == 0) || (varTypeIsGC(elementType)))
3224     {
3225         assert(verNeedsVerification());
3226         return nullptr;
3227     }
3228
3229     void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value());
3230     if (!initData)
3231     {
3232         return nullptr;
3233     }
3234
3235     //
3236     // At this point we are ready to commit to implementing the InitializeArray
3237     // intrinsic using a struct assignment.  Pop the arguments from the stack and
3238     // return the struct assignment node.
3239     //
3240
3241     impPopStack();
3242     impPopStack();
3243
3244     const unsigned blkSize = size.Value();
3245     GenTreePtr     dst;
3246
3247     if (isMDArray)
3248     {
3249         unsigned dataOffset = eeGetMDArrayDataOffset(elementType, rank);
3250
3251         dst = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
3252     }
3253     else
3254     {
3255         dst = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewIndexRef(elementType, arrayLocalNode, gtNewIconNode(0)));
3256     }
3257     GenTreePtr blk     = gtNewBlockVal(dst, blkSize);
3258     GenTreePtr srcAddr = gtNewIconHandleNode((size_t)initData, GTF_ICON_STATIC_HDL);
3259     GenTreePtr src     = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
3260
3261     return gtNewBlkOpNode(blk,     // dst
3262                           src,     // src
3263                           blkSize, // size
3264                           false,   // volatil
3265                           true);   // copyBlock
3266 }
3267
3268 /*****************************************************************************/
3269 // Returns the GenTree that should be used to do the intrinsic instead of the call.
3270 // Returns NULL if an intrinsic cannot be used
3271
3272 GenTreePtr Compiler::impIntrinsic(GenTreePtr            newobjThis,
3273                                   CORINFO_CLASS_HANDLE  clsHnd,
3274                                   CORINFO_METHOD_HANDLE method,
3275                                   CORINFO_SIG_INFO*     sig,
3276                                   int                   memberRef,
3277                                   bool                  readonlyCall,
3278                                   bool                  tailCall,
3279                                   CorInfoIntrinsics*    pIntrinsicID)
3280 {
3281     bool mustExpand = false;
3282 #if COR_JIT_EE_VERSION > 460
3283     CorInfoIntrinsics intrinsicID = info.compCompHnd->getIntrinsicID(method, &mustExpand);
3284 #else
3285     CorInfoIntrinsics intrinsicID                                      = info.compCompHnd->getIntrinsicID(method);
3286 #endif
3287     *pIntrinsicID = intrinsicID;
3288
3289 #ifndef _TARGET_ARM_
3290     genTreeOps interlockedOperator;
3291 #endif
3292
3293     if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContext)
3294     {
3295         // must be done regardless of DbgCode and MinOpts
3296         return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL);
3297     }
3298 #ifdef _TARGET_64BIT_
3299     if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr)
3300     {
3301         // must be done regardless of DbgCode and MinOpts
3302         return gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL));
3303     }
3304 #else
3305     assert(intrinsicID != CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr);
3306 #endif
3307
3308     GenTreePtr retNode = nullptr;
3309
3310     //
3311     // We disable the inlining of intrinsics for MinOpts.
3312     //
3313     if (!mustExpand && (opts.compDbgCode || opts.MinOpts()))
3314     {
3315         *pIntrinsicID = CORINFO_INTRINSIC_Illegal;
3316         return retNode;
3317     }
3318
3319     // Currently we don't have CORINFO_INTRINSIC_Exp because it does not
3320     // seem to work properly for Infinity values, and we don't do
3321     // CORINFO_INTRINSIC_Pow because it needs a helper which we currently don't have.
3322
3323     var_types callType = JITtype2varType(sig->retType);
3324
3325     /* First do the intrinsics which are always smaller than a call */
3326
3327     switch (intrinsicID)
3328     {
3329         GenTreePtr op1, op2;
3330
3331         case CORINFO_INTRINSIC_Sin:
3332         case CORINFO_INTRINSIC_Sqrt:
3333         case CORINFO_INTRINSIC_Abs:
3334         case CORINFO_INTRINSIC_Cos:
3335         case CORINFO_INTRINSIC_Round:
3336         case CORINFO_INTRINSIC_Cosh:
3337         case CORINFO_INTRINSIC_Sinh:
3338         case CORINFO_INTRINSIC_Tan:
3339         case CORINFO_INTRINSIC_Tanh:
3340         case CORINFO_INTRINSIC_Asin:
3341         case CORINFO_INTRINSIC_Acos:
3342         case CORINFO_INTRINSIC_Atan:
3343         case CORINFO_INTRINSIC_Atan2:
3344         case CORINFO_INTRINSIC_Log10:
3345         case CORINFO_INTRINSIC_Pow:
3346         case CORINFO_INTRINSIC_Exp:
3347         case CORINFO_INTRINSIC_Ceiling:
3348         case CORINFO_INTRINSIC_Floor:
3349
3350             // These are math intrinsics
3351
3352             assert(callType != TYP_STRUCT);
3353
3354             op1 = nullptr;
3355
3356 #if defined(LEGACY_BACKEND)
3357             if (IsTargetIntrinsic(intrinsicID))
3358 #elif !defined(_TARGET_X86_)
3359             // Intrinsics that are not implemented directly by target instructions will
3360             // be re-materialized as users calls in rationalizer. For prefixed tail calls,
3361             // don't do this optimization, because
3362             //  a) For backward compatibility reasons on desktop .NET 4.6 / 4.6.1
3363             //  b) It would be a non-trivial task, or too late, to re-materialize a surviving
3364             //     tail-prefixed GT_INTRINSIC as a tail call in the rationalizer.
3365             if (!IsIntrinsicImplementedByUserCall(intrinsicID) || !tailCall)
3366 #else
3367             // On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation
3368             // of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad
3369             // code generation for certain EH constructs.
3370             if (!IsIntrinsicImplementedByUserCall(intrinsicID))
3371 #endif
3372             {
3373                 switch (sig->numArgs)
3374                 {
3375                     case 1:
3376                         op1 = impPopStack().val;
3377
3378 #if FEATURE_X87_DOUBLES
3379
3380                         // X87 stack doesn't differentiate between float/double
3381                         // so it doesn't need a cast, but everybody else does
3382                         // Just double check it is at least a FP type
3383                         noway_assert(varTypeIsFloating(op1));
3384
3385 #else // FEATURE_X87_DOUBLES
3386
3387                         if (op1->TypeGet() != callType)
3388                         {
3389                             op1 = gtNewCastNode(callType, op1, callType);
3390                         }
3391
3392 #endif // FEATURE_X87_DOUBLES
3393
3394                         op1 = new (this, GT_INTRINSIC)
3395                             GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3396                         break;
3397
3398                     case 2:
3399                         op2 = impPopStack().val;
3400                         op1 = impPopStack().val;
3401
3402 #if FEATURE_X87_DOUBLES
3403
3404                         // X87 stack doesn't differentiate between float/double
3405                         // so it doesn't need a cast, but everybody else does
3406                         // Just double check it is at least a FP type
3407                         noway_assert(varTypeIsFloating(op2));
3408                         noway_assert(varTypeIsFloating(op1));
3409
3410 #else // FEATURE_X87_DOUBLES
3411
3412                         if (op2->TypeGet() != callType)
3413                         {
3414                             op2 = gtNewCastNode(callType, op2, callType);
3415                         }
3416                         if (op1->TypeGet() != callType)
3417                         {
3418                             op1 = gtNewCastNode(callType, op1, callType);
3419                         }
3420
3421 #endif // FEATURE_X87_DOUBLES
3422
3423                         op1 = new (this, GT_INTRINSIC)
3424                             GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicID, method);
3425                         break;
3426
3427                     default:
3428                         NO_WAY("Unsupported number of args for Math Intrinsic");
3429                 }
3430
3431 #ifndef LEGACY_BACKEND
3432                 if (IsIntrinsicImplementedByUserCall(intrinsicID))
3433                 {
3434                     op1->gtFlags |= GTF_CALL;
3435                 }
3436 #endif
3437             }
3438
3439             retNode = op1;
3440             break;
3441
3442 #ifdef _TARGET_XARCH_
3443         // TODO-ARM-CQ: reenable treating Interlocked operation as intrinsic
3444         case CORINFO_INTRINSIC_InterlockedAdd32:
3445             interlockedOperator = GT_LOCKADD;
3446             goto InterlockedBinOpCommon;
3447         case CORINFO_INTRINSIC_InterlockedXAdd32:
3448             interlockedOperator = GT_XADD;
3449             goto InterlockedBinOpCommon;
3450         case CORINFO_INTRINSIC_InterlockedXchg32:
3451             interlockedOperator = GT_XCHG;
3452             goto InterlockedBinOpCommon;
3453
3454 #ifdef _TARGET_AMD64_
3455         case CORINFO_INTRINSIC_InterlockedAdd64:
3456             interlockedOperator = GT_LOCKADD;
3457             goto InterlockedBinOpCommon;
3458         case CORINFO_INTRINSIC_InterlockedXAdd64:
3459             interlockedOperator = GT_XADD;
3460             goto InterlockedBinOpCommon;
3461         case CORINFO_INTRINSIC_InterlockedXchg64:
3462             interlockedOperator = GT_XCHG;
3463             goto InterlockedBinOpCommon;
3464 #endif // _TARGET_AMD64_
3465
3466         InterlockedBinOpCommon:
3467             assert(callType != TYP_STRUCT);
3468             assert(sig->numArgs == 2);
3469
3470             op2 = impPopStack().val;
3471             op1 = impPopStack().val;
3472
3473             // This creates:
3474             //   val
3475             // XAdd
3476             //   addr
3477             //     field (for example)
3478             //
3479             // In the case where the first argument is the address of a local, we might
3480             // want to make this *not* make the var address-taken -- but atomic instructions
3481             // on a local are probably pretty useless anyway, so we probably don't care.
3482
3483             op1 = gtNewOperNode(interlockedOperator, genActualType(callType), op1, op2);
3484             op1->gtFlags |= GTF_GLOB_EFFECT;
3485             retNode = op1;
3486             break;
3487 #endif // _TARGET_XARCH_
3488
3489         case CORINFO_INTRINSIC_MemoryBarrier:
3490
3491             assert(sig->numArgs == 0);
3492
3493             op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
3494             op1->gtFlags |= GTF_GLOB_EFFECT;
3495             retNode = op1;
3496             break;
3497
3498 #ifdef _TARGET_XARCH_
3499         // TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic
3500         case CORINFO_INTRINSIC_InterlockedCmpXchg32:
3501 #ifdef _TARGET_AMD64_
3502         case CORINFO_INTRINSIC_InterlockedCmpXchg64:
3503 #endif
3504         {
3505             assert(callType != TYP_STRUCT);
3506             assert(sig->numArgs == 3);
3507             GenTreePtr op3;
3508
3509             op3 = impPopStack().val; // comparand
3510             op2 = impPopStack().val; // value
3511             op1 = impPopStack().val; // location
3512
3513             GenTreePtr node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3);
3514
3515             node->gtCmpXchg.gtOpLocation->gtFlags |= GTF_DONT_CSE;
3516             retNode = node;
3517             break;
3518         }
3519 #endif
3520
3521         case CORINFO_INTRINSIC_StringLength:
3522             op1 = impPopStack().val;
3523             if (!opts.MinOpts() && !opts.compDbgCode)
3524             {
3525                 GenTreeArrLen* arrLen =
3526                     new (this, GT_ARR_LENGTH) GenTreeArrLen(TYP_INT, op1, offsetof(CORINFO_String, stringLen));
3527                 op1 = arrLen;
3528             }
3529             else
3530             {
3531                 /* Create the expression "*(str_addr + stringLengthOffset)" */
3532                 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
3533                                     gtNewIconNode(offsetof(CORINFO_String, stringLen), TYP_I_IMPL));
3534                 op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
3535             }
3536             retNode = op1;
3537             break;
3538
3539         case CORINFO_INTRINSIC_StringGetChar:
3540             op2 = impPopStack().val;
3541             op1 = impPopStack().val;
3542             op1 = gtNewIndexRef(TYP_CHAR, op1, op2);
3543             op1->gtFlags |= GTF_INX_STRING_LAYOUT;
3544             retNode = op1;
3545             break;
3546
3547         case CORINFO_INTRINSIC_InitializeArray:
3548             retNode = impInitializeArrayIntrinsic(sig);
3549             break;
3550
3551         case CORINFO_INTRINSIC_Array_Address:
3552         case CORINFO_INTRINSIC_Array_Get:
3553         case CORINFO_INTRINSIC_Array_Set:
3554             retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, intrinsicID);
3555             break;
3556
3557         case CORINFO_INTRINSIC_GetTypeFromHandle:
3558             op1 = impStackTop(0).val;
3559             if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3560                 gtIsTypeHandleToRuntimeTypeHelper(op1))
3561             {
3562                 op1 = impPopStack().val;
3563                 // Change call to return RuntimeType directly.
3564                 op1->gtType = TYP_REF;
3565                 retNode     = op1;
3566             }
3567             // Call the regular function.
3568             break;
3569
3570         case CORINFO_INTRINSIC_RTH_GetValueInternal:
3571             op1 = impStackTop(0).val;
3572             if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3573                 gtIsTypeHandleToRuntimeTypeHelper(op1))
3574             {
3575                 // Old tree
3576                 // Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle
3577                 //
3578                 // New tree
3579                 // TreeToGetNativeTypeHandle
3580
3581                 // Remove call to helper and return the native TypeHandle pointer that was the parameter
3582                 // to that helper.
3583
3584                 op1 = impPopStack().val;
3585
3586                 // Get native TypeHandle argument to old helper
3587                 op1 = op1->gtCall.gtCallArgs;
3588                 assert(op1->OperIsList());
3589                 assert(op1->gtOp.gtOp2 == nullptr);
3590                 op1     = op1->gtOp.gtOp1;
3591                 retNode = op1;
3592             }
3593             // Call the regular function.
3594             break;
3595
3596 #ifndef LEGACY_BACKEND
3597         case CORINFO_INTRINSIC_Object_GetType:
3598
3599             op1 = impPopStack().val;
3600             op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3601
3602             // Set the CALL flag to indicate that the operator is implemented by a call.
3603             // Set also the EXCEPTION flag because the native implementation of
3604             // CORINFO_INTRINSIC_Object_GetType intrinsic can throw NullReferenceException.
3605             op1->gtFlags |= (GTF_CALL | GTF_EXCEPT);
3606             retNode = op1;
3607             break;
3608 #endif
3609         // Implement ByReference Ctor.  This wraps the assignment of the ref into a byref-like field
3610         // in a value type.  The canonical example of this is Span<T>. In effect this is just a
3611         // substitution.  The parameter byref will be assigned into the newly allocated object.
3612         case CORINFO_INTRINSIC_ByReference_Ctor:
3613         {
3614             // Remove call to constructor and directly assign the byref passed
3615             // to the call to the first slot of the ByReference struct.
3616             op1                                    = impPopStack().val;
3617             GenTreePtr           thisptr           = newobjThis;
3618             CORINFO_FIELD_HANDLE fldHnd            = info.compCompHnd->getFieldInClass(clsHnd, 0);
3619             GenTreePtr           field             = gtNewFieldRef(TYP_BYREF, fldHnd, thisptr, 0, false);
3620             GenTreePtr           assign            = gtNewAssignNode(field, op1);
3621             GenTreePtr           byReferenceStruct = gtCloneExpr(thisptr->gtGetOp1());
3622             assert(byReferenceStruct != nullptr);
3623             impPushOnStack(byReferenceStruct, typeInfo(TI_STRUCT, clsHnd));
3624             retNode = assign;
3625             break;
3626         }
3627         // Implement ptr value getter for ByReference struct.
3628         case CORINFO_INTRINSIC_ByReference_Value:
3629         {
3630             op1                         = impPopStack().val;
3631             CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
3632             GenTreePtr           field  = gtNewFieldRef(TYP_BYREF, fldHnd, op1, 0, false);
3633             retNode                     = field;
3634             break;
3635         }
3636         default:
3637             /* Unknown intrinsic */
3638             break;
3639     }
3640
3641     if (mustExpand)
3642     {
3643         if (retNode == nullptr)
3644         {
3645             NO_WAY("JIT must expand the intrinsic!");
3646         }
3647     }
3648
3649     return retNode;
3650 }
3651
3652 /*****************************************************************************/
3653
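// Expands the Array Get/Set/Address intrinsics on multi-dimensional arrays into a
// GT_ARR_ELEM node (wrapped in an indirection or assignment as needed).
// Returns nullptr if the expansion cannot or should not be done.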
3654 GenTreePtr Compiler::impArrayAccessIntrinsic(
3655     CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, CorInfoIntrinsics intrinsicID)
3656 {
3657     /* If we are generating SMALL_CODE, we don't want to use intrinsics for
3658        the following, as it generates fatter code.
3659     */
3660
3661     if (compCodeOpt() == SMALL_CODE)
3662     {
3663         return nullptr;
3664     }
3665
3666     /* These intrinsics generate fatter (but faster) code and are only
3667        done if we don't need SMALL_CODE */
3668
3669     unsigned rank = (intrinsicID == CORINFO_INTRINSIC_Array_Set) ? (sig->numArgs - 1) : sig->numArgs;
3670
3671     // The rank 1 case is special because it has to handle two array formats,
3672     // so we simply don't handle that case.
3673     if (rank > GT_ARR_MAX_RANK || rank <= 1)
3674     {
3675         return nullptr;
3676     }
3677
3678     CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr;
3679     var_types            elemType      = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd));
3680
3681     // For the ref case, we will only be able to inline if the types match
3682     // (the verifier checks for this; we don't care about the nonverified case) and the
3683     // type is final (so we don't need to do the cast).
3684     if ((intrinsicID != CORINFO_INTRINSIC_Array_Get) && !readonlyCall && varTypeIsGC(elemType))
3685     {
3686         // Get the call site signature
3687         CORINFO_SIG_INFO LocalSig;
3688         eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig);
3689         assert(LocalSig.hasThis());
3690
3691         CORINFO_CLASS_HANDLE actualElemClsHnd;
3692
3693         if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3694         {
3695             // Fetch the last argument, the one that indicates the type we are setting.
3696             CORINFO_ARG_LIST_HANDLE argType = LocalSig.args;
3697             for (unsigned r = 0; r < rank; r++)
3698             {
3699                 argType = info.compCompHnd->getArgNext(argType);
3700             }
3701
3702             typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType);
3703             actualElemClsHnd = argInfo.GetClassHandle();
3704         }
3705         else
3706         {
3707             assert(intrinsicID == CORINFO_INTRINSIC_Array_Address);
3708
3709             // Fetch the return type
3710             typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass);
3711             assert(retInfo.IsByRef());
3712             actualElemClsHnd = retInfo.GetClassHandle();
3713         }
3714
3715         // if it's not final, we can't do the optimization
3716         if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL))
3717         {
3718             return nullptr;
3719         }
3720     }
3721
3722     unsigned arrayElemSize;
3723     if (elemType == TYP_STRUCT)
3724     {
3725         assert(arrElemClsHnd);
3726
3727         arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd);
3728     }
3729     else
3730     {
3731         arrayElemSize = genTypeSize(elemType);
3732     }
3733
3734     if ((unsigned char)arrayElemSize != arrayElemSize)
3735     {
3736         // arrayElemSize would be truncated as an unsigned char.
3737         // This means the array element is too large. Don't do the optimization.
3738         return nullptr;
3739     }
3740
3741     GenTreePtr val = nullptr;
3742
3743     if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3744     {
3745         // Assignment of a struct is more work, and there are more gets than sets.
3746         if (elemType == TYP_STRUCT)
3747         {
3748             return nullptr;
3749         }
3750
3751         val = impPopStack().val;
3752         assert(genActualType(elemType) == genActualType(val->gtType) ||
3753                (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) ||
3754                (elemType == TYP_INT && val->gtType == TYP_BYREF) ||
3755                (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT));
3756     }
3757
3758     noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK);
3759
3760     GenTreePtr inds[GT_ARR_MAX_RANK];
3761     for (unsigned k = rank; k > 0; k--)
3762     {
3763         inds[k - 1] = impPopStack().val;
3764     }
3765
3766     GenTreePtr arr = impPopStack().val;
3767     assert(arr->gtType == TYP_REF);
3768
3769     GenTreePtr arrElem =
3770         new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank),
3771                                                static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]);
3772
3773     if (intrinsicID != CORINFO_INTRINSIC_Array_Address)
3774     {
3775         arrElem = gtNewOperNode(GT_IND, elemType, arrElem);
3776     }
3777
3778     if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3779     {
3780         assert(val != nullptr);
3781         return gtNewAssignNode(arrElem, val);
3782     }
3783     else
3784     {
3785         return arrElem;
3786     }
3787 }
3788
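// Merges the current verification state into the entry state recorded for 'block'.
// Returns FALSE if the states are incompatible; '*changed' is set if the block's
// recorded entry state was updated.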
3789 BOOL Compiler::verMergeEntryStates(BasicBlock* block, bool* changed)
3790 {
3791     unsigned i;
3792
3793     // do some basic checks first
3794     if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth)
3795     {
3796         return FALSE;
3797     }
3798
3799     if (verCurrentState.esStackDepth > 0)
3800     {
3801         // merge stack types
3802         StackEntry* parentStack = block->bbStackOnEntry();
3803         StackEntry* childStack  = verCurrentState.esStack;
3804
3805         for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++)
3806         {
3807             if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == FALSE)
3808             {
3809                 return FALSE;
3810             }
3811         }
3812     }
3813
3814     // merge initialization status of this ptr
3815
3816     if (verTrackObjCtorInitState)
3817     {
3818         // If we're tracking the CtorInitState, then it must not be unknown in the current state.
3819         assert(verCurrentState.thisInitialized != TIS_Bottom);
3820
3821         // If the successor block's thisInit state is unknown, copy it from the current state.
3822         if (block->bbThisOnEntry() == TIS_Bottom)
3823         {
3824             *changed = true;
3825             verSetThisInit(block, verCurrentState.thisInitialized);
3826         }
3827         else if (verCurrentState.thisInitialized != block->bbThisOnEntry())
3828         {
3829             if (block->bbThisOnEntry() != TIS_Top)
3830             {
3831                 *changed = true;
3832                 verSetThisInit(block, TIS_Top);
3833
3834                 if (block->bbFlags & BBF_FAILED_VERIFICATION)
3835                 {
3836                     // The block is bad. Control can flow through the block to any handler that catches the
3837                     // verification exception, but the importer ignores bad blocks and therefore won't model
3838                     // this flow in the normal way. To complete the merge into the bad block, the new state
3839                     // needs to be manually pushed to the handlers that may be reached after the verification
3840                     // exception occurs.
3841                     //
3842                     // Usually, the new state was already propagated to the relevant handlers while processing
3843                     // the predecessors of the bad block. The exception is when the bad block is at the start
3844                     // of a try region, meaning it is protected by additional handlers that do not protect its
3845                     // predecessors.
3846                     //
3847                     if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0))
3848                     {
3849                         // Push TIS_Top to the handlers that protect the bad block. Note that this can cause
3850                         // recursive calls back into this code path (if successors of the current bad block are
3851                         // also bad blocks).
3852                         //
3853                         ThisInitState origTIS           = verCurrentState.thisInitialized;
3854                         verCurrentState.thisInitialized = TIS_Top;
3855                         impVerifyEHBlock(block, true);
3856                         verCurrentState.thisInitialized = origTIS;
3857                     }
3858                 }
3859             }
3860         }
3861     }
3862     else
3863     {
3864         assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom);
3865     }
3866
3867     return TRUE;
3868 }
3869
3870 /*****************************************************************************
3871  * 'logMsg' is true if a log message needs to be logged; false if the caller has
3872  *   already logged it (presumably in a more detailed fashion than done here)
3873  * 'bVerificationException' is true for a verification exception, false for a
3874  *   "call unauthorized by host" exception.
3875  */
3876
3877 void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg))
3878 {
3879     block->bbJumpKind = BBJ_THROW;
3880     block->bbFlags |= BBF_FAILED_VERIFICATION;
3881
3882     impCurStmtOffsSet(block->bbCodeOffs);
3883
3884 #ifdef DEBUG
3885     // we need this since BeginTreeList asserts otherwise
3886     impTreeList = impTreeLast = nullptr;
3887     block->bbFlags &= ~BBF_IMPORTED;
3888
3889     if (logMsg)
3890     {
3891         JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName,
3892                 block->bbCodeOffs, block->bbCodeOffsEnd));
3893         if (verbose)
3894         {
3895             printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs);
3896         }
3897     }
3898
3899     if (JitConfig.DebugBreakOnVerificationFailure())
3900     {
3901         DebugBreak();
3902     }
3903 #endif
3904
3905     impBeginTreeList();
3906
3907     // if the stack is non-empty evaluate all the side-effects
3908     if (verCurrentState.esStackDepth > 0)
3909     {
3910         impEvalSideEffects();
3911     }
3912     assert(verCurrentState.esStackDepth == 0);
3913
3914     GenTreePtr op1 = gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, GTF_EXCEPT,
3915                                          gtNewArgList(gtNewIconNode(block->bbCodeOffs)));
3916     // verCurrentState.esStackDepth = 0;
3917     impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
3918
3919     // The inliner is not able to handle methods that require a throw block, so
3920     // make sure this method never gets inlined.
3921     info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE);
3922 }
3923
3924 /*****************************************************************************
3925  *
3926  */
3927 void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg))
3928
3929 {
3930     // In AMD64, for historical reasons involving design limitations of JIT64, the VM has a
3931     // slightly different mechanism in which it calls the JIT to perform IL verification:
3932     // in the case of transparent methods the VM calls for a predicate IsVerifiable()
3933     // that consists of calling the JIT with the IMPORT_ONLY flag and with the IL verify flag on.
3934     // If the JIT determines the method is not verifiable, it should raise the exception to the VM and let
3935     // it bubble up until reported by the runtime.  Currently in RyuJIT, this method doesn't bubble
3936     // up the exception; instead it embeds a throw inside the offending basic block and lets it
3937     // fail at run time when the jitted method executes.
3938     //
3939     // For AMD64 we don't want this behavior when the JIT has been called only for verification (i.e.
3940     // with the IMPORT_ONLY and IL Verification flag set) because this won't actually generate code,
3941     // just try to find out whether to fail this method before even actually jitting it.  So, in case
3942     // we detect these two conditions, instead of generating a throw statement inside the offending
3943     // basic block, we immediately fail to JIT and notify the VM to make the IsVerifiable() predicate
3944     // to return false and make RyuJIT behave the same way JIT64 does.
3945     //
3946     // The rationale behind this workaround is to avoid modifying the VM and maintain compatibility between JIT64 and
3947     // RyuJIT for the time being until we completely replace JIT64.
3948     // TODO-ARM64-Cleanup:  We probably want to actually modify the VM in the future to avoid the unnecessary two passes.
3949
3950     // In AMD64 we must make sure we're behaving the same way as JIT64, meaning we should only raise the verification
3951     // exception if we are only importing and verifying.  The method verNeedsVerification() can also modify the
3952     // tiVerificationNeeded flag in the case it determines it can 'skip verification' during importation and defer it
3953     // to a runtime check. That's why we must assert one or the other (since the flag tiVerificationNeeded can
3954     // be turned off during importation).
3955     CLANG_FORMAT_COMMENT_ANCHOR;
3956
3957 #ifdef _TARGET_64BIT_
3958
3959 #ifdef DEBUG
3960     bool canSkipVerificationResult =
3961         info.compCompHnd->canSkipMethodVerification(info.compMethodHnd) != CORINFO_VERIFICATION_CANNOT_SKIP;
3962     assert(tiVerificationNeeded || canSkipVerificationResult);
3963 #endif // DEBUG
3964
3965     // Add the non verifiable flag to the compiler
3966     if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
3967     {
3968         tiIsVerifiableCode = FALSE;
3969     }
3970 #endif //_TARGET_64BIT_
3971     verResetCurrentState(block, &verCurrentState);
3972     verConvertBBToThrowVerificationException(block DEBUGARG(logMsg));
3973
3974 #ifdef DEBUG
3975     impNoteLastILoffs(); // Remember at which BC offset the tree was finished
3976 #endif                   // DEBUG
3977 }
3978
3979 /******************************************************************************/
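// Builds a typeInfo from a CorInfoType and an (optional) class handle.
// Returns the default-constructed "error" typeInfo if the combination is inconsistent.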
3980 typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd)
3981 {
3982     assert(ciType < CORINFO_TYPE_COUNT);
3983
3984     typeInfo tiResult;
3985     switch (ciType)
3986     {
3987         case CORINFO_TYPE_STRING:
3988         case CORINFO_TYPE_CLASS:
3989             tiResult = verMakeTypeInfo(clsHnd);
3990             if (!tiResult.IsType(TI_REF))
3991             { // type must be consistent with element type
3992                 return typeInfo();
3993             }
3994             break;
3995
3996 #ifdef _TARGET_64BIT_
3997         case CORINFO_TYPE_NATIVEINT:
3998         case CORINFO_TYPE_NATIVEUINT:
3999             if (clsHnd)
4000             {
4001                 // If we have more precise information, use it
4002                 return verMakeTypeInfo(clsHnd);
4003             }
4004             else
4005             {
4006                 return typeInfo::nativeInt();
4007             }
4008             break;
4009 #endif // _TARGET_64BIT_
4010
4011         case CORINFO_TYPE_VALUECLASS:
4012         case CORINFO_TYPE_REFANY:
4013             tiResult = verMakeTypeInfo(clsHnd);
4014             // type must be consistent with element type;
4015             if (!tiResult.IsValueClass())
4016             {
4017                 return typeInfo();
4018             }
4019             break;
4020         case CORINFO_TYPE_VAR:
4021             return verMakeTypeInfo(clsHnd);
4022
4023         case CORINFO_TYPE_PTR: // for now, pointers are treated as an error
4024         case CORINFO_TYPE_VOID:
4025             return typeInfo();
4026             break;
4027
4028         case CORINFO_TYPE_BYREF:
4029         {
4030             CORINFO_CLASS_HANDLE childClassHandle;
4031             CorInfoType          childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle);
4032             return ByRef(verMakeTypeInfo(childType, childClassHandle));
4033         }
4034         break;
4035
4036         default:
4037             if (clsHnd)
4038             { // If we have more precise information, use it
4039                 return typeInfo(TI_STRUCT, clsHnd);
4040             }
4041             else
4042             {
4043                 return typeInfo(JITtype2tiType(ciType));
4044             }
4045     }
4046     return tiResult;
4047 }
4048
4049 /******************************************************************************/
4050
4051 typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */)
4052 {
4053     if (clsHnd == nullptr)
4054     {
4055         return typeInfo();
4056     }
4057
4058     // Byrefs should only occur in method and local signatures, which are accessed
4059     // using ICorClassInfo and ICorClassInfo.getChildType.
4060     // So findClass() and getClassAttribs() should not be called for byrefs
4061
4062     if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF)
4063     {
4064         assert(!"Did findClass() return a Byref?");
4065         return typeInfo();
4066     }
4067
4068     unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd);
4069
4070     if (attribs & CORINFO_FLG_VALUECLASS)
4071     {
4072         CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd);
4073
4074         // Meta-data validation should ensure that CORINFO_TYPE_BYREF does
4075         // not occur here, so we may want to change this to an assert instead.
4076         if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR)
4077         {
4078             return typeInfo();
4079         }
4080
4081 #ifdef _TARGET_64BIT_
4082         if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT)
4083         {
4084             return typeInfo::nativeInt();
4085         }
4086 #endif // _TARGET_64BIT_
4087
4088         if (t != CORINFO_TYPE_UNDEF)
4089         {
4090             return (typeInfo(JITtype2tiType(t)));
4091         }
4092         else if (bashStructToRef)
4093         {
4094             return (typeInfo(TI_REF, clsHnd));
4095         }
4096         else
4097         {
4098             return (typeInfo(TI_STRUCT, clsHnd));
4099         }
4100     }
4101     else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE)
4102     {
4103         // See comment in _typeInfo.h for why we do it this way.
4104         return (typeInfo(TI_REF, clsHnd, true));
4105     }
4106     else
4107     {
4108         return (typeInfo(TI_REF, clsHnd));
4109     }
4110 }
4111
4112 /******************************************************************************/
4113 BOOL Compiler::verIsSDArray(typeInfo ti)
4114 {
4115     if (ti.IsNullObjRef())
4116     { // nulls are SD arrays
4117         return TRUE;
4118     }
4119
4120     if (!ti.IsType(TI_REF))
4121     {
4122         return FALSE;
4123     }
4124
4125     if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef()))
4126     {
4127         return FALSE;
4128     }
4129     return TRUE;
4130 }
4131
4132 /******************************************************************************/
4133 /* Given 'arrayObjectType' which is an array type, fetch the element type. */
4134 /* Returns an error type if anything goes wrong */
4135
4136 typeInfo Compiler::verGetArrayElemType(typeInfo arrayObjectType)
4137 {
4138     assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case
4139
4140     if (!verIsSDArray(arrayObjectType))
4141     {
4142         return typeInfo();
4143     }
4144
4145     CORINFO_CLASS_HANDLE childClassHandle = nullptr;
4146     CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle);
4147
4148     return verMakeTypeInfo(ciType, childClassHandle);
4149 }
4150
4151 /*****************************************************************************
4152  */
4153 typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args)
4154 {
4155     CORINFO_CLASS_HANDLE classHandle;
4156     CorInfoType          ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle));
4157
4158     var_types type = JITtype2varType(ciType);
4159     if (varTypeIsGC(type))
4160     {
4161         // For efficiency, getArgType only returns something in classHandle for
4162         // value types.  For other types that have additional type info, you
4163         // have to call back explicitly
4164         classHandle = info.compCompHnd->getArgClass(sig, args);
4165         if (!classHandle)
4166         {
4167             NO_WAY("Could not figure out Class specified in argument or local signature");
4168         }
4169     }
4170
4171     return verMakeTypeInfo(ciType, classHandle);
4172 }
4173
4174 /*****************************************************************************/
4175
4176 // This does the expensive check to figure out whether the method
4177 // needs to be verified. It is called only when we fail verification,
4178 // just before throwing the verification exception.
4179
4180 BOOL Compiler::verNeedsVerification()
4181 {
4182     // If we have previously determined that verification is NOT needed
4183     // (for example in Compiler::compCompile), that means verification is really not needed.
4184     // Return the same decision we made before.
4185     // (Note: This literally means that tiVerificationNeeded can never go from 0 to 1.)
4186
4187     if (!tiVerificationNeeded)
4188     {
4189         return tiVerificationNeeded;
4190     }
4191
4192     assert(tiVerificationNeeded);
4193
4194     // Ok, we haven't concluded that verification is NOT needed. Consult the EE now to
4195     // obtain the answer.
4196     CorInfoCanSkipVerificationResult canSkipVerificationResult =
4197         info.compCompHnd->canSkipMethodVerification(info.compMethodHnd);
4198
4199     // canSkipVerification will return one of the following three values:
4200     //    CORINFO_VERIFICATION_CANNOT_SKIP = 0,       // Cannot skip verification during jit time.
4201     //    CORINFO_VERIFICATION_CAN_SKIP = 1,          // Can skip verification during jit time.
4202     //    CORINFO_VERIFICATION_RUNTIME_CHECK = 2,     // Skip verification during jit time,
4203     //     but need to insert a callout to the VM to ask during runtime
4204     //     whether to skip verification or not.
4205
4206     // Set tiRuntimeCalloutNeeded if canSkipVerification() instructs us to insert a callout for runtime check
4207     if (canSkipVerificationResult == CORINFO_VERIFICATION_RUNTIME_CHECK)
4208     {
4209         tiRuntimeCalloutNeeded = true;
4210     }
4211
4212     if (canSkipVerificationResult == CORINFO_VERIFICATION_DONT_JIT)
4213     {
4214         // Dev10 706080 - Testers don't like the assert, so just silence it
4215         // by not using the macros that invoke debugAssert.
4216         badCode();
4217     }
4218
4219     // When tiVerificationNeeded is true, JIT will do the verification during JIT time.
4220     // The following line means we will NOT do jit time verification if canSkipVerification
4221     // returns CORINFO_VERIFICATION_CAN_SKIP or CORINFO_VERIFICATION_RUNTIME_CHECK.
4222     tiVerificationNeeded = (canSkipVerificationResult == CORINFO_VERIFICATION_CANNOT_SKIP);
4223     return tiVerificationNeeded;
4224 }
4225
4226 BOOL Compiler::verIsByRefLike(const typeInfo& ti)
4227 {
4228     if (ti.IsByRef())
4229     {
4230         return TRUE;
4231     }
4232     if (!ti.IsType(TI_STRUCT))
4233     {
4234         return FALSE;
4235     }
4236     return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR;
4237 }
4238
4239 BOOL Compiler::verIsSafeToReturnByRef(const typeInfo& ti)
4240 {
4241     if (ti.IsPermanentHomeByRef())
4242     {
4243         return TRUE;
4244     }
4245     else
4246     {
4247         return FALSE;
4248     }
4249 }
4250
4251 BOOL Compiler::verIsBoxable(const typeInfo& ti)
4252 {
4253     return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables
4254             || ti.IsUnboxedGenericTypeVar() ||
4255             (ti.IsType(TI_STRUCT) &&
4256              // exclude byreflike structs
4257              !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR)));
4258 }
4259
4260 // Is it a boxed value type?
4261 bool Compiler::verIsBoxedValueType(typeInfo ti)
4262 {
4263     if (ti.GetType() == TI_REF)
4264     {
4265         CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef();
4266         return !!eeIsValueClass(clsHnd);
4267     }
4268     else
4269     {
4270         return false;
4271     }
4272 }
4273
4274 /*****************************************************************************
4275  *
4276  *  Check if a TailCall is legal.
4277  */
4278
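// For example (illustrative): a "tail. call" to a method taking a byref parameter, such as
// "M(ref int x)", is rejected here ("tailcall on byrefs"), because the byref could point into
// the caller's frame, which no longer exists once the tail call replaces that frame.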
4279 bool Compiler::verCheckTailCallConstraint(
4280     OPCODE                  opcode,
4281     CORINFO_RESOLVED_TOKEN* pResolvedToken,
4282     CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter?
4283     bool                    speculative                // If true, won't throw if verification fails. Instead it will
4284                                                        // return false to the caller.
4285                                                        // If false, it will throw.
4286     )
4287 {
4288     DWORD            mflags;
4289     CORINFO_SIG_INFO sig;
4290     unsigned int     popCount = 0; // we can't pop the stack since impImportCall needs it, so
4291                                    // this counter is used to keep track of how many items have been
4292                                    // virtually popped
4293
4294     CORINFO_METHOD_HANDLE methodHnd       = nullptr;
4295     CORINFO_CLASS_HANDLE  methodClassHnd  = nullptr;
4296     unsigned              methodClassFlgs = 0;
4297
4298     assert(impOpcodeIsCallOpcode(opcode));
4299
4300     if (compIsForInlining())
4301     {
4302         return false;
4303     }
4304
4305     // for calli, we have no method handle; get the call site signature and infer the flags
4306     if (opcode == CEE_CALLI)
4307     {
4308         /* Get the call sig */
4309         eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4310
4311         // We don't know the target method, so we have to infer the flags, or
4312         // assume the worst-case.
4313         mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
4314     }
4315     else
4316     {
4317         methodHnd = pResolvedToken->hMethod;
4318
4319         mflags = info.compCompHnd->getMethodAttribs(methodHnd);
4320
4321         // When verifying generic code we pair the method handle with its
4322         // owning class to get the exact method signature.
4323         methodClassHnd = pResolvedToken->hClass;
4324         assert(methodClassHnd);
4325
4326         eeGetMethodSig(methodHnd, &sig, methodClassHnd);
4327
4328         // opcode specific check
4329         methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd);
4330     }
4331
4332     // We must have got the methodClassHnd if opcode is not CEE_CALLI
4333     assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI);
4334
4335     if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4336     {
4337         eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4338     }
4339
4340     // check compatibility of the arguments
4341     unsigned int argCount;
4342     argCount = sig.numArgs;
4343     CORINFO_ARG_LIST_HANDLE args;
4344     args = sig.args;
4345     while (argCount--)
4346     {
4347         typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack();
4348
4349         // check that the argument is not a byref for tailcalls
4350         VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative);
4351
4352         // For unsafe code, we might have parameters containing pointer to the stack location.
4353         // Disallow the tailcall for this kind.
4354         CORINFO_CLASS_HANDLE classHandle;
4355         CorInfoType          ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle));
4356         VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative);
4357
4358         args = info.compCompHnd->getArgNext(args);
4359     }
4360
4361     // update popCount
4362     popCount += sig.numArgs;
4363
4364     // check for 'this' which is on non-static methods, not called via NEWOBJ
4365     if (!(mflags & CORINFO_FLG_STATIC))
4366     {
4367         // Always update the popCount.
4368         // This is crucial for the stack calculation to be correct.
4369         typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4370         popCount++;
4371
4372         if (opcode == CEE_CALLI)
4373         {
4374             // For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object
4375             // on the stack.
4376             if (tiThis.IsValueClass())
4377             {
4378                 tiThis.MakeByRef();
4379             }
4380             VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative);
4381         }
4382         else
4383         {
4384             // Check type compatibility of the this argument
4385             typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd);
4386             if (tiDeclaredThis.IsValueClass())
4387             {
4388                 tiDeclaredThis.MakeByRef();
4389             }
4390
4391             VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative);
4392         }
4393     }
4394
4395     // Tail calls on constrained calls should be illegal too:
4396     // when instantiated at a value type, a constrained call may pass the address of a stack allocated value
4397     VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative);
4398
4399     // Get the exact view of the signature for an array method
4400     if (sig.retType != CORINFO_TYPE_VOID)
4401     {
4402         if (methodClassFlgs & CORINFO_FLG_ARRAY)
4403         {
4404             assert(opcode != CEE_CALLI);
4405             eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4406         }
4407     }
4408
4409     typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass);
4410     typeInfo tiCallerRetType =
4411         verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
4412
4413     // void return type gets morphed into the error type, so we have to treat it specially here
4414     if (sig.retType == CORINFO_TYPE_VOID)
4415     {
4416         VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch",
4417                                   speculative);
4418     }
4419     else
4420     {
4421         VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType),
4422                                                    NormaliseForStack(tiCallerRetType), true),
4423                                   "tailcall return mismatch", speculative);
4424     }
4425
4426     // for tailcall, stack must be empty
4427     VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative);
4428
4429     return true; // Yes, tailcall is legal
4430 }
4431
4432 /*****************************************************************************
4433  *
4434  *  Checks the IL verification rules for the call
4435  */
4436
4437 void Compiler::verVerifyCall(OPCODE                  opcode,
4438                              CORINFO_RESOLVED_TOKEN* pResolvedToken,
4439                              CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
4440                              bool                    tailCall,
4441                              bool                    readonlyCall,
4442                              const BYTE*             delegateCreateStart,
4443                              const BYTE*             codeAddr,
4444                              CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName))
4445 {
4446     DWORD             mflags;
4447     CORINFO_SIG_INFO* sig      = nullptr;
4448     unsigned int      popCount = 0; // we can't pop the stack since impImportCall needs it, so
4449                                     // this counter is used to keep track of how many items have been
4450                                     // virtually popped
4451
4452     // calli is never verifiable, so reject it outright
4453     if (opcode == CEE_CALLI)
4454     {
4455         Verify(false, "Calli not verifiable");
4456         return;
4457     }
4458
4459     //<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item.
4460     mflags = callInfo->verMethodFlags;
4461
4462     sig = &callInfo->verSig;
4463
4464     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4465     {
4466         eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
4467     }
4468
4469     // opcode specific check
4470     unsigned methodClassFlgs = callInfo->classFlags;
4471     switch (opcode)
4472     {
4473         case CEE_CALLVIRT:
4474             // cannot do callvirt on valuetypes
4475             VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class");
4476             VerifyOrReturn(sig->hasThis(), "CallVirt on static method");
4477             break;
4478
4479         case CEE_NEWOBJ:
4480         {
4481             assert(!tailCall); // Importer should not allow this
4482             VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC),
4483                            "newobj must be on instance");
4484
4485             if (methodClassFlgs & CORINFO_FLG_DELEGATE)
4486             {
4487                 VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor");
4488                 typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
4489                 typeInfo tiDeclaredFtn =
4490                     verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack();
4491                 VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type");
4492
4493                 assert(popCount == 0);
4494                 typeInfo tiActualObj = impStackTop(1).seTypeInfo;
4495                 typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
4496
4497                 VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg");
4498                 VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch");
4499                 VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF),
4500                                "delegate object type mismatch");
4501
4502                 CORINFO_CLASS_HANDLE objTypeHandle =
4503                     tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef();
4504
4505                 // the method signature must be compatible with the delegate's invoke method
4506
4507                 // check that for virtual functions, the type of the object used to get the
4508                 // ftn ptr is the same as the type of the object passed to the delegate ctor.
4509                 // since this is a bit of work to determine in general, we pattern match stylized
4510                 // code sequences
4511
4512                 // the delegate creation code check, which used to be done later, is now done here
4513                 // so we can read delegateMethodRef directly from
4514                 // the preceding LDFTN or CEE_LDVIRTFTN instruction sequence;
4515                 // we then use it in our call to isCompatibleDelegate().
4516
4517                 mdMemberRef delegateMethodRef = mdMemberRefNil;
4518                 VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef),
4519                                "must create delegates with certain IL");
4520
4521                 CORINFO_RESOLVED_TOKEN delegateResolvedToken;
4522                 delegateResolvedToken.tokenContext = impTokenLookupContextHandle;
4523                 delegateResolvedToken.tokenScope   = info.compScopeHnd;
4524                 delegateResolvedToken.token        = delegateMethodRef;
4525                 delegateResolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
4526                 info.compCompHnd->resolveToken(&delegateResolvedToken);
4527
4528                 CORINFO_CALL_INFO delegateCallInfo;
4529                 eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */,
4530                               addVerifyFlag(CORINFO_CALLINFO_SECURITYCHECKS), &delegateCallInfo);
4531
4532                 BOOL isOpenDelegate = FALSE;
4533                 VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass,
4534                                                                       tiActualFtn.GetMethod(), pResolvedToken->hClass,
4535                                                                       &isOpenDelegate),
4536                                "function incompatible with delegate");
4537
4538                 // check the constraints on the target method
4539                 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass),
4540                                "delegate target has unsatisfied class constraints");
4541                 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass,
4542                                                                             tiActualFtn.GetMethod()),
4543                                "delegate target has unsatisfied method constraints");
4544
4545                 // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch)
4546                 // for additional verification rules for delegates
4547                 CORINFO_METHOD_HANDLE actualMethodHandle  = tiActualFtn.GetMethod();
4548                 DWORD                 actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle);
4549                 if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
4550                 {
4551
4552                     if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0)
4553 #ifdef DEBUG
4554                         && StrictCheckForNonVirtualCallToVirtualMethod()
4555 #endif
4556                             )
4557                     {
4558                         if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
4559                         {
4560                             VerifyOrReturn(tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly() ||
4561                                                verIsBoxedValueType(tiActualObj),
4562                                            "The 'this' parameter to the call must be either the calling method's "
4563                                            "'this' parameter or "
4564                                            "a boxed value type.");
4565                         }
4566                     }
4567                 }
4568
4569                 if (actualMethodAttribs & CORINFO_FLG_PROTECTED)
4570                 {
4571                     BOOL targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC;
4572
4573                     Verify(targetIsStatic || !isOpenDelegate,
4574                            "Unverifiable creation of an open instance delegate for a protected member.");
4575
4576                     CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic)
4577                                                                 ? info.compClassHnd
4578                                                                 : tiActualObj.GetClassHandleForObjRef();
4579
4580                     // In the case of protected methods, it is a requirement that the 'this'
4581                     // pointer be a subclass of the current context.  Perform this check.
4582                     Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
4583                            "Accessing protected method through wrong type.");
4584                 }
4585                 goto DONE_ARGS;
4586             }
4587         }
4588         // fall thru to default checks
4589         default:
4590             VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract");
4591     }
4592     VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)),
4593                    "can only newobj a delegate constructor");
4594
4595     // check compatibility of the arguments
4596     unsigned int argCount;
4597     argCount = sig->numArgs;
4598     CORINFO_ARG_LIST_HANDLE args;
4599     args = sig->args;
4600     while (argCount--)
4601     {
4602         typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo;
4603
4604         typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack();
4605         VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch");
4606
4607         args = info.compCompHnd->getArgNext(args);
4608     }
4609
4610 DONE_ARGS:
4611
4612     // update popCount
4613     popCount += sig->numArgs;
4614
4615     // check for 'this' which is on non-static methods, not called via NEWOBJ
4616     CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
4617     if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ))
4618     {
4619         typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4620         popCount++;
4621
4622         // If it is null, we assume we can access it (since it will AV shortly)
4623         // If it is anything but a reference class, there is no hierarchy, so
4624         // again, we don't need the precise instance class to compute 'protected' access
4625         if (tiThis.IsType(TI_REF))
4626         {
4627             instanceClassHnd = tiThis.GetClassHandleForObjRef();
4628         }
4629
4630         // Check type compatibility of the this argument
4631         typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass);
4632         if (tiDeclaredThis.IsValueClass())
4633         {
4634             tiDeclaredThis.MakeByRef();
4635         }
4636
4637         // If this is a call to the base class .ctor, set thisPtr Init for
4638         // this block.
4639         if (mflags & CORINFO_FLG_CONSTRUCTOR)
4640         {
4641             if (verTrackObjCtorInitState && tiThis.IsThisPtr() &&
4642                 verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass))
4643             {
4644                 assert(verCurrentState.thisInitialized !=
4645                        TIS_Bottom); // This should never be the case just from the logic of the verifier.
4646                 VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit,
4647                                "Call to base class constructor when 'this' is possibly initialized");
4648                 // Otherwise, 'this' is now initialized.
4649                 verCurrentState.thisInitialized = TIS_Init;
4650                 tiThis.SetInitialisedObjRef();
4651             }
4652             else
4653             {
4654                 // We allow direct calls to value type constructors
4655                 // NB: we have to check that the contents of tiThis is a value type, otherwise we could use a
4656                 // constrained callvirt to illegally re-enter a .ctor on a value of reference type.
4657                 VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(),
4658                                "Bad call to a constructor");
4659             }
4660         }
4661
4662         if (pConstrainedResolvedToken != nullptr)
4663         {
4664             VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call");
4665
4666             typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass);
4667
4668             // We just dereference this and test for equality
4669             tiThis.DereferenceByRef();
4670             VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint),
4671                            "this type mismatch with constrained type operand");
4672
4673             // Now pretend the this type is the boxed constrained type, for the sake of subsequent checks
4674             tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass);
4675         }
4676
4677         // To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too
4678         if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef())
4679         {
4680             tiDeclaredThis.SetIsReadonlyByRef();
4681         }
4682
4683         VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch");
4684
4685         if (tiThis.IsByRef())
4686         {
4687             // Find the actual type where the method exists (as opposed to what is declared
4688             // in the metadata). This is to prevent passing a byref as the "this" argument
4689             // while calling methods like System.ValueType.GetHashCode() which expect boxed objects.
4690
4691             CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod);
4692             VerifyOrReturn(eeIsValueClass(actualClassHnd),
4693                            "Call to base type of valuetype (which is never a valuetype)");
4694         }
4695
4696         // Rules for non-virtual call to a non-final virtual method:
4697
4698         // Define:
4699         // The "this" pointer is considered to be "possibly written" if
4700         //   1. Its address has been taken (LDARGA 0) anywhere in the method.
4701         //   (or)
4702         //   2. It has been stored to (STARG.0) anywhere in the method.
4703
4704         // A non-virtual call to a non-final virtual method is only allowed if
4705         //   1. The this pointer passed to the callee is an instance of a boxed value type.
4706         //   (or)
4707         //   2. The this pointer passed to the callee is the current method's this pointer.
4708         //      (and) The current method's this pointer is not "possibly written".
4709
4710         // Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to
4711         // virtual methods.  (Luckily this does not affect .ctors, since they are not virtual.)
4712         // This is stronger than is strictly needed, but implementing a laxer rule is significantly
4713         // harder and more error prone.
4714
4715         if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0)
4716 #ifdef DEBUG
4717             && StrictCheckForNonVirtualCallToVirtualMethod()
4718 #endif
4719                 )
4720         {
4721             if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
4722             {
4723                 VerifyOrReturn(
4724                     tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly() || verIsBoxedValueType(tiThis),
4725                     "The 'this' parameter to the call must be either the calling method's 'this' parameter or "
4726                     "a boxed value type.");
4727             }
4728         }
4729     }
4730
4731     // check any constraints on the callee's class and type parameters
4732     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass),
4733                    "method has unsatisfied class constraints");
4734     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod),
4735                    "method has unsatisfied method constraints");
4736
4737     if (mflags & CORINFO_FLG_PROTECTED)
4738     {
4739         VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
4740                        "Can't access protected method");
4741     }
4742
4743     // Get the exact view of the signature for an array method
4744     if (sig->retType != CORINFO_TYPE_VOID)
4745     {
4746         eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass);
4747     }
4748
4749     // "readonly." prefixed calls only allowed for the Address operation on arrays.
4750     // The methods supported by array types are under the control of the EE
4751     // so we can trust that only the Address operation returns a byref.
4752     if (readonlyCall)
4753     {
4754         typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass);
4755         VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(),
4756                        "unexpected use of readonly prefix");
4757     }
4758
4759     // Verify the tailcall
4760     if (tailCall)
4761     {
4762         verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false);
4763     }
4764 }
4765
4766 /*****************************************************************************
4767  *  Checks that a delegate creation is done using the following pattern:
4768  *     dup
4769  *     ldvirtftn targetMemberRef
4770  *  OR
4771  *     ldftn targetMemberRef
4772  *
4773  * 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if
4774  *  not in this basic block)
4775  *
4776  *  targetMemberRef is read from the code sequence.
4777  *  targetMemberRef is validated iff verificationNeeded.
4778  */
4779
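// Note (illustrative): per ECMA-335, "ldftn" and "ldvirtftn" are the two-byte opcodes FE 06 and
// FE 07, and "dup" is the single byte 25. That is why the 4-byte metadata token is read at
// offset 2 in the ldftn pattern and at offset 3 in the dup/ldvirtftn pattern below.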
4780 BOOL Compiler::verCheckDelegateCreation(const BYTE*  delegateCreateStart,
4781                                         const BYTE*  codeAddr,
4782                                         mdMemberRef& targetMemberRef)
4783 {
4784     if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
4785     {
4786         targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]);
4787         return TRUE;
4788     }
4789     else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr))
4790     {
4791         targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]);
4792         return TRUE;
4793     }
4794
4795     return FALSE;
4796 }
4797
4798 typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType)
4799 {
4800     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
4801     typeInfo ptrVal     = verVerifyLDIND(tiTo, instrType);
4802     typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack();
4803     if (!tiCompatibleWith(value, normPtrVal, true))
4804     {
4805         Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch");
4806         compUnsafeCastUsed = true;
4807     }
4808     return ptrVal;
4809 }
4810
4811 typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType)
4812 {
4813     assert(!instrType.IsStruct());
4814
4815     typeInfo ptrVal;
4816     if (ptr.IsByRef())
4817     {
4818         ptrVal = DereferenceByRef(ptr);
4819         if (instrType.IsObjRef() && !ptrVal.IsObjRef())
4820         {
4821             Verify(false, "bad pointer");
4822             compUnsafeCastUsed = true;
4823         }
4824         else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal))
4825         {
4826             Verify(false, "pointer not consistent with instr");
4827             compUnsafeCastUsed = true;
4828         }
4829     }
4830     else
4831     {
4832         Verify(false, "pointer not byref");
4833         compUnsafeCastUsed = true;
4834     }
4835
4836     return ptrVal;
4837 }
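
// Illustrative example: "ldind.i4" applied to a byref that points at an int64 location fails
// the "pointer not consistent with instr" check above, since the instruction type (int32) is
// not equivalent to the dereferenced pointer type (int64).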
4838
4839 // Verify that the field is used properly.  'tiThis' is NULL for statics,
4840 // 'fieldFlags' is the field's attributes, and mutator is TRUE if it is a
4841 // ld*flda or a st*fld.
4842 // 'enclosingClass' is given if we are accessing a field in some specific type.
4843
4844 void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN*   pResolvedToken,
4845                               const CORINFO_FIELD_INFO& fieldInfo,
4846                               const typeInfo*           tiThis,
4847                               BOOL                      mutator,
4848                               BOOL                      allowPlainStructAsThis)
4849 {
4850     CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass;
4851     unsigned             fieldFlags     = fieldInfo.fieldFlags;
4852     CORINFO_CLASS_HANDLE instanceClass =
4853         info.compClassHnd; // for statics, we imagine the instance is the current class.
4854
4855     bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0);
4856     if (mutator)
4857     {
4858         Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA based static");
4859         if ((fieldFlags & CORINFO_FLG_FIELD_FINAL))
4860         {
4861             Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd &&
4862                        info.compIsStatic == isStaticField,
4863                    "bad use of initonly field (set or address taken)");
4864         }
4865     }
4866
4867     if (tiThis == nullptr)
4868     {
4869         Verify(isStaticField, "used static opcode with non-static field");
4870     }
4871     else
4872     {
4873         typeInfo tThis = *tiThis;
4874
4875         if (allowPlainStructAsThis && tThis.IsValueClass())
4876         {
4877             tThis.MakeByRef();
4878         }
4879
4880         // If it is null, we assume we can access it (since it will AV shortly)
4881         // If it is anything but a reference class, there is no hierarchy, so
4882         // again, we don't need the precise instance class to compute 'protected' access
4883         if (tiThis->IsType(TI_REF))
4884         {
4885             instanceClass = tiThis->GetClassHandleForObjRef();
4886         }
4887
4888         // Note that even if the field is static, we require that the this pointer
4889         // satisfy the same constraints as a non-static field.  This happens to
4890         // be simpler and seems reasonable
4891         typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass);
4892         if (tiDeclaredThis.IsValueClass())
4893         {
4894             tiDeclaredThis.MakeByRef();
4895
4896             // we allow read-only tThis, on any field access (even stores!), because if the
4897             // class implementor wants to prohibit stores he should make the field private.
4898             // we do this by setting the read-only bit on the type we compare tThis to.
4899             tiDeclaredThis.SetIsReadonlyByRef();
4900         }
4901         else if (verTrackObjCtorInitState && tThis.IsThisPtr())
4902         {
4903             // Any field access is legal on "uninitialized" this pointers.
4904             // The easiest way to implement this is to simply set the
4905             // initialized bit for the duration of the type check on the
4906             // field access only.  It does not change the state of the "this"
4907             // for the function as a whole. Note that the "tThis" is a copy
4908             // of the original "this" type (*tiThis) passed in.
4909             tThis.SetInitialisedObjRef();
4910         }
4911
4912         Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch");
4913     }
4914
4915     // Presently the JIT does not check that we don't store or take the address of init-only fields
4916     // since we cannot guarantee their immutability and it is not a security issue.
4917
4918     // check any constraints on the field's class --- accessing the field might cause a class constructor to run.
4919     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass),
4920                    "field has unsatisfied class constraints");
4921     if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED)
4922     {
4923         Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass),
4924                "Accessing protected method through wrong type.");
4925     }
4926 }
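
// Illustrative example: a "stfld" to an initonly (readonly) instance field from any method other
// than a constructor of the declaring class trips the "bad use of initonly field (set or address
// taken)" check above.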
4927
4928 void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode)
4929 {
4930     if (tiOp1.IsNumberType())
4931     {
4932 #ifdef _TARGET_64BIT_
4933         Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch");
4934 #else  // !_TARGET_64BIT_
4935         // [10/17/2013] Consider changing this: to put on my verification lawyer hat,
4936         // this is non-conforming to the ECMA Spec: types don't have to be equivalent,
4937         // but compatible, since we can coalesce native int with int32 (see section III.1.5).
4938         Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch");
4939 #endif // !_TARGET_64BIT_
4940     }
4941     else if (tiOp1.IsObjRef())
4942     {
4943         switch (opcode)
4944         {
4945             case CEE_BEQ_S:
4946             case CEE_BEQ:
4947             case CEE_BNE_UN_S:
4948             case CEE_BNE_UN:
4949             case CEE_CEQ:
4950             case CEE_CGT_UN:
4951                 break;
4952             default:
4953                 Verify(FALSE, "Cond not allowed on object types");
4954         }
4955         Verify(tiOp2.IsObjRef(), "Cond type mismatch");
4956     }
4957     else if (tiOp1.IsByRef())
4958     {
4959         Verify(tiOp2.IsByRef(), "Cond type mismatch");
4960     }
4961     else
4962     {
4963         Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch");
4964     }
4965 }
4966
4967 void Compiler::verVerifyThisPtrInitialised()
4968 {
4969     if (verTrackObjCtorInitState)
4970     {
4971         Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized");
4972     }
4973 }
4974
4975 BOOL Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target)
4976 {
4977     // Either target == context, in this case calling an alternate .ctor
4978     // Or target is the immediate parent of context
4979
4980     return ((target == context) || (target == info.compCompHnd->getParentType(context)));
4981 }
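
// Illustrative example: inside C::.ctor, a "call" to another C constructor (an alternate .ctor)
// or to the .ctor of C's immediate base class counts as initializing the this pointer; a call to
// an unrelated type's constructor does not.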
4982
4983 GenTreePtr Compiler::impImportLdvirtftn(GenTreePtr              thisPtr,
4984                                         CORINFO_RESOLVED_TOKEN* pResolvedToken,
4985                                         CORINFO_CALL_INFO*      pCallInfo)
4986 {
4987     if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE))
4988     {
4989         NO_WAY("Virtual call to a function added via EnC is not supported");
4990     }
4991
4992 #ifdef FEATURE_READYTORUN_COMPILER
4993     if (opts.IsReadyToRun())
4994     {
4995         if (!pCallInfo->exactContextNeedsRuntimeLookup)
4996         {
4997             GenTreeCall* call = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, GTF_EXCEPT,
4998                                                     gtNewArgList(thisPtr));
4999
5000             call->setEntryPoint(pCallInfo->codePointerLookup.constLookup);
5001
5002             return call;
5003         }
5004
5005         // We need a runtime lookup. CoreRT has a ReadyToRun helper for that too.
5006         if (IsTargetAbi(CORINFO_CORERT_ABI))
5007         {
5008             GenTreePtr ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind);
5009
5010             return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
5011                                              gtNewArgList(ctxTree), &pCallInfo->codePointerLookup.lookupKind);
5012         }
5013     }
5014 #endif
5015
5016     // Get the exact descriptor for the static callsite
5017     GenTreePtr exactTypeDesc = impParentClassTokenToHandle(pResolvedToken);
5018     if (exactTypeDesc == nullptr)
5019     { // compDonotInline()
5020         return nullptr;
5021     }
5022
5023     GenTreePtr exactMethodDesc = impTokenToHandle(pResolvedToken);
5024     if (exactMethodDesc == nullptr)
5025     { // compDonotInline()
5026         return nullptr;
5027     }
5028
5029     GenTreeArgList* helpArgs = gtNewArgList(exactMethodDesc);
5030
5031     helpArgs = gtNewListNode(exactTypeDesc, helpArgs);
5032
5033     helpArgs = gtNewListNode(thisPtr, helpArgs);
5034
5035     // Call helper function.  This gets the target address of the final destination callsite.
5036
5037     return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, GTF_EXCEPT, helpArgs);
5038 }
5039
5040 /*****************************************************************************
5041  *
5042  *  Build and import a box node
5043  */
5044
5045 void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
5046 {
5047     // Get the tree for the type handle for the boxed object.  In the case
5048     // of shared generic code or ngen'd code this might be an embedded
5049     // computation.
5050     // Note we can only do it if the class constructor has been called;
5051     // we can always do it on primitive types.
5052
5053     GenTreePtr op1 = nullptr;
5054     GenTreePtr op2 = nullptr;
5055     var_types  lclTyp;
5056
5057     impSpillSpecialSideEff();
5058
5059     // Now get the expression to box from the stack.
5060     CORINFO_CLASS_HANDLE operCls;
5061     GenTreePtr           exprToBox = impPopStack(operCls).val;
5062
5063     CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
5064     if (boxHelper == CORINFO_HELP_BOX)
5065     {
5066         // we are doing 'normal' boxing.  This means that we can inline the box operation
5067         // Box(expr) gets morphed into
5068         // temp = new(clsHnd)
5069         // cpobj(temp+4, expr, clsHnd)
5070         // push temp
5071         // The code paths differ slightly below for structs and primitives because
5072         // "cpobj" differs in these cases.  In one case you get
5073         //    impAssignStructPtr(temp+4, expr, clsHnd)
5074         // and the other you get
5075         //    *(temp+4) = expr
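        //
        // Illustrative sketch only (primitive case, assuming a pointer-sized object header):
        //    boxTemp = <allocation helper>(clsHnd)   // the "temp = new(clsHnd)" step above
        //    *(boxTemp + sizeof(void*)) = v          // plain indirection store of the payload
        //    push boxTemp                            // joined with the assignment via GT_COMMA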
5076
5077         if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM)
5078         {
5079             impBoxTemp = lvaGrabTemp(true DEBUGARG("Box Helper"));
5080         }
5081
5082         // needs to stay in use until this box expression is appended to
5083         // some other node.  We approximate this by keeping it alive until
5084         // the opcode stack becomes empty
5085         impBoxTempInUse = true;
5086
5087 #ifdef FEATURE_READYTORUN_COMPILER
5088         bool usingReadyToRunHelper = false;
5089
5090         if (opts.IsReadyToRun())
5091         {
5092             op1                   = impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
5093             usingReadyToRunHelper = (op1 != nullptr);
5094         }
5095
5096         if (!usingReadyToRunHelper)
5097 #endif
5098         {
5099             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
5100             // and the newfast call with a single call to a dynamic R2R cell that will:
5101             //      1) Load the context
5102             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
5103             //      3) Allocate and return the new object for boxing
5104             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
5105
5106             // Ensure that the value class is restored
5107             op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5108             if (op2 == nullptr)
5109             { // compDonotInline()
5110                 return;
5111             }
5112
5113             op1 = gtNewHelperCallNode(info.compCompHnd->getNewHelper(pResolvedToken, info.compMethodHnd), TYP_REF, 0,
5114                                       gtNewArgList(op2));
5115         }
5116
5117         /* Remember that this basic block contains 'new' of an object */
5118         compCurBB->bbFlags |= BBF_HAS_NEWOBJ;
5119
5120         GenTreePtr asg = gtNewTempAssign(impBoxTemp, op1);
5121
5122         GenTreePtr asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
5123
5124         op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
5125         op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
5126         op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2);
5127
5128         if (varTypeIsStruct(exprToBox))
5129         {
5130             assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls));
5131             op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL);
5132         }
5133         else
5134         {
5135             lclTyp = exprToBox->TypeGet();
5136             if (lclTyp == TYP_BYREF)
5137             {
5138                 lclTyp = TYP_I_IMPL;
5139             }
5140             CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass);
5141             if (impIsPrimitive(jitType))
5142             {
5143                 lclTyp = JITtype2varType(jitType);
5144             }
5145             assert(genActualType(exprToBox->TypeGet()) == genActualType(lclTyp) ||
5146                    varTypeIsFloating(lclTyp) == varTypeIsFloating(exprToBox->TypeGet()));
5147             var_types srcTyp = exprToBox->TypeGet();
5148             var_types dstTyp = lclTyp;
5149
5150             if (srcTyp != dstTyp)
5151             {
5152                 assert((varTypeIsFloating(srcTyp) && varTypeIsFloating(dstTyp)) ||
5153                        (varTypeIsIntegral(srcTyp) && varTypeIsIntegral(dstTyp)));
5154                 exprToBox = gtNewCastNode(dstTyp, exprToBox, dstTyp);
5155             }
5156             op1 = gtNewAssignNode(gtNewOperNode(GT_IND, lclTyp, op1), exprToBox);
5157         }
5158
5159         op2 = gtNewLclvNode(impBoxTemp, TYP_REF);
5160         op1 = gtNewOperNode(GT_COMMA, TYP_REF, op1, op2);
5161
5162         // Record that this is a "box" node.
5163         op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt);
5164
5165         // If it is a value class, mark the "box" node.  We can use this information
5166         // to optimise several cases:
5167         //    "box(x) == null" --> false
5168         //    "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod"
5169         //    "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod"
5170
5171         op1->gtFlags |= GTF_BOX_VALUE;
5172         assert(op1->IsBoxedValue());
5173         assert(asg->gtOper == GT_ASG);
5174     }
5175     else
5176     {
5177         // Don't optimize, just call the helper and be done with it
5178
5179         // Ensure that the value class is restored
5180         op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5181         if (op2 == nullptr)
5182         { // compDonotInline()
5183             return;
5184         }
5185
5186         GenTreeArgList* args = gtNewArgList(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true));
5187         op1                  = gtNewHelperCallNode(boxHelper, TYP_REF, GTF_EXCEPT, args);
5188     }
5189
5190     /* Push the result back on the stack, */
5191     /* even if clsHnd is a value class we want the TI_REF */
5192     typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass));
5193     impPushOnStack(op1, tiRetVal);
5194 }
5195
5196 //------------------------------------------------------------------------
5197 // impImportNewObjArray: Build and import `new` of multi-dimensional array
5198 //
5199 // Arguments:
5200 //    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5201 //                     by a call to CEEInfo::resolveToken().
5202 //    pCallInfo - The CORINFO_CALL_INFO that has been initialized
5203 //                by a call to CEEInfo::getCallInfo().
5204 //
5205 // Assumptions:
5206 //    The multi-dimensional array constructor arguments (array dimensions) are
5207 //    pushed on the IL stack on entry to this method.
5208 //
5209 // Notes:
5210 //    Multi-dimensional array constructors are imported as calls to a JIT
5211 //    helper, not as regular calls.
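//
//    For example (illustrative): for a two-dimensional array, the IL "newobj" on the array
//    type's rank-2 constructor pops the two dimension values pushed by the caller and is
//    rewritten as a call to one of the helpers described below, with the array class handle
//    passed explicitly.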
5212
5213 void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
5214 {
5215     GenTreePtr classHandle = impParentClassTokenToHandle(pResolvedToken);
5216     if (classHandle == nullptr)
5217     { // compDonotInline()
5218         return;
5219     }
5220
5221     assert(pCallInfo->sig.numArgs);
5222
5223     GenTreePtr      node;
5224     GenTreeArgList* args;
5225
5226     //
5227     // There are two different JIT helpers that can be used to allocate
5228     // multi-dimensional arrays:
5229     //
5230     // - CORINFO_HELP_NEW_MDARR - takes the array dimensions as varargs.
5231     //      This variant is deprecated. It should be eventually removed.
5232     //
5233     // - CORINFO_HELP_NEW_MDARR_NONVARARG - takes the array dimensions as
5234     //      pointer to block of int32s. This variant is more portable.
5235     //
5236     // The non-varargs helper is enabled for CoreRT only for now. Enabling this
5237     // unconditionally would require ReadyToRun version bump.
5238     //
5239     CLANG_FORMAT_COMMENT_ANCHOR;
5240
5241 #if COR_JIT_EE_VERSION > 460
5242     if (!opts.IsReadyToRun() || IsTargetAbi(CORINFO_CORERT_ABI))
5243     {
5244         LclVarDsc* newObjArrayArgsVar;
5245
5246         // Reuse the temp used to pass the array dimensions to avoid bloating
5247         // the stack frame in case there are multiple calls to multi-dim array
5248         // constructors within a single method.
5249         if (lvaNewObjArrayArgs == BAD_VAR_NUM)
5250         {
5251             lvaNewObjArrayArgs                       = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs"));
5252             lvaTable[lvaNewObjArrayArgs].lvType      = TYP_BLK;
5253             lvaTable[lvaNewObjArrayArgs].lvExactSize = 0;
5254         }
5255
5256         // Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers
5257         // for our call to CORINFO_HELP_NEW_MDARR_NONVARARG.
5258         lvaTable[lvaNewObjArrayArgs].lvExactSize =
5259             max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32));
5260
5261         // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects
5262         // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments
5263         // to one allocation at a time.
5264         impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray"));
5265
5266         //
5267         // The arguments of the CORINFO_HELP_NEW_MDARR_NONVARARG helper are:
5268         //  - Array class handle
5269         //  - Number of dimension arguments
5270         //  - Pointer to block of int32 dimensions - address of lvaNewObjArrayArgs temp.
5271         //
5272
5273         node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5274         node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node);
5275
5276         // Pop dimension arguments from the stack one at a time and store them
5277         // into the lvaNewObjArrayArgs temp.
5278         for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--)
5279         {
5280             GenTreePtr arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT);
5281
5282             GenTreePtr dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5283             dest            = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest);
5284             dest            = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest,
5285                                  new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i));
5286             dest = gtNewOperNode(GT_IND, TYP_INT, dest);
5287
5288             node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node);
5289         }
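
        // Illustrative note: the loop above stores the value popped for index 'i' at byte offset
        // sizeof(INT32) * i, so the block ends up holding the constructor's dimension arguments
        // in their original left-to-right order (the top of the stack was the last argument).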
5290
5291         args = gtNewArgList(node);
5292
5293         // pass number of arguments to the helper
5294         args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5295
5296         args = gtNewListNode(classHandle, args);
5297
5298         node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR_NONVARARG, TYP_REF, 0, args);
5299     }
5300     else
5301 #endif
5302     {
5303         //
5304         // The varargs helper needs the type and method handles as last
5305         // and last-1 param (this is a cdecl call, so args will be
5306         // pushed in reverse order on the CPU stack)
5307         //
5308
5309         args = gtNewArgList(classHandle);
5310
5311         // pass number of arguments to the helper
5312         args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5313
5314         unsigned argFlags = 0;
5315         args              = impPopList(pCallInfo->sig.numArgs, &argFlags, &pCallInfo->sig, args);
5316
5317         node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, 0, args);
5318
5319         // varargs, so we pop the arguments
5320         node->gtFlags |= GTF_CALL_POP_ARGS;
5321
5322 #ifdef DEBUG
5323         // At the present time we don't track Caller pop arguments
5324         // that have GC references in them
5325         for (GenTreeArgList* temp = args; temp; temp = temp->Rest())
5326         {
5327             assert(temp->Current()->gtType != TYP_REF);
5328         }
5329 #endif
5330     }
5331
5332     node->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5333     node->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass;
5334
5335     // Remember that this basic block contains 'new' of a md array
5336     compCurBB->bbFlags |= BBF_HAS_NEWARRAY;
5337
5338     impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass));
5339 }
5340
5341 GenTreePtr Compiler::impTransformThis(GenTreePtr              thisPtr,
5342                                       CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
5343                                       CORINFO_THIS_TRANSFORM  transform)
5344 {
5345     switch (transform)
5346     {
5347         case CORINFO_DEREF_THIS:
5348         {
5349             GenTreePtr obj = thisPtr;
5350
5351             // This does a LDIND on the obj, which should be a byref pointing to a ref
5352             impBashVarAddrsToI(obj);
5353             assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF);
5354             CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5355
5356             obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj);
5357             // ldind could point anywhere; for example, a boxed class static int
5358             obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
5359
5360             return obj;
5361         }
5362
5363         case CORINFO_BOX_THIS:
5364         {
5365             // Constraint calls where there might be no
5366             // unboxed entry point require us to implement the call via helper.
5367             // These only occur when a possible target of the call
5368             // may have inherited an implementation of an interface
5369             // method from System.Object or System.ValueType.  The EE does not provide us with
5370             // "unboxed" versions of these methods.
5371
5372             GenTreePtr obj = thisPtr;
5373
5374             assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL);
5375             obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj);
5376             obj->gtFlags |= GTF_EXCEPT;
5377
5378             CorInfoType jitTyp  = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5379             var_types   objType = JITtype2varType(jitTyp);
5380             if (impIsPrimitive(jitTyp))
5381             {
5382                 if (obj->OperIsBlk())
5383                 {
5384                     obj->ChangeOperUnchecked(GT_IND);
5385
5386                     // Obj could point anywhere; for example, a boxed class static int
5387                     obj->gtFlags |= GTF_IND_TGTANYWHERE;
5388                     obj->gtOp.gtOp2 = nullptr; // must be zero for tree walkers
5389                 }
5390
5391                 obj->gtType = JITtype2varType(jitTyp);
5392                 assert(varTypeIsArithmetic(obj->gtType));
5393             }
5394
5395             // This pushes on the dereferenced byref
5396             // This is then used immediately to box.
5397             impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack());
5398
5399             // This pops off the byref-to-a-value-type remaining on the stack and
5400             // replaces it with a boxed object.
5401             // This is then used as the object to the virtual call immediately below.
5402             impImportAndPushBox(pConstrainedResolvedToken);
5403             if (compDonotInline())
5404             {
5405                 return nullptr;
5406             }
5407
5408             obj = impPopStack().val;
5409             return obj;
5410         }
5411         case CORINFO_NO_THIS_TRANSFORM:
5412         default:
5413             return thisPtr;
5414     }
5415 }
5416
5417 //------------------------------------------------------------------------
5418 // impCanPInvokeInline: check whether PInvoke inlining should be enabled in the current method.
5419 //
5420 // Return Value:
5421 //    true if PInvoke inlining should be enabled in the current method, false otherwise
5422 //
5423 // Notes:
5424 //    Checks a number of ambient conditions where we could pinvoke but choose not to
5425
5426 bool Compiler::impCanPInvokeInline()
5427 {
5428     return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (compCodeOpt() != SMALL_CODE) &&
5429            (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
5430         ;
5431 }
5432
5433 //------------------------------------------------------------------------
5434 // impCanPInvokeInlineCallSite: basic legality checks using information
5435 // from a call to see if the call qualifies as an inline pinvoke.
5436 //
5437 // Arguments:
5438 //    block      - block containing the call, or for inlinees, block
5439 //                 containing the call being inlined
5440 //
5441 // Return Value:
5442 //    true if this call can legally qualify as an inline pinvoke, false otherwise
5443 //
5444 // Notes:
5445 //    For runtimes that support exception handling interop there are
5446 //    restrictions on using inline pinvoke in handler regions.
5447 //
5448 //    * We have to disable pinvoke inlining inside of filters because
5449 //    in case the main execution (i.e. in the try block) is inside
5450 //    unmanaged code, we cannot reuse the inlined stub (we still need
5451 //    the original state until we are in the catch handler)
5452 //
5453 //    * We disable pinvoke inlining inside handlers since the GSCookie
5454 //    is in the inlined Frame (see
5455 //    CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but
5456 //    this would not protect framelets/return-address of handlers.
5457 //
5458 //    These restrictions are currently also in place for CoreCLR but
5459 //    can be relaxed when coreclr/#8459 is addressed.
5460
5461 bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block)
5462 {
5463     if (block->hasHndIndex())
5464     {
5465         return false;
5466     }
5467
5468     // The remaining limitations do not apply to CoreRT
5469     if (IsTargetAbi(CORINFO_CORERT_ABI))
5470     {
5471         return true;
5472     }
5473
5474 #ifdef _TARGET_AMD64_
5475     // On x64, we disable pinvoke inlining inside of try regions.
5476     // Here is the comment from JIT64 explaining why:
5477     //
5478     //   [VSWhidbey: 611015] - because the jitted code links in the
5479     //   Frame (instead of the stub) we rely on the Frame not being
5480     //   'active' until inside the stub.  This normally happens by the
5481     //   stub setting the return address pointer in the Frame object
5482     //   inside the stub.  On a normal return, the return address
5483     //   pointer is zeroed out so the Frame can be safely re-used, but
5484     //   if an exception occurs, nobody zeros out the return address
5485     //   pointer.  Thus if we re-used the Frame object, it would go
5486     //   'active' as soon as we link it into the Frame chain.
5487     //
5488     //   Technically we only need to disable PInvoke inlining if we're
5489     //   in a handler or if we're in a try body with a catch or
5490     //   filter/except where other non-handler code in this method
5491     //   might run and try to re-use the dirty Frame object.
5492     //
5493     //   A desktop test case where this seems to matter is
5494     //   jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe
5495     if (block->hasTryIndex())
5496     {
5497         return false;
5498     }
5499 #endif // _TARGET_AMD64_
5500
5501     return true;
5502 }
5503
5504 //------------------------------------------------------------------------
5505 // impCheckForPInvokeCall: examine a call to see if it is a pinvoke and, if so,
5506 // whether it can be expressed as an inline pinvoke.
5507 //
5508 // Arguments:
5509 //    call       - tree for the call
5510 //    methHnd    - handle for the method being called (may be null)
5511 //    sig        - signature of the method being called
5512 //    mflags     - method flags for the method being called
5513 //    block      - block containing the call, or for inlinees, block
5514 //                 containing the call being inlined
5515 //
5516 // Notes:
5517 //   Sets GTF_CALL_M_PINVOKE on the call for pinvokes.
5518 //
5519 //   Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the
5520 //   call passes a combination of legality and profitability checks.
5521 //
5522 //   If GTF_CALL_UNMANAGED is set, increments info.compCallUnmanaged
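//
//   The overall flow below is roughly: mark the call as a pinvoke if the VM
//   says so, work out the unmanaged calling convention, check call-site
//   legality (impCanPInvokeInlineCallSite), check the ambient and profitability
//   conditions (impCanPInvokeInline, rarely-run blocks; skipped for IL-stub
//   CALLI pinvokes, which must be inlined), and finally perform the more
//   expensive pInvokeMarshalingRequired query.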
5523
5524 void Compiler::impCheckForPInvokeCall(
5525     GenTreePtr call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block)
5526 {
5527     CorInfoUnmanagedCallConv unmanagedCallConv;
5528
5529     // If the VM flagged it as a PInvoke, flag the call node accordingly
5530     if ((mflags & CORINFO_FLG_PINVOKE) != 0)
5531     {
5532         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_PINVOKE;
5533     }
5534
5535     if (methHnd)
5536     {
5537         if ((mflags & CORINFO_FLG_PINVOKE) == 0 || (mflags & CORINFO_FLG_NOSECURITYWRAP) == 0)
5538         {
5539             return;
5540         }
5541
5542         unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd);
5543     }
5544     else
5545     {
5546         CorInfoCallConv callConv = CorInfoCallConv(sig->callConv & CORINFO_CALLCONV_MASK);
5547         if (callConv == CORINFO_CALLCONV_NATIVEVARARG)
5548         {
5549             // Used by the IL Stubs.
5550             callConv = CORINFO_CALLCONV_C;
5551         }
5552         static_assert_no_msg((unsigned)CORINFO_CALLCONV_C == (unsigned)CORINFO_UNMANAGED_CALLCONV_C);
5553         static_assert_no_msg((unsigned)CORINFO_CALLCONV_STDCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_STDCALL);
5554         static_assert_no_msg((unsigned)CORINFO_CALLCONV_THISCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_THISCALL);
5555         unmanagedCallConv = CorInfoUnmanagedCallConv(callConv);
5556
5557         assert(!call->gtCall.gtCallCookie);
5558     }
5559
5560     if (unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_C && unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_STDCALL &&
5561         unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_THISCALL)
5562     {
5563         return;
5564     }
5565     optNativeCallCount++;
5566
5567     if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && methHnd == nullptr)
5568     {
5569         // PInvoke CALLI in IL stubs must be inlined
5570     }
5571     else
5572     {
5573         // Check legality
5574         if (!impCanPInvokeInlineCallSite(block))
5575         {
5576             return;
5577         }
5578
5579         // PInvoke CALL in IL stubs must be inlined on CoreRT. Skip the ambient conditions checks and
5580         // profitability checks
5581         if (!(opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && IsTargetAbi(CORINFO_CORERT_ABI)))
5582         {
5583             if (!impCanPInvokeInline())
5584             {
5585                 return;
5586             }
5587
5588             // Size-speed tradeoff: don't use inline pinvoke at rarely
5589             // executed call sites.  The non-inline version is more
5590             // compact.
5591             if (block->isRunRarely())
5592             {
5593                 return;
5594             }
5595         }
5596
5597         // The expensive check should be last
5598         if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig))
5599         {
5600             return;
5601         }
5602     }
5603
5604     JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s", info.compFullName));
5605
5606     call->gtFlags |= GTF_CALL_UNMANAGED;
5607     info.compCallUnmanaged++;
5608
5609     // The AMD64 convention is the same for native and managed
5610     if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_C)
5611     {
5612         call->gtFlags |= GTF_CALL_POP_ARGS;
5613     }
5614
5615     if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_THISCALL)
5616     {
5617         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL;
5618     }
5619 }
5620
5621 GenTreePtr Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, IL_OFFSETX ilOffset)
5622 {
5623     var_types callRetTyp = JITtype2varType(sig->retType);
5624
5625     /* The function pointer is on top of the stack - It may be a
5626      * complex expression. As it is evaluated after the args,
5627      * it may cause registered args to be spilled. Simply spill it.
5628      */
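
    // An illustrative CALLI sequence that reaches this path (argument pushed
    // first, then the function pointer on top of the stack), with a
    // hypothetical method C::M:
    //
    //     ldarg.0
    //     ldftn  int32 C::M(int32)
    //     calli  int32(int32)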
5629
5630     // Ignore this trivial case.
5631     if (impStackTop().val->gtOper != GT_LCL_VAR)
5632     {
5633         impSpillStackEntry(verCurrentState.esStackDepth - 1,
5634                            BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall"));
5635     }
5636
5637     /* Get the function pointer */
5638
5639     GenTreePtr fptr = impPopStack().val;
5640     assert(genActualType(fptr->gtType) == TYP_I_IMPL);
5641
5642 #ifdef DEBUG
5643     // This temporary must never be converted to a double in stress mode,
5644     // because that can introduce a call to the cast helper after the
5645     // arguments have already been evaluated.
5646
5647     if (fptr->OperGet() == GT_LCL_VAR)
5648     {
5649         lvaTable[fptr->gtLclVarCommon.gtLclNum].lvKeepType = 1;
5650     }
5651 #endif
5652
5653     /* Create the call node */
5654
5655     GenTreePtr call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
5656
5657     call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
5658
5659     return call;
5660 }
5661
5662 /*****************************************************************************/
5663
5664 void Compiler::impPopArgsForUnmanagedCall(GenTreePtr call, CORINFO_SIG_INFO* sig)
5665 {
5666     assert(call->gtFlags & GTF_CALL_UNMANAGED);
5667
5668     /* Since we push the arguments in reverse order (i.e. right -> left)
5669      * spill any side effects from the stack
5670      *
5671      * OBS: If there is only one side effect we do not need to spill it
5672      *      thus we have to spill all side-effects except last one
5673      */
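
    // Illustrative example: with stacked args [f(), x, g()] where f() and g()
    // both have side effects, evaluating the reversed (right-to-left) arg list
    // would run g()'s side effects before f()'s; spilling f() to a temp here
    // preserves the original IL evaluation order, while the last side effect
    // (g()) can remain on the stack.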
5674
5675     unsigned lastLevelWithSideEffects = UINT_MAX;
5676
5677     unsigned argsToReverse = sig->numArgs;
5678
5679     // For "thiscall", the first argument goes in a register. Since its
5680     // order does not need to be changed, we do not need to spill it
5681
5682     if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
5683     {
5684         assert(argsToReverse);
5685         argsToReverse--;
5686     }
5687
5688 #ifndef _TARGET_X86_
5689     // Don't reverse args on ARM or x64 - first four args always placed in regs in order
5690     argsToReverse = 0;
5691 #endif
5692
5693     for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++)
5694     {
5695         if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF)
5696         {
5697             assert(lastLevelWithSideEffects == UINT_MAX);
5698
5699             impSpillStackEntry(level,
5700                                BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect"));
5701         }
5702         else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT)
5703         {
5704             if (lastLevelWithSideEffects != UINT_MAX)
5705             {
5706                 /* We had a previous side effect - must spill it */
5707                 impSpillStackEntry(lastLevelWithSideEffects,
5708                                    BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect"));
5709
5710                 /* Record the level for the current side effect in case we will spill it */
5711                 lastLevelWithSideEffects = level;
5712             }
5713             else
5714             {
5715                 /* This is the first side effect encountered - record its level */
5716
5717                 lastLevelWithSideEffects = level;
5718             }
5719         }
5720     }
5721
5722     /* The argument list is now "clean" - no out-of-order side effects
5723      * Pop the argument list in reverse order */
5724
5725     unsigned   argFlags = 0;
5726     GenTreePtr args     = call->gtCall.gtCallArgs =
5727         impPopRevList(sig->numArgs, &argFlags, sig, sig->numArgs - argsToReverse);
5728
5729     if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
5730     {
5731         GenTreePtr thisPtr = args->Current();
5732         impBashVarAddrsToI(thisPtr);
5733         assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF);
5734     }
5735
5736     if (args)
5737     {
5738         call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5739     }
5740 }
5741
5742 //------------------------------------------------------------------------
5743 // impInitClass: Build a node to initialize the class before accessing the
5744 //               field if necessary
5745 //
5746 // Arguments:
5747 //    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5748 //                     by a call to CEEInfo::resolveToken().
5749 //
5750 // Return Value: If needed, a pointer to the node that will perform the class
5751 //               initialization.  Otherwise, nullptr.
5752 //
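// Notes:
//    The returned helper call (if any) is typically appended by the caller
//    ahead of the static field access so that the class constructor runs first.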
5753
5754 GenTreePtr Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken)
5755 {
5756     CorInfoInitClassResult initClassResult =
5757         info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle);
5758
5759     if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0)
5760     {
5761         return nullptr;
5762     }
5763     BOOL runtimeLookup;
5764
5765     GenTreePtr node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup);
5766
5767     if (node == nullptr)
5768     {
5769         assert(compDonotInline());
5770         return nullptr;
5771     }
5772
5773     if (runtimeLookup)
5774     {
5775         node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, 0, gtNewArgList(node));
5776     }
5777     else
5778     {
5779         // Call the shared non-gc static helper, as it's the fastest
5780         node = fgGetSharedCCtor(pResolvedToken->hClass);
5781     }
5782
5783     return node;
5784 }
5785
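//------------------------------------------------------------------------
// impImportStaticReadOnlyField: read the value stored at fldAddr, interpreted
// according to lclTyp, and return it as the corresponding constant node
// (GT_CNS_INT / GT_CNS_LNG / GT_CNS_DBL).
//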
5786 GenTreePtr Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp)
5787 {
5788     GenTreePtr op1 = nullptr;
5789
5790     switch (lclTyp)
5791     {
5792         int     ival;
5793         __int64 lval;
5794         double  dval;
5795
5796         case TYP_BOOL:
5797             ival = *((bool*)fldAddr);
5798             goto IVAL_COMMON;
5799
5800         case TYP_BYTE:
5801             ival = *((signed char*)fldAddr);
5802             goto IVAL_COMMON;
5803
5804         case TYP_UBYTE:
5805             ival = *((unsigned char*)fldAddr);
5806             goto IVAL_COMMON;
5807
5808         case TYP_SHORT:
5809             ival = *((short*)fldAddr);
5810             goto IVAL_COMMON;
5811
5812         case TYP_CHAR:
5813         case TYP_USHORT:
5814             ival = *((unsigned short*)fldAddr);
5815             goto IVAL_COMMON;
5816
5817         case TYP_UINT:
5818         case TYP_INT:
5819             ival = *((int*)fldAddr);
5820         IVAL_COMMON:
5821             op1 = gtNewIconNode(ival);
5822             break;
5823
5824         case TYP_LONG:
5825         case TYP_ULONG:
5826             lval = *((__int64*)fldAddr);
5827             op1  = gtNewLconNode(lval);
5828             break;
5829
5830         case TYP_FLOAT:
5831             dval = *((float*)fldAddr);
5832             op1  = gtNewDconNode(dval);
5833 #if !FEATURE_X87_DOUBLES
5834             // X87 stack doesn't differentiate between float/double
5835             // so R4 is treated as R8, but everybody else does
5836             op1->gtType = TYP_FLOAT;
5837 #endif // FEATURE_X87_DOUBLES
5838             break;
5839
5840         case TYP_DOUBLE:
5841             dval = *((double*)fldAddr);
5842             op1  = gtNewDconNode(dval);
5843             break;
5844
5845         default:
5846             assert(!"Unexpected lclTyp");
5847             break;
5848     }
5849
5850     return op1;
5851 }
5852
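//------------------------------------------------------------------------
// impImportStaticFieldAccess: build the tree for a static field access whose
// address must be computed: via the generic/shared statics helpers, the
// ReadyToRun helpers, or a direct field address from the EE. Depending on
// 'access', the result is either the address of the field or an indirection
// loading its value; boxed statics (CORINFO_FLG_FIELD_STATIC_IN_HEAP)
// additionally dereference to the boxed object and step past its method table
// pointer.
//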
5853 GenTreePtr Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
5854                                                 CORINFO_ACCESS_FLAGS    access,
5855                                                 CORINFO_FIELD_INFO*     pFieldInfo,
5856                                                 var_types               lclTyp)
5857 {
5858     GenTreePtr op1;
5859
5860     switch (pFieldInfo->fieldAccessor)
5861     {
5862         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
5863         {
5864             assert(!compIsForInlining());
5865
5866             // We first call a special helper to get the statics base pointer
5867             op1 = impParentClassTokenToHandle(pResolvedToken);
5868
5869             // compIsForInlining() is false, so we should never get NULL here
5870             assert(op1 != nullptr);
5871
5872             var_types type = TYP_BYREF;
5873
5874             switch (pFieldInfo->helper)
5875             {
5876                 case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
5877                     type = TYP_I_IMPL;
5878                     break;
5879                 case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
5880                 case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
5881                 case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
5882                     break;
5883                 default:
5884                     assert(!"unknown generic statics helper");
5885                     break;
5886             }
5887
5888             op1 = gtNewHelperCallNode(pFieldInfo->helper, type, 0, gtNewArgList(op1));
5889
5890             FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5891             op1              = gtNewOperNode(GT_ADD, type, op1,
5892                                 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
5893         }
5894         break;
5895
5896         case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
5897         {
5898 #ifdef FEATURE_READYTORUN_COMPILER
5899             if (opts.IsReadyToRun())
5900             {
5901                 unsigned callFlags = 0;
5902
5903                 if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
5904                 {
5905                     callFlags |= GTF_CALL_HOISTABLE;
5906                 }
5907
5908                 op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF, callFlags);
5909
5910                 op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
5911             }
5912             else
5913 #endif
5914             {
5915                 op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper);
5916             }
5917
5918             {
5919                 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5920                 op1              = gtNewOperNode(GT_ADD, op1->TypeGet(), op1,
5921                                     new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, pFieldInfo->offset, fs));
5922             }
5923             break;
5924         }
5925 #if COR_JIT_EE_VERSION > 460
5926         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
5927         {
5928 #ifdef FEATURE_READYTORUN_COMPILER
5929             noway_assert(opts.IsReadyToRun());
5930             CORINFO_LOOKUP_KIND kind = info.compCompHnd->getLocationOfThisType(info.compMethodHnd);
5931             assert(kind.needsRuntimeLookup);
5932
5933             GenTreePtr      ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);
5934             GenTreeArgList* args    = gtNewArgList(ctxTree);
5935
5936             unsigned callFlags = 0;
5937
5938             if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
5939             {
5940                 callFlags |= GTF_CALL_HOISTABLE;
5941             }
5942             var_types type = TYP_BYREF;
5943             op1            = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, callFlags, args);
5944
5945             op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
5946             FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5947             op1              = gtNewOperNode(GT_ADD, type, op1,
5948                                 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
5949 #else
5950             unreached();
5951 #endif // FEATURE_READYTORUN_COMPILER
5952         }
5953         break;
5954 #endif // COR_JIT_EE_VERSION > 460
5955         default:
5956         {
5957             if (!(access & CORINFO_ACCESS_ADDRESS))
5958             {
5959                 // In future, it may be better to just create the right tree here instead of folding it later.
5960                 op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField);
5961
5962                 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
5963                 {
5964                     op1->gtType = TYP_REF; // points at boxed object
5965                     FieldSeqNode* firstElemFldSeq =
5966                         GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
5967                     op1 =
5968                         gtNewOperNode(GT_ADD, TYP_BYREF, op1,
5969                                       new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), firstElemFldSeq));
5970
5971                     if (varTypeIsStruct(lclTyp))
5972                     {
5973                         // Constructor adds GTF_GLOB_REF.  Note that this is *not* GTF_EXCEPT.
5974                         op1 = gtNewObjNode(pFieldInfo->structType, op1);
5975                     }
5976                     else
5977                     {
5978                         op1 = gtNewOperNode(GT_IND, lclTyp, op1);
5979                         op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING;
5980                     }
5981                 }
5982
5983                 return op1;
5984             }
5985             else
5986             {
5987                 void** pFldAddr = nullptr;
5988                 void*  fldAddr  = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr);
5989
5990                 FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5991
5992                 /* Create the data member node */
5993                 if (pFldAddr == nullptr)
5994                 {
5995                     op1 = gtNewIconHandleNode((size_t)fldAddr, GTF_ICON_STATIC_HDL, fldSeq);
5996                 }
5997                 else
5998                 {
5999                     op1 = gtNewIconHandleNode((size_t)pFldAddr, GTF_ICON_STATIC_HDL, fldSeq);
6000
6001                     // There are two cases here, either the static is RVA based,
6002                     // in which case the type of the FIELD node is not a GC type
6003                     // and the handle to the RVA is a TYP_I_IMPL.  Or the FIELD node is
6004                     // a GC type and the handle to it is a TYP_BYREF in the GC heap
6005                     // because handles to statics now go into the large object heap
6006
6007                     var_types handleTyp = (var_types)(varTypeIsGC(lclTyp) ? TYP_BYREF : TYP_I_IMPL);
6008                     op1                 = gtNewOperNode(GT_IND, handleTyp, op1);
6009                     op1->gtFlags |= GTF_IND_INVARIANT | GTF_IND_NONFAULTING;
6010                 }
6011             }
6012             break;
6013         }
6014     }
6015
6016     if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
6017     {
6018         op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
6019
6020         FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
6021
6022         op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
6023                             new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), fldSeq));
6024     }
6025
6026     if (!(access & CORINFO_ACCESS_ADDRESS))
6027     {
6028         op1 = gtNewOperNode(GT_IND, lclTyp, op1);
6029         op1->gtFlags |= GTF_GLOB_REF;
6030     }
6031
6032     return op1;
6033 }
6034
6035 // In general, try to call this before most of the verification work.  Most people expect access
6036 // exceptions before verification exceptions.  If you call this afterwards, that usually doesn't happen.  It turns
6037 // out that if you can't access something, we also consider you unverifiable for other reasons.
6038 void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6039 {
6040     if (result != CORINFO_ACCESS_ALLOWED)
6041     {
6042         impHandleAccessAllowedInternal(result, helperCall);
6043     }
6044 }
6045
6046 void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6047 {
6048     switch (result)
6049     {
6050         case CORINFO_ACCESS_ALLOWED:
6051             break;
6052         case CORINFO_ACCESS_ILLEGAL:
6053             // if we're verifying, then we need to reject the illegal access to ensure that we don't think the
6054             // method is verifiable.  Otherwise, delay the exception to runtime.
6055             if (compIsForImportOnly())
6056             {
6057                 info.compCompHnd->ThrowExceptionForHelper(helperCall);
6058             }
6059             else
6060             {
6061                 impInsertHelperCall(helperCall);
6062             }
6063             break;
6064         case CORINFO_ACCESS_RUNTIME_CHECK:
6065             impInsertHelperCall(helperCall);
6066             break;
6067     }
6068 }
6069
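//------------------------------------------------------------------------
// impInsertHelperCall: materialize the helper call described by helperInfo
// (used for access-check callouts). The helper arguments are walked
// last-to-first and prepended, so the resulting list ends up in left-to-right
// order, and the finished call is appended to the current statement list.
//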
6070 void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo)
6071 {
6072     // Construct the argument list
6073     GenTreeArgList* args = nullptr;
6074     assert(helperInfo->helperNum != CORINFO_HELP_UNDEF);
6075     for (unsigned i = helperInfo->numArgs; i > 0; --i)
6076     {
6077         const CORINFO_HELPER_ARG& helperArg  = helperInfo->args[i - 1];
6078         GenTreePtr                currentArg = nullptr;
6079         switch (helperArg.argType)
6080         {
6081             case CORINFO_HELPER_ARG_TYPE_Field:
6082                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
6083                     info.compCompHnd->getFieldClass(helperArg.fieldHandle));
6084                 currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle);
6085                 break;
6086             case CORINFO_HELPER_ARG_TYPE_Method:
6087                 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle);
6088                 currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle);
6089                 break;
6090             case CORINFO_HELPER_ARG_TYPE_Class:
6091                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle);
6092                 currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle);
6093                 break;
6094             case CORINFO_HELPER_ARG_TYPE_Module:
6095                 currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle);
6096                 break;
6097             case CORINFO_HELPER_ARG_TYPE_Const:
6098                 currentArg = gtNewIconNode(helperArg.constant);
6099                 break;
6100             default:
6101                 NO_WAY("Illegal helper arg type");
6102         }
6103         args = (currentArg == nullptr) ? gtNewArgList(currentArg) : gtNewListNode(currentArg, args);
6104     }
6105
6106     /* TODO-Review:
6107      * Mark as CSE'able, and hoistable.  Consider marking hoistable unless you're in the inlinee.
6108      * Also, consider sticking this in the first basic block.
6109      */
6110     GenTreePtr callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, GTF_EXCEPT, args);
6111     impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6112 }
6113
6114 void Compiler::impInsertCalloutForDelegate(CORINFO_METHOD_HANDLE callerMethodHnd,
6115                                            CORINFO_METHOD_HANDLE calleeMethodHnd,
6116                                            CORINFO_CLASS_HANDLE  delegateTypeHnd)
6117 {
6118 #ifdef FEATURE_CORECLR
6119     if (!info.compCompHnd->isDelegateCreationAllowed(delegateTypeHnd, calleeMethodHnd))
6120     {
6121         // Call the JIT_DelegateSecurityCheck helper before calling the actual function.
6122         // This helper throws an exception if the CLR host disallows the call.
6123
6124         GenTreePtr helper = gtNewHelperCallNode(CORINFO_HELP_DELEGATE_SECURITY_CHECK, TYP_VOID, GTF_EXCEPT,
6125                                                 gtNewArgList(gtNewIconEmbClsHndNode(delegateTypeHnd),
6126                                                              gtNewIconEmbMethHndNode(calleeMethodHnd)));
6127         // Append the callout statement
6128         impAppendTree(helper, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6129     }
6130 #endif // FEATURE_CORECLR
6131 }
6132
6133 // Checks whether the return types of caller and callee are compatible
6134 // so that callee can be tail called. Note that here we don't check
6135 // compatibility in IL Verifier sense, but on the lines of return type
6136 // sizes are equal and get returned in the same return register.
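// For example, on AMD64/ARM64 a void-returning caller may tail call a non-void
// callee (supporting the Jit64 call+pop+ret pattern noted below), and two
// struct returns that are enregistered with the same size are treated as
// compatible.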
6137 bool Compiler::impTailCallRetTypeCompatible(var_types            callerRetType,
6138                                             CORINFO_CLASS_HANDLE callerRetTypeClass,
6139                                             var_types            calleeRetType,
6140                                             CORINFO_CLASS_HANDLE calleeRetTypeClass)
6141 {
6142     // Note that we can not relax this condition with genActualType() as the
6143     // calling convention dictates that the caller of a function with a small
6144     // typed return value is responsible for normalizing the return val.
6145     if (callerRetType == calleeRetType)
6146     {
6147         return true;
6148     }
6149
6150 #if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
6151     // Jit64 compat:
6152     if (callerRetType == TYP_VOID)
6153     {
6154         // This needs to be allowed to support the following IL pattern that Jit64 allows:
6155         //     tail.call
6156         //     pop
6157         //     ret
6158         //
6159         // Note that the above IL pattern is not valid as per IL verification rules.
6160         // Therefore, only full trust code can take advantage of this pattern.
6161         return true;
6162     }
6163
6164     // These checks return true if the return value type sizes are the same and
6165     // get returned in the same return register i.e. caller doesn't need to normalize
6166     // return value. Some of the tail calls permitted by below checks would have
6167     // been rejected by IL Verifier before we reached here.  Therefore, only full
6168     // trust code can make those tail calls.
6169     unsigned callerRetTypeSize = 0;
6170     unsigned calleeRetTypeSize = 0;
6171     bool     isCallerRetTypMBEnreg =
6172         VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize, true);
6173     bool isCalleeRetTypMBEnreg =
6174         VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize, true);
6175
6176     if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg)
6177     {
6178         return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
6179     }
6180 #endif // _TARGET_AMD64_ || _TARGET_ARM64_
6181
6182     return false;
6183 }
6184
6185 // For prefixFlags
6186 enum
6187 {
6188     PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix
6189     PREFIX_TAILCALL_IMPLICIT =
6190         0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix
6191     PREFIX_TAILCALL    = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT),
6192     PREFIX_VOLATILE    = 0x00000100,
6193     PREFIX_UNALIGNED   = 0x00001000,
6194     PREFIX_CONSTRAINED = 0x00010000,
6195     PREFIX_READONLY    = 0x00100000
6196 };
6197
6198 /********************************************************************************
6199  *
6200  * Returns true if the current opcode and the opcodes following it correspond
6201  * to a supported tail call IL pattern.
6202  *
6203  */
6204 bool Compiler::impIsTailCallILPattern(bool        tailPrefixed,
6205                                       OPCODE      curOpcode,
6206                                       const BYTE* codeAddrOfNextOpcode,
6207                                       const BYTE* codeEnd,
6208                                       bool        isRecursive,
6209                                       bool*       isCallPopAndRet /* = nullptr */)
6210 {
6211     // Bail out if the current opcode is not a call.
6212     if (!impOpcodeIsCallOpcode(curOpcode))
6213     {
6214         return false;
6215     }
6216
6217 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6218     // If shared ret tail opt is not enabled, we will enable
6219     // it for recursive methods.
6220     if (isRecursive)
6221 #endif
6222     {
6223         // we can actually handle the case where the ret is in a fallthrough block, as long as that is the only
6224         // part of the sequence. Make sure we don't go past the end of the IL however.
6225         codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize);
6226     }
6227
6228     // Bail out if there is no next opcode after call
6229     if (codeAddrOfNextOpcode >= codeEnd)
6230     {
6231         return false;
6232     }
6233
6234     // Scan the opcodes to look for the following IL patterns if either
6235     //   i) the call is not tail prefixed (i.e. implicit tail call) or
6236     //  ii) if tail prefixed, IL verification is not needed for the method.
6237     //
6238     // Only in the above two cases we can allow the below tail call patterns
6239     // violating ECMA spec.
6240     //
6241     // Pattern1:
6242     //       call
6243     //       nop*
6244     //       ret
6245     //
6246     // Pattern2:
6247     //       call
6248     //       nop*
6249     //       pop
6250     //       nop*
6251     //       ret
6252     int    cntPop = 0;
6253     OPCODE nextOpcode;
6254
6255 #ifdef _TARGET_AMD64_
6256     do
6257     {
6258         nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6259         codeAddrOfNextOpcode += sizeof(__int8);
6260     } while ((codeAddrOfNextOpcode < codeEnd) &&         // Haven't reached end of method
6261              (!tailPrefixed || !tiVerificationNeeded) && // Not ".tail" prefixed or method requires no IL verification
6262              ((nextOpcode == CEE_NOP) || ((nextOpcode == CEE_POP) && (++cntPop == 1)))); // Next opcode = nop or exactly
6263                                                                                          // one pop seen so far.
6264 #else
6265     nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6266 #endif
6267
6268     if (isCallPopAndRet)
6269     {
6270         // Allow call+pop+ret to be tail call optimized if caller ret type is void
6271         *isCallPopAndRet = (nextOpcode == CEE_RET) && (cntPop == 1);
6272     }
6273
6274 #ifdef _TARGET_AMD64_
6275     // Jit64 Compat:
6276     // Tail call IL pattern could be either of the following
6277     // 1) call/callvirt/calli + ret
6278     // 2) call/callvirt/calli + pop + ret in a method returning void.
6279     return (nextOpcode == CEE_RET) && ((cntPop == 0) || ((cntPop == 1) && (info.compRetType == TYP_VOID)));
6280 #else //!_TARGET_AMD64_
6281     return (nextOpcode == CEE_RET) && (cntPop == 0);
6282 #endif
6283 }
6284
6285 /*****************************************************************************
6286  *
6287  * Determine whether the call could be converted to an implicit tail call
6288  *
6289  */
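// In short: when FEATURE_TAILCALL_OPT is enabled, the checks below require
// opts.compTailCallOpt, non-debuggable/non-MinOpts codegen, no explicit "tail."
// prefix, the call to be in a BBJ_RETURN block (or be recursive, unless
// shared-return tail call optimization is enabled), and a call+ret or
// call+pop+ret IL pattern.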
6290 bool Compiler::impIsImplicitTailCallCandidate(
6291     OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive)
6292 {
6293
6294 #if FEATURE_TAILCALL_OPT
6295     if (!opts.compTailCallOpt)
6296     {
6297         return false;
6298     }
6299
6300     if (opts.compDbgCode || opts.MinOpts())
6301     {
6302         return false;
6303     }
6304
6305     // must not be tail prefixed
6306     if (prefixFlags & PREFIX_TAILCALL_EXPLICIT)
6307     {
6308         return false;
6309     }
6310
6311 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6312     // the block containing call is marked as BBJ_RETURN
6313     // We allow shared ret tail call optimization on recursive calls even under
6314     // !FEATURE_TAILCALL_OPT_SHARED_RETURN.
6315     if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN))
6316         return false;
6317 #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN
6318
6319     // must be call+ret or call+pop+ret
6320     if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive))
6321     {
6322         return false;
6323     }
6324
6325     return true;
6326 #else
6327     return false;
6328 #endif // FEATURE_TAILCALL_OPT
6329 }
6330
6331 //------------------------------------------------------------------------
6332 // impImportCall: import a call-inspiring opcode
6333 //
6334 // Arguments:
6335 //    opcode                    - opcode that inspires the call
6336 //    pResolvedToken            - resolved token for the call target
6337 //    pConstrainedResolvedToken - resolved constraint token (or nullptr)
6338 //    newobjThis                - tree for the this pointer or uninitialized newobj temp (or nullptr)
6339 //    prefixFlags               - IL prefix flags for the call
6340 //    callInfo                  - EE supplied info for the call
6341 //    rawILOffset               - IL offset of the opcode
6342 //
6343 // Returns:
6344 //    Type of the call's return value.
6345 //
6346 // Notes:
6347 //    opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ.
6348 //
6349 //    For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated
6350 //    uninitialized object.
6351
6352 #ifdef _PREFAST_
6353 #pragma warning(push)
6354 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
6355 #endif
6356
6357 var_types Compiler::impImportCall(OPCODE                  opcode,
6358                                   CORINFO_RESOLVED_TOKEN* pResolvedToken,
6359                                   CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
6360                                   GenTreePtr              newobjThis,
6361                                   int                     prefixFlags,
6362                                   CORINFO_CALL_INFO*      callInfo,
6363                                   IL_OFFSET               rawILOffset)
6364 {
6365     assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI);
6366
6367     IL_OFFSETX             ilOffset                       = impCurILOffset(rawILOffset, true);
6368     var_types              callRetTyp                     = TYP_COUNT;
6369     CORINFO_SIG_INFO*      sig                            = nullptr;
6370     CORINFO_METHOD_HANDLE  methHnd                        = nullptr;
6371     CORINFO_CLASS_HANDLE   clsHnd                         = nullptr;
6372     unsigned               clsFlags                       = 0;
6373     unsigned               mflags                         = 0;
6374     unsigned               argFlags                       = 0;
6375     GenTreePtr             call                           = nullptr;
6376     GenTreeArgList*        args                           = nullptr;
6377     CORINFO_THIS_TRANSFORM constraintCallThisTransform    = CORINFO_NO_THIS_TRANSFORM;
6378     CORINFO_CONTEXT_HANDLE exactContextHnd                = nullptr;
6379     BOOL                   exactContextNeedsRuntimeLookup = FALSE;
6380     bool                   canTailCall                    = true;
6381     const char*            szCanTailCallFailReason        = nullptr;
6382     int                    tailCall                       = prefixFlags & PREFIX_TAILCALL;
6383     bool                   readonlyCall                   = (prefixFlags & PREFIX_READONLY) != 0;
6384
6385     // Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could
6386     // do that before tailcalls, but that is probably not the intended
6387     // semantic. So just disallow tailcalls from synchronized methods.
6388     // Also, popping arguments in a varargs function is more work and NYI
6389     // If we have a security object, we have to keep our frame around for callers
6390     // to see any imperative security.
6391     if (info.compFlags & CORINFO_FLG_SYNCH)
6392     {
6393         canTailCall             = false;
6394         szCanTailCallFailReason = "Caller is synchronized";
6395     }
6396 #if !FEATURE_FIXED_OUT_ARGS
6397     else if (info.compIsVarArgs)
6398     {
6399         canTailCall             = false;
6400         szCanTailCallFailReason = "Caller is varargs";
6401     }
6402 #endif // FEATURE_FIXED_OUT_ARGS
6403     else if (opts.compNeedSecurityCheck)
6404     {
6405         canTailCall             = false;
6406         szCanTailCallFailReason = "Caller requires a security check.";
6407     }
6408
6409     // We only need to cast the return value of pinvoke inlined calls that return small types
6410
6411     // TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop
6412     // widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there.
6413     // The existing x64 JIT doesn't bother widening all types to int, so we have to assume for
6414     // the time being that the callee might be compiled by the other JIT and thus the return
6415     // value will need to be widened by us (or not widened at all...)
6416
6417     // ReadyToRun code sticks with the default calling convention, which does not widen small return types.
6418
6419     bool checkForSmallType  = opts.IsJit64Compat() || opts.IsReadyToRun();
6420     bool bIntrinsicImported = false;
6421
6422     CORINFO_SIG_INFO calliSig;
6423     GenTreeArgList*  extraArg = nullptr;
6424
6425     /*-------------------------------------------------------------------------
6426      * First create the call node
6427      */
6428
6429     if (opcode == CEE_CALLI)
6430     {
6431         /* Get the call site sig */
6432         eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &calliSig);
6433
6434         callRetTyp = JITtype2varType(calliSig.retType);
6435         clsHnd     = calliSig.retTypeClass;
6436
6437         call = impImportIndirectCall(&calliSig, ilOffset);
6438
6439         // We don't know the target method, so we have to infer the flags, or
6440         // assume the worst-case.
6441         mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
6442
6443 #ifdef DEBUG
6444         if (verbose)
6445         {
6446             unsigned structSize =
6447                 (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0;
6448             printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
6449                    opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
6450         }
6451 #endif
6452         // This should be checked in impImportBlockCode.
6453         assert(!compIsForInlining() || !(impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY));
6454
6455         sig = &calliSig;
6456
6457 #ifdef DEBUG
6458         // We cannot lazily obtain the signature of a CALLI call because it has no method
6459         // handle that we can use, so we need to save its full call signature here.
6460         assert(call->gtCall.callSig == nullptr);
6461         call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
6462         *call->gtCall.callSig = calliSig;
6463 #endif // DEBUG
6464
6465         if (IsTargetAbi(CORINFO_CORERT_ABI))
6466         {
6467             bool managedCall = (calliSig.callConv & GTF_CALL_UNMANAGED) == 0;
6468             if (managedCall)
6469             {
6470                 addFatPointerCandidate(call->AsCall());
6471             }
6472         }
6473     }
6474     else // (opcode != CEE_CALLI)
6475     {
6476         CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Count;
6477
6478         // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to
6479         // supply the instantiation parameters necessary to make direct calls to underlying
6480         // shared generic code, rather than calling through instantiating stubs.  If the
6481         // returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT
6482         // must indeed pass an instantiation parameter.
6483
6484         methHnd = callInfo->hMethod;
6485
6486         sig        = &(callInfo->sig);
6487         callRetTyp = JITtype2varType(sig->retType);
6488
6489         mflags = callInfo->methodFlags;
6490
6491 #ifdef DEBUG
6492         if (verbose)
6493         {
6494             unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0;
6495             printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
6496                    opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
6497         }
6498 #endif
6499         if (compIsForInlining())
6500         {
6501             /* Does this call site have security boundary restrictions? */
6502
6503             if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
6504             {
6505                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
6506                 return callRetTyp;
6507             }
6508
6509             /* Does the inlinee need a security check token on the frame */
6510
6511             if (mflags & CORINFO_FLG_SECURITYCHECK)
6512             {
6513                 compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
6514                 return callRetTyp;
6515             }
6516
6517             /* Does the inlinee use StackCrawlMark */
6518
6519             if (mflags & CORINFO_FLG_DONT_INLINE_CALLER)
6520             {
6521                 compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK);
6522                 return callRetTyp;
6523             }
6524
6525             /* For now ignore delegate invoke */
6526
6527             if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
6528             {
6529                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_DELEGATE_INVOKE);
6530                 return callRetTyp;
6531             }
6532
6533             /* For now ignore varargs */
6534             if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6535             {
6536                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS);
6537                 return callRetTyp;
6538             }
6539
6540             if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
6541             {
6542                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
6543                 return callRetTyp;
6544             }
6545
6546             if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT))
6547             {
6548                 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL);
6549                 return callRetTyp;
6550             }
6551         }
6552
6553         clsHnd = pResolvedToken->hClass;
6554
6555         clsFlags = callInfo->classFlags;
6556
6557 #ifdef DEBUG
6558         // If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute.
6559
6560         // This recognition should really be done by knowing the methHnd of the relevant Mark method(s).
6561         // These should be in mscorlib.h, and available through a JIT/EE interface call.
6562         const char* modName;
6563         const char* className;
6564         const char* methodName;
6565         if ((className = eeGetClassName(clsHnd)) != nullptr &&
6566             strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 &&
6567             (methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0)
6568         {
6569             return impImportJitTestLabelMark(sig->numArgs);
6570         }
6571 #endif // DEBUG
6572
6573         // <NICE> Factor this into getCallInfo </NICE>
6574         if ((mflags & CORINFO_FLG_INTRINSIC) && !pConstrainedResolvedToken)
6575         {
6576             call = impIntrinsic(newobjThis, clsHnd, methHnd, sig, pResolvedToken->token, readonlyCall,
6577                                 (canTailCall && (tailCall != 0)), &intrinsicID);
6578
6579             if (call != nullptr)
6580             {
6581                 assert(!(mflags & CORINFO_FLG_VIRTUAL) || (mflags & CORINFO_FLG_FINAL) ||
6582                        (clsFlags & CORINFO_FLG_FINAL));
6583
6584 #ifdef FEATURE_READYTORUN_COMPILER
6585                 if (call->OperGet() == GT_INTRINSIC)
6586                 {
6587                     if (opts.IsReadyToRun())
6588                     {
6589                         noway_assert(callInfo->kind == CORINFO_CALL);
6590                         call->gtIntrinsic.gtEntryPoint = callInfo->codePointerLookup.constLookup;
6591                     }
6592                     else
6593                     {
6594                         call->gtIntrinsic.gtEntryPoint.addr = nullptr;
6595                     }
6596                 }
6597 #endif
6598
6599                 bIntrinsicImported = true;
6600                 goto DONE_CALL;
6601             }
6602         }
6603
6604 #ifdef FEATURE_SIMD
6605         if (featureSIMD)
6606         {
6607             call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, pResolvedToken->token);
6608             if (call != nullptr)
6609             {
6610                 bIntrinsicImported = true;
6611                 goto DONE_CALL;
6612             }
6613         }
6614 #endif // FEATURE_SIMD
6615
6616         if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT))
6617         {
6618             NO_WAY("Virtual call to a function added via EnC is not supported");
6619         }
6620
6621         if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT &&
6622             (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6623             (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG)
6624         {
6625             BADCODE("Bad calling convention");
6626         }
6627
6628         //-------------------------------------------------------------------------
6629         //  Construct the call node
6630         //
6631         // Work out what sort of call we're making.
6632         // Dispense with virtual calls implemented via LDVIRTFTN immediately.
6633
6634         constraintCallThisTransform = callInfo->thisTransform;
6635
6636         exactContextHnd                = callInfo->contextHandle;
6637         exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup;
6638
6639         // A recursive call is treated as a loop back to the beginning of the method.
6640         if (methHnd == info.compMethodHnd)
6641         {
6642 #ifdef DEBUG
6643             if (verbose)
6644             {
6645                 JITDUMP("\nFound recursive call in the method. Mark BB%02u to BB%02u as having a backward branch.\n",
6646                         fgFirstBB->bbNum, compCurBB->bbNum);
6647             }
6648 #endif
6649             fgMarkBackwardJump(fgFirstBB, compCurBB);
6650         }
6651
6652         switch (callInfo->kind)
6653         {
6654
6655             case CORINFO_VIRTUALCALL_STUB:
6656             {
6657                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6658                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6659                 if (callInfo->stubLookup.lookupKind.needsRuntimeLookup)
6660                 {
6661
6662                     if (compIsForInlining())
6663                     {
6664                         // Don't import runtime lookups when inlining
6665                         // Inlining has to be aborted in such a case
6666                         /* XXX Fri 3/20/2009
6667                          * By the way, this would never succeed.  If the handle lookup is into the generic
6668                          * dictionary for a candidate, you'll generate different dictionary offsets and the
6669                          * inlined code will crash.
6670                          *
6671                          * To anyone reviewing this code, when could this ever succeed in the future?  It'll
6672                          * always have a handle lookup.  These lookups are safe intra-module, but we're just
6673                          * failing here.
6674                          */
6675                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE);
6676                         return callRetTyp;
6677                     }
6678
6679                     GenTreePtr stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd);
6680                     assert(!compDonotInline());
6681
6682                     // This is the rough code to set up an indirect stub call
6683                     assert(stubAddr != nullptr);
6684
6685                     // The stubAddr may be a
6686                     // complex expression. As it is evaluated after the args,
6687                     // it may cause registered args to be spilled. Simply spill it.
6688
6689                     unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup"));
6690                     impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_ALL);
6691                     stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6692
6693                     // Create the actual call node
6694
6695                     assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6696                            (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6697
6698                     call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr);
6699
6700                     call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT);
6701                     call->gtFlags |= GTF_CALL_VIRT_STUB;
6702
6703 #ifdef _TARGET_X86_
6704                     // No tailcalls allowed for these yet...
6705                     canTailCall             = false;
6706                     szCanTailCallFailReason = "VirtualCall with runtime lookup";
6707 #endif
6708                 }
6709                 else
6710                 {
6711                     // OK, the stub is available at compile time.
6712
6713                     call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6714                     call->gtCall.gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr;
6715                     call->gtFlags |= GTF_CALL_VIRT_STUB;
6716                     assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE);
6717                     if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE)
6718                     {
6719                         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
6720                     }
6721                 }
6722
6723 #ifdef FEATURE_READYTORUN_COMPILER
6724                 if (opts.IsReadyToRun())
6725                 {
6726                     // Null check is sometimes needed for ready to run to handle
6727                     // non-virtual <-> virtual changes between versions
6728                     if (callInfo->nullInstanceCheck)
6729                     {
6730                         call->gtFlags |= GTF_CALL_NULLCHECK;
6731                     }
6732                 }
6733 #endif
6734
6735                 break;
6736             }
6737
6738             case CORINFO_VIRTUALCALL_VTABLE:
6739             {
6740                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6741                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6742                 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6743                 call->gtFlags |= GTF_CALL_VIRT_VTABLE;
6744                 break;
6745             }
6746
6747             case CORINFO_VIRTUALCALL_LDVIRTFTN:
6748             {
6749                 if (compIsForInlining())
6750                 {
6751                     compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN);
6752                     return callRetTyp;
6753                 }
6754
6755                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6756                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6757                 // OK, we've been told to call via LDVIRTFTN, so just
6758                 // take the call now....
6759
6760                 args = impPopList(sig->numArgs, &argFlags, sig);
6761
6762                 GenTreePtr thisPtr = impPopStack().val;
6763                 thisPtr            = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
6764                 if (compDonotInline())
6765                 {
6766                     return callRetTyp;
6767                 }
6768
6769                 // Clone the (possibly transformed) "this" pointer
6770                 GenTreePtr thisPtrCopy;
6771                 thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
6772                                        nullptr DEBUGARG("LDVIRTFTN this pointer"));
6773
6774                 GenTreePtr fptr = nullptr;
6775                 bool       coreRTGenericVirtualMethod =
6776                     ((sig->callConv & CORINFO_CALLCONV_GENERIC) != 0) && IsTargetAbi(CORINFO_CORERT_ABI);
6777 #if COR_JIT_EE_VERSION > 460
6778                 if (coreRTGenericVirtualMethod)
6779                 {
6780                     GenTreePtr runtimeMethodHandle = nullptr;
6781                     if (callInfo->exactContextNeedsRuntimeLookup)
6782                     {
6783                         runtimeMethodHandle =
6784                             impRuntimeLookupToTree(pResolvedToken, &callInfo->codePointerLookup, methHnd);
6785                     }
6786                     else
6787                     {
6788                         runtimeMethodHandle = gtNewIconEmbMethHndNode(pResolvedToken->hMethod);
6789                     }
6790                     fptr = gtNewHelperCallNode(CORINFO_HELP_GVMLOOKUP_FOR_SLOT, TYP_I_IMPL, GTF_EXCEPT,
6791                                                gtNewArgList(thisPtr, runtimeMethodHandle));
6792                 }
6793                 else
6794 #endif // COR_JIT_EE_VERSION
6795                 {
6796                     fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
6797                 }
6798
6799                 if (compDonotInline())
6800                 {
6801                     return callRetTyp;
6802                 }
6803
6804                 thisPtr = nullptr; // can't reuse it
6805
6806                 // Now make an indirect call through the function pointer
6807
6808                 unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer"));
6809                 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
6810                 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6811
6812                 // Create the actual call node
6813
6814                 call                    = gtNewIndCallNode(fptr, callRetTyp, args, ilOffset);
6815                 call->gtCall.gtCallObjp = thisPtrCopy;
6816                 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6817
6818                 if (coreRTGenericVirtualMethod)
6819                 {
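                    // Under the CoreRT ABI the GVM lookup helper may return a "fat" function
                    // pointer; mark the call as a fat-pointer candidate so later phases can
                    // expand it into the appropriate check-and-call sequence.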
6820                     addFatPointerCandidate(call->AsCall());
6821                 }
6822 #ifdef FEATURE_READYTORUN_COMPILER
6823                 if (opts.IsReadyToRun())
6824                 {
6825                     // Null check is needed for ready to run to handle
6826                     // non-virtual <-> virtual changes between versions
6827                     call->gtFlags |= GTF_CALL_NULLCHECK;
6828                 }
6829 #endif
6830
6831                 // Since we are jumping over some code, check that it's OK to skip that code
6832                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6833                        (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6834                 goto DONE;
6835             }
6836
6837             case CORINFO_CALL:
6838             {
6839                 // This is for a non-virtual, non-interface etc. call
6840                 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6841
6842                 // We remove the nullcheck for the GetType call intrinsic.
6843                 // TODO-CQ: JIT64 does not introduce the null check for many more helper calls
6844                 // and intrinsics.
6845                 if (callInfo->nullInstanceCheck &&
6846                     !((mflags & CORINFO_FLG_INTRINSIC) != 0 && (intrinsicID == CORINFO_INTRINSIC_Object_GetType)))
6847                 {
6848                     call->gtFlags |= GTF_CALL_NULLCHECK;
6849                 }
6850
6851 #ifdef FEATURE_READYTORUN_COMPILER
6852                 if (opts.IsReadyToRun())
6853                 {
6854                     call->gtCall.setEntryPoint(callInfo->codePointerLookup.constLookup);
6855                 }
6856 #endif
6857                 break;
6858             }
6859
6860             case CORINFO_CALL_CODE_POINTER:
6861             {
6862                 // The EE has asked us to call by computing a code pointer and then doing an
6863                 // indirect call.  This is because a runtime lookup is required to get the code entry point.
6864
6865                 // These calls always follow a uniform calling convention, i.e. no extra hidden params
6866                 assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0);
6867
6868                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG);
6869                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6870
6871                 GenTreePtr fptr =
6872                     impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod);
6873
6874                 if (compDonotInline())
6875                 {
6876                     return callRetTyp;
6877                 }
6878
6879                 // Now make an indirect call through the function pointer
6880
6881                 unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer"));
6882                 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
6883                 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6884
6885                 call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
6886                 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6887                 if (callInfo->nullInstanceCheck)
6888                 {
6889                     call->gtFlags |= GTF_CALL_NULLCHECK;
6890                 }
6891
6892                 break;
6893             }
6894
6895             default:
6896                 assert(!"unknown call kind");
6897                 break;
6898         }
6899
6900         //-------------------------------------------------------------------------
6901         // Set more flags
6902
6903         PREFIX_ASSUME(call != nullptr);
6904
6905         if (mflags & CORINFO_FLG_NOGCCHECK)
6906         {
6907             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK;
6908         }
6909
6910         // Mark the call if it's one of the ones we may later treat as an intrinsic
6911         if (intrinsicID == CORINFO_INTRINSIC_Object_GetType || intrinsicID == CORINFO_INTRINSIC_TypeEQ ||
6912             intrinsicID == CORINFO_INTRINSIC_TypeNEQ || intrinsicID == CORINFO_INTRINSIC_GetCurrentManagedThread ||
6913             intrinsicID == CORINFO_INTRINSIC_GetManagedThreadId)
6914         {
6915             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC;
6916         }
6917     }
6918     assert(sig);
6919     assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set.
6920
6921     /* Some sanity checks */
6922
6923     // CALL_VIRT and NEWOBJ must have a THIS pointer
6924     assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS));
6925     // static bit and hasThis are negations of one another
6926     assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0));
6927     assert(call != nullptr);
6928
6929     /*-------------------------------------------------------------------------
6930      * Check special-cases etc
6931      */
6932
6933     /* Special case - Check if it is a call to Delegate.Invoke(). */
6934
6935     if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
6936     {
6937         assert(!compIsForInlining());
6938         assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6939         assert(mflags & CORINFO_FLG_FINAL);
6940
6941         /* Set the delegate flag */
6942         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV;
6943
6944         if (callInfo->secureDelegateInvoke)
6945         {
6946             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SECURE_DELEGATE_INV;
6947         }
6948
6949         if (opcode == CEE_CALLVIRT)
6950         {
6951             assert(mflags & CORINFO_FLG_FINAL);
6952
6953             /* It should have the GTF_CALL_NULLCHECK flag set. Reset it */
6954             assert(call->gtFlags & GTF_CALL_NULLCHECK);
6955             call->gtFlags &= ~GTF_CALL_NULLCHECK;
6956         }
6957     }
6958
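    // Remember the declared return type class; if the call returns a struct, normalize
    // the return type recorded on the call node (impNormStructType may, for example, map
    // a recognized SIMD struct to a more specific type).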
6959     CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass;
6960     actualMethodRetTypeSigClass = sig->retTypeSigClass;
6961     if (varTypeIsStruct(callRetTyp))
6962     {
6963         callRetTyp   = impNormStructType(actualMethodRetTypeSigClass);
6964         call->gtType = callRetTyp;
6965     }
6966
6967 #if !FEATURE_VARARG
6968     /* Check for varargs */
6969     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
6970         (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6971     {
6972         BADCODE("Varargs not supported.");
6973     }
6974 #endif // !FEATURE_VARARG
6975
6976     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
6977         (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6978     {
6979         assert(!compIsForInlining());
6980
6981         /* Set the right flags */
6982
6983         call->gtFlags |= GTF_CALL_POP_ARGS;
6984         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VARARGS;
6985
6986         /* Can't allow tailcall for varargs as it is caller-pop. The caller
6987            will be expecting to pop a certain number of arguments, but if we
6988            tailcall to a function with a different number of arguments, we
6989            are hosed. There are ways around this (caller remembers esp value,
6990            varargs is not caller-pop, etc), but not worth it. */
6991         CLANG_FORMAT_COMMENT_ANCHOR;
6992
6993 #ifdef _TARGET_X86_
6994         if (canTailCall)
6995         {
6996             canTailCall             = false;
6997             szCanTailCallFailReason = "Callee is varargs";
6998         }
6999 #endif
7000
7001         /* Get the total number of arguments - this is already correct
7002          * for CALLI - for methods we have to get it from the call site */
7003
7004         if (opcode != CEE_CALLI)
7005         {
7006 #ifdef DEBUG
7007             unsigned numArgsDef = sig->numArgs;
7008 #endif
7009             eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, sig);
7010
7011 #ifdef DEBUG
7012             // We cannot lazily obtain the signature of a vararg call because using its method
7013             // handle will give us only the declared argument list, not the full argument list.
7014             assert(call->gtCall.callSig == nullptr);
7015             call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7016             *call->gtCall.callSig = *sig;
7017 #endif
7018
7019             // For vararg calls we must be sure to load the return type of the
7020             // method actually being called, as well as the return type
7021             // specified in the vararg signature. With type equivalency, these types
7022             // may not be the same.
7023             if (sig->retTypeSigClass != actualMethodRetTypeSigClass)
7024             {
7025                 if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
7026                     sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR &&
7027                     sig->retType != CORINFO_TYPE_VAR)
7028                 {
7029                     // Make sure that all valuetypes (including enums) that we push are loaded.
7030                     // This is to guarantee that if a GC is triggered from the prestub of this method,
7031                     // all valuetypes in the method signature are already loaded.
7032                     // We need to be able to find the size of the valuetypes, but we cannot
7033                     // do a class-load from within GC.
7034                     info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass);
7035                 }
7036             }
7037
7038             assert(numArgsDef <= sig->numArgs);
7039         }
7040
7041         /* We will have "cookie" as the last argument but we cannot push
7042          * it on the operand stack because we may overflow, so we append it
7043      * to the arg list after we pop the other arguments */
7044     }
7045
7046     if (mflags & CORINFO_FLG_SECURITYCHECK)
7047     {
7048         assert(!compIsForInlining());
7049
7050         // Need security prolog/epilog callouts when there is
7051         // imperative security in the method. This is to give security a
7052         // chance to do any setup in the prolog and cleanup in the epilog if needed.
7053
7054         if (compIsForInlining())
7055         {
7056             // Cannot handle this if the method being imported is itself an inlinee,
7057             // because an inlinee does not have its own frame.
7058
7059             compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
7060             return callRetTyp;
7061         }
7062         else
7063         {
7064             tiSecurityCalloutNeeded = true;
7065
7066             // If the current method calls a method which needs a security check,
7067             // (i.e. the method being compiled has imperative security)
7068             // we need to reserve a slot for the security object in
7069             // the current method's stack frame
7070             opts.compNeedSecurityCheck = true;
7071         }
7072     }
7073
7074     //--------------------------- Inline NDirect ------------------------------
7075
7076     // For inline cases we technically should look at both the current
7077     // block and the call site block (or just the latter if we've
7078     // fused the EH trees). However the block-related checks pertain to
7079     // EH and we currently won't inline a method with EH. So for
7080     // inlinees, just checking the call site block is sufficient.
7081     {
7082         // New lexical block here to avoid compilation errors because of GOTOs.
7083         BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
7084         impCheckForPInvokeCall(call, methHnd, sig, mflags, block);
7085     }
7086
7087     if (call->gtFlags & GTF_CALL_UNMANAGED)
7088     {
7089         // We set up the unmanaged call by linking the frame, disabling GC, etc
7090         // This needs to be cleaned up on return
7091         if (canTailCall)
7092         {
7093             canTailCall             = false;
7094             szCanTailCallFailReason = "Callee is native";
7095         }
7096
7097         checkForSmallType = true;
7098
7099         impPopArgsForUnmanagedCall(call, sig);
7100
7101         goto DONE;
7102     }
7103     else if ((opcode == CEE_CALLI) && (((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_STDCALL) ||
7104                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_C) ||
7105                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_THISCALL) ||
7106                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_FASTCALL)))
7107     {
7108         if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig))
7109         {
7110             // Normally this only happens with inlining.
7111             // However, a generic method (or type) being NGENd into another module
7112             // can run into this issue as well.  There's no easy fall-back for NGEN,
7113             // so instead we fall back to JIT.
7114             if (compIsForInlining())
7115             {
7116                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE);
7117             }
7118             else
7119             {
7120                 IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)");
7121             }
7122
7123             return callRetTyp;
7124         }
7125
7126         GenTreePtr cookie = eeGetPInvokeCookie(sig);
7127
7128         // This cookie is required to be either a simple GT_CNS_INT or
7129         // an indirection of a GT_CNS_INT
7130         //
7131         GenTreePtr cookieConst = cookie;
7132         if (cookie->gtOper == GT_IND)
7133         {
7134             cookieConst = cookie->gtOp.gtOp1;
7135         }
7136         assert(cookieConst->gtOper == GT_CNS_INT);
7137
7138         // Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that
7139         // we won't allow this tree to participate in any CSE logic
7140         //
7141         cookie->gtFlags |= GTF_DONT_CSE;
7142         cookieConst->gtFlags |= GTF_DONT_CSE;
7143
7144         call->gtCall.gtCallCookie = cookie;
7145
7146         if (canTailCall)
7147         {
7148             canTailCall             = false;
7149             szCanTailCallFailReason = "PInvoke calli";
7150         }
7151     }
7152
7153     /*-------------------------------------------------------------------------
7154      * Create the argument list
7155      */
7156
7157     //-------------------------------------------------------------------------
7158     // Special case - for varargs we have an implicit last argument
7159
7160     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
7161     {
7162         assert(!compIsForInlining());
7163
7164         void *varCookie, *pVarCookie;
7165         if (!info.compCompHnd->canGetVarArgsHandle(sig))
7166         {
7167             compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE);
7168             return callRetTyp;
7169         }
7170
7171         varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie);
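        // Exactly one of varCookie / pVarCookie is non-null: either the cookie value is
        // known now, or it must be loaded indirectly through pVarCookie at run time.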
7172         assert((!varCookie) != (!pVarCookie));
7173         GenTreePtr cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL);
7174
7175         assert(extraArg == nullptr);
7176         extraArg = gtNewArgList(cookie);
7177     }
7178
7179     //-------------------------------------------------------------------------
7180     // Extra arg for shared generic code and array methods
7181     //
7182     // Extra argument containing instantiation information is passed in the
7183     // following circumstances:
7184     // (a) To the "Address" method on array classes; the extra parameter is
7185     //     the array's type handle (a TypeDesc)
7186     // (b) To shared-code instance methods in generic structs; the extra parameter
7187     //     is the struct's type handle (a vtable ptr)
7188     // (c) To shared-code per-instantiation non-generic static methods in generic
7189     //     classes and structs; the extra parameter is the type handle
7190     // (d) To shared-code generic methods; the extra parameter is an
7191     //     exact-instantiation MethodDesc
7192     //
7193     // We also set the exact type context associated with the call so we can
7194     // inline the call correctly later on.
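    // For example (illustrative only, not code from this repository): a shared generic
    // method such as
    //     static void M<T>(T x) { ... }   // one native body shared by all reference type T's
    // called as M<string>(s) is passed an extra MethodDesc describing the exact
    // instantiation; this is case (d) above.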
7195
7196     if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE)
7197     {
7198         assert(call->gtCall.gtCallType == CT_USER_FUNC);
7199         if (clsHnd == nullptr)
7200         {
7201             NO_WAY("CALLI on parameterized type");
7202         }
7203
7204         assert(opcode != CEE_CALLI);
7205
7206         GenTreePtr instParam;
7207         BOOL       runtimeLookup;
7208
7209         // Instantiated generic method
7210         if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD)
7211         {
7212             CORINFO_METHOD_HANDLE exactMethodHandle =
7213                 (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7214
7215             if (!exactContextNeedsRuntimeLookup)
7216             {
7217 #ifdef FEATURE_READYTORUN_COMPILER
7218                 if (opts.IsReadyToRun())
7219                 {
7220                     instParam =
7221                         impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle);
7222                     if (instParam == nullptr)
7223                     {
7224                         return callRetTyp;
7225                     }
7226                 }
7227                 else
7228 #endif
7229                 {
7230                     instParam = gtNewIconEmbMethHndNode(exactMethodHandle);
7231                     info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle);
7232                 }
7233             }
7234             else
7235             {
7236                 instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7237                 if (instParam == nullptr)
7238                 {
7239                     return callRetTyp;
7240                 }
7241             }
7242         }
7243
7244         // otherwise must be an instance method in a generic struct,
7245         // a static method in a generic type, or a runtime-generated array method
7246         else
7247         {
7248             assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS);
7249             CORINFO_CLASS_HANDLE exactClassHandle =
7250                 (CORINFO_CLASS_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7251
7252             if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0)
7253             {
7254                 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD);
7255                 return callRetTyp;
7256             }
7257
7258             if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall)
7259             {
7260                 // We indicate "readonly" to the Address operation by using a null
7261                 // instParam.
7262                 instParam = gtNewIconNode(0, TYP_REF);
7263             }
7264
7265             if (!exactContextNeedsRuntimeLookup)
7266             {
7267 #ifdef FEATURE_READYTORUN_COMPILER
7268                 if (opts.IsReadyToRun())
7269                 {
7270                     instParam =
7271                         impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle);
7272                     if (instParam == nullptr)
7273                     {
7274                         return callRetTyp;
7275                     }
7276                 }
7277                 else
7278 #endif
7279                 {
7280                     instParam = gtNewIconEmbClsHndNode(exactClassHandle);
7281                     info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle);
7282                 }
7283             }
7284             else
7285             {
7286                 instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7287                 if (instParam == nullptr)
7288                 {
7289                     return callRetTyp;
7290                 }
7291             }
7292         }
7293
7294         assert(extraArg == nullptr);
7295         extraArg = gtNewArgList(instParam);
7296     }
7297
7298     // Inlining may need the exact type context (exactContextHnd) if we're inlining shared generic code, in particular
7299     // to inline 'polytypic' operations such as static field accesses, type tests and method calls which
7300     // rely on the exact context. The exactContextHnd is passed back to the JitInterface at appropriate points.
7301     // exactContextHnd is not currently required when inlining shared generic code into shared
7302     // generic code, since the inliner aborts whenever shared code polytypic operations are encountered
7303     // (e.g. anything marked needsRuntimeLookup)
7304     if (exactContextNeedsRuntimeLookup)
7305     {
7306         exactContextHnd = nullptr;
7307     }
7308
7309     //-------------------------------------------------------------------------
7310     // The main group of arguments
7311
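    // impPopList pops sig->numArgs arguments from the stack into an argument list and
    // folds in extraArg (the vararg cookie or instantiation parameter) when one is present.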
7312     args = call->gtCall.gtCallArgs = impPopList(sig->numArgs, &argFlags, sig, extraArg);
7313
7314     if (args)
7315     {
7316         call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
7317     }
7318
7319     //-------------------------------------------------------------------------
7320     // The "this" pointer
7321
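    // All non-static calls take a 'this' argument. For CEE_NEWOBJ the newly allocated
    // object (newobjThis) serves as 'this'; when newobjThis is null (e.g. variable-sized
    // objects such as strings, where the constructor itself returns the object) nothing
    // is attached here.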
7322     if (!(mflags & CORINFO_FLG_STATIC) && !((opcode == CEE_NEWOBJ) && (newobjThis == nullptr)))
7323     {
7324         GenTreePtr obj;
7325
7326         if (opcode == CEE_NEWOBJ)
7327         {
7328             obj = newobjThis;
7329         }
7330         else
7331         {
7332             obj = impPopStack().val;
7333             obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform);
7334             if (compDonotInline())
7335             {
7336                 return callRetTyp;
7337             }
7338         }
7339
7340         /* Is this a virtual or interface call? */
7341
7342         if ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT)
7343         {
7344             /* only true object pointers can be virtual */
7345
7346             assert(obj->gtType == TYP_REF);
7347         }
7348         else
7349         {
7350             if (impIsThis(obj))
7351             {
7352                 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS;
7353             }
7354         }
7355
7356         /* Store the "this" value in the call */
7357
7358         call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT;
7359         call->gtCall.gtCallObjp = obj;
7360     }
7361
7362     //-------------------------------------------------------------------------
7363     // The "this" pointer for "newobj"
7364
7365     if (opcode == CEE_NEWOBJ)
7366     {
7367         if (clsFlags & CORINFO_FLG_VAROBJSIZE)
7368         {
7369             assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately
7370             // This is a 'new' of a variable sized object, where
7371             // the constructor is to return the object.  In this case
7372             // the constructor claims to return VOID but we know it
7373             // actually returns the new object
7374             assert(callRetTyp == TYP_VOID);
7375             callRetTyp   = TYP_REF;
7376             call->gtType = TYP_REF;
7377             impSpillSpecialSideEff();
7378
7379             impPushOnStack(call, typeInfo(TI_REF, clsHnd));
7380         }
7381         else
7382         {
7383             if (clsFlags & CORINFO_FLG_DELEGATE)
7384             {
7385                 // The new inliner morphs it here in impImportCall;
7386                 // this will allow us to inline the call to the delegate constructor.
7387                 call = fgOptimizeDelegateConstructor(call, &exactContextHnd);
7388             }
7389
7390             if (!bIntrinsicImported)
7391             {
7392
7393 #if defined(DEBUG) || defined(INLINE_DATA)
7394
7395                 // Keep track of the raw IL offset of the call
7396                 call->gtCall.gtRawILOffset = rawILOffset;
7397
7398 #endif // defined(DEBUG) || defined(INLINE_DATA)
7399
7400                 // Is it an inline candidate?
7401                 impMarkInlineCandidate(call, exactContextHnd, callInfo);
7402             }
7403
7404             // append the call node.
7405             impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7406
7407             // Now push the value of the 'new' onto the stack
7408
7409             // This is a 'new' of a non-variable sized object.
7410             // Append the new node (op1) to the statement list,
7411             // and then push the local holding the value of this
7412             // new instruction on the stack.
7413
7414             if (clsFlags & CORINFO_FLG_VALUECLASS)
7415             {
7416                 assert(newobjThis->gtOper == GT_ADDR && newobjThis->gtOp.gtOp1->gtOper == GT_LCL_VAR);
7417
7418                 unsigned tmp = newobjThis->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
7419                 impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack());
7420             }
7421             else
7422             {
7423                 if (newobjThis->gtOper == GT_COMMA)
7424                 {
7425                     // In coreclr the callout can be inserted even if verification is disabled
7426                     // so we cannot rely on tiVerificationNeeded alone
7427
7428                     // We must have inserted the callout. Get the real newobj.
7429                     newobjThis = newobjThis->gtOp.gtOp2;
7430                 }
7431
7432                 assert(newobjThis->gtOper == GT_LCL_VAR);
7433                 impPushOnStack(gtNewLclvNode(newobjThis->gtLclVarCommon.gtLclNum, TYP_REF), typeInfo(TI_REF, clsHnd));
7434             }
7435         }
7436         return callRetTyp;
7437     }
7438
7439 DONE:
7440
7441     if (tailCall)
7442     {
7443         // This check cannot be performed for implicit tail calls for the reason
7444         // that impIsImplicitTailCallCandidate() is not checking whether return
7445         // types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT.
7446         // As a result it is possible that in the following case, we find that
7447         // the type stack is non-empty if Callee() is considered for implicit
7448         // tail calling.
7449         //      int Caller(..) { .... void Callee(); ret val; ... }
7450         //
7451         // Note that we cannot check return type compatibility before impImportCall()
7452         // as we don't have the required info, or we would need to duplicate some of
7453         // the logic of impImportCall().
7454         //
7455         // For implicit tail calls, we perform this check after return types are
7456         // known to be compatible.
7457         if ((tailCall & PREFIX_TAILCALL_EXPLICIT) && (verCurrentState.esStackDepth != 0))
7458         {
7459             BADCODE("Stack should be empty after tailcall");
7460         }
7461
7462         // Note that we cannot relax this condition with genActualType() as
7463         // the calling convention dictates that the caller of a function with
7464         // a small-typed return value is responsible for normalizing the return value.
7465
7466         if (canTailCall &&
7467             !impTailCallRetTypeCompatible(info.compRetType, info.compMethodInfo->args.retTypeClass, callRetTyp,
7468                                           callInfo->sig.retTypeClass))
7469         {
7470             canTailCall             = false;
7471             szCanTailCallFailReason = "Return types are not tail call compatible";
7472         }
7473
7474         // Stack empty check for implicit tail calls.
7475         if (canTailCall && (tailCall & PREFIX_TAILCALL_IMPLICIT) && (verCurrentState.esStackDepth != 0))
7476         {
7477 #ifdef _TARGET_AMD64_
7478             // JIT64 Compatibility:  Opportunistic tail call stack mismatch throws a VerificationException
7479             // in JIT64, not an InvalidProgramException.
7480             Verify(false, "Stack should be empty after tailcall");
7481 #else  // _TARGET_64BIT_
7482             BADCODE("Stack should be empty after tailcall");
7483 #endif //!_TARGET_64BIT_
7484         }
7485
7486         // assert(compCurBB is not a catch, finally or filter block);
7487         // assert(compCurBB is not a try block protected by a finally block);
7488
7489         // Check for permission to tailcall
7490         bool explicitTailCall = (tailCall & PREFIX_TAILCALL_EXPLICIT) != 0;
7491
7492         assert(!explicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN);
7493
7494         if (canTailCall)
7495         {
7496             // True virtual or indirect calls shouldn't pass in a callee handle.
7497             CORINFO_METHOD_HANDLE exactCalleeHnd = ((call->gtCall.gtCallType != CT_USER_FUNC) ||
7498                                                     ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT))
7499                                                        ? nullptr
7500                                                        : methHnd;
7501             GenTreePtr thisArg = call->gtCall.gtCallObjp;
7502
7503             if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, explicitTailCall))
7504             {
7505                 canTailCall = true;
7506                 if (explicitTailCall)
7507                 {
7508                     // In case of explicit tail calls, mark it so that it is not considered
7509                     // for in-lining.
7510                     call->gtCall.gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL;
7511 #ifdef DEBUG
7512                     if (verbose)
7513                     {
7514                         printf("\nGTF_CALL_M_EXPLICIT_TAILCALL bit set for call ");
7515                         printTreeID(call);
7516                         printf("\n");
7517                     }
7518 #endif
7519                 }
7520                 else
7521                 {
7522 #if FEATURE_TAILCALL_OPT
7523                     // Must be an implicit tail call.
7524                     assert((tailCall & PREFIX_TAILCALL_IMPLICIT) != 0);
7525
7526                     // It is possible that a call node is both an inline candidate and marked
7527                     // for opportunistic tail calling.  Inlining happens before morphing of
7528                     // trees.  If inlining of an inline candidate gets aborted for whatever
7529                     // reason, it will survive to the morphing stage at which point it will be
7530                     // transformed into a tail call after performing additional checks.
7531
7532                     call->gtCall.gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL;
7533 #ifdef DEBUG
7534                     if (verbose)
7535                     {
7536                         printf("\nGTF_CALL_M_IMPLICIT_TAILCALL bit set for call ");
7537                         printTreeID(call);
7538                         printf("\n");
7539                     }
7540 #endif
7541
7542 #else //! FEATURE_TAILCALL_OPT
7543                     NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls");
7544
7545 #endif // FEATURE_TAILCALL_OPT
7546                 }
7547
7548                 // we can't report success just yet...
7549             }
7550             else
7551             {
7552                 canTailCall = false;
7553 // canTailCall reported its reasons already
7554 #ifdef DEBUG
7555                 if (verbose)
7556                 {
7557                     printf("\ninfo.compCompHnd->canTailCall returned false for call ");
7558                     printTreeID(call);
7559                     printf("\n");
7560                 }
7561 #endif
7562             }
7563         }
7564         else
7565         {
7566             // If this assert fires it means that canTailCall was set to false without setting a reason!
7567             assert(szCanTailCallFailReason != nullptr);
7568
7569 #ifdef DEBUG
7570             if (verbose)
7571             {
7572                 printf("\nRejecting %splicit tail call for call ", explicitTailCall ? "ex" : "im");
7573                 printTreeID(call);
7574                 printf(": %s\n", szCanTailCallFailReason);
7575             }
7576 #endif
7577             info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, explicitTailCall, TAILCALL_FAIL,
7578                                                      szCanTailCallFailReason);
7579         }
7580     }
7581
7582     // Note: we assume that small return types are already normalized by the managed callee
7583     // or by the pinvoke stub for calls to unmanaged code.
7584
7585     if (!bIntrinsicImported)
7586     {
7587         //
7588         // Things needed to be checked when bIntrinsicImported is false.
7589         //
7590
7591         assert(call->gtOper == GT_CALL);
7592         assert(sig != nullptr);
7593
7594         // Tail calls require us to save the call site's sig info so we can obtain an argument
7595         // copying thunk from the EE later on.
7596         if (call->gtCall.callSig == nullptr)
7597         {
7598             call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7599             *call->gtCall.callSig = *sig;
7600         }
7601
7602         if (compIsForInlining() && opcode == CEE_CALLVIRT)
7603         {
7604             GenTreePtr callObj = call->gtCall.gtCallObjp;
7605             assert(callObj != nullptr);
7606
7607             unsigned callKind = call->gtFlags & GTF_CALL_VIRT_KIND_MASK;
7608
7609             if (((callKind != GTF_CALL_NONVIRT) || (call->gtFlags & GTF_CALL_NULLCHECK)) &&
7610                 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(call->gtCall.gtCallArgs, callObj,
7611                                                                    impInlineInfo->inlArgInfo))
7612             {
7613                 impInlineInfo->thisDereferencedFirst = true;
7614             }
7615         }
7616
7617 #if defined(DEBUG) || defined(INLINE_DATA)
7618
7619         // Keep track of the raw IL offset of the call
7620         call->gtCall.gtRawILOffset = rawILOffset;
7621
7622 #endif // defined(DEBUG) || defined(INLINE_DATA)
7623
7624         // Is it an inline candidate?
7625         impMarkInlineCandidate(call, exactContextHnd, callInfo);
7626     }
7627
7628 DONE_CALL:
7629     // Push or append the result of the call
7630     if (callRetTyp == TYP_VOID)
7631     {
7632         if (opcode == CEE_NEWOBJ)
7633         {
7634             // we actually did push something, so don't spill the thing we just pushed.
7635             assert(verCurrentState.esStackDepth > 0);
7636             impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtOffs);
7637         }
7638         else
7639         {
7640             impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7641         }
7642     }
7643     else
7644     {
7645         impSpillSpecialSideEff();
7646
7647         if (clsFlags & CORINFO_FLG_ARRAY)
7648         {
7649             eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
7650         }
7651
7652         // Find the return type used for verification by interpreting the method signature.
7653         // NB: we are clobbering the already established sig.
7654         if (tiVerificationNeeded)
7655         {
7656             // Actually, we never get the sig for the original method.
7657             sig = &(callInfo->verSig);
7658         }
7659
7660         typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass);
7661         tiRetVal.NormaliseForStack();
7662
7663         // The CEE_READONLY prefix modifies the verification semantics of an Address
7664         // operation on an array type.
7665         if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall && tiRetVal.IsByRef())
7666         {
7667             tiRetVal.SetIsReadonlyByRef();
7668         }
7669
7670         if (tiVerificationNeeded)
7671         {
7672             // We assume all calls return permanent home byrefs. If they
7673             // didn't they wouldn't be verifiable. This is also covering
7674             // the Address() helper for multidimensional arrays.
7675             if (tiRetVal.IsByRef())
7676             {
7677                 tiRetVal.SetIsPermanentHomeByRef();
7678             }
7679         }
7680
7681         if (call->IsCall())
7682         {
7683             // Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call)
7684
7685             bool fatPointerCandidate = call->AsCall()->IsFatPointerCandidate();
7686             if (varTypeIsStruct(callRetTyp))
7687             {
7688                 call = impFixupCallStructReturn(call, sig->retTypeClass);
7689             }
7690
7691             if ((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0)
7692             {
7693                 assert(opts.OptEnabled(CLFLG_INLINING));
7694                 assert(!fatPointerCandidate); // We should not try to inline calli.
7695
7696                 // Make the call its own tree (spill the stack if needed).
7697                 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7698
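                // Replace the call on the stack with a placeholder return expression; if
                // inlining succeeds, later phases substitute the inlinee's actual return
                // value for the placeholder.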
7699                 // TODO: Still using the widened type.
7700                 call = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp));
7701             }
7702             else
7703             {
7704                 if (fatPointerCandidate)
7705                 {
7706                     // fatPointer candidates should be in statements of the form call() or var = call().
7707                     // Such a form allows us to find statements with fat calls without walking whole trees
7708                     // and avoids problems with cutting trees.
7709                     assert(!bIntrinsicImported);
7710                     assert(IsTargetAbi(CORINFO_CORERT_ABI));
7711                     if (call->OperGet() != GT_LCL_VAR) // may already have been converted by impFixupCallStructReturn.
7712                     {
7713                         unsigned   calliSlot  = lvaGrabTemp(true DEBUGARG("calli"));
7714                         LclVarDsc* varDsc     = &lvaTable[calliSlot];
7715                         varDsc->lvVerTypeInfo = tiRetVal;
7716                         impAssignTempGen(calliSlot, call, clsHnd, (unsigned)CHECK_SPILL_NONE);
7717                         // impAssignTempGen can change the src arg list and return type for a call that returns a struct.
7718                         var_types type = genActualType(lvaTable[calliSlot].TypeGet());
7719                         call           = gtNewLclvNode(calliSlot, type);
7720                     }
7721                 }
7722
7723                 // For non-candidates we must also spill, since we
7724                 // might have locals live on the eval stack that this
7725                 // call can modify.
7726                 //
7727                 // Suppress this for certain well-known call targets
7728                 // that we know won't modify locals, e.g. calls that are
7729                 // recognized in gtCanOptimizeTypeEquality. Otherwise
7730                 // we may break key fragile pattern matches later on.
7731                 bool spillStack = true;
7732                 if (call->IsCall())
7733                 {
7734                     GenTreeCall* callNode = call->AsCall();
7735                     if ((callNode->gtCallType == CT_HELPER) && gtIsTypeHandleToRuntimeTypeHelper(callNode))
7736                     {
7737                         spillStack = false;
7738                     }
7739                     else if ((callNode->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0)
7740                     {
7741                         spillStack = false;
7742                     }
7743                 }
7744
7745                 if (spillStack)
7746                 {
7747                     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call"));
7748                 }
7749             }
7750         }
7751
7752         if (!bIntrinsicImported)
7753         {
7754             //-------------------------------------------------------------------------
7755             //
7756             /* If the call is of a small type and the callee is managed, the callee will normalize the result
7757                 before returning.
7758                 However, we need to normalize small type values returned by unmanaged
7759                 functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here
7760                 if we use the shorter inlined pinvoke stub. */
7761
7762             if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT))
7763             {
7764                 call = gtNewCastNode(genActualType(callRetTyp), call, callRetTyp);
7765             }
7766         }
7767
7768         impPushOnStack(call, tiRetVal);
7769     }
7770
7771     // VSD functions get a new call target each time we getCallInfo, so clear the cache.
7772     // Also, the call info cache for CALLI instructions is largely incomplete, so clear it out.
7773     // if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB))
7774     //  callInfoCache.uncacheCallInfo();
7775
7776     return callRetTyp;
7777 }
7778 #ifdef _PREFAST_
7779 #pragma warning(pop)
7780 #endif
7781
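//-----------------------------------------------------------------------------------
//  impMethodInfo_hasRetBuffArg: Check whether the given method returns its struct
//  value through a hidden return buffer argument.
//
//  Arguments:
//    methInfo - method info for the call target
//
//  Return Value:
//    true if the struct is returned by reference (SPK_ByReference); false otherwise.
//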
7782 bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo)
7783 {
7784     CorInfoType corType = methInfo->args.retType;
7785
7786     if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY))
7787     {
7788         // We have some kind of STRUCT being returned
7789
7790         structPassingKind howToReturnStruct = SPK_Unknown;
7791
7792         var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, &howToReturnStruct);
7793
7794         if (howToReturnStruct == SPK_ByReference)
7795         {
7796             return true;
7797         }
7798     }
7799
7800     return false;
7801 }
7802
7803 #ifdef DEBUG
7804 //
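// impImportJitTestLabelMark: import a call to the DEBUG-only JitTestLabel marker method.
// Pops the test label kind (and, with three args, a number) plus the annotated expression
// from the stack, records the annotation for that node, then pushes the expression back.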
7805 var_types Compiler::impImportJitTestLabelMark(int numArgs)
7806 {
7807     TestLabelAndNum tlAndN;
7808     if (numArgs == 2)
7809     {
7810         tlAndN.m_num  = 0;
7811         StackEntry se = impPopStack();
7812         assert(se.seTypeInfo.GetType() == TI_INT);
7813         GenTreePtr val = se.val;
7814         assert(val->IsCnsIntOrI());
7815         tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
7816     }
7817     else if (numArgs == 3)
7818     {
7819         StackEntry se = impPopStack();
7820         assert(se.seTypeInfo.GetType() == TI_INT);
7821         GenTreePtr val = se.val;
7822         assert(val->IsCnsIntOrI());
7823         tlAndN.m_num = val->AsIntConCommon()->IconValue();
7824         se           = impPopStack();
7825         assert(se.seTypeInfo.GetType() == TI_INT);
7826         val = se.val;
7827         assert(val->IsCnsIntOrI());
7828         tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
7829     }
7830     else
7831     {
7832         assert(false);
7833     }
7834
7835     StackEntry expSe = impPopStack();
7836     GenTreePtr node  = expSe.val;
7837
7838     // There are a small number of special cases, where we actually put the annotation on a subnode.
7839     if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100)
7840     {
7841         // A loop hoist annotation with value >= 100 means that the expression should be a static field access,
7842         // a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some
7843         // offset within the static field block whose address is returned by the helper call.
7844         // The annotation is saying that this address calculation, but not the entire access, should be hoisted.
7845         GenTreePtr helperCall = nullptr;
7846         assert(node->OperGet() == GT_IND);
7847         tlAndN.m_num -= 100;
7848         GetNodeTestData()->Set(node->gtOp.gtOp1, tlAndN);
7849         GetNodeTestData()->Remove(node);
7850     }
7851     else
7852     {
7853         GetNodeTestData()->Set(node, tlAndN);
7854     }
7855
7856     impPushOnStack(node, expSe.seTypeInfo);
7857     return node->TypeGet();
7858 }
7859 #endif // DEBUG
7860
7861 //-----------------------------------------------------------------------------------
7862 //  impFixupCallStructReturn: For a call node that returns a struct type either
7863 //  adjust the return type to an enregisterable type, or set the flag to indicate
7864 //  struct return via retbuf arg.
7865 //
7866 //  Arguments:
7867 //    call       -  GT_CALL GenTree node
7868 //    retClsHnd  -  Class handle of return type of the call
7869 //
7870 //  Return Value:
7871 //    Returns new GenTree node after fixing struct return of call node
7872 //
7873 GenTreePtr Compiler::impFixupCallStructReturn(GenTreePtr call, CORINFO_CLASS_HANDLE retClsHnd)
7874 {
7875     assert(call->gtOper == GT_CALL);
7876
7877     if (!varTypeIsStruct(call))
7878     {
7879         return call;
7880     }
7881
7882     call->gtCall.gtRetClsHnd = retClsHnd;
7883
7884     GenTreeCall* callNode = call->AsCall();
7885
7886 #if FEATURE_MULTIREG_RET
7887     // Initialize Return type descriptor of call node
7888     ReturnTypeDesc* retTypeDesc = callNode->GetReturnTypeDesc();
7889     retTypeDesc->InitializeStructReturnType(this, retClsHnd);
7890 #endif // FEATURE_MULTIREG_RET
7891
7892 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
7893
7894     // Not allowed for FEATURE_CORECLR which is the only SKU available for System V OSs.
7895     assert(!callNode->IsVarargs() && "varargs not allowed for System V OSs.");
7896
7897     // The return type will remain as the incoming struct type unless normalized to a
7898     // single eightbyte return type below.
7899     callNode->gtReturnType = call->gtType;
7900
7901     unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7902     if (retRegCount != 0)
7903     {
7904         if (retRegCount == 1)
7905         {
7906             // struct returned in a single register
7907             callNode->gtReturnType = retTypeDesc->GetReturnRegType(0);
7908         }
7909         else
7910         {
7911             // must be a struct returned in two registers
7912             assert(retRegCount == 2);
7913
7914             if ((!callNode->CanTailCall()) && (!callNode->IsInlineCandidate()))
7915             {
7916                 // Force a call returning multi-reg struct to be always of the IR form
7917                 //   tmp = call
7918                 //
7919                 // No need to assign a multi-reg struct to a local var if:
7920                 //  - It is a tail call or
7921                 //  - The call is marked for in-lining later
7922                 return impAssignMultiRegTypeToVar(call, retClsHnd);
7923             }
7924         }
7925     }
7926     else
7927     {
7928         // struct not returned in registers, i.e. returned via a hidden retbuf arg.
7929         callNode->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
7930     }
7931
7932 #else // not FEATURE_UNIX_AMD64_STRUCT_PASSING
7933
7934 #if FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
7935     // There is no fixup necessary if the return type is an HFA struct.
7936     // HFA structs are returned in registers for ARM32 and ARM64
7937     //
7938     if (!call->gtCall.IsVarargs() && IsHfa(retClsHnd))
7939     {
7940         if (call->gtCall.CanTailCall())
7941         {
7942             if (info.compIsVarArgs)
7943             {
7944                 // We cannot tail call because control needs to return to fixup the calling
7945                 // convention for result return.
7946                 call->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
7947             }
7948             else
7949             {
7950                 // If we can tail call returning HFA, then don't assign it to
7951                 // a variable back and forth.
7952                 return call;
7953             }
7954         }
7955
7956         if (call->gtFlags & GTF_CALL_INLINE_CANDIDATE)
7957         {
7958             return call;
7959         }
7960
7961         unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7962         if (retRegCount >= 2)
7963         {
7964             return impAssignMultiRegTypeToVar(call, retClsHnd);
7965         }
7966     }
7967 #endif // _TARGET_ARM_
7968
7969     // Check for TYP_STRUCT type that wraps a primitive type
7970     // Such structs are returned using a single register
7971     // and we change the return type on those calls here.
7972     //
7973     structPassingKind howToReturnStruct;
7974     var_types         returnType = getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
7975
7976     if (howToReturnStruct == SPK_ByReference)
7977     {
7978         assert(returnType == TYP_UNKNOWN);
7979         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
7980     }
7981     else
7982     {
7983         assert(returnType != TYP_UNKNOWN);
7984         call->gtCall.gtReturnType = returnType;
7985
7986         // ToDo: Refactor this common code sequence into its own method as it is used 4+ times
7987         if ((returnType == TYP_LONG) && (compLongUsed == false))
7988         {
7989             compLongUsed = true;
7990         }
7991         else if (((returnType == TYP_FLOAT) || (returnType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
7992         {
7993             compFloatingPointUsed = true;
7994         }
7995
7996 #if FEATURE_MULTIREG_RET
7997         unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7998         assert(retRegCount != 0);
7999
8000         if (retRegCount >= 2)
8001         {
8002             if ((!callNode->CanTailCall()) && (!callNode->IsInlineCandidate()))
8003             {
8004                 // Force a call returning multi-reg struct to be always of the IR form
8005                 //   tmp = call
8006                 //
8007                 // No need to assign a multi-reg struct to a local var if:
8008                 //  - It is a tail call or
8009                 //  - The call is marked for in-lining later
8010                 return impAssignMultiRegTypeToVar(call, retClsHnd);
8011             }
8012         }
8013 #endif // FEATURE_MULTIREG_RET
8014     }
8015
8016 #endif // not FEATURE_UNIX_AMD64_STRUCT_PASSING
8017
8018     return call;
8019 }
8020
8021 /*****************************************************************************
8022    For struct return values, re-type the operand in the case where the ABI
8023    does not use a struct return buffer
8024    Note that this method is only called for !_TARGET_X86_
8025  */
8026
8027 GenTreePtr Compiler::impFixupStructReturnType(GenTreePtr op, CORINFO_CLASS_HANDLE retClsHnd)
8028 {
8029     assert(varTypeIsStruct(info.compRetType));
8030     assert(info.compRetBuffArg == BAD_VAR_NUM);
8031
8032 #if defined(_TARGET_XARCH_)
8033
8034 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
8035     // No VarArgs for CoreCLR on x64 Unix
8036     assert(!info.compIsVarArgs);
8037
8038     // Is method returning a multi-reg struct?
8039     if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd))
8040     {
8041         // In case of multi-reg struct return, we force IR to be one of the following:
8042         // GT_RETURN(lclvar) or GT_RETURN(call).  If op is anything other than a
8043         // lclvar or call, it is assigned to a temp to create: temp = op and GT_RETURN(tmp).
8044
8045         if (op->gtOper == GT_LCL_VAR)
8046         {
8047             // Make sure that this struct stays in memory and doesn't get promoted.
8048             unsigned lclNum                  = op->gtLclVarCommon.gtLclNum;
8049             lvaTable[lclNum].lvIsMultiRegRet = true;
8050
8051             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8052             op->gtFlags |= GTF_DONT_CSE;
8053
8054             return op;
8055         }
8056
8057         if (op->gtOper == GT_CALL)
8058         {
8059             return op;
8060         }
8061
8062         return impAssignMultiRegTypeToVar(op, retClsHnd);
8063     }
8064 #else  // !FEATURE_UNIX_AMD64_STRUCT_PASSING
8065     assert(info.compRetNativeType != TYP_STRUCT);
8066 #endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
8067
8068 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
8069
8070     if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd))
8071     {
8072         if (op->gtOper == GT_LCL_VAR)
8073         {
8074             // This LCL_VAR is an HFA return value, it stays as a TYP_STRUCT
8075             unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8076             // Make sure this struct type stays as struct so that we can return it as an HFA
8077             lvaTable[lclNum].lvIsMultiRegRet = true;
8078
8079             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8080             op->gtFlags |= GTF_DONT_CSE;
8081
8082             return op;
8083         }
8084
8085         if (op->gtOper == GT_CALL)
8086         {
8087             if (op->gtCall.IsVarargs())
8088             {
8089                 // We cannot tail call because control needs to return to fixup the calling
8090                 // convention for result return.
8091                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
8092                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8093             }
8094             else
8095             {
8096                 return op;
8097             }
8098         }
8099         return impAssignMultiRegTypeToVar(op, retClsHnd);
8100     }
8101
8102 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM64_)
8103
8104     // Is method returning a multi-reg struct?
8105     if (IsMultiRegReturnedType(retClsHnd))
8106     {
8107         if (op->gtOper == GT_LCL_VAR)
8108         {
8109             // This LCL_VAR stays as a TYP_STRUCT
8110             unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8111
8112             // Make sure this struct type is not struct promoted
8113             lvaTable[lclNum].lvIsMultiRegRet = true;
8114
8115             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8116             op->gtFlags |= GTF_DONT_CSE;
8117
8118             return op;
8119         }
8120
8121         if (op->gtOper == GT_CALL)
8122         {
8123             if (op->gtCall.IsVarargs())
8124             {
8125                 // We cannot tail call because control needs to return to fixup the calling
8126                 // convention for result return.
8127                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
8128                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8129             }
8130             else
8131             {
8132                 return op;
8133             }
8134         }
8135         return impAssignMultiRegTypeToVar(op, retClsHnd);
8136     }
8137
8138 #endif // FEATURE_MULTIREG_RET && defined(_TARGET_ARM64_)
8139
8140 REDO_RETURN_NODE:
8141     // Adjust the type away from struct to the integral return type;
8142     // no normalization is performed.
8143     if (op->gtOper == GT_LCL_VAR)
8144     {
8145         op->ChangeOper(GT_LCL_FLD);
8146     }
8147     else if (op->gtOper == GT_OBJ)
8148     {
8149         GenTreePtr op1 = op->AsObj()->Addr();
8150
8151         // We will fold away OBJ/ADDR
8152         // except for OBJ/ADDR/INDEX
8153         //     as the array type influences the array element's offset
8154         //     Later in this method we change op->gtType to info.compRetNativeType
8155         //     This is not correct when op is a GT_INDEX as the starting offset
8156         //     for the array elements 'elemOffs' is different for an array of
8157         //     TYP_REF than an array of TYP_STRUCT (which simply wraps a TYP_REF)
8158         //     Also refer to the GTF_INX_REFARR_LAYOUT flag
8159         //
8160         if ((op1->gtOper == GT_ADDR) && (op1->gtOp.gtOp1->gtOper != GT_INDEX))
8161         {
8162             // Change '*(&X)' to 'X' and see if we can do better
8163             op = op1->gtOp.gtOp1;
8164             goto REDO_RETURN_NODE;
8165         }
8166         op->gtObj.gtClass = NO_CLASS_HANDLE;
8167         op->ChangeOperUnchecked(GT_IND);
8168         op->gtFlags |= GTF_IND_TGTANYWHERE;
8169     }
8170     else if (op->gtOper == GT_CALL)
8171     {
8172         if (op->AsCall()->TreatAsHasRetBufArg(this))
8173         {
8174             // This must be one of those 'special' helpers that don't
8175             // really have a return buffer, but instead use it as a way
8176             // to keep the trees cleaner with fewer address-taken temps.
8177             //
8178             // Well now we have to materialize the return buffer as
8179             // an address-taken temp. Then we can return the temp.
8180             //
8181             // NOTE: this code assumes that since the call directly
8182             // feeds the return, then the call must be returning the
8183             // same structure/class/type.
8184             //
8185             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer"));
8186
8187             // No need to spill anything as we're about to return.
8188             impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE);
8189
8190             // Don't create both a GT_ADDR & GT_OBJ just to undo all of that; instead,
8191             // jump directly to a GT_LCL_FLD.
8192             op = gtNewLclvNode(tmpNum, info.compRetNativeType);
8193             op->ChangeOper(GT_LCL_FLD);
8194         }
8195         else
8196         {
8197             assert(info.compRetNativeType == op->gtCall.gtReturnType);
8198
8199             // Don't change the gtType of the node just yet, it will get changed later.
8200             return op;
8201         }
8202     }
8203     else if (op->gtOper == GT_COMMA)
8204     {
8205         op->gtOp.gtOp2 = impFixupStructReturnType(op->gtOp.gtOp2, retClsHnd);
8206     }
8207
8208     op->gtType = info.compRetNativeType;
8209
8210     return op;
8211 }
8212
8213 /*****************************************************************************
8214    CEE_LEAVE may be jumping out of a protected block, viz., a catch or a
8215    finally-protected try. We find the finally blocks protecting the current
8216    offset (in order) by walking over the complete exception table and
8217    finding enclosing clauses. This assumes that the table is sorted.
8218    This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS.
8219
8220    If we are leaving a catch handler, we need to attach the
8221    CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks.
8222
8223    After this function, the BBJ_LEAVE block has been converted to a different type.
8224  */
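//
// Illustrative sketch (assumed IL, not from the original comment): for
//
//     .try {                      // protected by finally F1
//         .try {                  // protected by finally F2
//             leave LABEL_OUT
//         } finally { ... }       // F2
//     } finally { ... }           // F1
//     LABEL_OUT:
//
// the CEE_LEAVE block becomes a BBJ_CALLFINALLY that calls F2, a step block
// then continues with a BBJ_CALLFINALLY that calls F1, and a final BBJ_ALWAYS
// step block jumps to LABEL_OUT.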
8225
8226 #if !FEATURE_EH_FUNCLETS
8227
8228 void Compiler::impImportLeave(BasicBlock* block)
8229 {
8230 #ifdef DEBUG
8231     if (verbose)
8232     {
8233         printf("\nBefore import CEE_LEAVE:\n");
8234         fgDispBasicBlocks();
8235         fgDispHandlerTab();
8236     }
8237 #endif // DEBUG
8238
8239     bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8240     unsigned    blkAddr         = block->bbCodeOffs;
8241     BasicBlock* leaveTarget     = block->bbJumpDest;
8242     unsigned    jmpAddr         = leaveTarget->bbCodeOffs;
8243
8244     // LEAVE clears the stack: spill any side effects and set the stack depth to 0
8245
8246     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8247     verCurrentState.esStackDepth = 0;
8248
8249     assert(block->bbJumpKind == BBJ_LEAVE);
8250     assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary
8251
8252     BasicBlock* step         = DUMMY_INIT(NULL);
8253     unsigned    encFinallies = 0; // Number of enclosing finallies.
8254     GenTreePtr  endCatches   = NULL;
8255     GenTreePtr  endLFin      = NULL; // The statement tree to indicate the end of locally-invoked finally.
8256
8257     unsigned  XTnum;
8258     EHblkDsc* HBtab;
8259
8260     for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8261     {
8262         // Grab the handler offsets
8263
8264         IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8265         IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8266         IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8267         IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8268
8269         /* Is this a catch-handler we are CEE_LEAVEing out of?
8270          * If so, we need to call CORINFO_HELP_ENDCATCH.
8271          */
8272
8273         if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8274         {
8275             // Can't CEE_LEAVE out of a finally/fault handler
8276             if (HBtab->HasFinallyOrFaultHandler())
8277                 BADCODE("leave out of fault/finally block");
8278
8279             // Create the call to CORINFO_HELP_ENDCATCH
8280             GenTreePtr endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID);
8281
8282             // Make a list of all the currently pending endCatches
8283             if (endCatches)
8284                 endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch);
8285             else
8286                 endCatches = endCatch;
8287
8288 #ifdef DEBUG
8289             if (verbose)
8290             {
8291                 printf("impImportLeave - BB%02u jumping out of catch handler EH#%u, adding call to "
8292                        "CORINFO_HELP_ENDCATCH\n",
8293                        block->bbNum, XTnum);
8294             }
8295 #endif
8296         }
8297         else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8298                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8299         {
8300             /* This is a finally-protected try we are jumping out of */
8301
8302             /* If there are any pending endCatches, and we have already
8303                jumped out of a finally-protected try, then the endCatches
8304                have to be put in a block in an outer try for async
8305                exceptions to work correctly.
8306                Else, just append them to the original block */
8307
8308             BasicBlock* callBlock;
8309
8310             assert(!encFinallies == !endLFin); // if we have finallies, we better have an endLFin tree, and vice-versa
8311
8312             if (encFinallies == 0)
8313             {
8314                 assert(step == DUMMY_INIT(NULL));
8315                 callBlock             = block;
8316                 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8317
8318                 if (endCatches)
8319                     impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8320
8321 #ifdef DEBUG
8322                 if (verbose)
8323                 {
8324                     printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY "
8325                            "block BB%02u [%08p]\n",
8326                            callBlock->bbNum, dspPtr(callBlock));
8327                 }
8328 #endif
8329             }
8330             else
8331             {
8332                 assert(step != DUMMY_INIT(NULL));
8333
8334                 /* Calling the finally block */
8335                 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step);
8336                 assert(step->bbJumpKind == BBJ_ALWAYS);
8337                 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8338                                               // finally in the chain)
8339                 step->bbJumpDest->bbRefs++;
8340
8341                 /* The new block will inherit this block's weight */
8342                 callBlock->setBBWeight(block->bbWeight);
8343                 callBlock->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8344
8345 #ifdef DEBUG
8346                 if (verbose)
8347                 {
8348                     printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block BB%02u "
8349                            "[%08p]\n",
8350                            callBlock->bbNum, dspPtr(callBlock));
8351                 }
8352 #endif
8353
8354                 GenTreePtr lastStmt;
8355
8356                 if (endCatches)
8357                 {
8358                     lastStmt         = gtNewStmt(endCatches);
8359                     endLFin->gtNext  = lastStmt;
8360                     lastStmt->gtPrev = endLFin;
8361                 }
8362                 else
8363                 {
8364                     lastStmt = endLFin;
8365                 }
8366
8367                 // note that this sets BBF_IMPORTED on the block
8368                 impEndTreeList(callBlock, endLFin, lastStmt);
8369             }
8370
8371             step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8372             /* The new block will inherit this block's weight */
8373             step->setBBWeight(block->bbWeight);
8374             step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8375
8376 #ifdef DEBUG
8377             if (verbose)
8378             {
8379                 printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block "
8380                        "BB%02u [%08p]\n",
8381                        step->bbNum, dspPtr(step));
8382             }
8383 #endif
8384
8385             unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel;
8386             assert(finallyNesting <= compHndBBtabCount);
8387
8388             callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8389             endLFin               = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting);
8390             endLFin               = gtNewStmt(endLFin);
8391             endCatches            = NULL;
8392
8393             encFinallies++;
8394
8395             invalidatePreds = true;
8396         }
8397     }
8398
8399     /* Append any remaining endCatches, if any */
8400
8401     assert(!encFinallies == !endLFin);
8402
8403     if (encFinallies == 0)
8404     {
8405         assert(step == DUMMY_INIT(NULL));
8406         block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8407
8408         if (endCatches)
8409             impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8410
8411 #ifdef DEBUG
8412         if (verbose)
8413         {
8414             printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS "
8415                    "block BB%02u [%08p]\n",
8416                    block->bbNum, dspPtr(block));
8417         }
8418 #endif
8419     }
8420     else
8421     {
8422         // If leaveTarget is the start of another try block, we want to make sure that
8423         // we do not insert finalStep into that try block. Hence, we find the enclosing
8424         // try block.
8425         unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget);
8426
8427         // Insert a new BB either in the try region indicated by tryIndex or
8428         // the handler region indicated by leaveTarget->bbHndIndex,
8429         // depending on which is the inner region.
8430         BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step);
8431         finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS;
8432         step->bbJumpDest = finalStep;
8433
8434         /* The new block will inherit this block's weight */
8435         finalStep->setBBWeight(block->bbWeight);
8436         finalStep->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8437
8438 #ifdef DEBUG
8439         if (verbose)
8440         {
8441             printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block BB%02u [%08p]\n",
8442                    encFinallies, finalStep->bbNum, dspPtr(finalStep));
8443         }
8444 #endif
8445
8446         GenTreePtr lastStmt;
8447
8448         if (endCatches)
8449         {
8450             lastStmt         = gtNewStmt(endCatches);
8451             endLFin->gtNext  = lastStmt;
8452             lastStmt->gtPrev = endLFin;
8453         }
8454         else
8455         {
8456             lastStmt = endLFin;
8457         }
8458
8459         impEndTreeList(finalStep, endLFin, lastStmt);
8460
8461         finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
8462
8463         // Queue up the jump target for importing
8464
8465         impImportBlockPending(leaveTarget);
8466
8467         invalidatePreds = true;
8468     }
8469
8470     if (invalidatePreds && fgComputePredsDone)
8471     {
8472         JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
8473         fgRemovePreds();
8474     }
8475
8476 #ifdef DEBUG
8477     fgVerifyHandlerTab();
8478
8479     if (verbose)
8480     {
8481         printf("\nAfter import CEE_LEAVE:\n");
8482         fgDispBasicBlocks();
8483         fgDispHandlerTab();
8484     }
8485 #endif // DEBUG
8486 }
8487
8488 #else // FEATURE_EH_FUNCLETS
8489
8490 void Compiler::impImportLeave(BasicBlock* block)
8491 {
8492 #ifdef DEBUG
8493     if (verbose)
8494     {
8495         printf("\nBefore import CEE_LEAVE in BB%02u (targeting BB%02u):\n", block->bbNum, block->bbJumpDest->bbNum);
8496         fgDispBasicBlocks();
8497         fgDispHandlerTab();
8498     }
8499 #endif // DEBUG
8500
8501     bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8502     unsigned    blkAddr         = block->bbCodeOffs;
8503     BasicBlock* leaveTarget     = block->bbJumpDest;
8504     unsigned    jmpAddr         = leaveTarget->bbCodeOffs;
8505
8506     // LEAVE clears the stack: spill any side effects and set the stack depth to 0
8507
8508     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8509     verCurrentState.esStackDepth = 0;
8510
8511     assert(block->bbJumpKind == BBJ_LEAVE);
8512     assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary
8513
8514     BasicBlock* step = nullptr;
8515
8516     enum StepType
8517     {
8518         // No step type; step == NULL.
8519         ST_None,
8520
8521         // Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair?
8522         // That is, is step->bbJumpDest where a finally will return to?
8523         ST_FinallyReturn,
8524
8525         // The step block is a catch return.
8526         ST_Catch,
8527
8528         // The step block is in a "try", created as the target for a finally return or the target for a catch return.
8529         ST_Try
8530     };
8531     StepType stepType = ST_None;
8532
8533     unsigned  XTnum;
8534     EHblkDsc* HBtab;
8535
8536     for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8537     {
8538         // Grab the handler offsets
8539
8540         IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8541         IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8542         IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8543         IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8544
8545         /* Is this a catch-handler we are CEE_LEAVEing out of?
8546          */
8547
8548         if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8549         {
8550             // Can't CEE_LEAVE out of a finally/fault handler
8551             if (HBtab->HasFinallyOrFaultHandler())
8552             {
8553                 BADCODE("leave out of fault/finally block");
8554             }
8555
8556             /* We are jumping out of a catch */
8557
8558             if (step == nullptr)
8559             {
8560                 step             = block;
8561                 step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET
8562                 stepType         = ST_Catch;
8563
8564 #ifdef DEBUG
8565                 if (verbose)
8566                 {
8567                     printf("impImportLeave - jumping out of a catch (EH#%u), convert block BB%02u to BBJ_EHCATCHRET "
8568                            "block\n",
8569                            XTnum, step->bbNum);
8570                 }
8571 #endif
8572             }
8573             else
8574             {
8575                 BasicBlock* exitBlock;
8576
8577                 /* Create a new catch exit block in the catch region for the existing step block to jump to in this
8578                  * scope */
8579                 exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step);
8580
8581                 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
8582                 step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch
8583                                               // exit) returns to this block
8584                 step->bbJumpDest->bbRefs++;
8585
8586 #if defined(_TARGET_ARM_)
8587                 if (stepType == ST_FinallyReturn)
8588                 {
8589                     assert(step->bbJumpKind == BBJ_ALWAYS);
8590                     // Mark the target of a finally return
8591                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8592                 }
8593 #endif // defined(_TARGET_ARM_)
8594
8595                 /* The new block will inherit this block's weight */
8596                 exitBlock->setBBWeight(block->bbWeight);
8597                 exitBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8598
8599                 /* This exit block is the new step */
8600                 step     = exitBlock;
8601                 stepType = ST_Catch;
8602
8603                 invalidatePreds = true;
8604
8605 #ifdef DEBUG
8606                 if (verbose)
8607                 {
8608                     printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block BB%02u\n", XTnum,
8609                            exitBlock->bbNum);
8610                 }
8611 #endif
8612             }
8613         }
8614         else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8615                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8616         {
8617             /* We are jumping out of a finally-protected try */
8618
8619             BasicBlock* callBlock;
8620
8621             if (step == nullptr)
8622             {
8623 #if FEATURE_EH_CALLFINALLY_THUNKS
8624
8625                 // Put the call to the finally in the enclosing region.
8626                 unsigned callFinallyTryIndex =
8627                     (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
8628                 unsigned callFinallyHndIndex =
8629                     (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
8630                 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block);
8631
8632                 // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because
8633                 // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE,
8634                 // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the
8635                 // next block, and flow optimizations will remove it.
8636                 block->bbJumpKind = BBJ_ALWAYS;
8637                 block->bbJumpDest = callBlock;
8638                 block->bbJumpDest->bbRefs++;
8639
8640                 /* The new block will inherit this block's weight */
8641                 callBlock->setBBWeight(block->bbWeight);
8642                 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8643
8644 #ifdef DEBUG
8645                 if (verbose)
8646                 {
8647                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
8648                            "BBJ_ALWAYS, add BBJ_CALLFINALLY block BB%02u\n",
8649                            XTnum, block->bbNum, callBlock->bbNum);
8650                 }
8651 #endif
8652
8653 #else // !FEATURE_EH_CALLFINALLY_THUNKS
8654
8655                 callBlock             = block;
8656                 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8657
8658 #ifdef DEBUG
8659                 if (verbose)
8660                 {
8661                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
8662                            "BBJ_CALLFINALLY block\n",
8663                            XTnum, callBlock->bbNum);
8664                 }
8665 #endif
8666
8667 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
8668             }
8669             else
8670             {
8671                 // Calling the finally block. We already have a step block that is either the call-to-finally from a
8672                 // more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by
8673                 // a 'finally'), or the step block is the return from a catch.
8674                 //
8675                 // Due to ThreadAbortException, we can't have the catch return target the call-to-finally block
8676                 // directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will
8677                 // automatically re-raise the exception, using the return address of the catch (that is, the target
8678                 // block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will
8679                 // refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64,
8680                 // we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a
8681                 // finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a
8682                 // BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly
8683                 // within the 'try' region protected by the finally, since we generate code in such a way that execution
8684                 // never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on
8685                 // stack walks.)
8686
8687                 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
8688
8689 #if FEATURE_EH_CALLFINALLY_THUNKS
8690                 if (step->bbJumpKind == BBJ_EHCATCHRET)
8691                 {
8692                     // Need to create another step block in the 'try' region that will actually branch to the
8693                     // call-to-finally thunk.
8694                     BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
8695                     step->bbJumpDest  = step2;
8696                     step->bbJumpDest->bbRefs++;
8697                     step2->setBBWeight(block->bbWeight);
8698                     step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8699
8700 #ifdef DEBUG
8701                     if (verbose)
8702                     {
8703                         printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is "
8704                                "BBJ_EHCATCHRET (BB%02u), new BBJ_ALWAYS step-step block BB%02u\n",
8705                                XTnum, step->bbNum, step2->bbNum);
8706                     }
8707 #endif
8708
8709                     step = step2;
8710                     assert(stepType == ST_Catch); // Leave it as catch type for now.
8711                 }
8712 #endif // FEATURE_EH_CALLFINALLY_THUNKS
8713
8714 #if FEATURE_EH_CALLFINALLY_THUNKS
8715                 unsigned callFinallyTryIndex =
8716                     (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
8717                 unsigned callFinallyHndIndex =
8718                     (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
8719 #else  // !FEATURE_EH_CALLFINALLY_THUNKS
8720                 unsigned callFinallyTryIndex = XTnum + 1;
8721                 unsigned callFinallyHndIndex = 0; // don't care
8722 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
8723
8724                 callBlock        = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step);
8725                 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8726                                               // finally in the chain)
8727                 step->bbJumpDest->bbRefs++;
8728
8729 #if defined(_TARGET_ARM_)
8730                 if (stepType == ST_FinallyReturn)
8731                 {
8732                     assert(step->bbJumpKind == BBJ_ALWAYS);
8733                     // Mark the target of a finally return
8734                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8735                 }
8736 #endif // defined(_TARGET_ARM_)
8737
8738                 /* The new block will inherit this block's weight */
8739                 callBlock->setBBWeight(block->bbWeight);
8740                 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8741
8742 #ifdef DEBUG
8743                 if (verbose)
8744                 {
8745                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY block "
8746                            "BB%02u\n",
8747                            XTnum, callBlock->bbNum);
8748                 }
8749 #endif
8750             }
8751
8752             step     = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8753             stepType = ST_FinallyReturn;
8754
8755             /* The new block will inherit this block's weight */
8756             step->setBBWeight(block->bbWeight);
8757             step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8758
8759 #ifdef DEBUG
8760             if (verbose)
8761             {
8762                 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) "
8763                        "block BB%02u\n",
8764                        XTnum, step->bbNum);
8765             }
8766 #endif
8767
8768             callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8769
8770             invalidatePreds = true;
8771         }
8772         else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8773                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8774         {
8775             // We are jumping out of a catch-protected try.
8776             //
8777             // If we are returning from a call to a finally, then we must have a step block within a try
8778             // that is protected by a catch. This is so when unwinding from that finally (e.g., if code within the
8779             // finally raises an exception), the VM will find this step block, notice that it is in a protected region,
8780             // and invoke the appropriate catch.
8781             //
8782             // We also need to handle a special case with the handling of ThreadAbortException. If a try/catch
8783             // catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception),
8784             // and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM,
8785             // the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target
8786             // address of the catch return as the new exception address. That is, the re-raised exception appears to
8787             // occur at the catch return address. If this exception return address skips an enclosing try/catch that
8788             // catches ThreadAbortException, then the enclosing try/catch will not catch the exception, as it should.
8789             // For example:
8790             //
8791             // try {
8792             //    try {
8793             //       // something here raises ThreadAbortException
8794             //       LEAVE LABEL_1; // no need to stop at LABEL_2
8795             //    } catch (Exception) {
8796             //       // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so
8797             //       // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode.
8798             //       // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised
8799             //       // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only
8800             //       // need to do this transformation if the current EH block is a try/catch that catches
8801             //       // ThreadAbortException (or one of its parents), however we might not be able to find that
8802             //       // information, so currently we do it for all catch types.
8803             //       LEAVE LABEL_1; // Convert this to LEAVE LABEL_2;
8804             //    }
8805             //    LABEL_2: LEAVE LABEL_1; // inserted by this step creation code
8806             // } catch (ThreadAbortException) {
8807             // }
8808             // LABEL_1:
8809             //
8810             // Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C#
8811             // compiler.
8812
8813             if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch))
8814             {
8815                 BasicBlock* catchStep;
8816
8817                 assert(step);
8818
8819                 if (stepType == ST_FinallyReturn)
8820                 {
8821                     assert(step->bbJumpKind == BBJ_ALWAYS);
8822                 }
8823                 else
8824                 {
8825                     assert(stepType == ST_Catch);
8826                     assert(step->bbJumpKind == BBJ_EHCATCHRET);
8827                 }
8828
8829                 /* Create a new exit block in the try region for the existing step block to jump to in this scope */
8830                 catchStep        = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
8831                 step->bbJumpDest = catchStep;
8832                 step->bbJumpDest->bbRefs++;
8833
8834 #if defined(_TARGET_ARM_)
8835                 if (stepType == ST_FinallyReturn)
8836                 {
8837                     // Mark the target of a finally return
8838                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8839                 }
8840 #endif // defined(_TARGET_ARM_)
8841
8842                 /* The new block will inherit this block's weight */
8843                 catchStep->setBBWeight(block->bbWeight);
8844                 catchStep->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8845
8846 #ifdef DEBUG
8847                 if (verbose)
8848                 {
8849                     if (stepType == ST_FinallyReturn)
8850                     {
8851                         printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new "
8852                                "BBJ_ALWAYS block BB%02u\n",
8853                                XTnum, catchStep->bbNum);
8854                     }
8855                     else
8856                     {
8857                         assert(stepType == ST_Catch);
8858                         printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new "
8859                                "BBJ_ALWAYS block BB%02u\n",
8860                                XTnum, catchStep->bbNum);
8861                     }
8862                 }
8863 #endif // DEBUG
8864
8865                 /* This block is the new step */
8866                 step     = catchStep;
8867                 stepType = ST_Try;
8868
8869                 invalidatePreds = true;
8870             }
8871         }
8872     }
8873
8874     if (step == nullptr)
8875     {
8876         block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8877
8878 #ifdef DEBUG
8879         if (verbose)
8880         {
8881             printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE "
8882                    "block BB%02u to BBJ_ALWAYS\n",
8883                    block->bbNum);
8884         }
8885 #endif
8886     }
8887     else
8888     {
8889         step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
8890
8891 #if defined(_TARGET_ARM_)
8892         if (stepType == ST_FinallyReturn)
8893         {
8894             assert(step->bbJumpKind == BBJ_ALWAYS);
8895             // Mark the target of a finally return
8896             step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8897         }
8898 #endif // defined(_TARGET_ARM_)
8899
8900 #ifdef DEBUG
8901         if (verbose)
8902         {
8903             printf("impImportLeave - final destination of step blocks set to BB%02u\n", leaveTarget->bbNum);
8904         }
8905 #endif
8906
8907         // Queue up the jump target for importing
8908
8909         impImportBlockPending(leaveTarget);
8910     }
8911
8912     if (invalidatePreds && fgComputePredsDone)
8913     {
8914         JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
8915         fgRemovePreds();
8916     }
8917
8918 #ifdef DEBUG
8919     fgVerifyHandlerTab();
8920
8921     if (verbose)
8922     {
8923         printf("\nAfter import CEE_LEAVE:\n");
8924         fgDispBasicBlocks();
8925         fgDispHandlerTab();
8926     }
8927 #endif // DEBUG
8928 }
8929
8930 #endif // FEATURE_EH_FUNCLETS
8931
8932 /*****************************************************************************/
8933 // This is called when reimporting a leave block. It resets the JumpKind,
8934 // JumpDest, and bbNext to the original values
8935
8936 void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr)
8937 {
8938 #if FEATURE_EH_FUNCLETS
8939     // With EH funclets, while importing a leave opcode we create another block ending with BBJ_ALWAYS (call it B1),
8940     // and the block containing the leave (say B0) is marked as BBJ_CALLFINALLY. If for some reason we reimport B0,
8941     // it is reset (in this routine) to end with BBJ_LEAVE, and further down, when B0 is actually reimported, we
8942     // create another BBJ_ALWAYS (call it B2). In this process B1 gets orphaned, and any blocks for which B1 is the
8943     // only predecessor are also considered orphans and attempted to be deleted.
8944     //
8945     //  try  {
8946     //     ....
8947     //     try
8948     //     {
8949     //         ....
8950     //         leave OUTSIDE;  // B0 is the block containing this leave, following this would be B1
8951     //     } finally { }
8952     //  } finally { }
8953     //  OUTSIDE:
8954     //
8955     // In the above nested try-finally example, we create a step block (call it Bstep) which in turn branches to a
8956     // block that a finally would branch to (such a block is marked as a finally target).  Block B1 branches to the step
8957     // block. Because of the re-import of B0, Bstep is also orphaned. Since Bstep is a finally target it cannot be
8958     // removed.  To work around this we duplicate B0 (call it B0Dup) before resetting it. B0Dup is marked as BBJ_CALLFINALLY and
8959     // only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block deletion B0Dup and B1
8960     // will be treated as pair and handled correctly.
8961     if (block->bbJumpKind == BBJ_CALLFINALLY)
8962     {
8963         BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind);
8964         dupBlock->bbFlags    = block->bbFlags;
8965         dupBlock->bbJumpDest = block->bbJumpDest;
8966         dupBlock->copyEHRegion(block);
8967         dupBlock->bbCatchTyp = block->bbCatchTyp;
8968
8969         // Mark this block as
8970         //  a) not referenced by any other block to make sure that it gets deleted
8971         //  b) weight zero
8972         //  c) prevented from being imported
8973         //  d) as internal
8974         //  e) as rarely run
8975         dupBlock->bbRefs   = 0;
8976         dupBlock->bbWeight = 0;
8977         dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY;
8978
8979         // Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS
8980         // will be next to each other.
8981         fgInsertBBafter(block, dupBlock);
8982
8983 #ifdef DEBUG
8984         if (verbose)
8985         {
8986             printf("New Basic Block BB%02u duplicate of BB%02u created.\n", dupBlock->bbNum, block->bbNum);
8987         }
8988 #endif
8989     }
8990 #endif // FEATURE_EH_FUNCLETS
8991
8992     block->bbJumpKind = BBJ_LEAVE;
8993     fgInitBBLookup();
8994     block->bbJumpDest = fgLookupBB(jmpAddr);
8995
8996     // We will leave the BBJ_ALWAYS block we introduced. When it's reimported
8997     // the BBJ_ALWAYS block will be unreachable, and will be removed afterwards. The
8998     // reason we don't want to remove the block at this point is that if we called
8999     // fgInitBBLookup() again we would build the lookup incorrectly, as the BBJ_ALWAYS
9000     // block wouldn't be added and the linked-list length would differ from fgBBcount.
9001 }
9002
9003 /*****************************************************************************/
9004 // Get the first non-prefix opcode. Used for verification of valid combinations
9005 // of prefixes and actual opcodes.
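//
// Illustrative example (a sketch, not from the original comment): for the IL
// sequence "unaligned. 1 volatile. ldind.i4" this walks past the two prefixes
// (including the unaligned. operand) and returns CEE_LDIND_I4; if the stream
// ends before a non-prefix opcode is found, CEE_ILLEGAL is returned.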
9006
9007 static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp)
9008 {
9009     while (codeAddr < codeEndp)
9010     {
9011         OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
9012         codeAddr += sizeof(__int8);
9013
9014         if (opcode == CEE_PREFIX1)
9015         {
9016             if (codeAddr >= codeEndp)
9017             {
9018                 break;
9019             }
9020             opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
9021             codeAddr += sizeof(__int8);
9022         }
9023
9024         switch (opcode)
9025         {
9026             case CEE_UNALIGNED:
9027             case CEE_VOLATILE:
9028             case CEE_TAILCALL:
9029             case CEE_CONSTRAINED:
9030             case CEE_READONLY:
9031                 break;
9032             default:
9033                 return opcode;
9034         }
9035
9036         codeAddr += opcodeSizes[opcode];
9037     }
9038
9039     return CEE_ILLEGAL;
9040 }
9041
9042 /*****************************************************************************/
9043 // Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes
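//
// Illustrative example (assumed, not from the original comment): "volatile. ldsfld"
// passes this check, since volatile. is explicitly allowed with the static field
// opcodes, while "volatile. ldloc.0" is not in the permitted set and triggers
// BADCODE below.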
9044
9045 static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix)
9046 {
9047     OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
9048
9049     if (!(
9050             // The opcodes of all the ldind and stind instructions happen to be contiguous, except stind.i.
9051             ((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) ||
9052             (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) ||
9053             (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) ||
9054             // volatile. prefix is allowed with the ldsfld and stsfld
9055             (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD)))))
9056     {
9057         BADCODE("Invalid opcode for unaligned. or volatile. prefix");
9058     }
9059 }
9060
9061 /*****************************************************************************/
9062
9063 #ifdef DEBUG
9064
9065 #undef RETURN // undef contracts RETURN macro
9066
9067 enum controlFlow_t
9068 {
9069     NEXT,
9070     CALL,
9071     RETURN,
9072     THROW,
9073     BRANCH,
9074     COND_BRANCH,
9075     BREAK,
9076     PHI,
9077     META,
9078 };
9079
9080 const static controlFlow_t controlFlow[] = {
9081 #define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow,
9082 #include "opcode.def"
9083 #undef OPDEF
9084 };
9085
9086 #endif // DEBUG
9087
9088 /*****************************************************************************
9089  *  Determine the result type of an arithmetic operation.
9090  *  On 64-bit targets, inserts upcasts when native int is mixed with int32.
9091  */
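//
// Summary of the byref rules implemented below (restated here from the code,
// not part of the original comments):
//
//   GT_SUB:  byref - byref        => TYP_I_IMPL (native int)
//            [native] int - byref => TYP_I_IMPL
//            byref - [native] int => TYP_BYREF
//   GT_ADD:  byref + [native] int => TYP_BYREF (and likewise int + byref)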
9092 var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTreePtr* pOp1, GenTreePtr* pOp2)
9093 {
9094     var_types  type = TYP_UNDEF;
9095     GenTreePtr op1 = *pOp1, op2 = *pOp2;
9096
9097     // Arithmetic operations are generally only allowed with
9098     // primitive types, but certain operations are allowed
9099     // with byrefs
9100
9101     if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9102     {
9103         if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9104         {
9105             // byref1-byref2 => gives a native int
9106             type = TYP_I_IMPL;
9107         }
9108         else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9109         {
9110             // [native] int - byref => gives a native int
9111
9112             //
9113             // The reason is that it is possible, in managed C++,
9114             // to have a tree like this:
9115             //
9116             //              -
9117             //             / \
9118             //            /   \
9119             //           /     \
9120             //          /       \
9121             // const(h) int     addr byref
9122             //
9123             // <BUGNUM> VSW 318822 </BUGNUM>
9124             //
9125             // So here we decide to make the resulting type to be a native int.
9126             CLANG_FORMAT_COMMENT_ANCHOR;
9127
9128 #ifdef _TARGET_64BIT_
9129             if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9130             {
9131                 // insert an explicit upcast
9132                 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9133             }
9134 #endif // _TARGET_64BIT_
9135
9136             type = TYP_I_IMPL;
9137         }
9138         else
9139         {
9140             // byref - [native] int => gives a byref
9141             assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet()));
9142
9143 #ifdef _TARGET_64BIT_
9144             if ((genActualType(op2->TypeGet()) != TYP_I_IMPL))
9145             {
9146                 // insert an explicit upcast
9147                 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9148             }
9149 #endif // _TARGET_64BIT_
9150
9151             type = TYP_BYREF;
9152         }
9153     }
9154     else if ((oper == GT_ADD) &&
9155              (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9156     {
9157         // byref + [native] int => gives a byref
9158         // (or)
9159         // [native] int + byref => gives a byref
9160
9161         // only one can be a byref : byref op byref not allowed
9162         assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF);
9163         assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet()));
9164
9165 #ifdef _TARGET_64BIT_
9166         if (genActualType(op2->TypeGet()) == TYP_BYREF)
9167         {
9168             if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9169             {
9170                 // insert an explicit upcast
9171                 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9172             }
9173         }
9174         else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9175         {
9176             // insert an explicit upcast
9177             op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9178         }
9179 #endif // _TARGET_64BIT_
9180
9181         type = TYP_BYREF;
9182     }
9183 #ifdef _TARGET_64BIT_
9184     else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL)
9185     {
9186         assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9187
9188         // int + native int => gives native int
9189         // native int + int => gives native int
9190         // (the operand is TYP_I_IMPL because in IL it isn't an Int64, it's just an IntPtr / native int)
9191
9192         if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9193         {
9194             // insert an explicit upcast
9195             op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9196         }
9197         else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9198         {
9199             // insert an explicit upcast
9200             op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9201         }
9202
9203         type = TYP_I_IMPL;
9204     }
9205 #else  // 32-bit TARGET
9206     else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG)
9207     {
9208         assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9209
9210         // int + long => gives long
9211         // long + int => gives long
9212
9213         type = TYP_LONG;
9214     }
9215 #endif // _TARGET_64BIT_
9216     else
9217     {
9218         // int + int => gives an int
9219         assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF);
9220
9221         assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
9222                varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
9223
9224         type = genActualType(op1->gtType);
9225
9226 #if FEATURE_X87_DOUBLES
9227
9228         // For x87, since we only have 1 size of registers, prefer double
9229         // For everybody else, be more precise
9230         if (type == TYP_FLOAT)
9231             type = TYP_DOUBLE;
9232
9233 #else // !FEATURE_X87_DOUBLES
9234
9235         // If both operands are TYP_FLOAT, then leave it as TYP_FLOAT.
9236         // Otherwise, turn floats into doubles
9237         if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT))
9238         {
9239             assert(genActualType(op2->gtType) == TYP_DOUBLE);
9240             type = TYP_DOUBLE;
9241         }
9242
9243 #endif // FEATURE_X87_DOUBLES
9244     }
9245
9246 #if FEATURE_X87_DOUBLES
9247     assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_LONG || type == TYP_INT);
9248 #else  // FEATURE_X87_DOUBLES
9249     assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT);
9250 #endif // FEATURE_X87_DOUBLES
9251
9252     return type;
9253 }
9254
9255 /*****************************************************************************
9256  * Casting Helper Function to service both CEE_CASTCLASS and CEE_ISINST
9257  *
9258  * typeRef contains the token, op1 contains the value being cast,
9259  * and op2 contains code that creates the type handle corresponding to typeRef.
9260  * isCastClass = true means CEE_CASTCLASS, false means CEE_ISINST
9261  */
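//
// When expanded inline, the overall result is roughly of the form below (an
// illustrative sketch assembled from the trees built in this method, not an
// exact dump):
//
//   tmp = (op1 == null) ? op1
//                       : ((*op1 != typeHandle) ? slowPath : op1);
//
// where 'slowPath' is a call to CORINFO_HELP_CHKCASTCLASS_SPECIAL for
// castclass and a null constant for isinst, and the QMARK is spilled to a
// temp so it remains a top-level node.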
9262 GenTreePtr Compiler::impCastClassOrIsInstToTree(GenTreePtr              op1,
9263                                                 GenTreePtr              op2,
9264                                                 CORINFO_RESOLVED_TOKEN* pResolvedToken,
9265                                                 bool                    isCastClass)
9266 {
9267     bool expandInline;
9268
9269     assert(op1->TypeGet() == TYP_REF);
9270
9271     CorInfoHelpFunc helper = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass);
9272
9273     if (isCastClass)
9274     {
9275         // We only want to expand inline the normal CHKCASTCLASS helper;
9276         expandInline = (helper == CORINFO_HELP_CHKCASTCLASS);
9277     }
9278     else
9279     {
9280         if (helper == CORINFO_HELP_ISINSTANCEOFCLASS)
9281         {
9282             // Get the Class Handle and class attributes for the type we are casting to
9283             //
9284             DWORD flags = info.compCompHnd->getClassAttribs(pResolvedToken->hClass);
9285
9286             //
9287             // If the class handle is marked as final we can also expand the IsInst check inline
9288             //
9289             expandInline = ((flags & CORINFO_FLG_FINAL) != 0);
9290
9291             //
9292             // But don't expand inline these two cases
9293             //
9294             if (flags & CORINFO_FLG_MARSHAL_BYREF)
9295             {
9296                 expandInline = false;
9297             }
9298             else if (flags & CORINFO_FLG_CONTEXTFUL)
9299             {
9300                 expandInline = false;
9301             }
9302         }
9303         else
9304         {
9305             //
9306             // We can't expand inline any other helpers
9307             //
9308             expandInline = false;
9309         }
9310     }
9311
9312     if (expandInline)
9313     {
9314         if (compCurBB->isRunRarely())
9315         {
9316             expandInline = false; // not worth the code expansion in a rarely run block
9317         }
9318
9319         if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals())
9320         {
9321             expandInline = false; // not worth creating an untracked local variable
9322         }
9323     }
9324
9325     if (!expandInline)
9326     {
9327         // If we CSE this class handle we prevent assertionProp from making SubType assertions
9328         // so instead we force the CSE logic to not consider CSE-ing this class handle.
9329         //
9330         op2->gtFlags |= GTF_DONT_CSE;
9331
9332         return gtNewHelperCallNode(helper, TYP_REF, 0, gtNewArgList(op2, op1));
9333     }
9334
9335     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2"));
9336
9337     GenTreePtr temp;
9338     GenTreePtr condMT;
9339     //
9340     // expand the methodtable match:
9341     //
9342     //  condMT ==>   GT_NE
9343     //               /    \
9344     //           GT_IND   op2 (typically CNS_INT)
9345     //              |
9346     //           op1Copy
9347     //
9348
9349     // This can replace op1 with a GT_COMMA that evaluates op1 into a local
9350     //
9351     op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1"));
9352     //
9353     // op1 is now known to be a non-complex tree
9354     // thus we can use gtClone(op1) from now on
9355     //
9356
9357     GenTreePtr op2Var = op2;
9358     if (isCastClass)
9359     {
9360         op2Var                                                  = fgInsertCommaFormTemp(&op2);
9361         lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true;
9362     }
9363     temp = gtNewOperNode(GT_IND, TYP_I_IMPL, temp);
9364     temp->gtFlags |= GTF_EXCEPT;
9365     condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2);
9366
9367     GenTreePtr condNull;
9368     //
9369     // expand the null check:
9370     //
9371     //  condNull ==>   GT_EQ
9372     //                 /    \
9373     //             op1Copy CNS_INT
9374     //                      null
9375     //
9376     condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF));
9377
9378     //
9379     // expand the true and false trees for the condMT
9380     //
9381     GenTreePtr condFalse = gtClone(op1);
9382     GenTreePtr condTrue;
9383     if (isCastClass)
9384     {
9385         //
9386         // use the special helper that skips the cases checked by our inlined cast
9387         //
9388         helper = CORINFO_HELP_CHKCASTCLASS_SPECIAL;
9389
9390         condTrue = gtNewHelperCallNode(helper, TYP_REF, 0, gtNewArgList(op2Var, gtClone(op1)));
9391     }
9392     else
9393     {
9394         condTrue = gtNewIconNode(0, TYP_REF);
9395     }
9396
9397 #define USE_QMARK_TREES
9398
9399 #ifdef USE_QMARK_TREES
9400     GenTreePtr qmarkMT;
9401     //
9402     // Generate first QMARK - COLON tree
9403     //
9404     //  qmarkMT ==>   GT_QMARK
9405     //                 /     \
9406     //            condMT   GT_COLON
9407     //                      /     \
9408     //                condFalse  condTrue
9409     //
9410     temp    = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse);
9411     qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp);
9412     condMT->gtFlags |= GTF_RELOP_QMARK;
9413
9414     GenTreePtr qmarkNull;
9415     //
9416     // Generate second QMARK - COLON tree
9417     //
9418     //  qmarkNull ==>  GT_QMARK
9419     //                 /     \
9420     //           condNull  GT_COLON
9421     //                      /     \
9422     //                qmarkMT   op1Copy
9423     //
9424     temp      = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT);
9425     qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp);
9426     qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF;
9427     condNull->gtFlags |= GTF_RELOP_QMARK;
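         // To summarize the expansion built above (illustrative pseudo-code only, not the
         // actual IR shape):
         //
         //     result = (op1 == null) ? op1
         //                            : ((*op1 != op2) ? condTrue : op1)
         //
         // where condTrue is the CHKCASTCLASS_SPECIAL helper call for castclass, and a null
         // constant for isinst (per the selection above).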
9428
9429     // Make QMark node a top level node by spilling it.
9430     unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2"));
9431     impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE);
9432     return gtNewLclvNode(tmp, TYP_REF);
9433 #endif
9434 }
9435
9436 #ifndef DEBUG
9437 #define assertImp(cond) ((void)0)
9438 #else
9439 #define assertImp(cond)                                                                                                \
9440     do                                                                                                                 \
9441     {                                                                                                                  \
9442         if (!(cond))                                                                                                   \
9443         {                                                                                                              \
9444             const int cchAssertImpBuf = 600;                                                                           \
9445             char*     assertImpBuf    = (char*)alloca(cchAssertImpBuf);                                                \
9446             _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1,                                            \
9447                         "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond,         \
9448                         impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL",                      \
9449                         op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth);                     \
9450             assertAbort(assertImpBuf, __FILE__, __LINE__);                                                             \
9451         }                                                                                                              \
9452     } while (0)
9453 #endif // DEBUG
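     // For illustration only, a typical use later in this file is
     //     assertImp(op1->gtType == TYP_REF);
     // which, in debug builds, reports the failing condition along with the current opcode
     // name, IL offset, operand types, and stack depth before aborting.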
9454
9455 #ifdef _PREFAST_
9456 #pragma warning(push)
9457 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
9458 #endif
9459 /*****************************************************************************
9460  *  Import the instructions for the given basic block
9461  */
9462 void Compiler::impImportBlockCode(BasicBlock* block)
9463 {
9464 #define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind)
9465
9466 #ifdef DEBUG
9467
9468     if (verbose)
9469     {
9470         printf("\nImporting BB%02u (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName);
9471     }
9472 #endif
9473
9474     unsigned  nxtStmtIndex = impInitBlockLineInfo();
9475     IL_OFFSET nxtStmtOffs;
9476
9477     GenTreePtr                   arrayNodeFrom, arrayNodeTo, arrayNodeToIndex;
9478     bool                         expandInline;
9479     CorInfoHelpFunc              helper;
9480     CorInfoIsAccessAllowedResult accessAllowedResult;
9481     CORINFO_HELPER_DESC          calloutHelper;
9482     const BYTE*                  lastLoadToken = nullptr;
9483
9484     // reject cyclic constraints
9485     if (tiVerificationNeeded)
9486     {
9487         Verify(!info.hasCircularClassConstraints, "Method parent has circular class type parameter constraints.");
9488         Verify(!info.hasCircularMethodConstraints, "Method has circular method type parameter constraints.");
9489     }
9490
9491     /* Get the tree list started */
9492
9493     impBeginTreeList();
9494
9495     /* Walk the opcodes that comprise the basic block */
9496
9497     const BYTE* codeAddr = info.compCode + block->bbCodeOffs;
9498     const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd;
9499
9500     IL_OFFSET opcodeOffs    = block->bbCodeOffs;
9501     IL_OFFSET lastSpillOffs = opcodeOffs;
9502
9503     signed jmpDist;
9504
9505     /* remember the start of the delegate creation sequence (used for verification) */
9506     const BYTE* delegateCreateStart = nullptr;
9507
9508     int  prefixFlags = 0;
9509     bool explicitTailCall, constraintCall, readonlyCall;
9510
9511     bool     insertLdloc = false; // set by CEE_DUP and cleared by following store
9512     typeInfo tiRetVal;
9513
9514     unsigned numArgs = info.compArgsCount;
9515
9516     /* Now process all the opcodes in the block */
9517
9518     var_types callTyp    = TYP_COUNT;
9519     OPCODE    prevOpcode = CEE_ILLEGAL;
9520
9521     if (block->bbCatchTyp)
9522     {
9523         if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
9524         {
9525             impCurStmtOffsSet(block->bbCodeOffs);
9526         }
9527
9528         // We will spill the GT_CATCH_ARG and the input of the BB_QMARK block
9529         // to a temp. This is a trade-off for code simplicity
9530         impSpillSpecialSideEff();
9531     }
9532
9533     while (codeAddr < codeEndp)
9534     {
9535         bool                   usingReadyToRunHelper = false;
9536         CORINFO_RESOLVED_TOKEN resolvedToken;
9537         CORINFO_RESOLVED_TOKEN constrainedResolvedToken;
9538         CORINFO_CALL_INFO      callInfo;
9539         CORINFO_FIELD_INFO     fieldInfo;
9540
9541         tiRetVal = typeInfo(); // Default type info
9542
9543         //---------------------------------------------------------------------
9544
9545         /* We need to restrict the max tree depth as many of the Compiler
9546            functions are recursive. We do this by spilling the stack */
9547
9548         if (verCurrentState.esStackDepth)
9549         {
9550             /* Has it been a while since we last saw a non-empty stack (which
9551                guarantees that the tree depth isn't accumulating)? */
9552
9553             if ((opcodeOffs - lastSpillOffs) > 200)
9554             {
9555                 impSpillStackEnsure();
9556                 lastSpillOffs = opcodeOffs;
9557             }
9558         }
9559         else
9560         {
9561             lastSpillOffs   = opcodeOffs;
9562             impBoxTempInUse = false; // nothing on the stack, box temp OK to use again
9563         }
9564
9565         /* Compute the current instr offset */
9566
9567         opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
9568
9569 #ifndef DEBUG
9570         if (opts.compDbgInfo)
9571 #endif
9572         {
9573             if (!compIsForInlining())
9574             {
9575                 nxtStmtOffs =
9576                     (nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET;
9577
9578                 /* Have we reached the next stmt boundary ? */
9579
9580                 if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs)
9581                 {
9582                     assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]);
9583
9584                     if (verCurrentState.esStackDepth != 0 && opts.compDbgCode)
9585                     {
9586                         /* We need to provide accurate IP-mapping at this point.
9587                            So spill anything on the stack so that it will form
9588                            gtStmts with the correct stmt offset noted */
9589
9590                         impSpillStackEnsure(true);
9591                     }
9592
9593                     // Has impCurStmtOffs been reported in any tree?
9594
9595                     if (impCurStmtOffs != BAD_IL_OFFSET && opts.compDbgCode)
9596                     {
9597                         GenTreePtr placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
9598                         impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9599
9600                         assert(impCurStmtOffs == BAD_IL_OFFSET);
9601                     }
9602
9603                     if (impCurStmtOffs == BAD_IL_OFFSET)
9604                     {
9605                         /* Make sure that nxtStmtIndex is in sync with opcodeOffs.
9606                            If opcodeOffs has gone past nxtStmtIndex, catch up */
9607
9608                         while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount &&
9609                                info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs)
9610                         {
9611                             nxtStmtIndex++;
9612                         }
9613
9614                         /* Go to the new stmt */
9615
9616                         impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]);
9617
9618                         /* Update the stmt boundary index */
9619
9620                         nxtStmtIndex++;
9621                         assert(nxtStmtIndex <= info.compStmtOffsetsCount);
9622
9623                         /* Are there any more line# entries after this one? */
9624
9625                         if (nxtStmtIndex < info.compStmtOffsetsCount)
9626                         {
9627                             /* Remember where the next line# starts */
9628
9629                             nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex];
9630                         }
9631                         else
9632                         {
9633                             /* No more line# entries */
9634
9635                             nxtStmtOffs = BAD_IL_OFFSET;
9636                         }
9637                     }
9638                 }
9639                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) &&
9640                          (verCurrentState.esStackDepth == 0))
9641                 {
9642                     /* At stack-empty locations, we have already added the tree to
9643                        the stmt list with the last offset. We just need to update
9644                        impCurStmtOffs
9645                      */
9646
9647                     impCurStmtOffsSet(opcodeOffs);
9648                 }
9649                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) &&
9650                          impOpcodeIsCallSiteBoundary(prevOpcode))
9651                 {
9652                     /* Make sure we have a type cached */
9653                     assert(callTyp != TYP_COUNT);
9654
9655                     if (callTyp == TYP_VOID)
9656                     {
9657                         impCurStmtOffsSet(opcodeOffs);
9658                     }
9659                     else if (opts.compDbgCode)
9660                     {
9661                         impSpillStackEnsure(true);
9662                         impCurStmtOffsSet(opcodeOffs);
9663                     }
9664                 }
9665                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP))
9666                 {
9667                     if (opts.compDbgCode)
9668                     {
9669                         impSpillStackEnsure(true);
9670                     }
9671
9672                     impCurStmtOffsSet(opcodeOffs);
9673                 }
9674
9675                 assert(impCurStmtOffs == BAD_IL_OFFSET || nxtStmtOffs == BAD_IL_OFFSET ||
9676                        jitGetILoffs(impCurStmtOffs) <= nxtStmtOffs);
9677             }
9678         }
9679
9680         CORINFO_CLASS_HANDLE clsHnd       = DUMMY_INIT(NULL);
9681         CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL);
9682         CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL);
9683
9684         var_types       lclTyp, ovflType = TYP_UNKNOWN;
9685         GenTreePtr      op1           = DUMMY_INIT(NULL);
9686         GenTreePtr      op2           = DUMMY_INIT(NULL);
9687         GenTreeArgList* args          = nullptr; // What good do these "DUMMY_INIT"s do?
9688         GenTreePtr      newObjThisPtr = DUMMY_INIT(NULL);
9689         bool            uns           = DUMMY_INIT(false);
9690
9691         /* Get the next opcode and the size of its parameters */
9692
9693         OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
9694         codeAddr += sizeof(__int8);
9695
9696 #ifdef DEBUG
9697         impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
9698         JITDUMP("\n    [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs);
9699 #endif
9700
9701     DECODE_OPCODE:
9702
9703         // Return if any previous code has caused inline to fail.
9704         if (compDonotInline())
9705         {
9706             return;
9707         }
9708
9709         /* Get the size of additional parameters */
9710
9711         signed int sz = opcodeSizes[opcode];
9712
9713 #ifdef DEBUG
9714         clsHnd  = NO_CLASS_HANDLE;
9715         lclTyp  = TYP_COUNT;
9716         callTyp = TYP_COUNT;
9717
9718         impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
9719         impCurOpcName = opcodeNames[opcode];
9720
9721         if (verbose && (opcode != CEE_PREFIX1))
9722         {
9723             printf("%s", impCurOpcName);
9724         }
9725
9726         /* Use assertImp() to display the opcode */
9727
9728         op1 = op2 = nullptr;
9729 #endif
9730
9731         /* See what kind of opcode we have, then dispatch on it */
9732
9733         unsigned mflags   = 0;
9734         unsigned clsFlags = 0;
9735
9736         switch (opcode)
9737         {
9738             unsigned  lclNum;
9739             var_types type;
9740
9741             GenTreePtr op3;
9742             genTreeOps oper;
9743             unsigned   size;
9744
9745             int val;
9746
9747             CORINFO_SIG_INFO     sig;
9748             unsigned             flags;
9749             IL_OFFSET            jmpAddr;
9750             bool                 ovfl, unordered, callNode;
9751             bool                 ldstruct;
9752             CORINFO_CLASS_HANDLE tokenType;
9753
9754             union {
9755                 int     intVal;
9756                 float   fltVal;
9757                 __int64 lngVal;
9758                 double  dblVal;
9759             } cval;
9760
9761             case CEE_PREFIX1:
9762                 opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
9763                 codeAddr += sizeof(__int8);
9764                 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
9765                 goto DECODE_OPCODE;
9766
9767             SPILL_APPEND:
9768
9769                 // We need to call impSpillLclRefs() for a struct type lclVar.
9770                 // This is done for non-block assignments in the handling of stloc.
9771                 if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtOp.gtOp1) &&
9772                     (op1->gtOp.gtOp1->gtOper == GT_LCL_VAR))
9773                 {
9774                     impSpillLclRefs(op1->gtOp.gtOp1->AsLclVarCommon()->gtLclNum);
9775                 }
9776
9777                 /* Append 'op1' to the list of statements */
9778                 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
9779                 goto DONE_APPEND;
9780
9781             APPEND:
9782
9783                 /* Append 'op1' to the list of statements */
9784
9785                 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9786                 goto DONE_APPEND;
9787
9788             DONE_APPEND:
9789
9790 #ifdef DEBUG
9791                 // Remember at which BC offset the tree was finished
9792                 impNoteLastILoffs();
9793 #endif
9794                 break;
9795
9796             case CEE_LDNULL:
9797                 impPushNullObjRefOnStack();
9798                 break;
9799
9800             case CEE_LDC_I4_M1:
9801             case CEE_LDC_I4_0:
9802             case CEE_LDC_I4_1:
9803             case CEE_LDC_I4_2:
9804             case CEE_LDC_I4_3:
9805             case CEE_LDC_I4_4:
9806             case CEE_LDC_I4_5:
9807             case CEE_LDC_I4_6:
9808             case CEE_LDC_I4_7:
9809             case CEE_LDC_I4_8:
9810                 cval.intVal = (opcode - CEE_LDC_I4_0);
9811                 assert(-1 <= cval.intVal && cval.intVal <= 8);
9812                 goto PUSH_I4CON;
9813
9814             case CEE_LDC_I4_S:
9815                 cval.intVal = getI1LittleEndian(codeAddr);
9816                 goto PUSH_I4CON;
9817             case CEE_LDC_I4:
9818                 cval.intVal = getI4LittleEndian(codeAddr);
9819                 goto PUSH_I4CON;
9820             PUSH_I4CON:
9821                 JITDUMP(" %d", cval.intVal);
9822                 impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT));
9823                 break;
9824
9825             case CEE_LDC_I8:
9826                 cval.lngVal = getI8LittleEndian(codeAddr);
9827                 JITDUMP(" 0x%016llx", cval.lngVal);
9828                 impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG));
9829                 break;
9830
9831             case CEE_LDC_R8:
9832                 cval.dblVal = getR8LittleEndian(codeAddr);
9833                 JITDUMP(" %#.17g", cval.dblVal);
9834                 impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE));
9835                 break;
9836
9837             case CEE_LDC_R4:
9838                 cval.dblVal = getR4LittleEndian(codeAddr);
9839                 JITDUMP(" %#.17g", cval.dblVal);
9840                 {
9841                     GenTreePtr cnsOp = gtNewDconNode(cval.dblVal);
9842 #if !FEATURE_X87_DOUBLES
9843                     // The x87 FP stack doesn't differentiate between float and double,
9844                     // so with x87 doubles R4 is treated as R8; everybody else keeps the constant as TYP_FLOAT
9845                     cnsOp->gtType = TYP_FLOAT;
9846 #endif // FEATURE_X87_DOUBLES
9847                     impPushOnStack(cnsOp, typeInfo(TI_DOUBLE));
9848                 }
9849                 break;
9850
9851             case CEE_LDSTR:
9852
9853                 if (compIsForInlining())
9854                 {
9855                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_NO_CALLEE_LDSTR)
9856                     {
9857                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_LDSTR_RESTRICTION);
9858                         return;
9859                     }
9860                 }
9861
9862                 val = getU4LittleEndian(codeAddr);
9863                 JITDUMP(" %08X", val);
9864                 if (tiVerificationNeeded)
9865                 {
9866                     Verify(info.compCompHnd->isValidStringRef(info.compScopeHnd, val), "bad string");
9867                     tiRetVal = typeInfo(TI_REF, impGetStringClass());
9868                 }
9869                 impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal);
9870
9871                 break;
9872
9873             case CEE_LDARG:
9874                 lclNum = getU2LittleEndian(codeAddr);
9875                 JITDUMP(" %u", lclNum);
9876                 impLoadArg(lclNum, opcodeOffs + sz + 1);
9877                 break;
9878
9879             case CEE_LDARG_S:
9880                 lclNum = getU1LittleEndian(codeAddr);
9881                 JITDUMP(" %u", lclNum);
9882                 impLoadArg(lclNum, opcodeOffs + sz + 1);
9883                 break;
9884
9885             case CEE_LDARG_0:
9886             case CEE_LDARG_1:
9887             case CEE_LDARG_2:
9888             case CEE_LDARG_3:
9889                 lclNum = (opcode - CEE_LDARG_0);
9890                 assert(lclNum >= 0 && lclNum < 4);
9891                 impLoadArg(lclNum, opcodeOffs + sz + 1);
9892                 break;
9893
9894             case CEE_LDLOC:
9895                 lclNum = getU2LittleEndian(codeAddr);
9896                 JITDUMP(" %u", lclNum);
9897                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9898                 break;
9899
9900             case CEE_LDLOC_S:
9901                 lclNum = getU1LittleEndian(codeAddr);
9902                 JITDUMP(" %u", lclNum);
9903                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9904                 break;
9905
9906             case CEE_LDLOC_0:
9907             case CEE_LDLOC_1:
9908             case CEE_LDLOC_2:
9909             case CEE_LDLOC_3:
9910                 lclNum = (opcode - CEE_LDLOC_0);
9911                 assert(lclNum >= 0 && lclNum < 4);
9912                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9913                 break;
9914
9915             case CEE_STARG:
9916                 lclNum = getU2LittleEndian(codeAddr);
9917                 goto STARG;
9918
9919             case CEE_STARG_S:
9920                 lclNum = getU1LittleEndian(codeAddr);
9921             STARG:
9922                 JITDUMP(" %u", lclNum);
9923
9924                 if (tiVerificationNeeded)
9925                 {
9926                     Verify(lclNum < info.compILargsCount, "bad arg num");
9927                 }
9928
9929                 if (compIsForInlining())
9930                 {
9931                     op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
9932                     noway_assert(op1->gtOper == GT_LCL_VAR);
9933                     lclNum = op1->AsLclVar()->gtLclNum;
9934
9935                     goto VAR_ST_VALID;
9936                 }
9937
9938                 lclNum = compMapILargNum(lclNum); // account for possible hidden param
9939                 assertImp(lclNum < numArgs);
9940
9941                 if (lclNum == info.compThisArg)
9942                 {
9943                     lclNum = lvaArg0Var;
9944                 }
9945                 lvaTable[lclNum].lvArgWrite = 1;
9946
9947                 if (tiVerificationNeeded)
9948                 {
9949                     typeInfo& tiLclVar = lvaTable[lclNum].lvVerTypeInfo;
9950                     Verify(tiCompatibleWith(impStackTop().seTypeInfo, NormaliseForStack(tiLclVar), true),
9951                            "type mismatch");
9952
9953                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
9954                     {
9955                         Verify(!tiLclVar.IsThisPtr(), "storing to uninit this ptr");
9956                     }
9957                 }
9958
9959                 goto VAR_ST;
9960
9961             case CEE_STLOC:
9962                 lclNum = getU2LittleEndian(codeAddr);
9963                 JITDUMP(" %u", lclNum);
9964                 goto LOC_ST;
9965
9966             case CEE_STLOC_S:
9967                 lclNum = getU1LittleEndian(codeAddr);
9968                 JITDUMP(" %u", lclNum);
9969                 goto LOC_ST;
9970
9971             case CEE_STLOC_0:
9972             case CEE_STLOC_1:
9973             case CEE_STLOC_2:
9974             case CEE_STLOC_3:
9975                 lclNum = (opcode - CEE_STLOC_0);
9976                 assert(lclNum >= 0 && lclNum < 4);
9977
9978             LOC_ST:
9979                 if (tiVerificationNeeded)
9980                 {
9981                     Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
9982                     Verify(tiCompatibleWith(impStackTop().seTypeInfo,
9983                                             NormaliseForStack(lvaTable[lclNum + numArgs].lvVerTypeInfo), true),
9984                            "type mismatch");
9985                 }
9986
9987                 if (compIsForInlining())
9988                 {
9989                     lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
9990
9991                     /* Have we allocated a temp for this local? */
9992
9993                     lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp"));
9994
9995                     goto _PopValue;
9996                 }
9997
9998                 lclNum += numArgs;
9999
10000             VAR_ST:
10001
10002                 if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var)
10003                 {
10004                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
10005                     BADCODE("Bad IL");
10006                 }
10007
10008             VAR_ST_VALID:
10009
10010                 /* if it is a struct assignment, make certain we don't overflow the buffer */
10011                 assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd));
10012
10013                 if (lvaTable[lclNum].lvNormalizeOnLoad())
10014                 {
10015                     lclTyp = lvaGetRealType(lclNum);
10016                 }
10017                 else
10018                 {
10019                     lclTyp = lvaGetActualType(lclNum);
10020                 }
10021
10022             _PopValue:
10023                 /* Pop the value being assigned */
10024
10025                 {
10026                     StackEntry se = impPopStack(clsHnd);
10027                     op1           = se.val;
10028                     tiRetVal      = se.seTypeInfo;
10029                 }
10030
10031 #ifdef FEATURE_SIMD
10032                 if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet()))
10033                 {
10034                     assert(op1->TypeGet() == TYP_STRUCT);
10035                     op1->gtType = lclTyp;
10036                 }
10037 #endif // FEATURE_SIMD
10038
10039                 op1 = impImplicitIorI4Cast(op1, lclTyp);
10040
10041 #ifdef _TARGET_64BIT_
10042                 // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
10043                 if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT))
10044                 {
10045                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
10046                     op1 = gtNewCastNode(TYP_INT, op1, TYP_INT);
10047                 }
10048 #endif // _TARGET_64BIT_
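                      // For illustration only: with the cast above, storing a native int into an
                      // int32 local on a 64-bit target is imported as
                      //     int32Lcl = GT_CAST<int>(op1)
                      // (int32Lcl is just a placeholder name), so the assignment built below is int32 = int32.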
10049
10050                 // We had better assign it a value of the correct type
10051                 assertImp(
10052                     genActualType(lclTyp) == genActualType(op1->gtType) ||
10053                     genActualType(lclTyp) == TYP_I_IMPL && op1->IsVarAddr() ||
10054                     (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) ||
10055                     (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) ||
10056                     (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) ||
10057                     ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF));
10058
10059                 /* If op1 is "&var" then its type is the transient "*" and it can
10060                    be used either as TYP_BYREF or TYP_I_IMPL */
10061
10062                 if (op1->IsVarAddr())
10063                 {
10064                     assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF);
10065
10066                     /* When "&var" is created, we assume it is a byref. If it is
10067                        being assigned to a TYP_I_IMPL var, change the type to
10068                        prevent unnecessary GC info */
10069
10070                     if (genActualType(lclTyp) == TYP_I_IMPL)
10071                     {
10072                         op1->gtType = TYP_I_IMPL;
10073                     }
10074                 }
10075
10076                 /* Filter out simple assignments to itself */
10077
10078                 if (op1->gtOper == GT_LCL_VAR && lclNum == op1->gtLclVarCommon.gtLclNum)
10079                 {
10080                     if (insertLdloc)
10081                     {
10082                         // This is a sequence of (ldloc, dup, stloc).  Can simplify
10083                         // to (ldloc, stloc).  Use impLoadVar below to reconstruct the ldloc node.
10084                         CLANG_FORMAT_COMMENT_ANCHOR;
10085
10086 #ifdef DEBUG
10087                         if (tiVerificationNeeded)
10088                         {
10089                             assert(
10090                                 typeInfo::AreEquivalent(tiRetVal, NormaliseForStack(lvaTable[lclNum].lvVerTypeInfo)));
10091                         }
10092 #endif
10093
10094                         op1         = nullptr;
10095                         insertLdloc = false;
10096
10097                         impLoadVar(lclNum, opcodeOffs + sz + 1);
10098                         break;
10099                     }
10100                     else if (opts.compDbgCode)
10101                     {
10102                         op1 = gtNewNothingNode();
10103                         goto SPILL_APPEND;
10104                     }
10105                     else
10106                     {
10107                         break;
10108                     }
10109                 }
10110
10111                 /* Create the assignment node */
10112
10113                 op2 = gtNewLclvNode(lclNum, lclTyp, opcodeOffs + sz + 1);
10114
10115                 /* If the local is aliased, we need to spill calls and
10116                    indirections from the stack. */
10117
10118                 if ((lvaTable[lclNum].lvAddrExposed || lvaTable[lclNum].lvHasLdAddrOp) &&
10119                     verCurrentState.esStackDepth > 0)
10120                 {
10121                     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased"));
10122                 }
10123
10124                 /* Spill any refs to the local from the stack */
10125
10126                 impSpillLclRefs(lclNum);
10127
10128 #if !FEATURE_X87_DOUBLES
10129                 // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
10130                 // We insert a cast to the dest 'op2' type
10131                 //
10132                 if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
10133                     varTypeIsFloating(op2->gtType))
10134                 {
10135                     op1 = gtNewCastNode(op2->TypeGet(), op1, op2->TypeGet());
10136                 }
10137 #endif // !FEATURE_X87_DOUBLES
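                      // For illustration only: if the popped value is TYP_DOUBLE and the local is
                      // TYP_FLOAT, the cast above makes the assignment built below effectively
                      //     floatLcl = (float)doubleVal
                      // (floatLcl and doubleVal are just placeholder names).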
10138
10139                 if (varTypeIsStruct(lclTyp))
10140                 {
10141                     op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL);
10142                 }
10143                 else
10144                 {
10145                     // The code generator generates GC tracking information
10146                     // based on the RHS of the assignment.  Later the LHS (which
10147                     // is a BYREF) gets used and the emitter checks that that variable
10148                     // is being tracked.  It is not (since the RHS was an int and did
10149                     // not need tracking).  To keep this assert happy, we change the RHS
10150                     if (lclTyp == TYP_BYREF && !varTypeIsGC(op1->gtType))
10151                     {
10152                         op1->gtType = TYP_BYREF;
10153                     }
10154                     op1 = gtNewAssignNode(op2, op1);
10155                 }
10156
10157                 /* If insertLdloc is true, then we need to insert a ldloc following the
10158                    stloc.  This is done when converting a (dup, stloc) sequence into
10159                    a (stloc, ldloc) sequence. */
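                      /* For illustration only: C# such as "x = y = expr;" compiles to IL
                             <expr>; dup; stloc y; stloc x
                         and the importer handles the dup by importing the sequence as
                             <expr>; stloc y; ldloc y; stloc x
                         which is what appending the assignment and reloading the local below achieves. */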
10160
10161                 if (insertLdloc)
10162                 {
10163                     // From SPILL_APPEND
10164                     impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
10165
10166 #ifdef DEBUG
10167                     // From DONE_APPEND
10168                     impNoteLastILoffs();
10169 #endif
10170                     op1         = nullptr;
10171                     insertLdloc = false;
10172
10173                     impLoadVar(lclNum, opcodeOffs + sz + 1, tiRetVal);
10174                     break;
10175                 }
10176
10177                 goto SPILL_APPEND;
10178
10179             case CEE_LDLOCA:
10180                 lclNum = getU2LittleEndian(codeAddr);
10181                 goto LDLOCA;
10182
10183             case CEE_LDLOCA_S:
10184                 lclNum = getU1LittleEndian(codeAddr);
10185             LDLOCA:
10186                 JITDUMP(" %u", lclNum);
10187                 if (tiVerificationNeeded)
10188                 {
10189                     Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
10190                     Verify(info.compInitMem, "initLocals not set");
10191                 }
10192
10193                 if (compIsForInlining())
10194                 {
10195                     // Get the local type
10196                     lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
10197
10198                     /* Have we allocated a temp for this local? */
10199
10200                     lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp"));
10201
10202                     op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum));
10203
10204                     goto _PUSH_ADRVAR;
10205                 }
10206
10207                 lclNum += numArgs;
10208                 assertImp(lclNum < info.compLocalsCount);
10209                 goto ADRVAR;
10210
10211             case CEE_LDARGA:
10212                 lclNum = getU2LittleEndian(codeAddr);
10213                 goto LDARGA;
10214
10215             case CEE_LDARGA_S:
10216                 lclNum = getU1LittleEndian(codeAddr);
10217             LDARGA:
10218                 JITDUMP(" %u", lclNum);
10219                 Verify(lclNum < info.compILargsCount, "bad arg num");
10220
10221                 if (compIsForInlining())
10222                 {
10223                     // In IL, LDARGA(_S) is used to load the byref managed pointer of a struct argument,
10224                     // followed by a ldfld to load the field.
10225
10226                     op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
10227                     if (op1->gtOper != GT_LCL_VAR)
10228                     {
10229                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR);
10230                         return;
10231                     }
10232
10233                     assert(op1->gtOper == GT_LCL_VAR);
10234
10235                     goto _PUSH_ADRVAR;
10236                 }
10237
10238                 lclNum = compMapILargNum(lclNum); // account for possible hidden param
10239                 assertImp(lclNum < numArgs);
10240
10241                 if (lclNum == info.compThisArg)
10242                 {
10243                     lclNum = lvaArg0Var;
10244                 }
10245
10246                 goto ADRVAR;
10247
10248             ADRVAR:
10249
10250                 op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum), opcodeOffs + sz + 1);
10251
10252             _PUSH_ADRVAR:
10253                 assert(op1->gtOper == GT_LCL_VAR);
10254
10255                 /* Note that this is supposed to create the transient type "*"
10256                    which may be used as a TYP_I_IMPL. However we catch places
10257                    where it is used as a TYP_I_IMPL and change the node if needed.
10258                    Thus we are pessimistic and may report byrefs in the GC info
10259                    where it was not absolutely needed, but it is safer this way.
10260                  */
10261                 op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10262
10263                 // &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does
10264                 assert((op1->gtFlags & GTF_GLOB_REF) == 0);
10265
10266                 tiRetVal = lvaTable[lclNum].lvVerTypeInfo;
10267                 if (tiVerificationNeeded)
10268                 {
10269                     // Don't allow taking address of uninit this ptr.
10270                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
10271                     {
10272                         Verify(!tiRetVal.IsThisPtr(), "address of uninit this ptr");
10273                     }
10274
10275                     if (!tiRetVal.IsByRef())
10276                     {
10277                         tiRetVal.MakeByRef();
10278                     }
10279                     else
10280                     {
10281                         Verify(false, "byref to byref");
10282                     }
10283                 }
10284
10285                 impPushOnStack(op1, tiRetVal);
10286                 break;
10287
10288             case CEE_ARGLIST:
10289
10290                 if (!info.compIsVarArgs)
10291                 {
10292                     BADCODE("arglist in non-vararg method");
10293                 }
10294
10295                 if (tiVerificationNeeded)
10296                 {
10297                     tiRetVal = typeInfo(TI_STRUCT, impGetRuntimeArgumentHandle());
10298                 }
10299                 assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG);
10300
10301                 /* The ARGLIST cookie is a hidden 'last' parameter, we have already
10302                    adjusted the arg count because this is like fetching the last param */
10303                 assertImp(0 < numArgs);
10304                 assert(lvaTable[lvaVarargsHandleArg].lvAddrExposed);
10305                 lclNum = lvaVarargsHandleArg;
10306                 op1    = gtNewLclvNode(lclNum, TYP_I_IMPL, opcodeOffs + sz + 1);
10307                 op1    = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10308                 impPushOnStack(op1, tiRetVal);
10309                 break;
10310
10311             case CEE_ENDFINALLY:
10312
10313                 if (compIsForInlining())
10314                 {
10315                     assert(!"Shouldn't have exception handlers in the inliner!");
10316                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY);
10317                     return;
10318                 }
10319
10320                 if (verCurrentState.esStackDepth > 0)
10321                 {
10322                     impEvalSideEffects();
10323                 }
10324
10325                 if (info.compXcptnsCount == 0)
10326                 {
10327                     BADCODE("endfinally outside finally");
10328                 }
10329
10330                 assert(verCurrentState.esStackDepth == 0);
10331
10332                 op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr);
10333                 goto APPEND;
10334
10335             case CEE_ENDFILTER:
10336
10337                 if (compIsForInlining())
10338                 {
10339                     assert(!"Shouldn't have exception handlers in the inliner!");
10340                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER);
10341                     return;
10342                 }
10343
10344                 block->bbSetRunRarely(); // filters are rare
10345
10346                 if (info.compXcptnsCount == 0)
10347                 {
10348                     BADCODE("endfilter outside filter");
10349                 }
10350
10351                 if (tiVerificationNeeded)
10352                 {
10353                     Verify(impStackTop().seTypeInfo.IsType(TI_INT), "bad endfilt arg");
10354                 }
10355
10356                 op1 = impPopStack().val;
10357                 assertImp(op1->gtType == TYP_INT);
10358                 if (!bbInFilterILRange(block))
10359                 {
10360                     BADCODE("EndFilter outside a filter handler");
10361                 }
10362
10363                 /* Mark current bb as end of filter */
10364
10365                 assert(compCurBB->bbFlags & BBF_DONT_REMOVE);
10366                 assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET);
10367
10368                 /* Mark catch handler as successor */
10369
10370                 op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1);
10371                 if (verCurrentState.esStackDepth != 0)
10372                 {
10373                     verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__)
10374                                                 DEBUGARG(__LINE__));
10375                 }
10376                 goto APPEND;
10377
10378             case CEE_RET:
10379                 prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it
10380             RET:
10381                 if (!impReturnInstruction(block, prefixFlags, opcode))
10382                 {
10383                     return; // abort
10384                 }
10385                 else
10386                 {
10387                     break;
10388                 }
10389
10390             case CEE_JMP:
10391
10392                 assert(!compIsForInlining());
10393
10394                 if (tiVerificationNeeded)
10395                 {
10396                     Verify(false, "Invalid opcode: CEE_JMP");
10397                 }
10398
10399                 if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex())
10400                 {
10401                     /* CEE_JMP does not make sense in some "protected" regions. */
10402
10403                     BADCODE("Jmp not allowed in protected region");
10404                 }
10405
10406                 if (verCurrentState.esStackDepth != 0)
10407                 {
10408                     BADCODE("Stack must be empty after CEE_JMPs");
10409                 }
10410
10411                 _impResolveToken(CORINFO_TOKENKIND_Method);
10412
10413                 JITDUMP(" %08X", resolvedToken.token);
10414
10415                 /* The signature of the target has to be identical to ours.
10416                    At least check that argCnt and returnType match */
10417
10418                 eeGetMethodSig(resolvedToken.hMethod, &sig);
10419                 if (sig.numArgs != info.compMethodInfo->args.numArgs ||
10420                     sig.retType != info.compMethodInfo->args.retType ||
10421                     sig.callConv != info.compMethodInfo->args.callConv)
10422                 {
10423                     BADCODE("Incompatible target for CEE_JMPs");
10424                 }
10425
10426 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARMARCH_)
10427
10428                 op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod);
10429
10430                 /* Mark the basic block as being a JUMP instead of RETURN */
10431
10432                 block->bbFlags |= BBF_HAS_JMP;
10433
10434                 /* Set this flag to make sure register arguments have a location assigned
10435                  * even if we don't use them inside the method */
10436
10437                 compJmpOpUsed = true;
10438
10439                 fgNoStructPromotion = true;
10440
10441                 goto APPEND;
10442
10443 #else // !_TARGET_XARCH_ && !_TARGET_ARMARCH_
10444
10445                 // Import this just like a series of LDARGs + tail. + call + ret
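                      // For illustration only (Target is a hypothetical method): a two-argument method
                      // whose body is "jmp void Target(int32, int32)" is imported below as if its IL were
                      //     ldarg.0
                      //     ldarg.1
                      //     tail. call void Target(int32, int32)
                      //     ret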
10446
10447                 if (info.compIsVarArgs)
10448                 {
10449                     // For now we don't implement true tail calls, so this breaks varargs.
10450                     // So warn the user instead of generating bad code.
10451                     // This is a semi-temporary workaround for DevDiv 173860, until we can properly
10452                     // implement true tail calls.
10453                     IMPL_LIMITATION("varargs + CEE_JMP doesn't work yet");
10454                 }
10455
10456                 // First load up the arguments (0 - N)
10457                 for (unsigned argNum = 0; argNum < info.compILargsCount; argNum++)
10458                 {
10459                     impLoadArg(argNum, opcodeOffs + sz + 1);
10460                 }
10461
10462                 // Now generate the tail call
10463                 noway_assert(prefixFlags == 0);
10464                 prefixFlags = PREFIX_TAILCALL_EXPLICIT;
10465                 opcode      = CEE_CALL;
10466
10467                 eeGetCallInfo(&resolvedToken, NULL,
10468                               combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS), &callInfo);
10469
10470                 // All calls and delegates need a security callout.
10471                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
10472
10473                 callTyp = impImportCall(CEE_CALL, &resolvedToken, NULL, NULL, PREFIX_TAILCALL_EXPLICIT, &callInfo,
10474                                         opcodeOffs);
10475
10476                 // And finish with the ret
10477                 goto RET;
10478
10479 #endif // _TARGET_XARCH_ || _TARGET_ARMARCH_
10480
10481             case CEE_LDELEMA:
10482                 assertImp(sz == sizeof(unsigned));
10483
10484                 _impResolveToken(CORINFO_TOKENKIND_Class);
10485
10486                 JITDUMP(" %08X", resolvedToken.token);
10487
10488                 ldelemClsHnd = resolvedToken.hClass;
10489
10490                 if (tiVerificationNeeded)
10491                 {
10492                     typeInfo tiArray = impStackTop(1).seTypeInfo;
10493                     typeInfo tiIndex = impStackTop().seTypeInfo;
10494
10495                     // As per ECMA, the specified 'index' can be either an int32 or a native int.
10496                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10497
10498                     typeInfo arrayElemType = verMakeTypeInfo(ldelemClsHnd);
10499                     Verify(tiArray.IsNullObjRef() ||
10500                                typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElemType),
10501                            "bad array");
10502
10503                     tiRetVal = arrayElemType;
10504                     tiRetVal.MakeByRef();
10505                     if (prefixFlags & PREFIX_READONLY)
10506                     {
10507                         tiRetVal.SetIsReadonlyByRef();
10508                     }
10509
10510                     // an array interior pointer is always in the heap
10511                     tiRetVal.SetIsPermanentHomeByRef();
10512                 }
10513
10514                 // If it's a value class array we just do a simple address-of
10515                 if (eeIsValueClass(ldelemClsHnd))
10516                 {
10517                     CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd);
10518                     if (cit == CORINFO_TYPE_UNDEF)
10519                     {
10520                         lclTyp = TYP_STRUCT;
10521                     }
10522                     else
10523                     {
10524                         lclTyp = JITtype2varType(cit);
10525                     }
10526                     goto ARR_LD_POST_VERIFY;
10527                 }
10528
10529                 // Similarly, if it's a readonly access, we can do a simple address-of
10530                 // without doing a runtime type-check
10531                 if (prefixFlags & PREFIX_READONLY)
10532                 {
10533                     lclTyp = TYP_REF;
10534                     goto ARR_LD_POST_VERIFY;
10535                 }
10536
10537                 // Otherwise we need the full helper function with run-time type check
10538                 op1 = impTokenToHandle(&resolvedToken);
10539                 if (op1 == nullptr)
10540                 { // compDonotInline()
10541                     return;
10542                 }
10543
10544                 args = gtNewArgList(op1);                      // Type
10545                 args = gtNewListNode(impPopStack().val, args); // index
10546                 args = gtNewListNode(impPopStack().val, args); // array
10547                 op1  = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, GTF_EXCEPT, args);
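                      // In effect (illustrative only), the element address is computed as
                      //     byref addr = CORINFO_HELP_LDELEMA_REF(array, index, typeHandle)
                      // with the run-time type check performed by the helper.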
10548
10549                 impPushOnStack(op1, tiRetVal);
10550                 break;
10551
10552             // ldelem for reference and value types
10553             case CEE_LDELEM:
10554                 assertImp(sz == sizeof(unsigned));
10555
10556                 _impResolveToken(CORINFO_TOKENKIND_Class);
10557
10558                 JITDUMP(" %08X", resolvedToken.token);
10559
10560                 ldelemClsHnd = resolvedToken.hClass;
10561
10562                 if (tiVerificationNeeded)
10563                 {
10564                     typeInfo tiArray = impStackTop(1).seTypeInfo;
10565                     typeInfo tiIndex = impStackTop().seTypeInfo;
10566
10567                     // As per ECMA, the specified 'index' can be either an int32 or a native int.
10568                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10569                     tiRetVal = verMakeTypeInfo(ldelemClsHnd);
10570
10571                     Verify(tiArray.IsNullObjRef() || tiCompatibleWith(verGetArrayElemType(tiArray), tiRetVal, false),
10572                            "type of array incompatible with type operand");
10573                     tiRetVal.NormaliseForStack();
10574                 }
10575
10576                 // If it's a reference type or generic variable type
10577                 // then just generate code as though it's a ldelem.ref instruction
10578                 if (!eeIsValueClass(ldelemClsHnd))
10579                 {
10580                     lclTyp = TYP_REF;
10581                     opcode = CEE_LDELEM_REF;
10582                 }
10583                 else
10584                 {
10585                     CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd);
10586                     lclTyp             = JITtype2varType(jitTyp);
10587                     tiRetVal           = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct
10588                     tiRetVal.NormaliseForStack();
10589                 }
10590                 goto ARR_LD_POST_VERIFY;
10591
10592             case CEE_LDELEM_I1:
10593                 lclTyp = TYP_BYTE;
10594                 goto ARR_LD;
10595             case CEE_LDELEM_I2:
10596                 lclTyp = TYP_SHORT;
10597                 goto ARR_LD;
10598             case CEE_LDELEM_I:
10599                 lclTyp = TYP_I_IMPL;
10600                 goto ARR_LD;
10601
10602             // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter
10603             // and treating it as TYP_INT avoids other asserts.
10604             case CEE_LDELEM_U4:
10605                 lclTyp = TYP_INT;
10606                 goto ARR_LD;
10607
10608             case CEE_LDELEM_I4:
10609                 lclTyp = TYP_INT;
10610                 goto ARR_LD;
10611             case CEE_LDELEM_I8:
10612                 lclTyp = TYP_LONG;
10613                 goto ARR_LD;
10614             case CEE_LDELEM_REF:
10615                 lclTyp = TYP_REF;
10616                 goto ARR_LD;
10617             case CEE_LDELEM_R4:
10618                 lclTyp = TYP_FLOAT;
10619                 goto ARR_LD;
10620             case CEE_LDELEM_R8:
10621                 lclTyp = TYP_DOUBLE;
10622                 goto ARR_LD;
10623             case CEE_LDELEM_U1:
10624                 lclTyp = TYP_UBYTE;
10625                 goto ARR_LD;
10626             case CEE_LDELEM_U2:
10627                 lclTyp = TYP_CHAR;
10628                 goto ARR_LD;
10629
10630             ARR_LD:
10631
10632                 if (tiVerificationNeeded)
10633                 {
10634                     typeInfo tiArray = impStackTop(1).seTypeInfo;
10635                     typeInfo tiIndex = impStackTop().seTypeInfo;
10636
10637                     // As per ECMA, the specified 'index' can be either an int32 or a native int.
10638                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10639                     if (tiArray.IsNullObjRef())
10640                     {
10641                         if (lclTyp == TYP_REF)
10642                         { // we will say a deref of a null array yields a null ref
10643                             tiRetVal = typeInfo(TI_NULL);
10644                         }
10645                         else
10646                         {
10647                             tiRetVal = typeInfo(lclTyp);
10648                         }
10649                     }
10650                     else
10651                     {
10652                         tiRetVal             = verGetArrayElemType(tiArray);
10653                         typeInfo arrayElemTi = typeInfo(lclTyp);
10654 #ifdef _TARGET_64BIT_
10655                         if (opcode == CEE_LDELEM_I)
10656                         {
10657                             arrayElemTi = typeInfo::nativeInt();
10658                         }
10659
10660                         if (lclTyp != TYP_REF && lclTyp != TYP_STRUCT)
10661                         {
10662                             Verify(typeInfo::AreEquivalent(tiRetVal, arrayElemTi), "bad array");
10663                         }
10664                         else
10665 #endif // _TARGET_64BIT_
10666                         {
10667                             Verify(tiRetVal.IsType(arrayElemTi.GetType()), "bad array");
10668                         }
10669                     }
10670                     tiRetVal.NormaliseForStack();
10671                 }
10672             ARR_LD_POST_VERIFY:
10673
10674                 /* Pull the index value and array address */
10675                 op2 = impPopStack().val;
10676                 op1 = impPopStack().val;
10677                 assertImp(op1->gtType == TYP_REF);
10678
10679                 /* Check for null pointer - in the inliner case we simply abort */
10680
10681                 if (compIsForInlining())
10682                 {
10683                     if (op1->gtOper == GT_CNS_INT)
10684                     {
10685                         compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM);
10686                         return;
10687                     }
10688                 }
10689
10690                 op1 = impCheckForNullPointer(op1);
10691
10692                 /* Mark the block as containing an index expression */
10693
10694                 if (op1->gtOper == GT_LCL_VAR)
10695                 {
10696                     if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD)
10697                     {
10698                         block->bbFlags |= BBF_HAS_IDX_LEN;
10699                         optMethodFlags |= OMF_HAS_ARRAYREF;
10700                     }
10701                 }
10702
10703                 /* Create the index node and push it on the stack */
10704
10705                 op1 = gtNewIndexRef(lclTyp, op1, op2);
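                      /* For illustration only, the resulting tree for "arr[i]" with element type T is roughly:

                             GT_INDEX (type T)
                              /      \
                            arr       i

                         The bounds check and address arithmetic are expanded later during morph. */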
10706
10707                 ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT);
10708
10709                 if ((opcode == CEE_LDELEMA) || ldstruct ||
10710                     (ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd)))
10711                 {
10712                     assert(ldelemClsHnd != DUMMY_INIT(NULL));
10713
10714                     // remember the element size
10715                     if (lclTyp == TYP_REF)
10716                     {
10717                         op1->gtIndex.gtIndElemSize = sizeof(void*);
10718                     }
10719                     else
10720                     {
10721                         // If ldelemClsHnd is precisely a primitive type, use that; otherwise, preserve the struct type.
10722                         if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF)
10723                         {
10724                             op1->gtIndex.gtStructElemClass = ldelemClsHnd;
10725                         }
10726                         assert(lclTyp != TYP_STRUCT || op1->gtIndex.gtStructElemClass != nullptr);
10727                         if (lclTyp == TYP_STRUCT)
10728                         {
10729                             size                       = info.compCompHnd->getClassSize(ldelemClsHnd);
10730                             op1->gtIndex.gtIndElemSize = size;
10731                             op1->gtType                = lclTyp;
10732                         }
10733                     }
10734
10735                     if ((opcode == CEE_LDELEMA) || ldstruct)
10736                     {
10737                         // wrap it in a &
10738                         lclTyp = TYP_BYREF;
10739
10740                         op1 = gtNewOperNode(GT_ADDR, lclTyp, op1);
10741                     }
10742                     else
10743                     {
10744                         assert(lclTyp != TYP_STRUCT);
10745                     }
10746                 }
10747
10748                 if (ldstruct)
10749                 {
10750                     // Create an OBJ for the result
10751                     op1 = gtNewObjNode(ldelemClsHnd, op1);
10752                     op1->gtFlags |= GTF_EXCEPT;
10753                 }
10754                 impPushOnStack(op1, tiRetVal);
10755                 break;
10756
10757             // stelem for reference and value types
10758             case CEE_STELEM:
10759
10760                 assertImp(sz == sizeof(unsigned));
10761
10762                 _impResolveToken(CORINFO_TOKENKIND_Class);
10763
10764                 JITDUMP(" %08X", resolvedToken.token);
10765
10766                 stelemClsHnd = resolvedToken.hClass;
10767
10768                 if (tiVerificationNeeded)
10769                 {
10770                     typeInfo tiArray = impStackTop(2).seTypeInfo;
10771                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
10772                     typeInfo tiValue = impStackTop().seTypeInfo;
10773
10774                     // As per ECMA, the 'index' operand can be either int32 or native int.
10775                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10776                     typeInfo arrayElem = verMakeTypeInfo(stelemClsHnd);
10777
10778                     Verify(tiArray.IsNullObjRef() || tiCompatibleWith(arrayElem, verGetArrayElemType(tiArray), false),
10779                            "type operand incompatible with array element type");
10780                     arrayElem.NormaliseForStack();
10781                     Verify(tiCompatibleWith(tiValue, arrayElem, true), "value incompatible with type operand");
10782                 }
10783
10784                 // If it's a reference type just behave as though it's a stelem.ref instruction
10785                 if (!eeIsValueClass(stelemClsHnd))
10786                 {
10787                     goto STELEM_REF_POST_VERIFY;
10788                 }
10789
10790                 // Otherwise extract the type
10791                 {
10792                     CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd);
10793                     lclTyp             = JITtype2varType(jitTyp);
10794                     goto ARR_ST_POST_VERIFY;
10795                 }
10796
10797             case CEE_STELEM_REF:
10798
10799                 if (tiVerificationNeeded)
10800                 {
10801                     typeInfo tiArray = impStackTop(2).seTypeInfo;
10802                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
10803                     typeInfo tiValue = impStackTop().seTypeInfo;
10804
10805                     // As per ECMA, the 'index' operand can be either int32 or native int.
10806                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10807                     Verify(tiValue.IsObjRef(), "bad value");
10808
10809                     // We only check that it is an object reference; the helper does additional checks.
10810                     Verify(tiArray.IsNullObjRef() || verGetArrayElemType(tiArray).IsType(TI_REF), "bad array");
10811                 }
10812
10813                 arrayNodeTo      = impStackTop(2).val;
10814                 arrayNodeToIndex = impStackTop(1).val;
10815                 arrayNodeFrom    = impStackTop().val;
10816
10817                 //
10818                 // Note that it is not legal to optimize away CORINFO_HELP_ARRADDR_ST in a
10819                 // lot of cases because of covariance; i.e. foo[] can be cast to object[].
10820                 //
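                      // For example (C#, illustrative only):
                      //     object[] arr = new string[1];
                      //     arr[0] = new object();   // must throw ArrayTypeMismatchException
                      // so the covariance check performed by the helper cannot be elided in general.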
10821
10822                 // Check for assignment to the same array, i.e. arrLcl[i] = arrLcl[j]
10823                 // This does not need CORINFO_HELP_ARRADDR_ST
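                      // (This is safe because an element just loaded from the same, non-address-exposed
                      // array necessarily has a compatible element type, so the covariance check cannot fail.)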
10824
10825                 if (arrayNodeFrom->OperGet() == GT_INDEX && arrayNodeFrom->gtOp.gtOp1->gtOper == GT_LCL_VAR &&
10826                     arrayNodeTo->gtOper == GT_LCL_VAR &&
10827                     arrayNodeTo->gtLclVarCommon.gtLclNum == arrayNodeFrom->gtOp.gtOp1->gtLclVarCommon.gtLclNum &&
10828                     !lvaTable[arrayNodeTo->gtLclVarCommon.gtLclNum].lvAddrExposed)
10829                 {
10830                     lclTyp = TYP_REF;
10831                     goto ARR_ST_POST_VERIFY;
10832                 }
10833
10834                 // Check for assignment of NULL. This does not need CORINFO_HELP_ARRADDR_ST
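                      // (Storing null can never violate array covariance, so the helper is not needed.)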
10835
10836                 if (arrayNodeFrom->OperGet() == GT_CNS_INT)
10837                 {
10838                     assert(arrayNodeFrom->gtType == TYP_REF && arrayNodeFrom->gtIntCon.gtIconVal == 0);
10839
10840                     lclTyp = TYP_REF;
10841                     goto ARR_ST_POST_VERIFY;
10842                 }
10843
10844             STELEM_REF_POST_VERIFY:
10845
10846                 /* Call a helper function to do the assignment */
10847                 op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, 0, impPopList(3, &flags, nullptr));
10848
10849                 goto SPILL_APPEND;
10850
10851             case CEE_STELEM_I1:
10852                 lclTyp = TYP_BYTE;
10853                 goto ARR_ST;
10854             case CEE_STELEM_I2:
10855                 lclTyp = TYP_SHORT;
10856                 goto ARR_ST;
10857             case CEE_STELEM_I:
10858                 lclTyp = TYP_I_IMPL;
10859                 goto ARR_ST;
10860             case CEE_STELEM_I4:
10861                 lclTyp = TYP_INT;
10862                 goto ARR_ST;
10863             case CEE_STELEM_I8:
10864                 lclTyp = TYP_LONG;
10865                 goto ARR_ST;
10866             case CEE_STELEM_R4:
10867                 lclTyp = TYP_FLOAT;
10868                 goto ARR_ST;
10869             case CEE_STELEM_R8:
10870                 lclTyp = TYP_DOUBLE;
10871                 goto ARR_ST;
10872
10873             ARR_ST:
10874
10875                 if (tiVerificationNeeded)
10876                 {
10877                     typeInfo tiArray = impStackTop(2).seTypeInfo;
10878                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
10879                     typeInfo tiValue = impStackTop().seTypeInfo;
10880
10881                     // As per ECMA, the 'index' operand can be either int32 or native int.
10882                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10883                     typeInfo arrayElem = typeInfo(lclTyp);
10884 #ifdef _TARGET_64BIT_
10885                     if (opcode == CEE_STELEM_I)
10886                     {
10887                         arrayElem = typeInfo::nativeInt();
10888                     }
10889 #endif // _TARGET_64BIT_
10890                     Verify(tiArray.IsNullObjRef() || typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElem),
10891                            "bad array");
10892
10893                     Verify(tiCompatibleWith(NormaliseForStack(tiValue), arrayElem.NormaliseForStack(), true),
10894                            "bad value");
10895                 }
10896
10897             ARR_ST_POST_VERIFY:
10898                 /* The strict order of evaluation is LHS-operands, RHS-operands,
10899                    range-check, and then assignment. However, codegen currently
10900                    does the range-check before evaluating the RHS operands. So to
10901                    maintain strict ordering, we spill the stack. */
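                      // For example (C#, illustrative only): in "a[i] = M();" the call to M() must run
                      // before any range-check exception on a[i] is raised, so a value with side effects
                      // sitting on the stack is spilled before the range check is emitted.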
10902
10903                 if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT)
10904                 {
10905                     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
10906                                                    "Strict ordering of exceptions for Array store"));
10907                 }
10908
10909                 /* Pull the new value from the stack */
10910                 op2 = impPopStack().val;
10911
10912                 /* Pull the index value */
10913                 op1 = impPopStack().val;
10914
10915                 /* Pull the array address */
10916                 op3 = impPopStack().val;
10917
10918                 assertImp(op3->gtType == TYP_REF);
10919                 if (op2->IsVarAddr())
10920                 {
10921                     op2->gtType = TYP_I_IMPL;
10922                 }
10923
10924                 op3 = impCheckForNullPointer(op3);
10925
10926                 // Mark the block as containing an index expression
10927
10928                 if (op3->gtOper == GT_LCL_VAR)
10929                 {
10930                     if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD)
10931                     {
10932                         block->bbFlags |= BBF_HAS_IDX_LEN;
10933                         optMethodFlags |= OMF_HAS_ARRAYREF;
10934                     }
10935                 }
10936
10937                 /* Create the index node */
10938
10939                 op1 = gtNewIndexRef(lclTyp, op3, op1);
10940
10941                 /* Create the assignment node and append it */
10942
10943                 if (lclTyp == TYP_STRUCT)
10944                 {
10945                     assert(stelemClsHnd != DUMMY_INIT(NULL));
10946
10947                     op1->gtIndex.gtStructElemClass = stelemClsHnd;
10948                     op1->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(stelemClsHnd);
10949                 }
10950                 if (varTypeIsStruct(op1))
10951                 {
10952                     op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL);
10953                 }
10954                 else
10955                 {
10956                     op2 = impImplicitR4orR8Cast(op2, op1->TypeGet());
10957                     op1 = gtNewAssignNode(op1, op2);
10958                 }
10959
10960                 /* Mark the expression as containing an assignment */
10961
10962                 op1->gtFlags |= GTF_ASG;
10963
10964                 goto SPILL_APPEND;
10965
10966             case CEE_ADD:
10967                 oper = GT_ADD;
10968                 goto MATH_OP2;
10969
10970             case CEE_ADD_OVF:
10971                 uns = false;
10972                 goto ADD_OVF;
10973             case CEE_ADD_OVF_UN:
10974                 uns = true;
10975                 goto ADD_OVF;
10976
10977             ADD_OVF:
10978                 ovfl     = true;
10979                 callNode = false;
10980                 oper     = GT_ADD;
10981                 goto MATH_OP2_FLAGS;
10982
10983             case CEE_SUB:
10984                 oper = GT_SUB;
10985                 goto MATH_OP2;
10986
10987             case CEE_SUB_OVF:
10988                 uns = false;
10989                 goto SUB_OVF;
10990             case CEE_SUB_OVF_UN:
10991                 uns = true;
10992                 goto SUB_OVF;
10993
10994             SUB_OVF:
10995                 ovfl     = true;
10996                 callNode = false;
10997                 oper     = GT_SUB;
10998                 goto MATH_OP2_FLAGS;
10999
11000             case CEE_MUL:
11001                 oper = GT_MUL;
11002                 goto MATH_MAYBE_CALL_NO_OVF;
11003
11004             case CEE_MUL_OVF:
11005                 uns = false;
11006                 goto MUL_OVF;
11007             case CEE_MUL_OVF_UN:
11008                 uns = true;
11009                 goto MUL_OVF;
11010
11011             MUL_OVF:
11012                 ovfl = true;
11013                 oper = GT_MUL;
11014                 goto MATH_MAYBE_CALL_OVF;
11015
11016             // Other binary math operations
11017
11018             case CEE_DIV:
11019                 oper = GT_DIV;
11020                 goto MATH_MAYBE_CALL_NO_OVF;
11021
11022             case CEE_DIV_UN:
11023                 oper = GT_UDIV;
11024                 goto MATH_MAYBE_CALL_NO_OVF;
11025
11026             case CEE_REM:
11027                 oper = GT_MOD;
11028                 goto MATH_MAYBE_CALL_NO_OVF;
11029
11030             case CEE_REM_UN:
11031                 oper = GT_UMOD;
11032                 goto MATH_MAYBE_CALL_NO_OVF;
11033
11034             MATH_MAYBE_CALL_NO_OVF:
11035                 ovfl = false;
11036             MATH_MAYBE_CALL_OVF:
11037                 // The morpher has some complex logic about when to turn differently
11038                 // typed nodes on different platforms into helper calls. We
11039                 // need to either duplicate that logic here, or just
11040                 // pessimistically make all the nodes large enough to become
11041                 // call nodes.  Since call nodes aren't that much larger and
11042                 // these opcodes are infrequent enough, I chose the latter.
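                      // (For example, 64-bit division on 32-bit targets and floating-point remainder
                      // are typically morphed into helper calls later.)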
11043                 callNode = true;
11044                 goto MATH_OP2_FLAGS;
11045
11046             case CEE_AND:
11047                 oper = GT_AND;
11048                 goto MATH_OP2;
11049             case CEE_OR:
11050                 oper = GT_OR;
11051                 goto MATH_OP2;
11052             case CEE_XOR:
11053                 oper = GT_XOR;
11054                 goto MATH_OP2;
11055
11056             MATH_OP2: // For default values of 'ovfl' and 'callNode'
11057
11058                 ovfl     = false;
11059                 callNode = false;
11060
11061             MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set
11062
11063                 /* Pull two values and push back the result */
11064
11065                 if (tiVerificationNeeded)
11066                 {
11067                     const typeInfo& tiOp1 = impStackTop(1).seTypeInfo;
11068                     const typeInfo& tiOp2 = impStackTop().seTypeInfo;
11069
11070                     Verify(tiCompatibleWith(tiOp1, tiOp2, true), "different arg type");
11071                     if (oper == GT_ADD || oper == GT_DIV || oper == GT_SUB || oper == GT_MUL || oper == GT_MOD)
11072                     {
11073                         Verify(tiOp1.IsNumberType(), "not number");
11074                     }
11075                     else
11076                     {
11077                         Verify(tiOp1.IsIntegerType(), "not integer");
11078                     }
11079
11080                     Verify(!ovfl || tiOp1.IsIntegerType(), "not integer");
11081
11082                     tiRetVal = tiOp1;
11083
11084 #ifdef _TARGET_64BIT_
11085                     if (tiOp2.IsNativeIntType())
11086                     {
11087                         tiRetVal = tiOp2;
11088                     }
11089 #endif // _TARGET_64BIT_
11090                 }
11091
11092                 op2 = impPopStack().val;
11093                 op1 = impPopStack().val;
11094
11095 #if !CPU_HAS_FP_SUPPORT
11096                 if (varTypeIsFloating(op1->gtType))
11097                 {
11098                     callNode = true;
11099                 }
11100 #endif
11101                 /* Can't do arithmetic with references */
11102                 assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF);
11103
11104                 // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change it if it's a true byref, only
11105                 // if it is in the stack)
11106                 impBashVarAddrsToI(op1, op2);
11107
11108                 type = impGetByRefResultType(oper, uns, &op1, &op2);
11109
11110                 assert(!ovfl || !varTypeIsFloating(op1->gtType));
11111
11112                 /* Special case: "int+0", "int-0", "int*1", "int/1" */
11113
11114                 if (op2->gtOper == GT_CNS_INT)
11115                 {
11116                     if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) ||
11117                         (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV)))
11118
11119                     {
11120                         impPushOnStack(op1, tiRetVal);
11121                         break;
11122                     }
11123                 }
11124
11125 #if !FEATURE_X87_DOUBLES
11126                 // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand
11127                 //
11128                 if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))
11129                 {
11130                     if (op1->TypeGet() != type)
11131                     {
11132                         // We insert a cast of op1 to 'type'
11133                         op1 = gtNewCastNode(type, op1, type);
11134                     }
11135                     if (op2->TypeGet() != type)
11136                     {
11137                         // We insert a cast of op2 to 'type'
11138                         op2 = gtNewCastNode(type, op2, type);
11139                     }
11140                 }
11141 #endif // !FEATURE_X87_DOUBLES
11142
11143 #if SMALL_TREE_NODES
11144                 if (callNode)
11145                 {
11146                     /* These operators can later be transformed into 'GT_CALL' */
11147
11148                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]);
11149 #ifndef _TARGET_ARM_
11150                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]);
11151                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]);
11152                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]);
11153                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]);
11154 #endif
11155                     // It's tempting to use LargeOpOpcode() here, but this logic is *not* saying
11156                     // that we'll need to transform into a general large node, but rather specifically
11157                     // to a call: by doing it this way, things keep working if there are multiple sizes,
11158                     // and a CALL is no longer the largest.
11159                     // That said, as of now it *is* a large node, so we'll do this with an assert rather
11160                     // than an "if".
11161                     assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE);
11162                     op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true));
11163                 }
11164                 else
11165 #endif // SMALL_TREE_NODES
11166                 {
11167                     op1 = gtNewOperNode(oper, type, op1, op2);
11168                 }
11169
11170                 /* Special case: integer/long division may throw an exception */
11171
11172                 if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow())
11173                 {
11174                     op1->gtFlags |= GTF_EXCEPT;
11175                 }
11176
11177                 if (ovfl)
11178                 {
11179                     assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL);
11180                     if (ovflType != TYP_UNKNOWN)
11181                     {
11182                         op1->gtType = ovflType;
11183                     }
11184                     op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW);
11185                     if (uns)
11186                     {
11187                         op1->gtFlags |= GTF_UNSIGNED;
11188                     }
11189                 }
11190
11191                 impPushOnStack(op1, tiRetVal);
11192                 break;
11193
11194             case CEE_SHL:
11195                 oper = GT_LSH;
11196                 goto CEE_SH_OP2;
11197
11198             case CEE_SHR:
11199                 oper = GT_RSH;
11200                 goto CEE_SH_OP2;
11201             case CEE_SHR_UN:
11202                 oper = GT_RSZ;
11203                 goto CEE_SH_OP2;
11204
11205             CEE_SH_OP2:
11206                 if (tiVerificationNeeded)
11207                 {
11208                     const typeInfo& tiVal   = impStackTop(1).seTypeInfo;
11209                     const typeInfo& tiShift = impStackTop(0).seTypeInfo;
11210                     Verify(tiVal.IsIntegerType() && tiShift.IsType(TI_INT), "Bad shift args");
11211                     tiRetVal = tiVal;
11212                 }
11213                 op2 = impPopStack().val;
11214                 op1 = impPopStack().val; // operand to be shifted
11215                 impBashVarAddrsToI(op1, op2);
11216
11217                 type = genActualType(op1->TypeGet());
11218                 op1  = gtNewOperNode(oper, type, op1, op2);
11219
11220                 impPushOnStack(op1, tiRetVal);
11221                 break;
11222
11223             case CEE_NOT:
11224                 if (tiVerificationNeeded)
11225                 {
11226                     tiRetVal = impStackTop().seTypeInfo;
11227                     Verify(tiRetVal.IsIntegerType(), "bad int value");
11228                 }
11229
11230                 op1 = impPopStack().val;
11231                 impBashVarAddrsToI(op1, nullptr);
11232                 type = genActualType(op1->TypeGet());
11233                 impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal);
11234                 break;
11235
11236             case CEE_CKFINITE:
11237                 if (tiVerificationNeeded)
11238                 {
11239                     tiRetVal = impStackTop().seTypeInfo;
11240                     Verify(tiRetVal.IsType(TI_DOUBLE), "bad R value");
11241                 }
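                      // ckfinite leaves its operand on the stack but must throw if the value is a NaN
                      // or an infinity, hence the GTF_EXCEPT flag set on the node below.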
11242                 op1  = impPopStack().val;
11243                 type = op1->TypeGet();
11244                 op1  = gtNewOperNode(GT_CKFINITE, type, op1);
11245                 op1->gtFlags |= GTF_EXCEPT;
11246
11247                 impPushOnStack(op1, tiRetVal);
11248                 break;
11249
11250             case CEE_LEAVE:
11251
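                      // The operand of leave / leave.s is a signed offset relative to the first byte
                      // following the operand itself, hence the '+ sizeof(__int32)' (or '+ sizeof(__int8)'
                      // for leave.s) in the target computation below.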
11252                 val     = getI4LittleEndian(codeAddr); // jump distance
11253                 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val);
11254                 goto LEAVE;
11255
11256             case CEE_LEAVE_S:
11257                 val     = getI1LittleEndian(codeAddr); // jump distance
11258                 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val);
11259
11260             LEAVE:
11261
11262                 if (compIsForInlining())
11263                 {
11264                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE);
11265                     return;
11266                 }
11267
11268                 JITDUMP(" %04X", jmpAddr);
11269                 if (block->bbJumpKind != BBJ_LEAVE)
11270                 {
11271                     impResetLeaveBlock(block, jmpAddr);
11272                 }
11273
11274                 assert(jmpAddr == block->bbJumpDest->bbCodeOffs);
11275                 impImportLeave(block);
11276                 impNoteBranchOffs();
11277
11278                 break;
11279
11280             case CEE_BR:
11281             case CEE_BR_S:
11282                 jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
11283
11284                 if (compIsForInlining() && jmpDist == 0)
11285                 {
11286                     break; /* NOP */
11287                 }
11288
11289                 impNoteBranchOffs();
11290                 break;
11291
11292             case CEE_BRTRUE:
11293             case CEE_BRTRUE_S:
11294             case CEE_BRFALSE:
11295             case CEE_BRFALSE_S:
11296
11297                 /* Pop the comparand (now there's a neat term) from the stack */
11298                 if (tiVerificationNeeded)
11299                 {
11300                     typeInfo& tiVal = impStackTop().seTypeInfo;
11301                     Verify(tiVal.IsObjRef() || tiVal.IsByRef() || tiVal.IsIntegerType() || tiVal.IsMethod(),
11302                            "bad value");
11303                 }
11304
11305                 op1  = impPopStack().val;
11306                 type = op1->TypeGet();
11307
11308                 // brfalse and brtrue are only allowed on I4, refs, and byrefs.
11309                 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11310                 {
11311                     block->bbJumpKind = BBJ_NONE;
11312
11313                     if (op1->gtFlags & GTF_GLOB_EFFECT)
11314                     {
11315                         op1 = gtUnusedValNode(op1);
11316                         goto SPILL_APPEND;
11317                     }
11318                     else
11319                     {
11320                         break;
11321                     }
11322                 }
11323
11324                 if (op1->OperIsCompare())
11325                 {
11326                     if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S)
11327                     {
11328                         // Flip the sense of the compare
11329
11330                         op1 = gtReverseCond(op1);
11331                     }
11332                 }
11333                 else
11334                 {
11335                     /* We'll compare against an equally-sized integer 0 */
11336                     /* For small types, we always compare against int   */
11337                     op2 = gtNewZeroConNode(genActualType(op1->gtType));
11338
11339                     /* Create the comparison operator and try to fold it */
11340
11341                     oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? GT_NE : GT_EQ;
11342                     op1  = gtNewOperNode(oper, TYP_INT, op1, op2);
11343                 }
11344
11345             // fall through
11346
11347             COND_JUMP:
11348
11349                 /* Fold comparison if we can */
11350
11351                 op1 = gtFoldExpr(op1);
11352
11353                 /* Try to fold the really simple cases like 'iconst *, ifne/ifeq'*/
11354                 /* Don't make any blocks unreachable in import only mode */
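                      /* e.g. (IL, illustrative): 'ldc.i4.1; brtrue L' folds to an unconditional branch
                         to L, while 'ldc.i4.0; brtrue L' simply falls through. */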
11355
11356                 if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly())
11357                 {
11358                     /* gtFoldExpr() should prevent this as we don't want to make any blocks
11359                        unreachable under compDbgCode */
11360                     assert(!opts.compDbgCode);
11361
11362                     BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->gtIntCon.gtIconVal ? BBJ_ALWAYS : BBJ_NONE);
11363                     assertImp((block->bbJumpKind == BBJ_COND)            // normal case
11364                               || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the
11365                                                                          // block for the second time
11366
11367                     block->bbJumpKind = foldedJumpKind;
11368 #ifdef DEBUG
11369                     if (verbose)
11370                     {
11371                         if (op1->gtIntCon.gtIconVal)
11372                         {
11373                             printf("\nThe conditional jump becomes an unconditional jump to BB%02u\n",
11374                                    block->bbJumpDest->bbNum);
11375                         }
11376                         else
11377                         {
11378                             printf("\nThe block falls through into the next BB%02u\n", block->bbNext->bbNum);
11379                         }
11380                     }
11381 #endif
11382                     break;
11383                 }
11384
11385                 op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1);
11386
11387                 /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt'
11388                    in impImportBlock(block). For correct line numbers, spill stack. */
11389
11390                 if (opts.compDbgCode && impCurStmtOffs != BAD_IL_OFFSET)
11391                 {
11392                     impSpillStackEnsure(true);
11393                 }
11394
11395                 goto SPILL_APPEND;
11396
11397             case CEE_CEQ:
11398                 oper = GT_EQ;
11399                 uns  = false;
11400                 goto CMP_2_OPs;
11401             case CEE_CGT_UN:
11402                 oper = GT_GT;
11403                 uns  = true;
11404                 goto CMP_2_OPs;
11405             case CEE_CGT:
11406                 oper = GT_GT;
11407                 uns  = false;
11408                 goto CMP_2_OPs;
11409             case CEE_CLT_UN:
11410                 oper = GT_LT;
11411                 uns  = true;
11412                 goto CMP_2_OPs;
11413             case CEE_CLT:
11414                 oper = GT_LT;
11415                 uns  = false;
11416                 goto CMP_2_OPs;
11417
11418             CMP_2_OPs:
11419                 if (tiVerificationNeeded)
11420                 {
11421                     verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
11422                     tiRetVal = typeInfo(TI_INT);
11423                 }
11424
11425                 op2 = impPopStack().val;
11426                 op1 = impPopStack().val;
11427
11428 #ifdef _TARGET_64BIT_
11429                 if (varTypeIsI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_INT))
11430                 {
11431                     op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11432                 }
11433                 else if (varTypeIsI(op2->TypeGet()) && (genActualType(op1->TypeGet()) == TYP_INT))
11434                 {
11435                     op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11436                 }
11437 #endif // _TARGET_64BIT_
11438
11439                 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
11440                           varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
11441                           varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
11442
11443                 /* Create the comparison node */
11444
11445                 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
11446
11447                 /* TODO: setting both flags when only one is appropriate */
11448                 if (opcode == CEE_CGT_UN || opcode == CEE_CLT_UN)
11449                 {
11450                     op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED;
11451                 }
11452
11453                 impPushOnStack(op1, tiRetVal);
11454                 break;
11455
11456             case CEE_BEQ_S:
11457             case CEE_BEQ:
11458                 oper = GT_EQ;
11459                 goto CMP_2_OPs_AND_BR;
11460
11461             case CEE_BGE_S:
11462             case CEE_BGE:
11463                 oper = GT_GE;
11464                 goto CMP_2_OPs_AND_BR;
11465
11466             case CEE_BGE_UN_S:
11467             case CEE_BGE_UN:
11468                 oper = GT_GE;
11469                 goto CMP_2_OPs_AND_BR_UN;
11470
11471             case CEE_BGT_S:
11472             case CEE_BGT:
11473                 oper = GT_GT;
11474                 goto CMP_2_OPs_AND_BR;
11475
11476             case CEE_BGT_UN_S:
11477             case CEE_BGT_UN:
11478                 oper = GT_GT;
11479                 goto CMP_2_OPs_AND_BR_UN;
11480
11481             case CEE_BLE_S:
11482             case CEE_BLE:
11483                 oper = GT_LE;
11484                 goto CMP_2_OPs_AND_BR;
11485
11486             case CEE_BLE_UN_S:
11487             case CEE_BLE_UN:
11488                 oper = GT_LE;
11489                 goto CMP_2_OPs_AND_BR_UN;
11490
11491             case CEE_BLT_S:
11492             case CEE_BLT:
11493                 oper = GT_LT;
11494                 goto CMP_2_OPs_AND_BR;
11495
11496             case CEE_BLT_UN_S:
11497             case CEE_BLT_UN:
11498                 oper = GT_LT;
11499                 goto CMP_2_OPs_AND_BR_UN;
11500
11501             case CEE_BNE_UN_S:
11502             case CEE_BNE_UN:
11503                 oper = GT_NE;
11504                 goto CMP_2_OPs_AND_BR_UN;
11505
11506             CMP_2_OPs_AND_BR_UN:
11507                 uns       = true;
11508                 unordered = true;
11509                 goto CMP_2_OPs_AND_BR_ALL;
11510             CMP_2_OPs_AND_BR:
11511                 uns       = false;
11512                 unordered = false;
11513                 goto CMP_2_OPs_AND_BR_ALL;
11514             CMP_2_OPs_AND_BR_ALL:
11515
11516                 if (tiVerificationNeeded)
11517                 {
11518                     verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
11519                 }
11520
11521                 /* Pull two values */
11522                 op2 = impPopStack().val;
11523                 op1 = impPopStack().val;
11524
11525 #ifdef _TARGET_64BIT_
11526                 if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT))
11527                 {
11528                     op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11529                 }
11530                 else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT))
11531                 {
11532                     op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11533                 }
11534 #endif // _TARGET_64BIT_
11535
11536                 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
11537                           varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
11538                           varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
11539
11540                 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11541                 {
11542                     block->bbJumpKind = BBJ_NONE;
11543
11544                     if (op1->gtFlags & GTF_GLOB_EFFECT)
11545                     {
11546                         impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11547                                                        "Branch to next Optimization, op1 side effect"));
11548                         impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
11549                     }
11550                     if (op2->gtFlags & GTF_GLOB_EFFECT)
11551                     {
11552                         impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11553                                                        "Branch to next Optimization, op2 side effect"));
11554                         impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
11555                     }
11556
11557 #ifdef DEBUG
11558                     if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT)
11559                     {
11560                         impNoteLastILoffs();
11561                     }
11562 #endif
11563                     break;
11564                 }
11565 #if !FEATURE_X87_DOUBLES
11566                 // We can generate a compare of differently sized floating point op1 and op2;
11567                 // if so, we insert a cast so the operand types match.
11568                 //
11569                 if (varTypeIsFloating(op1->TypeGet()))
11570                 {
11571                     if (op1->TypeGet() != op2->TypeGet())
11572                     {
11573                         assert(varTypeIsFloating(op2->TypeGet()));
11574
11575                         // say op1=double, op2=float. To avoid loss of precision
11576                         // while comparing, op2 is converted to double and double
11577                         // comparison is done.
11578                         if (op1->TypeGet() == TYP_DOUBLE)
11579                         {
11580                             // We insert a cast of op2 to TYP_DOUBLE
11581                             op2 = gtNewCastNode(TYP_DOUBLE, op2, TYP_DOUBLE);
11582                         }
11583                         else if (op2->TypeGet() == TYP_DOUBLE)
11584                         {
11585                             // We insert a cast of op1 to TYP_DOUBLE
11586                             op1 = gtNewCastNode(TYP_DOUBLE, op1, TYP_DOUBLE);
11587                         }
11588                     }
11589                 }
11590 #endif // !FEATURE_X87_DOUBLES
11591
11592                 /* Create and append the operator */
11593
11594                 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
11595
11596                 if (uns)
11597                 {
11598                     op1->gtFlags |= GTF_UNSIGNED;
11599                 }
11600
11601                 if (unordered)
11602                 {
11603                     op1->gtFlags |= GTF_RELOP_NAN_UN;
11604                 }
11605
11606                 goto COND_JUMP;
11607
11608             case CEE_SWITCH:
11609                 assert(!compIsForInlining());
11610
11611                 if (tiVerificationNeeded)
11612                 {
11613                     Verify(impStackTop().seTypeInfo.IsType(TI_INT), "Bad switch val");
11614                 }
11615                 /* Pop the switch value off the stack */
11616                 op1 = impPopStack().val;
11617                 assertImp(genActualTypeIsIntOrI(op1->TypeGet()));
11618
11619 #ifdef _TARGET_64BIT_
11620                 // Widen 'op1' on 64-bit targets
11621                 if (op1->TypeGet() != TYP_I_IMPL)
11622                 {
11623                     if (op1->OperGet() == GT_CNS_INT)
11624                     {
11625                         op1->gtType = TYP_I_IMPL;
11626                     }
11627                     else
11628                     {
11629                         op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
11630                     }
11631                 }
11632 #endif // _TARGET_64BIT_
11633                 assert(genActualType(op1->TypeGet()) == TYP_I_IMPL);
11634
11635                 /* We can create a switch node */
11636
11637                 op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1);
11638
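                      // The IL 'switch' operand is a 4-byte case count followed by that many 4-byte
                      // relative branch targets; the '4 + val * 4' below skips over that table.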
11639                 val = (int)getU4LittleEndian(codeAddr);
11640                 codeAddr += 4 + val * 4; // skip over the switch-table
11641
11642                 goto SPILL_APPEND;
11643
11644             /************************** Casting OPCODES ***************************/
11645
11646             case CEE_CONV_OVF_I1:
11647                 lclTyp = TYP_BYTE;
11648                 goto CONV_OVF;
11649             case CEE_CONV_OVF_I2:
11650                 lclTyp = TYP_SHORT;
11651                 goto CONV_OVF;
11652             case CEE_CONV_OVF_I:
11653                 lclTyp = TYP_I_IMPL;
11654                 goto CONV_OVF;
11655             case CEE_CONV_OVF_I4:
11656                 lclTyp = TYP_INT;
11657                 goto CONV_OVF;
11658             case CEE_CONV_OVF_I8:
11659                 lclTyp = TYP_LONG;
11660                 goto CONV_OVF;
11661
11662             case CEE_CONV_OVF_U1:
11663                 lclTyp = TYP_UBYTE;
11664                 goto CONV_OVF;
11665             case CEE_CONV_OVF_U2:
11666                 lclTyp = TYP_CHAR;
11667                 goto CONV_OVF;
11668             case CEE_CONV_OVF_U:
11669                 lclTyp = TYP_U_IMPL;
11670                 goto CONV_OVF;
11671             case CEE_CONV_OVF_U4:
11672                 lclTyp = TYP_UINT;
11673                 goto CONV_OVF;
11674             case CEE_CONV_OVF_U8:
11675                 lclTyp = TYP_ULONG;
11676                 goto CONV_OVF;
11677
11678             case CEE_CONV_OVF_I1_UN:
11679                 lclTyp = TYP_BYTE;
11680                 goto CONV_OVF_UN;
11681             case CEE_CONV_OVF_I2_UN:
11682                 lclTyp = TYP_SHORT;
11683                 goto CONV_OVF_UN;
11684             case CEE_CONV_OVF_I_UN:
11685                 lclTyp = TYP_I_IMPL;
11686                 goto CONV_OVF_UN;
11687             case CEE_CONV_OVF_I4_UN:
11688                 lclTyp = TYP_INT;
11689                 goto CONV_OVF_UN;
11690             case CEE_CONV_OVF_I8_UN:
11691                 lclTyp = TYP_LONG;
11692                 goto CONV_OVF_UN;
11693
11694             case CEE_CONV_OVF_U1_UN:
11695                 lclTyp = TYP_UBYTE;
11696                 goto CONV_OVF_UN;
11697             case CEE_CONV_OVF_U2_UN:
11698                 lclTyp = TYP_CHAR;
11699                 goto CONV_OVF_UN;
11700             case CEE_CONV_OVF_U_UN:
11701                 lclTyp = TYP_U_IMPL;
11702                 goto CONV_OVF_UN;
11703             case CEE_CONV_OVF_U4_UN:
11704                 lclTyp = TYP_UINT;
11705                 goto CONV_OVF_UN;
11706             case CEE_CONV_OVF_U8_UN:
11707                 lclTyp = TYP_ULONG;
11708                 goto CONV_OVF_UN;
11709
11710             CONV_OVF_UN:
11711                 uns = true;
11712                 goto CONV_OVF_COMMON;
11713             CONV_OVF:
11714                 uns = false;
11715                 goto CONV_OVF_COMMON;
11716
11717             CONV_OVF_COMMON:
11718                 ovfl = true;
11719                 goto _CONV;
11720
11721             case CEE_CONV_I1:
11722                 lclTyp = TYP_BYTE;
11723                 goto CONV;
11724             case CEE_CONV_I2:
11725                 lclTyp = TYP_SHORT;
11726                 goto CONV;
11727             case CEE_CONV_I:
11728                 lclTyp = TYP_I_IMPL;
11729                 goto CONV;
11730             case CEE_CONV_I4:
11731                 lclTyp = TYP_INT;
11732                 goto CONV;
11733             case CEE_CONV_I8:
11734                 lclTyp = TYP_LONG;
11735                 goto CONV;
11736
11737             case CEE_CONV_U1:
11738                 lclTyp = TYP_UBYTE;
11739                 goto CONV;
11740             case CEE_CONV_U2:
11741                 lclTyp = TYP_CHAR;
11742                 goto CONV;
11743 #if (REGSIZE_BYTES == 8)
11744             case CEE_CONV_U:
11745                 lclTyp = TYP_U_IMPL;
11746                 goto CONV_UN;
11747 #else
11748             case CEE_CONV_U:
11749                 lclTyp = TYP_U_IMPL;
11750                 goto CONV;
11751 #endif
11752             case CEE_CONV_U4:
11753                 lclTyp = TYP_UINT;
11754                 goto CONV;
11755             case CEE_CONV_U8:
11756                 lclTyp = TYP_ULONG;
11757                 goto CONV_UN;
11758
11759             case CEE_CONV_R4:
11760                 lclTyp = TYP_FLOAT;
11761                 goto CONV;
11762             case CEE_CONV_R8:
11763                 lclTyp = TYP_DOUBLE;
11764                 goto CONV;
11765
11766             case CEE_CONV_R_UN:
11767                 lclTyp = TYP_DOUBLE;
11768                 goto CONV_UN;
11769
11770             CONV_UN:
11771                 uns  = true;
11772                 ovfl = false;
11773                 goto _CONV;
11774
11775             CONV:
11776                 uns  = false;
11777                 ovfl = false;
11778                 goto _CONV;
11779
11780             _CONV:
11781                 // just check that we have a number on the stack
11782                 if (tiVerificationNeeded)
11783                 {
11784                     const typeInfo& tiVal = impStackTop().seTypeInfo;
11785                     Verify(tiVal.IsNumberType(), "bad arg");
11786
11787 #ifdef _TARGET_64BIT_
11788                     bool isNative = false;
11789
11790                     switch (opcode)
11791                     {
11792                         case CEE_CONV_OVF_I:
11793                         case CEE_CONV_OVF_I_UN:
11794                         case CEE_CONV_I:
11795                         case CEE_CONV_OVF_U:
11796                         case CEE_CONV_OVF_U_UN:
11797                         case CEE_CONV_U:
11798                             isNative = true;
11799                         default:
11800                             // leave 'isNative' = false;
11801                             break;
11802                     }
11803                     if (isNative)
11804                     {
11805                         tiRetVal = typeInfo::nativeInt();
11806                     }
11807                     else
11808 #endif // _TARGET_64BIT_
11809                     {
11810                         tiRetVal = typeInfo(lclTyp).NormaliseForStack();
11811                     }
11812                 }
11813
11814                 // Only conversions from FLOAT or DOUBLE to an integer type,
11815                 // and conversions from ULONG (or LONG on ARM) to DOUBLE, are morphed into calls.
11816
11817                 if (varTypeIsFloating(lclTyp))
11818                 {
11819                     callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl
11820 #ifdef _TARGET_64BIT_
11821                                // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK?
11822                                // TYP_BYREF could be used as TYP_I_IMPL which is long.
11823                                // TODO-CQ: remove this when we lower casts long/ulong --> float/double
11824                                // and generate SSE2 code instead of going through helper calls.
11825                                || (impStackTop().val->TypeGet() == TYP_BYREF)
11826 #endif
11827                         ;
11828                 }
11829                 else
11830                 {
11831                     callNode = varTypeIsFloating(impStackTop().val->TypeGet());
11832                 }
11833
11834                 // At this point uns, ovfl, and callNode are all set.
11835
11836                 op1 = impPopStack().val;
11837                 impBashVarAddrsToI(op1);
11838
11839                 if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND)
11840                 {
11841                     op2 = op1->gtOp.gtOp2;
11842
11843                     if (op2->gtOper == GT_CNS_INT)
11844                     {
11845                         ssize_t ival = op2->gtIntCon.gtIconVal;
11846                         ssize_t mask, umask;
11847
11848                         switch (lclTyp)
11849                         {
11850                             case TYP_BYTE:
11851                             case TYP_UBYTE:
11852                                 mask  = 0x00FF;
11853                                 umask = 0x007F;
11854                                 break;
11855                             case TYP_CHAR:
11856                             case TYP_SHORT:
11857                                 mask  = 0xFFFF;
11858                                 umask = 0x7FFF;
11859                                 break;
11860
11861                             default:
11862                                 assert(!"unexpected type");
11863                                 return;
11864                         }
11865
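                              // Illustrative examples: for '(sbyte)(x & 0x7F)' the AND already confines
                              // the value to the target range, so the cast is dropped below; for
                              // '(sbyte)(x & 0xFF)' the cast truncates to 8 bits anyway, so the masking
                              // is dropped instead.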
11866                         if (((ival & umask) == ival) || ((ival & mask) == ival && uns))
11867                         {
11868                             /* Toss the cast, it's a waste of time */
11869
11870                             impPushOnStack(op1, tiRetVal);
11871                             break;
11872                         }
11873                         else if (ival == mask)
11874                         {
11875                             /* Toss the masking, it's a waste of time, since
11876                                we sign-extend from the small value anyway */
11877
11878                             op1 = op1->gtOp.gtOp1;
11879                         }
11880                     }
11881                 }
11882
11883                 /*  The 'op2' sub-operand of a cast is the 'real' type number,
11884                     since the result of a cast to one of the 'small' integer
11885                     types is an integer.
11886                  */
11887
11888                 type = genActualType(lclTyp);
11889
11890 #if SMALL_TREE_NODES
11891                 if (callNode)
11892                 {
11893                     op1 = gtNewCastNodeL(type, op1, lclTyp);
11894                 }
11895                 else
11896 #endif // SMALL_TREE_NODES
11897                 {
11898                     op1 = gtNewCastNode(type, op1, lclTyp);
11899                 }
11900
11901                 if (ovfl)
11902                 {
11903                     op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT);
11904                 }
11905                 if (uns)
11906                 {
11907                     op1->gtFlags |= GTF_UNSIGNED;
11908                 }
11909                 impPushOnStack(op1, tiRetVal);
11910                 break;
11911
11912             case CEE_NEG:
11913                 if (tiVerificationNeeded)
11914                 {
11915                     tiRetVal = impStackTop().seTypeInfo;
11916                     Verify(tiRetVal.IsNumberType(), "Bad arg");
11917                 }
11918
11919                 op1 = impPopStack().val;
11920                 impBashVarAddrsToI(op1, nullptr);
11921                 impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal);
11922                 break;
11923
11924             case CEE_POP:
11925                 if (tiVerificationNeeded)
11926                 {
11927                     impStackTop(0);
11928                 }
11929
11930                 /* Pull the top value from the stack */
11931
11932                 op1 = impPopStack(clsHnd).val;
11933
11934                 /* Get hold of the type of the value being duplicated */
11935
11936                 lclTyp = genActualType(op1->gtType);
11937
11938                 /* Does the value have any side effects? */
11939
11940                 if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode)
11941                 {
11942                     // Since we are throwing away the value, just normalize
11943                     // it to its address.  This is more efficient.
11944
11945                     if (varTypeIsStruct(op1))
11946                     {
11947 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
11948                         // Non-calls, such as obj or ret_expr, have to go through this.
11949                         // Calls with large struct return value have to go through this.
11950                         // Helper calls with small struct return value also have to go
11951                         // through this since they do not follow Unix calling convention.
11952                         if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd) ||
11953                             op1->AsCall()->gtCallType == CT_HELPER)
11954 #endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
11955                         {
11956                             op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false);
11957                         }
11958                     }
11959
11960                     // If op1 is a non-overflow cast, throw it away since it is useless.
11961                     // Another reason for throwing away the useless cast is in the context of
11962                     // implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)).
11963                     // The cast gets added as part of importing GT_CALL, which gets in the way
11964                     // of fgMorphCall() on the forms of tail call nodes that we assert.
11965                     if ((op1->gtOper == GT_CAST) && !op1->gtOverflow())
11966                     {
11967                         op1 = op1->gtOp.gtOp1;
11968                     }
11969
11970                     // If 'op1' is an expression, create an assignment node.
11971                     // This helps analyses (like CSE) work correctly.
11972
11973                     if (op1->gtOper != GT_CALL)
11974                     {
11975                         op1 = gtUnusedValNode(op1);
11976                     }
11977
11978                     /* Append the value to the tree list */
11979                     goto SPILL_APPEND;
11980                 }
11981
11982                 /* No side effects - just throw the <BEEP> thing away */
11983                 break;
11984
11985             case CEE_DUP:
11986
11987                 if (tiVerificationNeeded)
11988                 {
11989                     // Dup could start the beginning of a delegate creation sequence; remember that
11990                     delegateCreateStart = codeAddr - 1;
11991                     impStackTop(0);
11992                 }
11993
11994                 // Convert a (dup, stloc) sequence into a (stloc, ldloc) sequence in the following cases:
11995                 // - If this is non-debug code - so that CSE will recognize the two as equal.
11996                 //   This helps eliminate a redundant bounds check in cases such as:
11997                 //       ariba[i+3] += some_value;
11998                 // - If the top of the stack is a non-leaf that may be expensive to clone.
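                      // Illustrative IL shape:
                      //     <expr>; dup; stloc.0      becomes      <expr>; stloc.0; ldloc.0
                      // so both consumers refer to the same local and CSE can recognize them as equal.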
11999
12000                 if (codeAddr < codeEndp)
12001                 {
12002                     OPCODE nextOpcode = (OPCODE)getU1LittleEndian(codeAddr);
12003                     if (impIsAnySTLOC(nextOpcode))
12004                     {
12005                         if (!opts.compDbgCode)
12006                         {
12007                             insertLdloc = true;
12008                             break;
12009                         }
12010                         GenTree* stackTop = impStackTop().val;
12011                         if (!stackTop->IsIntegralConst(0) && !stackTop->IsFPZero() && !stackTop->IsLocal())
12012                         {
12013                             insertLdloc = true;
12014                             break;
12015                         }
12016                     }
12017                 }
12018
12019                 /* Pull the top value from the stack */
12020                 op1 = impPopStack(tiRetVal);
12021
12022                 /* Clone the value */
12023                 op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL,
12024                                    nullptr DEBUGARG("DUP instruction"));
12025
12026                 /* Either the tree started with no global effects, or impCloneExpr
12027                    evaluated the tree to a temp and returned two copies of that
12028                    temp. Either way, neither op1 nor op2 should have side effects.
12029                 */
12030                 assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT));
12031
12032                 /* Push the tree/temp back on the stack */
12033                 impPushOnStack(op1, tiRetVal);
12034
12035                 /* Push the copy on the stack */
12036                 impPushOnStack(op2, tiRetVal);
12037
12038                 break;
12039
12040             case CEE_STIND_I1:
12041                 lclTyp = TYP_BYTE;
12042                 goto STIND;
12043             case CEE_STIND_I2:
12044                 lclTyp = TYP_SHORT;
12045                 goto STIND;
12046             case CEE_STIND_I4:
12047                 lclTyp = TYP_INT;
12048                 goto STIND;
12049             case CEE_STIND_I8:
12050                 lclTyp = TYP_LONG;
12051                 goto STIND;
12052             case CEE_STIND_I:
12053                 lclTyp = TYP_I_IMPL;
12054                 goto STIND;
12055             case CEE_STIND_REF:
12056                 lclTyp = TYP_REF;
12057                 goto STIND;
12058             case CEE_STIND_R4:
12059                 lclTyp = TYP_FLOAT;
12060                 goto STIND;
12061             case CEE_STIND_R8:
12062                 lclTyp = TYP_DOUBLE;
12063                 goto STIND;
12064             STIND:
12065
12066                 if (tiVerificationNeeded)
12067                 {
12068                     typeInfo instrType(lclTyp);
12069 #ifdef _TARGET_64BIT_
12070                     if (opcode == CEE_STIND_I)
12071                     {
12072                         instrType = typeInfo::nativeInt();
12073                     }
12074 #endif // _TARGET_64BIT_
12075                     verVerifySTIND(impStackTop(1).seTypeInfo, impStackTop(0).seTypeInfo, instrType);
12076                 }
12077                 else
12078                 {
12079                     compUnsafeCastUsed = true; // Have to go conservative
12080                 }
12081
12082             STIND_POST_VERIFY:
12083
12084                 op2 = impPopStack().val; // value to store
12085                 op1 = impPopStack().val; // address to store to
12086
12087                 // you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF
12088                 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12089
12090                 impBashVarAddrsToI(op1, op2);
12091
12092                 op2 = impImplicitR4orR8Cast(op2, lclTyp);
12093
12094 #ifdef _TARGET_64BIT_
12095                 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
12096                 if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
12097                 {
12098                     op2->gtType = TYP_I_IMPL;
12099                 }
12100                 else
12101                 {
12102                     // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
12103                     //
12104                     if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
12105                     {
12106                         assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12107                         op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
12108                     }
12109                     // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12110                     //
12111                     if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
12112                     {
12113                         assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12114                         op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
12115                     }
12116                 }
12117 #endif // _TARGET_64BIT_
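                // Illustrative note (added, not from the original comments): on 64-bit targets the block
                // above makes IL such as the hypothetical sequence
                //
                //     ldloc.0        // native int address
                //     ldc.i4.5       // 32-bit constant value
                //     stind.i        // store native int
                //
                // agree with the JIT's typing by retyping or casting the 32-bit constant/value up to
                // TYP_I_IMPL (and, for x86 compatibility, allowing the reverse narrowing to TYP_INT as well).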
12118
12119                 if (opcode == CEE_STIND_REF)
12120                 {
12121                     // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF
12122                     assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType));
12123                     lclTyp = genActualType(op2->TypeGet());
12124                 }
12125
12126 // Check target type.
12127 #ifdef DEBUG
12128                 if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF)
12129                 {
12130                     if (op2->gtType == TYP_BYREF)
12131                     {
12132                         assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL);
12133                     }
12134                     else if (lclTyp == TYP_BYREF)
12135                     {
12136                         assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType));
12137                     }
12138                 }
12139                 else
12140                 {
12141                     assertImp(genActualType(op2->gtType) == genActualType(lclTyp) ||
12142                               ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) ||
12143                               (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp)));
12144                 }
12145 #endif
12146
12147                 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12148
12149                 // stind could point anywhere, e.g. a boxed class static int
12150                 op1->gtFlags |= GTF_IND_TGTANYWHERE;
12151
12152                 if (prefixFlags & PREFIX_VOLATILE)
12153                 {
12154                     assert(op1->OperGet() == GT_IND);
12155                     op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
12156                     op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12157                     op1->gtFlags |= GTF_IND_VOLATILE;
12158                 }
12159
12160                 if (prefixFlags & PREFIX_UNALIGNED)
12161                 {
12162                     assert(op1->OperGet() == GT_IND);
12163                     op1->gtFlags |= GTF_IND_UNALIGNED;
12164                 }
12165
12166                 op1 = gtNewAssignNode(op1, op2);
12167                 op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
12168
12169                 // Spill side-effects AND global-data-accesses
12170                 if (verCurrentState.esStackDepth > 0)
12171                 {
12172                     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND"));
12173                 }
12174
12175                 goto APPEND;
12176
12177             case CEE_LDIND_I1:
12178                 lclTyp = TYP_BYTE;
12179                 goto LDIND;
12180             case CEE_LDIND_I2:
12181                 lclTyp = TYP_SHORT;
12182                 goto LDIND;
12183             case CEE_LDIND_U4:
12184             case CEE_LDIND_I4:
12185                 lclTyp = TYP_INT;
12186                 goto LDIND;
12187             case CEE_LDIND_I8:
12188                 lclTyp = TYP_LONG;
12189                 goto LDIND;
12190             case CEE_LDIND_REF:
12191                 lclTyp = TYP_REF;
12192                 goto LDIND;
12193             case CEE_LDIND_I:
12194                 lclTyp = TYP_I_IMPL;
12195                 goto LDIND;
12196             case CEE_LDIND_R4:
12197                 lclTyp = TYP_FLOAT;
12198                 goto LDIND;
12199             case CEE_LDIND_R8:
12200                 lclTyp = TYP_DOUBLE;
12201                 goto LDIND;
12202             case CEE_LDIND_U1:
12203                 lclTyp = TYP_UBYTE;
12204                 goto LDIND;
12205             case CEE_LDIND_U2:
12206                 lclTyp = TYP_CHAR;
12207                 goto LDIND;
12208             LDIND:
12209
12210                 if (tiVerificationNeeded)
12211                 {
12212                     typeInfo lclTiType(lclTyp);
12213 #ifdef _TARGET_64BIT_
12214                     if (opcode == CEE_LDIND_I)
12215                     {
12216                         lclTiType = typeInfo::nativeInt();
12217                     }
12218 #endif // _TARGET_64BIT_
12219                     tiRetVal = verVerifyLDIND(impStackTop().seTypeInfo, lclTiType);
12220                     tiRetVal.NormaliseForStack();
12221                 }
12222                 else
12223                 {
12224                     compUnsafeCastUsed = true; // Have to go conservative
12225                 }
12226
12227             LDIND_POST_VERIFY:
12228
12229                 op1 = impPopStack().val; // address to load from
12230                 impBashVarAddrsToI(op1);
12231
12232 #ifdef _TARGET_64BIT_
12233                 // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12234                 //
12235                 if (genActualType(op1->gtType) == TYP_INT)
12236                 {
12237                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12238                     op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
12239                 }
12240 #endif
12241
12242                 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12243
12244                 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12245
12246                 // ldind could point anywhere, e.g. a boxed class static int
12247                 op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
12248
12249                 if (prefixFlags & PREFIX_VOLATILE)
12250                 {
12251                     assert(op1->OperGet() == GT_IND);
12252                     op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
12253                     op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12254                     op1->gtFlags |= GTF_IND_VOLATILE;
12255                 }
12256
12257                 if (prefixFlags & PREFIX_UNALIGNED)
12258                 {
12259                     assert(op1->OperGet() == GT_IND);
12260                     op1->gtFlags |= GTF_IND_UNALIGNED;
12261                 }
12262
12263                 impPushOnStack(op1, tiRetVal);
12264
12265                 break;
12266
12267             case CEE_UNALIGNED:
12268
12269                 assert(sz == 1);
12270                 val = getU1LittleEndian(codeAddr);
12271                 ++codeAddr;
12272                 JITDUMP(" %u", val);
12273                 if ((val != 1) && (val != 2) && (val != 4))
12274                 {
12275                     BADCODE("Alignment unaligned. must be 1, 2, or 4");
12276                 }
12277
12278                 Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. prefixes");
12279                 prefixFlags |= PREFIX_UNALIGNED;
12280
12281                 impValidateMemoryAccessOpcode(codeAddr, codeEndp, false);
12282
12283             PREFIX:
12284                 opcode = (OPCODE)getU1LittleEndian(codeAddr);
12285                 codeAddr += sizeof(__int8);
12286                 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
12287                 goto DECODE_OPCODE;
12288
12289             case CEE_VOLATILE:
12290
12291                 Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes");
12292                 prefixFlags |= PREFIX_VOLATILE;
12293
12294                 impValidateMemoryAccessOpcode(codeAddr, codeEndp, true);
12295
12296                 assert(sz == 0);
12297                 goto PREFIX;
12298
12299             case CEE_LDFTN:
12300             {
12301                 // Need to do a lookup here so that we perform an access check
12302                 // and do a NOWAY if protections are violated
12303                 _impResolveToken(CORINFO_TOKENKIND_Method);
12304
12305                 JITDUMP(" %08X", resolvedToken.token);
12306
12307                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12308                               addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN)),
12309                               &callInfo);
12310
12311                 // This check really only applies to intrinsic Array.Address methods
12312                 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12313                 {
12314                     NO_WAY("Currently do not support LDFTN of Parameterized functions");
12315                 }
12316
12317                 // Do this before DO_LDFTN since CEE_LDVIRTFTN does it on its own.
12318                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12319
12320                 if (tiVerificationNeeded)
12321                 {
12322                     // LDFTN could start the beginning of a delegate creation sequence; remember that
12323                     delegateCreateStart = codeAddr - 2;
12324
12325                     // check any constraints on the callee's class and type parameters
12326                     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12327                                    "method has unsatisfied class constraints");
12328                     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12329                                                                                 resolvedToken.hMethod),
12330                                    "method has unsatisfied method constraints");
12331
12332                     mflags = callInfo.verMethodFlags;
12333                     Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDFTN on a constructor");
12334                 }
12335
12336             DO_LDFTN:
12337                 op1 = impMethodPointer(&resolvedToken, &callInfo);
12338                 if (compDonotInline())
12339                 {
12340                     return;
12341                 }
12342
12343                 impPushOnStack(op1, typeInfo(resolvedToken.hMethod));
12344
12345                 break;
12346             }
12347
12348             case CEE_LDVIRTFTN:
12349             {
12350                 /* Get the method token */
12351
12352                 _impResolveToken(CORINFO_TOKENKIND_Method);
12353
12354                 JITDUMP(" %08X", resolvedToken.token);
12355
12356                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */,
12357                               addVerifyFlag(combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN),
12358                                                     CORINFO_CALLINFO_CALLVIRT)),
12359                               &callInfo);
12360
12361                 // This check really only applies to intrinsic Array.Address methods
12362                 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12363                 {
12364                     NO_WAY("Currently do not support LDFTN of Parameterized functions");
12365                 }
12366
12367                 mflags = callInfo.methodFlags;
12368
12369                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12370
12371                 if (compIsForInlining())
12372                 {
12373                     if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12374                     {
12375                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL);
12376                         return;
12377                     }
12378                 }
12379
12380                 CORINFO_SIG_INFO& ftnSig = callInfo.sig;
12381
12382                 if (tiVerificationNeeded)
12383                 {
12384
12385                     Verify(ftnSig.hasThis(), "ldvirtftn on a static method");
12386                     Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDVIRTFTN on a constructor");
12387
12388                     // JIT32 verifier rejects verifiable ldvirtftn pattern
12389                     typeInfo declType =
12390                         verMakeTypeInfo(resolvedToken.hClass, true); // Change TI_STRUCT to TI_REF when necessary
12391
12392                     typeInfo arg = impStackTop().seTypeInfo;
12393                     Verify((arg.IsType(TI_REF) || arg.IsType(TI_NULL)) && tiCompatibleWith(arg, declType, true),
12394                            "bad ldvirtftn");
12395
12396                     CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
12397                     if (!(arg.IsType(TI_NULL) || (mflags & CORINFO_FLG_STATIC)))
12398                     {
12399                         instanceClassHnd = arg.GetClassHandleForObjRef();
12400                     }
12401
12402                     // check any constraints on the method's class and type parameters
12403                     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12404                                    "method has unsatisfied class constraints");
12405                     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12406                                                                                 resolvedToken.hMethod),
12407                                    "method has unsatisfied method constraints");
12408
12409                     if (mflags & CORINFO_FLG_PROTECTED)
12410                     {
12411                         Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
12412                                "Accessing protected method through wrong type.");
12413                     }
12414                 }
12415
12416                 /* Get the object-ref */
12417                 op1 = impPopStack().val;
12418                 assertImp(op1->gtType == TYP_REF);
12419
12420                 if (opts.IsReadyToRun())
12421                 {
12422                     if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN)
12423                     {
12424                         if (op1->gtFlags & GTF_SIDE_EFFECT)
12425                         {
12426                             op1 = gtUnusedValNode(op1);
12427                             impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12428                         }
12429                         goto DO_LDFTN;
12430                     }
12431                 }
12432                 else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12433                 {
12434                     if (op1->gtFlags & GTF_SIDE_EFFECT)
12435                     {
12436                         op1 = gtUnusedValNode(op1);
12437                         impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12438                     }
12439                     goto DO_LDFTN;
12440                 }
12441
12442                 GenTreePtr fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo);
12443                 if (compDonotInline())
12444                 {
12445                     return;
12446                 }
12447
12448                 impPushOnStack(fptr, typeInfo(resolvedToken.hMethod));
12449
12450                 break;
12451             }
12452
12453             case CEE_CONSTRAINED:
12454
12455                 assertImp(sz == sizeof(unsigned));
12456                 impResolveToken(codeAddr, &constrainedResolvedToken, CORINFO_TOKENKIND_Constrained);
12457                 codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually
12458                 JITDUMP(" (%08X) ", constrainedResolvedToken.token);
12459
12460                 Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes");
12461                 prefixFlags |= PREFIX_CONSTRAINED;
12462
12463                 {
12464                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12465                     if (actualOpcode != CEE_CALLVIRT)
12466                     {
12467                         BADCODE("constrained. has to be followed by callvirt");
12468                     }
12469                 }
12470
12471                 goto PREFIX;
12472
12473             case CEE_READONLY:
12474                 JITDUMP(" readonly.");
12475
12476                 Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes");
12477                 prefixFlags |= PREFIX_READONLY;
12478
12479                 {
12480                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12481                     if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode))
12482                     {
12483                         BADCODE("readonly. has to be followed by ldelema or call");
12484                     }
12485                 }
12486
12487                 assert(sz == 0);
12488                 goto PREFIX;
12489
12490             case CEE_TAILCALL:
12491                 JITDUMP(" tail.");
12492
12493                 Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes");
12494                 prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
12495
12496                 {
12497                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12498                     if (!impOpcodeIsCallOpcode(actualOpcode))
12499                     {
12500                         BADCODE("tailcall. has to be followed by call, callvirt or calli");
12501                     }
12502                 }
12503                 assert(sz == 0);
12504                 goto PREFIX;
12505
12506             case CEE_NEWOBJ:
12507
12508                 /* Since we will implicitly insert newObjThisPtr at the start of the
12509                    argument list, spill any GTF_ORDER_SIDEEFF */
12510                 impSpillSpecialSideEff();
12511
12512                 /* NEWOBJ does not respond to TAIL */
12513                 prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT;
12514
12515                 /* NEWOBJ does not respond to CONSTRAINED */
12516                 prefixFlags &= ~PREFIX_CONSTRAINED;
12517
12518 #if COR_JIT_EE_VERSION > 460
12519                 _impResolveToken(CORINFO_TOKENKIND_NewObj);
12520 #else
12521                 _impResolveToken(CORINFO_TOKENKIND_Method);
12522 #endif
12523
12524                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12525                               addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM)),
12526                               &callInfo);
12527
12528                 if (compIsForInlining())
12529                 {
12530                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
12531                     {
12532                         // Check to see if this call violates the boundary.
12533                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
12534                         return;
12535                     }
12536                 }
12537
12538                 mflags = callInfo.methodFlags;
12539
12540                 if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0)
12541                 {
12542                     BADCODE("newobj on static or abstract method");
12543                 }
12544
12545                 // Insert the security callout before any actual code is generated
12546                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12547
12548                 // There are three different cases for new:
12549                 //      1) Object is an array (arrays are treated specially by the EE)
12550                 //      2) Object is some other variable-sized object (e.g. String)
12551                 //      3) Class size can be determined beforehand (normal case)
12552                 // In the first two cases the object size is variable (it depends on the arguments).
12553                 // In the first case, we need to call a NEWOBJ helper (multinewarray);
12554                 // in the second case we call the constructor with a '0' this pointer;
12555                 // in the third case we alloc the memory, then call the constructor.
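                // Illustrative examples (added for clarity, not exhaustive): case 1 covers IL like
                // "newobj instance void int32[,]::.ctor(int32, int32)"; case 2 covers constructors such as
                // "newobj instance void System.String::.ctor(char, int32)"; case 3 covers ordinary fixed-size
                // classes and value classes, e.g. "newobj instance void Point::.ctor(int32, int32)"
                // (Point being a hypothetical type used only for illustration).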
12556
12557                 clsFlags = callInfo.classFlags;
12558                 if (clsFlags & CORINFO_FLG_ARRAY)
12559                 {
12560                     if (tiVerificationNeeded)
12561                     {
12562                         CORINFO_CLASS_HANDLE elemTypeHnd;
12563                         INDEBUG(CorInfoType corType =)
12564                         info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
12565                         assert(!(elemTypeHnd == nullptr && corType == CORINFO_TYPE_VALUECLASS));
12566                         Verify(elemTypeHnd == nullptr ||
12567                                    !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
12568                                "newarr of byref-like objects");
12569                         verVerifyCall(opcode, &resolvedToken, nullptr, ((prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0),
12570                                       ((prefixFlags & PREFIX_READONLY) != 0), delegateCreateStart, codeAddr - 1,
12571                                       &callInfo DEBUGARG(info.compFullName));
12572                     }
12573                     // Arrays need to call the NEWOBJ helper.
12574                     assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE);
12575
12576                     impImportNewObjArray(&resolvedToken, &callInfo);
12577                     if (compDonotInline())
12578                     {
12579                         return;
12580                     }
12581
12582                     callTyp = TYP_REF;
12583                     break;
12584                 }
12585                 // At present this can only be String
12586                 else if (clsFlags & CORINFO_FLG_VAROBJSIZE)
12587                 {
12588                     if (IsTargetAbi(CORINFO_CORERT_ABI))
12589                     {
12590                         // The dummy argument does not exist in CoreRT
12591                         newObjThisPtr = nullptr;
12592                     }
12593                     else
12594                     {
12595                         // This is the case for variable-sized objects that are not
12596                         // arrays.  In this case, call the constructor with a null 'this'
12597                         // pointer
12598                         newObjThisPtr = gtNewIconNode(0, TYP_REF);
12599                     }
12600
12601                     /* Remember that this basic block contains 'new' of an object */
12602                     block->bbFlags |= BBF_HAS_NEWOBJ;
12603                     optMethodFlags |= OMF_HAS_NEWOBJ;
12604                 }
12605                 else
12606                 {
12607                     // This is the normal case where the size of the object is
12608                     // fixed.  Allocate the memory and call the constructor.
12609
12610                     // Note: We cannot add a peep to avoid use of temp here
12611                     // because we don't have enough interference info to detect when
12612                     // sources and destination interfere, e.g.: s = new S(ref);
12613
12614                     // TODO: We should find the correct place to introduce a general
12615                     // reverse copy prop for struct return values from newobj or
12616                     // any function returning structs.
12617
12618                     /* get a temporary for the new object */
12619                     lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp"));
12620
12621                     // In the value class case we only need clsHnd for size calcs.
12622                     //
12623                     // The lookup of the code pointer will be handled by CALL in this case
12624                     if (clsFlags & CORINFO_FLG_VALUECLASS)
12625                     {
12626                         if (compIsForInlining())
12627                         {
12628                             // If value class has GC fields, inform the inliner. It may choose to
12629                             // bail out on the inline.
12630                             DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
12631                             if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
12632                             {
12633                                 compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
12634                                 if (compInlineResult->IsFailure())
12635                                 {
12636                                     return;
12637                                 }
12638
12639                                 // Do further notification in the case where the call site is rare;
12640                                 // some policies do not track the relative hotness of call sites for
12641                                 // "always" inline cases.
12642                                 if (impInlineInfo->iciBlock->isRunRarely())
12643                                 {
12644                                     compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
12645                                     if (compInlineResult->IsFailure())
12646                                     {
12647                                         return;
12648                                     }
12649                                 }
12650                             }
12651                         }
12652
12653                         CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
12654                         unsigned    size   = info.compCompHnd->getClassSize(resolvedToken.hClass);
12655
12656                         if (impIsPrimitive(jitTyp))
12657                         {
12658                             lvaTable[lclNum].lvType = JITtype2varType(jitTyp);
12659                         }
12660                         else
12661                         {
12662                             // The local variable itself is the allocated space.
12663                             // Here we need the unsafe value cls check, since the address of the struct is taken
12664                             // for further use and is potentially exploitable.
12665                             lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */);
12666                         }
12667
12668                         // Append a tree to zero-out the temp
12669                         newObjThisPtr = gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet());
12670
12671                         newObjThisPtr = gtNewBlkOpNode(newObjThisPtr,    // Dest
12672                                                        gtNewIconNode(0), // Value
12673                                                        size,             // Size
12674                                                        false,            // isVolatile
12675                                                        false);           // not copyBlock
12676                         impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
12677
12678                         // Obtain the address of the temp
12679                         newObjThisPtr =
12680                             gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet()));
12681                     }
12682                     else
12683                     {
12684 #ifdef FEATURE_READYTORUN_COMPILER
12685                         if (opts.IsReadyToRun())
12686                         {
12687                             op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
12688                             usingReadyToRunHelper = (op1 != nullptr);
12689                         }
12690
12691                         if (!usingReadyToRunHelper)
12692 #endif
12693                         {
12694                             op1 = impParentClassTokenToHandle(&resolvedToken, nullptr, TRUE);
12695                             if (op1 == nullptr)
12696                             { // compDonotInline()
12697                                 return;
12698                             }
12699
12700                             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
12701                             // and the newfast call with a single call to a dynamic R2R cell that will:
12702                             //      1) Load the context
12703                             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate
12704                             //      stub
12705                             //      3) Allocate and return the new object
12706                             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
12707
12708                             op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd),
12709                                                     resolvedToken.hClass, TYP_REF, op1);
12710                         }
12711
12712                         // Remember that this basic block contains 'new' of an object
12713                         block->bbFlags |= BBF_HAS_NEWOBJ;
12714                         optMethodFlags |= OMF_HAS_NEWOBJ;
12715
12716                         // Append the assignment to the temp/local. Don't need to spill
12717                         // at all as we are just calling an EE-Jit helper which can only
12718                         // cause an (async) OutOfMemoryException.
12719
12720                         // We assign the newly allocated object (by a GT_ALLOCOBJ node)
12721                         // to a temp. Note that the pattern "temp = allocObj" is required
12722                         // by ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes
12723                         // without exhaustive walk over all expressions.
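                        // For illustration (a sketch, not actual generated output): the statements appended
                        // here have roughly the shape
                        //
                        //     tmpN = ALLOCOBJ(newHelper, clsHnd)   // GT_ALLOCOBJ assigned directly to a temp
                        //     CALL .ctor(tmpN, args...)            // tmpN becomes newObjThisPtr for the call
                        //
                        // where tmpN is the temp grabbed above; keeping the "temp = allocObj" shape is what
                        // lets the ObjectAllocator phase find GT_ALLOCOBJ nodes cheaply.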
12724
12725                         impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE);
12726
12727                         newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF);
12728                     }
12729                 }
12730                 goto CALL;
12731
12732             case CEE_CALLI:
12733
12734                 /* CALLI does not respond to CONSTRAINED */
12735                 prefixFlags &= ~PREFIX_CONSTRAINED;
12736
12737                 if (compIsForInlining())
12738                 {
12739                     // CALLI doesn't have a method handle, so assume the worst.
12740                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
12741                     {
12742                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_CALLI);
12743                         return;
12744                     }
12745                 }
12746
12747             // fall through
12748
12749             case CEE_CALLVIRT:
12750             case CEE_CALL:
12751
12752                 // We can't call getCallInfo on the token from a CALLI, but we need it in
12753                 // many other places.  We unfortunately embed that knowledge here.
12754                 if (opcode != CEE_CALLI)
12755                 {
12756                     _impResolveToken(CORINFO_TOKENKIND_Method);
12757
12758                     eeGetCallInfo(&resolvedToken,
12759                                   (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr,
12760                                   // this is how impImportCall invokes getCallInfo
12761                                   addVerifyFlag(
12762                                       combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS),
12763                                               (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT
12764                                                                        : CORINFO_CALLINFO_NONE)),
12765                                   &callInfo);
12766                 }
12767                 else
12768                 {
12769                     // Suppress uninitialized use warning.
12770                     memset(&resolvedToken, 0, sizeof(resolvedToken));
12771                     memset(&callInfo, 0, sizeof(callInfo));
12772
12773                     resolvedToken.token = getU4LittleEndian(codeAddr);
12774                 }
12775
12776             CALL: // memberRef should be set.
12777                 // newObjThisPtr should be set for CEE_NEWOBJ
12778
12779                 JITDUMP(" %08X", resolvedToken.token);
12780                 constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0;
12781
12782                 bool newBBcreatedForTailcallStress;
12783
12784                 newBBcreatedForTailcallStress = false;
12785
12786                 if (compIsForInlining())
12787                 {
12788                     if (compDonotInline())
12789                     {
12790                         return;
12791                     }
12792                     // We rule out inlinees with explicit tail calls in fgMakeBasicBlocks.
12793                     assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0);
12794                 }
12795                 else
12796                 {
12797                     if (compTailCallStress())
12798                     {
12799                         // Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()?
12800                         // Tail call stress only recognizes call+ret patterns and forces them to be
12801                         // explicit tail prefixed calls.  Also fgMakeBasicBlocks() under tail call stress
12802                         // doesn't import 'ret' opcode following the call into the basic block containing
12803                         // the call; instead it imports it into a new basic block.  Note that fgMakeBasicBlocks()
12804                         // is already checking that there is an opcode following call and hence it is
12805                         // safe here to read next opcode without bounds check.
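                        // Sketch (hypothetical IL, added for illustration): the pattern recognized here is
                        //
                        //     call   SomeMethod   <-- current opcode
                        //     ret                 <-- next opcode
                        //
                        // which tail call stress then treats as if the user had written "tail. call".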
12806                         newBBcreatedForTailcallStress =
12807                             impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL, (not a CEE_NEWOBJ). So, don't
12808                                                              // make it jump to RET.
12809                             (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET
12810
12811                         if (newBBcreatedForTailcallStress &&
12812                             !(prefixFlags & PREFIX_TAILCALL_EXPLICIT) && // User hasn't set "tail." prefix yet.
12813                             verCheckTailCallConstraint(opcode, &resolvedToken,
12814                                                        constraintCall ? &constrainedResolvedToken : nullptr,
12815                                                        true) // Is it legal to do a tailcall?
12816                             )
12817                         {
12818                             // Stress the tailcall.
12819                             JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)");
12820                             prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
12821                         }
12822                     }
12823                 }
12824
12825                 // This is split up to avoid goto flow warnings.
12826                 bool isRecursive;
12827                 isRecursive = !compIsForInlining() && (callInfo.hMethod == info.compMethodHnd);
12828
12829                 // Note that when running under tail call stress, a call will be marked as explicit tail prefixed
12830                 // hence will not be considered for implicit tail calling.
12831                 if (impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive))
12832                 {
12833                     if (compIsForInlining())
12834                     {
12835 #if FEATURE_TAILCALL_OPT_SHARED_RETURN
12836                         // Are we inlining at an implicit tail call site? If so then we can flag
12837                         // implicit tail call sites in the inline body. These call sites
12838                         // often end up in non BBJ_RETURN blocks, so only flag them when
12839                         // we're able to handle shared returns.
12840                         if (impInlineInfo->iciCall->IsImplicitTailCall())
12841                         {
12842                             JITDUMP(" (Inline Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
12843                             prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
12844                         }
12845 #endif // FEATURE_TAILCALL_OPT_SHARED_RETURN
12846                     }
12847                     else
12848                     {
12849                         JITDUMP(" (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
12850                         prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
12851                     }
12852                 }
12853
12854                 // Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call).
12855                 explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
12856                 readonlyCall     = (prefixFlags & PREFIX_READONLY) != 0;
12857
12858                 if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ)
12859                 {
12860                     // All calls and delegates need a security callout.
12861                     // For delegates, this is the call to the delegate constructor, not the access check on the
12862                     // LD(virt)FTN.
12863                     impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12864
12865 #if 0 // DevDiv 410397 - This breaks too many obfuscated apps to do this in an in-place release
12866
12867                 // DevDiv 291703 - we need to check for accessibility between the caller of InitializeArray
12868                 // and the field it is reading, thus it is now unverifiable to not immediately precede with
12869                 // ldtoken <field token>, and we now check accessibility
12870                 if ((callInfo.methodFlags & CORINFO_FLG_INTRINSIC) &&
12871                     (info.compCompHnd->getIntrinsicID(callInfo.hMethod) == CORINFO_INTRINSIC_InitializeArray))
12872                 {
12873                     if (prevOpcode != CEE_LDTOKEN)
12874                     {
12875                         Verify(prevOpcode == CEE_LDTOKEN, "Need ldtoken for InitializeArray");
12876                     }
12877                     else
12878                     {
12879                         assert(lastLoadToken != NULL);
12880                         // Now that we know we have a token, verify that it is accessible for loading
12881                         CORINFO_RESOLVED_TOKEN resolvedLoadField;
12882                         impResolveToken(lastLoadToken, &resolvedLoadField, CORINFO_TOKENKIND_Field);
12883                         eeGetFieldInfo(&resolvedLoadField, CORINFO_ACCESS_INIT_ARRAY, &fieldInfo);
12884                         impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
12885                     }
12886                 }
12887
12888 #endif // DevDiv 410397
12889                 }
12890
12891                 if (tiVerificationNeeded)
12892                 {
12893                     verVerifyCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
12894                                   explicitTailCall, readonlyCall, delegateCreateStart, codeAddr - 1,
12895                                   &callInfo DEBUGARG(info.compFullName));
12896                 }
12897
12898                 // Insert delegate callout here.
12899                 if (opcode == CEE_NEWOBJ && (mflags & CORINFO_FLG_CONSTRUCTOR) && (clsFlags & CORINFO_FLG_DELEGATE))
12900                 {
12901 #ifdef DEBUG
12902                     // We should do this only if verification is enabled
12903                     // If verification is disabled, delegateCreateStart will not be initialized correctly
12904                     if (tiVerificationNeeded)
12905                     {
12906                         mdMemberRef delegateMethodRef = mdMemberRefNil;
12907                         // We should get here only for well formed delegate creation.
12908                         assert(verCheckDelegateCreation(delegateCreateStart, codeAddr - 1, delegateMethodRef));
12909                     }
12910 #endif
12911
12912 #ifdef FEATURE_CORECLR
12913                     // In coreclr the delegate transparency rule needs to be enforced even if verification is disabled
12914                     typeInfo              tiActualFtn          = impStackTop(0).seTypeInfo;
12915                     CORINFO_METHOD_HANDLE delegateMethodHandle = tiActualFtn.GetMethod2();
12916
12917                     impInsertCalloutForDelegate(info.compMethodHnd, delegateMethodHandle, resolvedToken.hClass);
12918 #endif // FEATURE_CORECLR
12919                 }
12920
12921                 callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
12922                                         newObjThisPtr, prefixFlags, &callInfo, opcodeOffs);
12923                 if (compDonotInline())
12924                 {
12925                     return;
12926                 }
12927
12928                 if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we
12929                                                                        // have created a new BB after the "call"
12930                 // instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless.
12931                 {
12932                     assert(!compIsForInlining());
12933                     goto RET;
12934                 }
12935
12936                 break;
12937
12938             case CEE_LDFLD:
12939             case CEE_LDSFLD:
12940             case CEE_LDFLDA:
12941             case CEE_LDSFLDA:
12942             {
12943
12944                 BOOL isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA);
12945                 BOOL isLoadStatic  = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA);
12946
12947                 /* Get the CP_Fieldref index */
12948                 assertImp(sz == sizeof(unsigned));
12949
12950                 _impResolveToken(CORINFO_TOKENKIND_Field);
12951
12952                 JITDUMP(" %08X", resolvedToken.token);
12953
12954                 int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET;
12955
12956                 GenTreePtr           obj     = nullptr;
12957                 typeInfo*            tiObj   = nullptr;
12958                 CORINFO_CLASS_HANDLE objType = nullptr; // used for fields
12959
12960                 if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA)
12961                 {
12962                     tiObj = &impStackTop().seTypeInfo;
12963                     obj   = impPopStack(objType).val;
12964
12965                     if (impIsThis(obj))
12966                     {
12967                         aflags |= CORINFO_ACCESS_THIS;
12968
12969                         // An optimization for Contextful classes:
12970                         // we unwrap the proxy when we have a 'this reference'
12971
12972                         if (info.compUnwrapContextful)
12973                         {
12974                             aflags |= CORINFO_ACCESS_UNWRAP;
12975                         }
12976                     }
12977                 }
12978
12979                 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
12980
12981                 // Figure out the type of the member.  We always call canAccessField, so you always need this
12982                 // handle
12983                 CorInfoType ciType = fieldInfo.fieldType;
12984                 clsHnd             = fieldInfo.structType;
12985
12986                 lclTyp = JITtype2varType(ciType);
12987
12988 #ifdef _TARGET_AMD64_
12989                 noway_assert(varTypeIsIntegralOrI(lclTyp) || varTypeIsFloating(lclTyp) || lclTyp == TYP_STRUCT);
12990 #endif // _TARGET_AMD64_
12991
12992                 if (compIsForInlining())
12993                 {
12994                     switch (fieldInfo.fieldAccessor)
12995                     {
12996                         case CORINFO_FIELD_INSTANCE_HELPER:
12997                         case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
12998                         case CORINFO_FIELD_STATIC_ADDR_HELPER:
12999                         case CORINFO_FIELD_STATIC_TLS:
13000
13001                             compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER);
13002                             return;
13003
13004                         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13005 #if COR_JIT_EE_VERSION > 460
13006                         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13007 #endif
13008                             /* We may be able to inline the field accessors in specific instantiations of generic
13009                              * methods */
13010                             compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER);
13011                             return;
13012
13013                         default:
13014                             break;
13015                     }
13016
13017                     if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT &&
13018                         clsHnd)
13019                     {
13020                         if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) &&
13021                             !(info.compFlags & CORINFO_FLG_FORCEINLINE))
13022                         {
13023                             // Loading a static valuetype field usually will cause a JitHelper to be called
13024                             // for the static base. This will bloat the code.
13025                             compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS);
13026
13027                             if (compInlineResult->IsFailure())
13028                             {
13029                                 return;
13030                             }
13031                         }
13032                     }
13033                 }
13034
13035                 tiRetVal = verMakeTypeInfo(ciType, clsHnd);
13036                 if (isLoadAddress)
13037                 {
13038                     tiRetVal.MakeByRef();
13039                 }
13040                 else
13041                 {
13042                     tiRetVal.NormaliseForStack();
13043                 }
13044
13045                 // Perform this check always to ensure that we get field access exceptions even with
13046                 // SkipVerification.
13047                 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13048
13049                 if (tiVerificationNeeded)
13050                 {
13051                     // You can also pass the unboxed struct to  LDFLD
13052                     BOOL bAllowPlainValueTypeAsThis = FALSE;
13053                     if (opcode == CEE_LDFLD && impIsValueType(tiObj))
13054                     {
13055                         bAllowPlainValueTypeAsThis = TRUE;
13056                     }
13057
13058                     verVerifyField(&resolvedToken, fieldInfo, tiObj, isLoadAddress, bAllowPlainValueTypeAsThis);
13059
13060                     // If we're doing this on a heap object or from a 'safe' byref
13061                     // then the result is a safe byref too
13062                     if (isLoadAddress) // load address
13063                     {
13064                         if (fieldInfo.fieldFlags &
13065                             CORINFO_FLG_FIELD_STATIC) // statics marked as safe will have permanent home
13066                         {
13067                             if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_SAFESTATIC_BYREF_RETURN)
13068                             {
13069                                 tiRetVal.SetIsPermanentHomeByRef();
13070                             }
13071                         }
13072                         else if (tiObj->IsObjRef() || tiObj->IsPermanentHomeByRef())
13073                         {
13074                             // ldflda of byref is safe if done on a gc object or on  a
13075                             // safe byref
13076                             tiRetVal.SetIsPermanentHomeByRef();
13077                         }
13078                     }
13079                 }
13080                 else
13081                 {
13082                     // tiVerificationNeeded is false.
13083                     // Raise InvalidProgramException if static load accesses non-static field
13084                     if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
13085                     {
13086                         BADCODE("static access on an instance field");
13087                     }
13088                 }
13089
13090                 // We are using ldfld/a on a static field. We allow it, but we still need to evaluate any side effects of obj.
13091                 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
13092                 {
13093                     if (obj->gtFlags & GTF_SIDE_EFFECT)
13094                     {
13095                         obj = gtUnusedValNode(obj);
13096                         impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13097                     }
13098                     obj = nullptr;
13099                 }
13100
13101                 /* Preserve 'small' int types */
13102                 if (lclTyp > TYP_INT)
13103                 {
13104                     lclTyp = genActualType(lclTyp);
13105                 }
13106
13107                 bool usesHelper = false;
13108
13109                 switch (fieldInfo.fieldAccessor)
13110                 {
13111                     case CORINFO_FIELD_INSTANCE:
13112 #ifdef FEATURE_READYTORUN_COMPILER
13113                     case CORINFO_FIELD_INSTANCE_WITH_BASE:
13114 #endif
13115                     {
13116                         bool nullcheckNeeded = false;
13117
13118                         obj = impCheckForNullPointer(obj);
13119
13120                         if (isLoadAddress && (obj->gtType == TYP_BYREF) && fgAddrCouldBeNull(obj))
13121                         {
13122                             nullcheckNeeded = true;
13123                         }
13124
13125                         // If the object is a struct, what we really want is
13126                         // for the field to operate on the address of the struct.
13127                         if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj))
13128                         {
13129                             assert(opcode == CEE_LDFLD && objType != nullptr);
13130
13131                             obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true);
13132                         }
13133
13134                         /* Create the data member node */
13135                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset, nullcheckNeeded);
13136
13137 #ifdef FEATURE_READYTORUN_COMPILER
13138                         if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
13139                         {
13140                             op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
13141                         }
13142 #endif
13143
13144                         op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
13145
13146                         if (fgAddrCouldBeNull(obj))
13147                         {
13148                             op1->gtFlags |= GTF_EXCEPT;
13149                         }
13150
13151                         // If gtFldObj is a BYREF then our target is a value class and
13152                         // it could point anywhere, e.g. a boxed class static int
13153                         if (obj->gtType == TYP_BYREF)
13154                         {
13155                             op1->gtFlags |= GTF_IND_TGTANYWHERE;
13156                         }
13157
13158                         DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13159                         if (StructHasOverlappingFields(typeFlags))
13160                         {
13161                             op1->gtField.gtFldMayOverlap = true;
13162                         }
13163
13164                         // wrap it in an address-of operator if necessary
13165                         if (isLoadAddress)
13166                         {
13167                             op1 = gtNewOperNode(GT_ADDR,
13168                                                 (var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1);
13169                         }
13170                         else
13171                         {
13172                             if (compIsForInlining() &&
13173                                 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, obj,
13174                                                                                    impInlineInfo->inlArgInfo))
13175                             {
13176                                 impInlineInfo->thisDereferencedFirst = true;
13177                             }
13178                         }
13179                     }
13180                     break;
13181
13182                     case CORINFO_FIELD_STATIC_TLS:
13183 #ifdef _TARGET_X86_
13184                         // Legacy TLS access is implemented as intrinsic on x86 only
13185
13186                         /* Create the data member node */
13187                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
13188                         op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13189
13190                         if (isLoadAddress)
13191                         {
13192                             op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1);
13193                         }
13194                         break;
13195 #else
13196                         fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13197
13198                         __fallthrough;
13199 #endif
13200
13201                     case CORINFO_FIELD_STATIC_ADDR_HELPER:
13202                     case CORINFO_FIELD_INSTANCE_HELPER:
13203                     case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13204                         op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13205                                                clsHnd, nullptr);
13206                         usesHelper = true;
13207                         break;
13208
13209                     case CORINFO_FIELD_STATIC_ADDRESS:
13210                         // Replace a static read-only field with a constant if possible
13211                         if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) &&
13212                             !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) &&
13213                             (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp)))
13214                         {
13215                             CorInfoInitClassResult initClassResult =
13216                                 info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd,
13217                                                             impTokenLookupContextHandle);
13218
13219                             if (initClassResult & CORINFO_INITCLASS_INITIALIZED)
13220                             {
13221                                 void** pFldAddr = nullptr;
13222                                 void*  fldAddr =
13223                                     info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr);
13224
13225                                 // We should always be able to access this static's address directly
13226                                 assert(pFldAddr == nullptr);
13227
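                                          // The class is already initialized and the field is final, so its value
                                          // cannot change; read it now and bake it into the jitted code as a constant.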
13228                                 op1 = impImportStaticReadOnlyField(fldAddr, lclTyp);
13229                                 goto FIELD_DONE;
13230                             }
13231                         }
13232
13233                         __fallthrough;
13234
13235                     case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13236                     case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13237                     case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13238 #if COR_JIT_EE_VERSION > 460
13239                     case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13240 #endif
13241                         op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13242                                                          lclTyp);
13243                         break;
13244
13245                     case CORINFO_FIELD_INTRINSIC_ZERO:
13246                     {
13247                         assert(aflags & CORINFO_ACCESS_GET);
13248                         op1 = gtNewIconNode(0, lclTyp);
13249                         goto FIELD_DONE;
13250                     }
13251                     break;
13252
13253                     case CORINFO_FIELD_INTRINSIC_EMPTY_STRING:
13254                     {
13255                         assert(aflags & CORINFO_ACCESS_GET);
13256
13257                         LPVOID         pValue;
13258                         InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue);
13259                         op1                = gtNewStringLiteralNode(iat, pValue);
13260                         goto FIELD_DONE;
13261                     }
13262                     break;
13263
13264                     default:
13265                         assert(!"Unexpected fieldAccessor");
13266                 }
13267
13268                 if (!isLoadAddress)
13269                 {
13270
13271                     if (prefixFlags & PREFIX_VOLATILE)
13272                     {
13273                         op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
13274                         op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13275
13276                         if (!usesHelper)
13277                         {
13278                             assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13279                                    (op1->OperGet() == GT_OBJ));
13280                             op1->gtFlags |= GTF_IND_VOLATILE;
13281                         }
13282                     }
13283
13284                     if (prefixFlags & PREFIX_UNALIGNED)
13285                     {
13286                         if (!usesHelper)
13287                         {
13288                             assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13289                                    (op1->OperGet() == GT_OBJ));
13290                             op1->gtFlags |= GTF_IND_UNALIGNED;
13291                         }
13292                     }
13293                 }
13294
13295                 /* Check if the class needs explicit initialization */
13296
13297                 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13298                 {
13299                     GenTreePtr helperNode = impInitClass(&resolvedToken);
13300                     if (compDonotInline())
13301                     {
13302                         return;
13303                     }
13304                     if (helperNode != nullptr)
13305                     {
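                              // Prepend the class-initialization helper: GT_COMMA evaluates the helper first and the
                              // field access second, so the cctor is guaranteed to have run before the field is read.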
13306                         op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13307                     }
13308                 }
13309
13310             FIELD_DONE:
13311                 impPushOnStack(op1, tiRetVal);
13312             }
13313             break;
13314
13315             case CEE_STFLD:
13316             case CEE_STSFLD:
13317             {
13318
13319                 BOOL isStoreStatic = (opcode == CEE_STSFLD);
13320
13321                 CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type)
13322
13323                 /* Get the CP_Fieldref index */
13324
13325                 assertImp(sz == sizeof(unsigned));
13326
13327                 _impResolveToken(CORINFO_TOKENKIND_Field);
13328
13329                 JITDUMP(" %08X", resolvedToken.token);
13330
13331                 int        aflags = CORINFO_ACCESS_SET;
13332                 GenTreePtr obj    = nullptr;
13333                 typeInfo*  tiObj  = nullptr;
13334                 typeInfo   tiVal;
13335
13336                 /* Pull the value from the stack */
13337                 op2    = impPopStack(tiVal);
13338                 clsHnd = tiVal.GetClassHandle();
13339
13340                 if (opcode == CEE_STFLD)
13341                 {
13342                     tiObj = &impStackTop().seTypeInfo;
13343                     obj   = impPopStack().val;
13344
13345                     if (impIsThis(obj))
13346                     {
13347                         aflags |= CORINFO_ACCESS_THIS;
13348
13349                         // An optimization for Contextful classes:
13350                         // we unwrap the proxy when we have a 'this reference'
13351
13352                         if (info.compUnwrapContextful)
13353                         {
13354                             aflags |= CORINFO_ACCESS_UNWRAP;
13355                         }
13356                     }
13357                 }
13358
13359                 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
13360
13361                 // Figure out the type of the member.  We always call canAccessField, so you always need this
13362                 // handle
13363                 CorInfoType ciType = fieldInfo.fieldType;
13364                 fieldClsHnd        = fieldInfo.structType;
13365
13366                 lclTyp = JITtype2varType(ciType);
13367
13368                 if (compIsForInlining())
13369                 {
13370                     /* Is this a 'special' (COM) field? a TLS ref static field? a field stored in the GC heap?
13371                      * or a per-instantiation static? */
13372
13373                     switch (fieldInfo.fieldAccessor)
13374                     {
13375                         case CORINFO_FIELD_INSTANCE_HELPER:
13376                         case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13377                         case CORINFO_FIELD_STATIC_ADDR_HELPER:
13378                         case CORINFO_FIELD_STATIC_TLS:
13379
13380                             compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER);
13381                             return;
13382
13383                         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13384 #if COR_JIT_EE_VERSION > 460
13385                         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13386 #endif
13387
13388                             /* We may be able to inline the field accessors in specific instantiations of generic
13389                              * methods */
13390                             compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER);
13391                             return;
13392
13393                         default:
13394                             break;
13395                     }
13396                 }
13397
13398                 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13399
13400                 if (tiVerificationNeeded)
13401                 {
13402                     verVerifyField(&resolvedToken, fieldInfo, tiObj, TRUE);
13403                     typeInfo fieldType = verMakeTypeInfo(ciType, fieldClsHnd);
13404                     Verify(tiCompatibleWith(tiVal, fieldType.NormaliseForStack(), true), "type mismatch");
13405                 }
13406                 else
13407                 {
13408                     // tiVerificationNeeded is false.
13409                     // Raise InvalidProgramException if a static store accesses a non-static field
13410                     if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
13411                     {
13412                         BADCODE("static access on an instance field");
13413                     }
13414                 }
13415
13416                 // We are using stfld on a static field.
13417                 // We allow it, but need to eval any side-effects for obj
13418                 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
13419                 {
13420                     if (obj->gtFlags & GTF_SIDE_EFFECT)
13421                     {
13422                         obj = gtUnusedValNode(obj);
13423                         impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13424                     }
13425                     obj = nullptr;
13426                 }
13427
13428                 /* Preserve 'small' int types */
13429                 if (lclTyp > TYP_INT)
13430                 {
13431                     lclTyp = genActualType(lclTyp);
13432                 }
13433
13434                 switch (fieldInfo.fieldAccessor)
13435                 {
13436                     case CORINFO_FIELD_INSTANCE:
13437 #ifdef FEATURE_READYTORUN_COMPILER
13438                     case CORINFO_FIELD_INSTANCE_WITH_BASE:
13439 #endif
13440                     {
13441                         obj = impCheckForNullPointer(obj);
13442
13443                         /* Create the data member node */
13444                         op1             = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
13445                         DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13446                         if (StructHasOverlappingFields(typeFlags))
13447                         {
13448                             op1->gtField.gtFldMayOverlap = true;
13449                         }
13450
13451 #ifdef FEATURE_READYTORUN_COMPILER
13452                         if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
13453                         {
13454                             op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
13455                         }
13456 #endif
13457
13458                         op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
13459
13460                         if (fgAddrCouldBeNull(obj))
13461                         {
13462                             op1->gtFlags |= GTF_EXCEPT;
13463                         }
13464
13465                         // If gtFldObj is a BYREF then our target is a value class and
13466                         // it could point anywhere, for example a boxed class static int
13467                         if (obj->gtType == TYP_BYREF)
13468                         {
13469                             op1->gtFlags |= GTF_IND_TGTANYWHERE;
13470                         }
13471
13472                         if (compIsForInlining() &&
13473                             impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, obj, impInlineInfo->inlArgInfo))
13474                         {
13475                             impInlineInfo->thisDereferencedFirst = true;
13476                         }
13477                     }
13478                     break;
13479
13480                     case CORINFO_FIELD_STATIC_TLS:
13481 #ifdef _TARGET_X86_
13482                         // Legacy TLS access is implemented as an intrinsic on x86 only
13483
13484                         /* Create the data member node */
13485                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, nullptr, fieldInfo.offset);
13486                         op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13487
13488                         break;
13489 #else
13490                         fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13491
13492                         __fallthrough;
13493 #endif
13494
13495                     case CORINFO_FIELD_STATIC_ADDR_HELPER:
13496                     case CORINFO_FIELD_INSTANCE_HELPER:
13497                     case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13498                         op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13499                                                clsHnd, op2);
13500                         goto SPILL_APPEND;
13501
13502                     case CORINFO_FIELD_STATIC_ADDRESS:
13503                     case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13504                     case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13505                     case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13506 #if COR_JIT_EE_VERSION > 460
13507                     case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13508 #endif
13509                         op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13510                                                          lclTyp);
13511                         break;
13512
13513                     default:
13514                         assert(!"Unexpected fieldAccessor");
13515                 }
13516
13517                 // Create the member assignment, unless we have a struct.
13518                 // TODO-1stClassStructs: This could be limited to TYP_STRUCT, to avoid extra copies.
13519                 bool deferStructAssign = varTypeIsStruct(lclTyp);
13520
13521                 if (!deferStructAssign)
13522                 {
13523                     if (prefixFlags & PREFIX_VOLATILE)
13524                     {
13525                         assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
13526                         op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
13527                         op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13528                         op1->gtFlags |= GTF_IND_VOLATILE;
13529                     }
13530                     if (prefixFlags & PREFIX_UNALIGNED)
13531                     {
13532                         assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
13533                         op1->gtFlags |= GTF_IND_UNALIGNED;
13534                     }
13535
13536                     /* V4.0 allows assignment of i4 constant values to i8 type vars when the IL verifier is
13537                        bypassed (full trust apps).
13538                        The reason this works is that the JIT stores an i4 constant in the GenTree union during
13539                        importation and reads from the union as if it were a long during code generation. Though
13540                        this can potentially read garbage, one can get lucky and have it work correctly.
13541
13542                        This code pattern is generated by the Dev10 MC++ compiler when storing to fields in code
13543                        compiled with the /O2 switch (the default when compiling retail configs in Dev10), and a
13544                        customer app has taken a dependency on it.
13545                        To be backward compatible, we will explicitly add an upward cast here so that it always
13546                        works correctly.
13547
13548                        Note that this is limited to x86 alone as there is no back compat to be addressed for the
13549                        Arm JIT for V4.0.
13550                     */
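                          // Illustrative (hypothetical) IL for the pattern described above; only the ldc.i4 / stfld
                          // shape matters, the class and field names are made up:
                          //     ldarg.0
                          //     ldc.i4   0x100                          // an i4 constant...
                          //     stfld    int64 SomeClass::someI8Field   // ...stored to an i8 field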
13554                     CLANG_FORMAT_COMMENT_ANCHOR;
13555
13556 #ifdef _TARGET_X86_
13557                     if (op1->TypeGet() != op2->TypeGet() && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) &&
13558                         varTypeIsLong(op1->TypeGet()))
13559                     {
13560                         op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
13561                     }
13562 #endif
13563
13564 #ifdef _TARGET_64BIT_
13565                     // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
13566                     if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
13567                     {
13568                         op2->gtType = TYP_I_IMPL;
13569                     }
13570                     else
13571                     {
13572                         // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
13573                         //
13574                         if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
13575                         {
13576                             op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
13577                         }
13578                         // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
13579                         //
13580                         if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
13581                         {
13582                             op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
13583                         }
13584                     }
13585 #endif
13586
13587 #if !FEATURE_X87_DOUBLES
13588                     // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
13589                     // We insert a cast to the dest 'op1' type
13590                     //
13591                     if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
13592                         varTypeIsFloating(op2->gtType))
13593                     {
13594                         op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
13595                     }
13596 #endif // !FEATURE_X87_DOUBLES
13597
13598                     op1 = gtNewAssignNode(op1, op2);
13599
13600                     /* Mark the expression as containing an assignment */
13601
13602                     op1->gtFlags |= GTF_ASG;
13603                 }
13604
13605                 /* Check if the class needs explicit initialization */
13606
13607                 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13608                 {
13609                     GenTreePtr helperNode = impInitClass(&resolvedToken);
13610                     if (compDonotInline())
13611                     {
13612                         return;
13613                     }
13614                     if (helperNode != nullptr)
13615                     {
13616                         op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13617                     }
13618                 }
13619
13620                 /* stfld can interfere with value classes (consider the sequence
13621                    ldloc, ldloca, ..., stfld, stloc).  We will be conservative and
13622                    spill all value class references from the stack. */
13623
13624                 if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL)))
13625                 {
13626                     assert(tiObj);
13627
13628                     if (impIsValueType(tiObj))
13629                     {
13630                         impSpillEvalStack();
13631                     }
13632                     else
13633                     {
13634                         impSpillValueClasses();
13635                     }
13636                 }
13637
13638                 /* Spill any refs to the same member from the stack */
13639
13640                 impSpillLclRefs((ssize_t)resolvedToken.hField);
13641
13642                 /* stsfld also interferes with indirect accesses (for aliased
13643                    statics) and calls. But don't need to spill other statics
13644                    as we have explicitly spilled this particular static field. */
13645
13646                 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD"));
13647
13648                 if (deferStructAssign)
13649                 {
13650                     op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL);
13651                 }
13652             }
13653                 goto APPEND;
13654
13655             case CEE_NEWARR:
13656             {
13657
13658                 /* Get the class type index operand */
13659
13660                 _impResolveToken(CORINFO_TOKENKIND_Newarr);
13661
13662                 JITDUMP(" %08X", resolvedToken.token);
13663
13664                 if (!opts.IsReadyToRun())
13665                 {
13666                     // Need to restore array classes before creating array objects on the heap
13667                     op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
13668                     if (op1 == nullptr)
13669                     { // compDonotInline()
13670                         return;
13671                     }
13672                 }
13673
13674                 if (tiVerificationNeeded)
13675                 {
13676                     // As per ECMA, the 'numElems' specified can be either int32 or native int.
13677                     Verify(impStackTop().seTypeInfo.IsIntOrNativeIntType(), "bad bound");
13678
13679                     CORINFO_CLASS_HANDLE elemTypeHnd;
13680                     info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
13681                     Verify(elemTypeHnd == nullptr ||
13682                                !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
13683                            "array of byref-like type");
13684                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13685                 }
13686
13687                 accessAllowedResult =
13688                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13689                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13690
13691                 /* Form the arglist: array class handle, size */
13692                 op2 = impPopStack().val;
13693                 assertImp(genActualTypeIsIntOrI(op2->gtType));
13694
13695 #ifdef FEATURE_READYTORUN_COMPILER
13696                 if (opts.IsReadyToRun())
13697                 {
13698                     op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF,
13699                                                     gtNewArgList(op2));
13700                     usingReadyToRunHelper = (op1 != nullptr);
13701
13702                     if (!usingReadyToRunHelper)
13703                     {
13704                         // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13705                         // and the newarr call with a single call to a dynamic R2R cell that will:
13706                         //      1) Load the context
13707                         //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
13708                         //      3) Allocate the new array
13709                         // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13710
13711                         // Need to restore array classes before creating array objects on the heap
13712                         op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
13713                         if (op1 == nullptr)
13714                         { // compDonotInline()
13715                             return;
13716                         }
13717                     }
13718                 }
13719
13720                 if (!usingReadyToRunHelper)
13721 #endif
13722                 {
13723                     args = gtNewArgList(op1, op2);
13724
13725                     /* Create a call to 'new' */
13726
13727                     // Note that this only works for shared generic code because the same helper is used for all
13728                     // reference array types
13729                     op1 =
13730                         gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, 0, args);
13731                 }
13732
13733                 op1->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass;
13734
13735                 /* Remember that this basic block contains a 'new' of a single-dimensional array */
13736
13737                 block->bbFlags |= BBF_HAS_NEWARRAY;
13738                 optMethodFlags |= OMF_HAS_NEWARRAY;
13739
13740                 /* Push the result of the call on the stack */
13741
13742                 impPushOnStack(op1, tiRetVal);
13743
13744                 callTyp = TYP_REF;
13745             }
13746             break;
13747
13748             case CEE_LOCALLOC:
13749                 assert(!compIsForInlining());
13750
13751                 if (tiVerificationNeeded)
13752                 {
13753                     Verify(false, "bad opcode");
13754                 }
13755
13756                 // We don't allow locallocs inside handlers
13757                 if (block->hasHndIndex())
13758                 {
13759                     BADCODE("Localloc can't be inside handler");
13760                 }
13761
13762                 /* The FP register may not be back to the original value at the end
13763                    of the method, even if the frame size is 0, as localloc may
13764                    have modified it. So we will HAVE to reset it */
13765
13766                 compLocallocUsed = true;
13767                 setNeedsGSSecurityCookie();
13768
13769                 // Get the size to allocate
13770
13771                 op2 = impPopStack().val;
13772                 assertImp(genActualTypeIsIntOrI(op2->gtType));
13773
13774                 if (verCurrentState.esStackDepth != 0)
13775                 {
13776                     BADCODE("Localloc can only be used when the stack is empty");
13777                 }
13778
13779                 op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2);
13780
13781                 // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd.
13782
13783                 op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE);
13784
13785                 impPushOnStack(op1, tiRetVal);
13786                 break;
13787
13788             case CEE_ISINST:
13789
13790                 /* Get the type token */
13791                 assertImp(sz == sizeof(unsigned));
13792
13793                 _impResolveToken(CORINFO_TOKENKIND_Casting);
13794
13795                 JITDUMP(" %08X", resolvedToken.token);
13796
13797                 if (!opts.IsReadyToRun())
13798                 {
13799                     op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
13800                     if (op2 == nullptr)
13801                     { // compDonotInline()
13802                         return;
13803                     }
13804                 }
13805
13806                 if (tiVerificationNeeded)
13807                 {
13808                     Verify(impStackTop().seTypeInfo.IsObjRef(), "obj reference needed");
13809                     // Even if this is a value class, we know it is boxed.
13810                     tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
13811                 }
13812                 accessAllowedResult =
13813                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13814                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13815
13816                 op1 = impPopStack().val;
13817
13818 #ifdef FEATURE_READYTORUN_COMPILER
13819                 if (opts.IsReadyToRun())
13820                 {
13821                     GenTreePtr opLookup =
13822                         impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF,
13823                                                   gtNewArgList(op1));
13824                     usingReadyToRunHelper = (opLookup != nullptr);
13825                     op1                   = (usingReadyToRunHelper ? opLookup : op1);
13826
13827                     if (!usingReadyToRunHelper)
13828                     {
13829                         // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13830                         // and the isinstanceof_any call with a single call to a dynamic R2R cell that will:
13831                         //      1) Load the context
13832                         //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
13833                         //      3) Perform the 'is instance' check on the input object
13834                         // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13835
13836                         op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
13837                         if (op2 == nullptr)
13838                         { // compDonotInline()
13839                             return;
13840                         }
13841                     }
13842                 }
13843
13844                 if (!usingReadyToRunHelper)
13845 #endif
13846                 {
13847                     op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false);
13848                 }
13849                 if (compDonotInline())
13850                 {
13851                     return;
13852                 }
13853
13854                 impPushOnStack(op1, tiRetVal);
13855
13856                 break;
13857
13858             case CEE_REFANYVAL:
13859
13860                 // get the class handle and make a ICON node out of it
13861                 // Get the class handle and make an ICON node out of it
13862                 _impResolveToken(CORINFO_TOKENKIND_Class);
13863
13864                 JITDUMP(" %08X", resolvedToken.token);
13865
13866                 op2 = impTokenToHandle(&resolvedToken);
13867                 if (op2 == nullptr)
13868                 { // compDonotInline()
13869                     return;
13870                 }
13871
13872                 if (tiVerificationNeeded)
13873                 {
13874                     Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
13875                            "need refany");
13876                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass).MakeByRef();
13877                 }
13878
13879                 op1 = impPopStack().val;
13880                 // Make certain it is normalized
13881                 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
13882
13883                 // Call helper GETREFANY(classHandle, op1);
13884                 args = gtNewArgList(op2, op1);
13885                 op1  = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, 0, args);
13886
13887                 impPushOnStack(op1, tiRetVal);
13888                 break;
13889
13890             case CEE_REFANYTYPE:
13891
13892                 if (tiVerificationNeeded)
13893                 {
13894                     Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
13895                            "need refany");
13896                 }
13897
13898                 op1 = impPopStack().val;
13899
13900                 // Make certain it is normalized
13901                 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
13902
13903                 if (op1->gtOper == GT_OBJ)
13904                 {
13905                     // Get the address of the refany
13906                     op1 = op1->gtOp.gtOp1;
13907
13908                     // Fetch the type from the correct slot
13909                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
13910                                         gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL));
13911                     op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1);
13912                 }
13913                 else
13914                 {
13915                     assertImp(op1->gtOper == GT_MKREFANY);
13916
13917                     // The pointer may have side-effects
13918                     if (op1->gtOp.gtOp1->gtFlags & GTF_SIDE_EFFECT)
13919                     {
13920                         impAppendTree(op1->gtOp.gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13921 #ifdef DEBUG
13922                         impNoteLastILoffs();
13923 #endif
13924                     }
13925
13926                     // We already have the class handle
13927                     op1 = op1->gtOp.gtOp2;
13928                 }
13929
13930                 // convert native TypeHandle to RuntimeTypeHandle
13931                 {
13932                     GenTreeArgList* helperArgs = gtNewArgList(op1);
13933
13934                     op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL, TYP_STRUCT, GTF_EXCEPT,
13935                                               helperArgs);
13936
13937                     // The handle struct is returned in a register
13938                     op1->gtCall.gtReturnType = TYP_REF;
13939
13940                     tiRetVal = typeInfo(TI_STRUCT, impGetTypeHandleClass());
13941                 }
13942
13943                 impPushOnStack(op1, tiRetVal);
13944                 break;
13945
13946             case CEE_LDTOKEN:
13947             {
13948                 /* Get the Class index */
13949                 assertImp(sz == sizeof(unsigned));
13950                 lastLoadToken = codeAddr;
13951                 _impResolveToken(CORINFO_TOKENKIND_Ldtoken);
13952
13953                 tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken);
13954
13955                 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
13956                 if (op1 == nullptr)
13957                 { // compDonotInline()
13958                     return;
13959                 }
13960
13961                 helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE;
13962                 assert(resolvedToken.hClass != nullptr);
13963
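                      // Pick the conversion helper based on what the token resolved to: a method handle, a field
                      // handle, or (the default above) a type handle.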
13964                 if (resolvedToken.hMethod != nullptr)
13965                 {
13966                     helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD;
13967                 }
13968                 else if (resolvedToken.hField != nullptr)
13969                 {
13970                     helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD;
13971                 }
13972
13973                 GenTreeArgList* helperArgs = gtNewArgList(op1);
13974
13975                 op1 = gtNewHelperCallNode(helper, TYP_STRUCT, GTF_EXCEPT, helperArgs);
13976
13977                 // The handle struct is returned in a register
13978                 op1->gtCall.gtReturnType = TYP_REF;
13979
13980                 tiRetVal = verMakeTypeInfo(tokenType);
13981                 impPushOnStack(op1, tiRetVal);
13982             }
13983             break;
13984
13985             case CEE_UNBOX:
13986             case CEE_UNBOX_ANY:
13987             {
13988                 /* Get the Class index */
13989                 assertImp(sz == sizeof(unsigned));
13990
13991                 _impResolveToken(CORINFO_TOKENKIND_Class);
13992
13993                 JITDUMP(" %08X", resolvedToken.token);
13994
13995                 BOOL runtimeLookup;
13996                 op2 = impTokenToHandle(&resolvedToken, &runtimeLookup);
13997                 if (op2 == nullptr)
13998                 { // compDonotInline()
13999                     return;
14000                 }
14001
14002                 // Run this always so we can get access exceptions even with SkipVerification.
14003                 accessAllowedResult =
14004                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14005                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14006
14007                 if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass))
14008                 {
14009                     if (tiVerificationNeeded)
14010                     {
14011                         typeInfo tiUnbox = impStackTop().seTypeInfo;
14012                         Verify(tiUnbox.IsObjRef(), "bad unbox.any arg");
14013                         tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14014                         tiRetVal.NormaliseForStack();
14015                     }
14016                     op1 = impPopStack().val;
14017                     goto CASTCLASS;
14018                 }
14019
14020                 /* Pop the object and create the unbox helper call */
14021                 /* You might think that for UNBOX_ANY we need to push a different */
14022                 /* (non-byref) type, but here we're making the tiRetVal that is used */
14023                 /* for the intermediate pointer which we then transfer onto the OBJ */
14024                 /* instruction.  OBJ then creates the appropriate tiRetVal. */
14025                 if (tiVerificationNeeded)
14026                 {
14027                     typeInfo tiUnbox = impStackTop().seTypeInfo;
14028                     Verify(tiUnbox.IsObjRef(), "Bad unbox arg");
14029
14030                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14031                     Verify(tiRetVal.IsValueClass(), "not value class");
14032                     tiRetVal.MakeByRef();
14033
14034                     // We always come from an objref, so this is safe byref
14035                     tiRetVal.SetIsPermanentHomeByRef();
14036                     tiRetVal.SetIsReadonlyByRef();
14037                 }
14038
14039                 op1 = impPopStack().val;
14040                 assertImp(op1->gtType == TYP_REF);
14041
14042                 helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass);
14043                 assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE);
14044
14045                 // We only want to expand inline the normal UNBOX helper;
14046                 expandInline = (helper == CORINFO_HELP_UNBOX);
14047
14048                 if (expandInline)
14049                 {
14050                     if (compCurBB->isRunRarely())
14051                     {
14052                         expandInline = false; // not worth the code expansion
14053                     }
14054                 }
14055
14056                 if (expandInline)
14057                 {
14058                     // we are doing normal unboxing
14059                     // inline the common case of the unbox helper
14060                     // UNBOX(exp) morphs into
14061                     // clone = pop(exp);
14062                     // ((*clone == typeToken) ? nop : helper(clone, typeToken));
14063                     // push(clone + sizeof(void*))
14064                     //
14065                     GenTreePtr cloneOperand;
14066                     op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
14067                                        nullptr DEBUGARG("inline UNBOX clone1"));
14068                     op1 = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
14069
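                          // op1 now loads the object's method table pointer; comparing it against the exact type
                          // handle (op2) below lets the common case skip the call to the unbox helper.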
14070                     GenTreePtr condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2);
14071
14072                     op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
14073                                        nullptr DEBUGARG("inline UNBOX clone2"));
14074                     op2 = impTokenToHandle(&resolvedToken);
14075                     if (op2 == nullptr)
14076                     { // compDonotInline()
14077                         return;
14078                     }
14079                     args = gtNewArgList(op2, op1);
14080                     op1  = gtNewHelperCallNode(helper, TYP_VOID, 0, args);
14081
14082                     op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1);
14083                     op1 = gtNewQmarkNode(TYP_VOID, condBox, op1);
14084                     condBox->gtFlags |= GTF_RELOP_QMARK;
14085
14086                     // QMARK nodes cannot reside on the evaluation stack. Because there
14087                     // may be other trees on the evaluation stack that side-effect the
14088                     // sources of the UNBOX operation we must spill the stack.
14089
14090                     impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
14091
14092                     // Create the address-expression to reference past the object header
14093                     // to the beginning of the value-type. Today this means adjusting
14094                     // past the base of the object's vtable field, which is pointer sized.
14095
14096                     op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
14097                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2);
14098                 }
14099                 else
14100                 {
14101                     unsigned callFlags = (helper == CORINFO_HELP_UNBOX) ? 0 : GTF_EXCEPT;
14102
14103                     // Don't optimize, just call the helper and be done with it
14104                     args = gtNewArgList(op2, op1);
14105                     op1  = gtNewHelperCallNode(helper,
14106                                               (var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT),
14107                                               callFlags, args);
14108                 }
14109
14110                 assert(helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF || // Unbox helper returns a byref.
14111                        helper == CORINFO_HELP_UNBOX_NULLABLE &&
14112                            varTypeIsStruct(op1) // UnboxNullable helper returns a struct.
14113                        );
14114
14115                 /*
14116                   ----------------------------------------------------------------------
14117                   | \ helper  |                         |                              |
14118                   |   \       |                         |                              |
14119                   |     \     | CORINFO_HELP_UNBOX      | CORINFO_HELP_UNBOX_NULLABLE  |
14120                   |       \   | (which returns a BYREF) | (which returns a STRUCT)     |
14121                   | opcode  \ |                         |                              |
14122                   |---------------------------------------------------------------------
14123                   | UNBOX     | push the BYREF          | spill the STRUCT to a local, |
14124                   |           |                         | push the BYREF to this local |
14125                   |---------------------------------------------------------------------
14126                   | UNBOX_ANY | push a GT_OBJ of        | push the STRUCT              |
14127                   |           | the BYREF               | For Linux when the           |
14128                   |           |                         |  struct is returned in two   |
14129                   |           |                         |  registers create a temp     |
14130                   |           |                         |  which address is passed to  |
14131                   |           |                         |  the unbox_nullable helper.  |
14132                   |---------------------------------------------------------------------
14133                 */
14134
14135                 if (opcode == CEE_UNBOX)
14136                 {
14137                     if (helper == CORINFO_HELP_UNBOX_NULLABLE)
14138                     {
14139                         // Unbox nullable helper returns a struct type.
14140                         // We need to spill it to a temp so that we can take the address of it.
14141                         // Here we need the unsafe value cls check, since the address of the struct is taken to
14142                         // be used further along and could potentially be exploited.
14143
14144                         unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable"));
14145                         lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14146
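                              // Assign the returned struct to the temp, then yield the temp's address via a COMMA so
                              // the stack sees the TYP_BYREF that CEE_UNBOX is expected to produce.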
14147                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14148                         op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14149                         assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14150
14151                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14152                         op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14153                         op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14154                     }
14155
14156                     assert(op1->gtType == TYP_BYREF);
14157                     assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14158                 }
14159                 else
14160                 {
14161                     assert(opcode == CEE_UNBOX_ANY);
14162
14163                     if (helper == CORINFO_HELP_UNBOX)
14164                     {
14165                         // Normal unbox helper returns a TYP_BYREF.
14166                         impPushOnStack(op1, tiRetVal);
14167                         oper = GT_OBJ;
14168                         goto OBJ;
14169                     }
14170
14171                     assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!");
14172
14173 #if FEATURE_MULTIREG_RET
14174
14175                     if (varTypeIsStruct(op1) && IsMultiRegReturnedType(resolvedToken.hClass))
14176                     {
14177                         // Unbox nullable helper returns a TYP_STRUCT.
14178                         // For the multi-reg case we need to spill it to a temp so that
14179                         // we can pass the address to the unbox_nullable jit helper.
14180
14181                         unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable"));
14182                         lvaTable[tmp].lvIsMultiRegArg = true;
14183                         lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14184
14185                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14186                         op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14187                         assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14188
14189                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14190                         op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14191                         op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14192
14193                         // In this case the return value of the unbox helper is TYP_BYREF.
14194                         // Make sure the right type is placed on the operand type stack.
14195                         impPushOnStack(op1, tiRetVal);
14196
14197                         // Load the struct.
14198                         oper = GT_OBJ;
14199
14200                         assert(op1->gtType == TYP_BYREF);
14201                         assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14202
14203                         goto OBJ;
14204                     }
14205                     else
14206
14207 #endif // FEATURE_MULTIREG_RET
14208
14209                     {
14210                         // If the struct is not passable in registers, we have it materialized in the RetBuf.
14211                         assert(op1->gtType == TYP_STRUCT);
14212                         tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14213                         assert(tiRetVal.IsValueClass());
14214                     }
14215                 }
14216
14217                 impPushOnStack(op1, tiRetVal);
14218             }
14219             break;
14220
14221             case CEE_BOX:
14222             {
14223                 /* Get the Class index */
14224                 assertImp(sz == sizeof(unsigned));
14225
14226                 _impResolveToken(CORINFO_TOKENKIND_Box);
14227
14228                 JITDUMP(" %08X", resolvedToken.token);
14229
14230                 if (tiVerificationNeeded)
14231                 {
14232                     typeInfo tiActual = impStackTop().seTypeInfo;
14233                     typeInfo tiBox    = verMakeTypeInfo(resolvedToken.hClass);
14234
14235                     Verify(verIsBoxable(tiBox), "boxable type expected");
14236
14237                     // check the class constraints of the boxed type in case we are boxing an uninitialized value
14238                     Verify(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
14239                            "boxed type has unsatisfied class constraints");
14240
14241                     Verify(tiCompatibleWith(tiActual, tiBox.NormaliseForStack(), true), "type mismatch");
14242
14243                     // Observation: the following code introduces a boxed value class on the stack, but,
14244                     // according to the ECMA spec, one would simply expect: tiRetVal =
14245                     // typeInfo(TI_REF,impGetObjectClass());
14246
14247                     // Push the result back on the stack:
14248                     // even if clsHnd is a value class we want the TI_REF.
14249                     // We call back to the EE to find out what type we should push (for Nullable<T> we push T).
14250                     tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(resolvedToken.hClass));
14251                 }
14252
14253                 accessAllowedResult =
14254                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14255                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14256
14257                 // Note BOX can be used on things that are not value classes, in which
14258                 // case we get a NOP.  However the verifier's view of the type on the
14259                 // stack changes (in generic code a 'T' becomes a 'boxed T')
14260                 if (!eeIsValueClass(resolvedToken.hClass))
14261                 {
14262                     verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal;
14263                     break;
14264                 }
14265
14266                 // Look ahead for unbox.any
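                      // A 'box T' immediately followed by 'unbox.any T' on the same (non-shared-instantiation) type
                      // leaves the value unchanged, so the pair can be elided: we skip the unbox.any token below and
                      // never import the box itself.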
14267                 if (codeAddr + (sz + 1 + sizeof(mdToken)) <= codeEndp && codeAddr[sz] == CEE_UNBOX_ANY)
14268                 {
14269                     DWORD classAttribs = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
14270                     if (!(classAttribs & CORINFO_FLG_SHAREDINST))
14271                     {
14272                         CORINFO_RESOLVED_TOKEN unboxResolvedToken;
14273
14274                         impResolveToken(codeAddr + (sz + 1), &unboxResolvedToken, CORINFO_TOKENKIND_Class);
14275
14276                         if (unboxResolvedToken.hClass == resolvedToken.hClass)
14277                         {
14278                             // Skip the next unbox.any instruction
14279                             sz += sizeof(mdToken) + 1;
14280                             break;
14281                         }
14282                     }
14283                 }
14284
14285                 impImportAndPushBox(&resolvedToken);
14286                 if (compDonotInline())
14287                 {
14288                     return;
14289                 }
14290             }
14291             break;
14292
14293             case CEE_SIZEOF:
14294
14295                 /* Get the Class index */
14296                 assertImp(sz == sizeof(unsigned));
14297
14298                 _impResolveToken(CORINFO_TOKENKIND_Class);
14299
14300                 JITDUMP(" %08X", resolvedToken.token);
14301
14302                 if (tiVerificationNeeded)
14303                 {
14304                     tiRetVal = typeInfo(TI_INT);
14305                 }
14306
14307                 op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass));
14308                 impPushOnStack(op1, tiRetVal);
14309                 break;
14310
14311             case CEE_CASTCLASS:
14312
14313                 /* Get the Class index */
14314
14315                 assertImp(sz == sizeof(unsigned));
14316
14317                 _impResolveToken(CORINFO_TOKENKIND_Casting);
14318
14319                 JITDUMP(" %08X", resolvedToken.token);
14320
14321                 if (!opts.IsReadyToRun())
14322                 {
14323                     op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14324                     if (op2 == nullptr)
14325                     { // compDonotInline()
14326                         return;
14327                     }
14328                 }
14329
14330                 if (tiVerificationNeeded)
14331                 {
14332                     Verify(impStackTop().seTypeInfo.IsObjRef(), "object ref expected");
14333                     // box it
14334                     tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
14335                 }
14336
14337                 accessAllowedResult =
14338                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14339                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14340
14341                 op1 = impPopStack().val;
14342
14343             /* Pop the address and create the 'checked cast' helper call */
14344
14345             // At this point we expect typeRef to contain the token, op1 to contain the value being cast,
14346             // and op2 to contain code that creates the type handle corresponding to typeRef
14347             CASTCLASS:
14348
14349 #ifdef FEATURE_READYTORUN_COMPILER
14350                 if (opts.IsReadyToRun())
14351                 {
14352                     GenTreePtr opLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST,
14353                                                                     TYP_REF, gtNewArgList(op1));
14354                     usingReadyToRunHelper = (opLookup != nullptr);
14355                     op1                   = (usingReadyToRunHelper ? opLookup : op1);
14356
14357                     if (!usingReadyToRunHelper)
14358                     {
14359                         // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14360                         // and the chkcastany call with a single call to a dynamic R2R cell that will:
14361                         //      1) Load the context
14362                         //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
14363                         //      3) Check the object on the stack for the type-cast
14364                         // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14365
14366                         op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14367                         if (op2 == nullptr)
14368                         { // compDonotInline()
14369                             return;
14370                         }
14371                     }
14372                 }
14373
14374                 if (!usingReadyToRunHelper)
14375 #endif
14376                 {
14377                     op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true);
14378                 }
14379                 if (compDonotInline())
14380                 {
14381                     return;
14382                 }
14383
14384                 /* Push the result back on the stack */
14385                 impPushOnStack(op1, tiRetVal);
14386                 break;
14387
14388             case CEE_THROW:
14389
14390                 if (compIsForInlining())
14391                 {
14392                     // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
14393                     // TODO: Will this be too strict, given that we will inline many basic blocks?
14394                     // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
14395
14396                     /* Do we have just the exception on the stack ?*/
14397
14398                     if (verCurrentState.esStackDepth != 1)
14399                     {
14400                         /* if not, just don't inline the method */
14401
14402                         compInlineResult->NoteFatal(InlineObservation::CALLEE_THROW_WITH_INVALID_STACK);
14403                         return;
14404                     }
14405                 }
14406
14407                 if (tiVerificationNeeded)
14408                 {
14409                     tiRetVal = impStackTop().seTypeInfo;
14410                     Verify(tiRetVal.IsObjRef(), "object ref expected");
14411                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
14412                     {
14413                         Verify(!tiRetVal.IsThisPtr(), "throw uninitialized this");
14414                     }
14415                 }
14416
14417                 block->bbSetRunRarely(); // any block with a throw is rare
14418                 /* Pop the exception object and create the 'throw' helper call */
14419
14420                 op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, GTF_EXCEPT, gtNewArgList(impPopStack().val));
14421
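            // EVAL_APPEND: shared tail for throw/rethrow. Flush any remaining stack entries for
            // their side effects, then append the helper call in op1 as a statement.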
14422             EVAL_APPEND:
14423                 if (verCurrentState.esStackDepth > 0)
14424                 {
14425                     impEvalSideEffects();
14426                 }
14427
14428                 assert(verCurrentState.esStackDepth == 0);
14429
14430                 goto APPEND;
14431
14432             case CEE_RETHROW:
14433
14434                 assert(!compIsForInlining());
14435
14436                 if (info.compXcptnsCount == 0)
14437                 {
14438                     BADCODE("rethrow outside catch");
14439                 }
14440
14441                 if (tiVerificationNeeded)
14442                 {
14443                     Verify(block->hasHndIndex(), "rethrow outside catch");
14444                     if (block->hasHndIndex())
14445                     {
14446                         EHblkDsc* HBtab = ehGetDsc(block->getHndIndex());
14447                         Verify(!HBtab->HasFinallyOrFaultHandler(), "rethrow in finally or fault");
14448                         if (HBtab->HasFilter())
14449                         {
14450                             // we better be in the handler clause part, not the filter part
14451                             Verify(jitIsBetween(compCurBB->bbCodeOffs, HBtab->ebdHndBegOffs(), HBtab->ebdHndEndOffs()),
14452                                    "rethrow in filter");
14453                         }
14454                     }
14455                 }
14456
14457                 /* Create the 'rethrow' helper call */
14458
14459                 op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID, GTF_EXCEPT);
14460
14461                 goto EVAL_APPEND;
14462
14463             case CEE_INITOBJ:
14464
14465                 assertImp(sz == sizeof(unsigned));
14466
14467                 _impResolveToken(CORINFO_TOKENKIND_Class);
14468
14469                 JITDUMP(" %08X", resolvedToken.token);
14470
14471                 if (tiVerificationNeeded)
14472                 {
14473                     typeInfo tiTo    = impStackTop().seTypeInfo;
14474                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14475
14476                     Verify(tiTo.IsByRef(), "byref expected");
14477                     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
14478
14479                     Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
14480                            "type operand incompatible with type of address");
14481                 }
14482
14483                 size = info.compCompHnd->getClassSize(resolvedToken.hClass); // Size
14484                 op2  = gtNewIconNode(0);                                     // Value
14485                 op1  = impPopStack().val;                                    // Dest
14486                 op1  = gtNewBlockVal(op1, size);
14487                 op1  = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
14488                 goto SPILL_APPEND;
14489
14490             case CEE_INITBLK:
14491
14492                 if (tiVerificationNeeded)
14493                 {
14494                     Verify(false, "bad opcode");
14495                 }
14496
14497                 op3 = impPopStack().val; // Size
14498                 op2 = impPopStack().val; // Value
14499                 op1 = impPopStack().val; // Dest
14500
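                // With a constant size we can build a GT_BLK node of known size; otherwise
                // fall back to a GT_DYN_BLK node driven by the size operand.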
14501                 if (op3->IsCnsIntOrI())
14502                 {
14503                     size = (unsigned)op3->AsIntConCommon()->IconValue();
14504                     op1  = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
14505                 }
14506                 else
14507                 {
14508                     op1  = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
14509                     size = 0;
14510                 }
14511                 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
14512
14513                 goto SPILL_APPEND;
14514
14515             case CEE_CPBLK:
14516
14517                 if (tiVerificationNeeded)
14518                 {
14519                     Verify(false, "bad opcode");
14520                 }
14521                 op3 = impPopStack().val; // Size
14522                 op2 = impPopStack().val; // Src
14523                 op1 = impPopStack().val; // Dest
14524
14525                 if (op3->IsCnsIntOrI())
14526                 {
14527                     size = (unsigned)op3->AsIntConCommon()->IconValue();
14528                     op1  = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
14529                 }
14530                 else
14531                 {
14532                     op1  = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
14533                     size = 0;
14534                 }
14535                 if (op2->OperGet() == GT_ADDR)
14536                 {
14537                     op2 = op2->gtOp.gtOp1;
14538                 }
14539                 else
14540                 {
14541                     op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2);
14542                 }
14543
14544                 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, true);
14545                 goto SPILL_APPEND;
14546
14547             case CEE_CPOBJ:
14548
14549                 assertImp(sz == sizeof(unsigned));
14550
14551                 _impResolveToken(CORINFO_TOKENKIND_Class);
14552
14553                 JITDUMP(" %08X", resolvedToken.token);
14554
14555                 if (tiVerificationNeeded)
14556                 {
14557                     typeInfo tiFrom  = impStackTop().seTypeInfo;
14558                     typeInfo tiTo    = impStackTop(1).seTypeInfo;
14559                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14560
14561                     Verify(tiFrom.IsByRef(), "expected byref source");
14562                     Verify(tiTo.IsByRef(), "expected byref destination");
14563
14564                     Verify(tiCompatibleWith(tiFrom.DereferenceByRef(), tiInstr, false),
14565                            "type of source address incompatible with type operand");
14566                     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
14567                     Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
14568                            "type operand incompatible with type of destination address");
14569                 }
14570
14571                 if (!eeIsValueClass(resolvedToken.hClass))
14572                 {
14573                     op1 = impPopStack().val; // address to load from
14574
14575                     impBashVarAddrsToI(op1);
14576
14577                     assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
14578
14579                     op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
14580                     op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
14581
14582                     impPushOnStackNoType(op1);
14583                     opcode = CEE_STIND_REF;
14584                     lclTyp = TYP_REF;
14585                     goto STIND_POST_VERIFY;
14586                 }
14587
14588                 op2 = impPopStack().val; // Src
14589                 op1 = impPopStack().val; // Dest
14590                 op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0));
14591                 goto SPILL_APPEND;
14592
14593             case CEE_STOBJ:
14594             {
14595                 assertImp(sz == sizeof(unsigned));
14596
14597                 _impResolveToken(CORINFO_TOKENKIND_Class);
14598
14599                 JITDUMP(" %08X", resolvedToken.token);
14600
14601                 if (eeIsValueClass(resolvedToken.hClass))
14602                 {
14603                     lclTyp = TYP_STRUCT;
14604                 }
14605                 else
14606                 {
14607                     lclTyp = TYP_REF;
14608                 }
14609
14610                 if (tiVerificationNeeded)
14611                 {
14612
14613                     typeInfo tiPtr = impStackTop(1).seTypeInfo;
14614
14615                     // Make sure we have a good looking byref
14616                     Verify(tiPtr.IsByRef(), "pointer not byref");
14617                     Verify(!tiPtr.IsReadonlyByRef(), "write to readonly byref");
14618                     if (!tiPtr.IsByRef() || tiPtr.IsReadonlyByRef())
14619                     {
14620                         compUnsafeCastUsed = true;
14621                     }
14622
14623                     typeInfo ptrVal = DereferenceByRef(tiPtr);
14624                     typeInfo argVal = verMakeTypeInfo(resolvedToken.hClass);
14625
14626                     if (!tiCompatibleWith(impStackTop(0).seTypeInfo, NormaliseForStack(argVal), true))
14627                     {
14628                         Verify(false, "type of value incompatible with type operand");
14629                         compUnsafeCastUsed = true;
14630                     }
14631
14632                     if (!tiCompatibleWith(argVal, ptrVal, false))
14633                     {
14634                         Verify(false, "type operand incompatible with type of address");
14635                         compUnsafeCastUsed = true;
14636                     }
14637                 }
14638                 else
14639                 {
14640                     compUnsafeCastUsed = true;
14641                 }
14642
14643                 if (lclTyp == TYP_REF)
14644                 {
14645                     opcode = CEE_STIND_REF;
14646                     goto STIND_POST_VERIFY;
14647                 }
14648
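                // If the value class is really a primitive, store it as that primitive type
                // through the ordinary STIND path.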
14649                 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
14650                 if (impIsPrimitive(jitTyp))
14651                 {
14652                     lclTyp = JITtype2varType(jitTyp);
14653                     goto STIND_POST_VERIFY;
14654                 }
14655
14656                 op2 = impPopStack().val; // Value
14657                 op1 = impPopStack().val; // Ptr
14658
14659                 assertImp(varTypeIsStruct(op2));
14660
14661                 op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14662                 goto SPILL_APPEND;
14663             }
14664
14665             case CEE_MKREFANY:
14666
14667                 assert(!compIsForInlining());
14668
14669                 // Being lazy here. Refanys are tricky in terms of gc tracking.
14670                 // Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany.
14671
14672                 JITDUMP("disabling struct promotion because of mkrefany\n");
14673                 fgNoStructPromotion = true;
14674
14675                 oper = GT_MKREFANY;
14676                 assertImp(sz == sizeof(unsigned));
14677
14678                 _impResolveToken(CORINFO_TOKENKIND_Class);
14679
14680                 JITDUMP(" %08X", resolvedToken.token);
14681
14682                 op2 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
14683                 if (op2 == nullptr)
14684                 { // compDonotInline()
14685                     return;
14686                 }
14687
14688                 if (tiVerificationNeeded)
14689                 {
14690                     typeInfo tiPtr   = impStackTop().seTypeInfo;
14691                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14692
14693                     Verify(!verIsByRefLike(tiInstr), "mkrefany of byref-like class");
14694                     Verify(!tiPtr.IsReadonlyByRef(), "readonly byref used with mkrefany");
14695                     Verify(typeInfo::AreEquivalent(tiPtr.DereferenceByRef(), tiInstr), "type mismatch");
14696                 }
14697
14698                 accessAllowedResult =
14699                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14700                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14701
14702                 op1 = impPopStack().val;
14703
14704                 // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec.
14705                 // But JIT32 allowed it, so we continue to allow it.
14706                 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT);
14707
14708                 // MKREFANY returns a struct.  op2 is the class token.
14709                 op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2);
14710
14711                 impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass()));
14712                 break;
14713
14714             case CEE_LDOBJ:
14715             {
14716                 oper = GT_OBJ;
14717                 assertImp(sz == sizeof(unsigned));
14718
14719                 _impResolveToken(CORINFO_TOKENKIND_Class);
14720
14721                 JITDUMP(" %08X", resolvedToken.token);
14722
14723             OBJ:
14724
14725                 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14726
14727                 if (tiVerificationNeeded)
14728                 {
14729                     typeInfo tiPtr = impStackTop().seTypeInfo;
14730
14731                     // Make sure we have a byref
14732                     if (!tiPtr.IsByRef())
14733                     {
14734                         Verify(false, "pointer not byref");
14735                         compUnsafeCastUsed = true;
14736                     }
14737                     typeInfo tiPtrVal = DereferenceByRef(tiPtr);
14738
14739                     if (!tiCompatibleWith(tiPtrVal, tiRetVal, false))
14740                     {
14741                         Verify(false, "type of address incompatible with type operand");
14742                         compUnsafeCastUsed = true;
14743                     }
14744                     tiRetVal.NormaliseForStack();
14745                 }
14746                 else
14747                 {
14748                     compUnsafeCastUsed = true;
14749                 }
14750
14751                 if (eeIsValueClass(resolvedToken.hClass))
14752                 {
14753                     lclTyp = TYP_STRUCT;
14754                 }
14755                 else
14756                 {
14757                     lclTyp = TYP_REF;
14758                     opcode = CEE_LDIND_REF;
14759                     goto LDIND_POST_VERIFY;
14760                 }
14761
14762                 op1 = impPopStack().val;
14763
14764                 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL);
14765
14766                 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
14767                 if (impIsPrimitive(jitTyp))
14768                 {
14769                     op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1);
14770
14771                     // Could point anywhere, for example a boxed class static int
14772                     op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF;
14773                     assertImp(varTypeIsArithmetic(op1->gtType));
14774                 }
14775                 else
14776                 {
14777                     // OBJ returns a struct
14778                     // and an inline argument which is the class token of the loaded obj
14779                     op1 = gtNewObjNode(resolvedToken.hClass, op1);
14780                 }
14781                 op1->gtFlags |= GTF_EXCEPT;
14782
14783                 impPushOnStack(op1, tiRetVal);
14784                 break;
14785             }
14786
14787             case CEE_LDLEN:
14788                 if (tiVerificationNeeded)
14789                 {
14790                     typeInfo tiArray = impStackTop().seTypeInfo;
14791                     Verify(verIsSDArray(tiArray), "bad array");
14792                     tiRetVal = typeInfo(TI_INT);
14793                 }
14794
14795                 op1 = impPopStack().val;
14796                 if (!opts.MinOpts() && !opts.compDbgCode)
14797                 {
14798                     /* Use GT_ARR_LENGTH operator so rng check opts see this */
14799                     GenTreeArrLen* arrLen =
14800                         new (this, GT_ARR_LENGTH) GenTreeArrLen(TYP_INT, op1, offsetof(CORINFO_Array, length));
14801
14802                     /* Mark the block as containing a length expression */
14803
14804                     if (op1->gtOper == GT_LCL_VAR)
14805                     {
14806                         block->bbFlags |= BBF_HAS_IDX_LEN;
14807                     }
14808
14809                     op1 = arrLen;
14810                 }
14811                 else
14812                 {
14813                     /* Create the expression "*(array_addr + ArrLenOffs)" */
14814                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
14815                                         gtNewIconNode(offsetof(CORINFO_Array, length), TYP_I_IMPL));
14816                     op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
14817                     op1->gtFlags |= GTF_IND_ARR_LEN;
14818                 }
14819
14820                 /* An indirection will cause a GPF if the address is null */
14821                 op1->gtFlags |= GTF_EXCEPT;
14822
14823                 /* Push the result back on the stack */
14824                 impPushOnStack(op1, tiRetVal);
14825                 break;
14826
14827             case CEE_BREAK:
14828                 op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID);
14829                 goto SPILL_APPEND;
14830
14831             case CEE_NOP:
14832                 if (opts.compDbgCode)
14833                 {
14834                     op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
14835                     goto SPILL_APPEND;
14836                 }
14837                 break;
14838
14839             /******************************** NYI *******************************/
14840
14841             case 0xCC:
14842                 OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n");
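                // Falls through to the invalid-opcode handling below.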
14843
14844             case CEE_ILLEGAL:
14845             case CEE_MACRO_END:
14846
14847             default:
14848                 BADCODE3("unknown opcode", ": %02X", (int)opcode);
14849         }
14850
14851         codeAddr += sz;
14852         prevOpcode = opcode;
14853
14854         prefixFlags = 0;
14855         assert(!insertLdloc || opcode == CEE_DUP);
14856     }
14857
14858     assert(!insertLdloc);
14859
14860     return;
14861 #undef _impResolveToken
14862 }
14863 #ifdef _PREFAST_
14864 #pragma warning(pop)
14865 #endif
14866
14867 // Push a local/argument tree on the operand stack
14868 void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal)
14869 {
14870     tiRetVal.NormaliseForStack();
14871
14872     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr())
14873     {
14874         tiRetVal.SetUninitialisedObjRef();
14875     }
14876
14877     impPushOnStack(op, tiRetVal);
14878 }
14879
14880 // Load a local/argument on the operand stack
14881 // lclNum is an index into lvaTable *NOT* the arg/lcl index in the IL
14882 void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, typeInfo tiRetVal)
14883 {
14884     var_types lclTyp;
14885
14886     if (lvaTable[lclNum].lvNormalizeOnLoad())
14887     {
14888         lclTyp = lvaGetRealType(lclNum);
14889     }
14890     else
14891     {
14892         lclTyp = lvaGetActualType(lclNum);
14893     }
14894
14895     impPushVar(gtNewLclvNode(lclNum, lclTyp, offset), tiRetVal);
14896 }
14897
14898 // Load an argument on the operand stack
14899 // Shared by the various CEE_LDARG opcodes
14900 // ilArgNum is the argument index as specified in IL.
14901 // It will be mapped to the correct lvaTable index
14902 void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset)
14903 {
14904     Verify(ilArgNum < info.compILargsCount, "bad arg num");
14905
14906     if (compIsForInlining())
14907     {
14908         if (ilArgNum >= info.compArgsCount)
14909         {
14910             compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER);
14911             return;
14912         }
14913
14914         impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo),
14915                    impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo);
14916     }
14917     else
14918     {
14919         if (ilArgNum >= info.compArgsCount)
14920         {
14921             BADCODE("Bad IL");
14922         }
14923
14924         unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param
14925
14926         if (lclNum == info.compThisArg)
14927         {
14928             lclNum = lvaArg0Var;
14929         }
14930
14931         impLoadVar(lclNum, offset);
14932     }
14933 }
14934
14935 // Load a local on the operand stack
14936 // Shared by the various CEE_LDLOC opcodes
14937 // ilLclNum is the local index as specified in IL.
14938 // It will be mapped to the correct lvaTable index
14939 void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset)
14940 {
14941     if (tiVerificationNeeded)
14942     {
14943         Verify(ilLclNum < info.compMethodInfo->locals.numArgs, "bad loc num");
14944         Verify(info.compInitMem, "initLocals not set");
14945     }
14946
14947     if (compIsForInlining())
14948     {
14949         if (ilLclNum >= info.compMethodInfo->locals.numArgs)
14950         {
14951             compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER);
14952             return;
14953         }
14954
14955         // Get the local type
14956         var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo;
14957
14958         typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo;
14959
14960         /* Have we allocated a temp for this local? */
14961
14962         unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp"));
14963
14964         // All vars of inlined methods should be !lvNormalizeOnLoad()
14965
14966         assert(!lvaTable[lclNum].lvNormalizeOnLoad());
14967         lclTyp = genActualType(lclTyp);
14968
14969         impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal);
14970     }
14971     else
14972     {
14973         if (ilLclNum >= info.compMethodInfo->locals.numArgs)
14974         {
14975             BADCODE("Bad IL");
14976         }
14977
14978         unsigned lclNum = info.compArgsCount + ilLclNum;
14979
14980         impLoadVar(lclNum, offset);
14981     }
14982 }
14983
14984 #ifdef _TARGET_ARM_
14985 /**************************************************************************************
14986  *
14987  *  When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the
14988  *  dst struct, because struct promotion will turn it into a float/double variable while
14989  *  the rhs will be an int/long variable. We do not generate code for assigning an int into
14990  *  a float, yet nothing prevents such a tree from being created. The tree would
14991  *  look like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int))
14992  *
14993  *  tmpNum - the lcl dst variable num that is a struct.
14994  *  src    - the src tree assigned to the dest that is a struct/int (for a varargs call.)
14995  *  hClass - the type handle for the struct variable.
14996  *
14997  *  TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play,
14998  *        however, we could generate code that transfers from int to float registers
14999  *        (transfer, not a cast.)
15000  *
15001  */
15002 void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTreePtr src, CORINFO_CLASS_HANDLE hClass)
15003 {
15004     if (src->gtOper == GT_CALL && src->gtCall.IsVarargs() && IsHfa(hClass))
15005     {
15006         int       hfaSlots = GetHfaCount(hClass);
15007         var_types hfaType  = GetHfaType(hClass);
15008
15009         // If we have varargs, the importer morphs the method's return type to "int" irrespective of its
15010         // original struct/float type, because the ABI specifies the return in integer registers.
15011         // We don't want struct promotion to replace an expression like this:
15012         //   lclFld_int = callvar_int() into lclFld_float = callvar_int();
15013         // This means an int is getting assigned to a float without a cast. Prevent the promotion.
15014         if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) ||
15015             (hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES))
15016         {
15017             // Make sure this struct type stays as struct so we can receive the call in a struct.
15018             lvaTable[tmpNum].lvIsMultiRegRet = true;
15019         }
15020     }
15021 }
15022 #endif // _TARGET_ARM_
15023
15024 #if FEATURE_MULTIREG_RET
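// Assign a multi-register returned value 'op' to a freshly grabbed temp and return a use of that
// temp. The temp is marked lvIsMultiRegRet (and the use GTF_DONT_CSE) so its fields are not
// promoted apart and the return value is not CSE'd.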
15025 GenTreePtr Compiler::impAssignMultiRegTypeToVar(GenTreePtr op, CORINFO_CLASS_HANDLE hClass)
15026 {
15027     unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return."));
15028     impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_NONE);
15029     GenTreePtr ret = gtNewLclvNode(tmpNum, op->gtType);
15030
15031     // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
15032     ret->gtFlags |= GTF_DONT_CSE;
15033
15034     assert(IsMultiRegReturnedType(hClass));
15035
15036     // Mark the var so that fields are not promoted and stay together.
15037     lvaTable[tmpNum].lvIsMultiRegRet = true;
15038
15039     return ret;
15040 }
15041 #endif // FEATURE_MULTIREG_RET
15042
15043 // Import a return instruction.
15044 // Returns false if inlining was aborted.
15045 // opcode can be CEE_RET, or a call opcode in the case of a tail.call
15046 bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE& opcode)
15047 {
15048     if (tiVerificationNeeded)
15049     {
15050         verVerifyThisPtrInitialised();
15051
15052         unsigned expectedStack = 0;
15053         if (info.compRetType != TYP_VOID)
15054         {
15055             typeInfo tiVal = impStackTop().seTypeInfo;
15056             typeInfo tiDeclared =
15057                 verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
15058
15059             Verify(!verIsByRefLike(tiDeclared) || verIsSafeToReturnByRef(tiVal), "byref return");
15060
15061             Verify(tiCompatibleWith(tiVal, tiDeclared.NormaliseForStack(), true), "type mismatch");
15062             expectedStack = 1;
15063         }
15064         Verify(verCurrentState.esStackDepth == expectedStack, "stack non-empty on return");
15065     }
15066
15067     GenTree*             op2       = nullptr;
15068     GenTree*             op1       = nullptr;
15069     CORINFO_CLASS_HANDLE retClsHnd = nullptr;
15070
15071     if (info.compRetType != TYP_VOID)
15072     {
15073         StackEntry se = impPopStack(retClsHnd);
15074         op2           = se.val;
15075
15076         if (!compIsForInlining())
15077         {
15078             impBashVarAddrsToI(op2);
15079             op2 = impImplicitIorI4Cast(op2, info.compRetType);
15080             op2 = impImplicitR4orR8Cast(op2, info.compRetType);
15081             assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) ||
15082                       ((op2->TypeGet() == TYP_I_IMPL) && (info.compRetType == TYP_BYREF)) ||
15083                       ((op2->TypeGet() == TYP_BYREF) && (info.compRetType == TYP_I_IMPL)) ||
15084                       (varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) ||
15085                       (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType)));
15086
15087 #ifdef DEBUG
15088             if (opts.compGcChecks && info.compRetType == TYP_REF)
15089             {
15090                 // DDB 3483  : JIT Stress: early termination of GC ref's life time in exception code path
15091                 // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with
15092                 // one-return BB.
15093
15094                 assert(op2->gtType == TYP_REF);
15095
15096                 // confirm that the argument is a GC pointer (for debugging (GC stress))
15097                 GenTreeArgList* args = gtNewArgList(op2);
15098                 op2                  = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, 0, args);
15099
15100                 if (verbose)
15101                 {
15102                     printf("\ncompGcChecks tree:\n");
15103                     gtDispTree(op2);
15104                 }
15105             }
15106 #endif
15107         }
15108         else
15109         {
15110             // inlinee's stack should be empty now.
15111             assert(verCurrentState.esStackDepth == 0);
15112
15113 #ifdef DEBUG
15114             if (verbose)
15115             {
15116                 printf("\n\n    Inlinee Return expression (before normalization)  =>\n");
15117                 gtDispTree(op2);
15118             }
15119 #endif
15120
15121             // Make sure the type matches the original call.
15122
15123             var_types returnType       = genActualType(op2->gtType);
15124             var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType;
15125             if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT))
15126             {
15127                 originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass);
15128             }
15129
15130             if (returnType != originalCallType)
15131             {
15132                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH);
15133                 return false;
15134             }
15135
15136             // Below, we are going to set impInlineInfo->retExpr to the tree with the return
15137             // expression. At this point, retExpr could already be set if there are multiple
15138             // return blocks (meaning lvaInlineeReturnSpillTemp != BAD_VAR_NUM) and one of
15139             // the other blocks already set it. If there is only a single return block,
15140             // retExpr shouldn't be set. However, this is not true if we reimport a block
15141             // with a return. In that case, retExpr will be set, then the block will be
15142             // reimported, but retExpr won't get cleared as part of setting the block to
15143             // be reimported. The reimported retExpr value should be the same, so even if
15144             // we don't unconditionally overwrite it, it shouldn't matter.
15145             if (info.compRetNativeType != TYP_STRUCT)
15146             {
15147                 // compRetNativeType is not TYP_STRUCT.
15148                 // This implies it could be either a scalar type or SIMD vector type or
15149                 // a struct type that can be normalized to a scalar type.
15150
15151                 if (varTypeIsStruct(info.compRetType))
15152                 {
15153                     noway_assert(info.compRetBuffArg == BAD_VAR_NUM);
15154                     // adjust the type away from struct to integral
15155                     // and no normalizing
15156                     op2 = impFixupStructReturnType(op2, retClsHnd);
15157                 }
15158                 else
15159                 {
15160                     // Do we have to normalize?
15161                     var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType);
15162                     if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) &&
15163                         fgCastNeeded(op2, fncRealRetType))
15164                     {
15165                         // Small-typed return values are normalized by the callee
15166                         op2 = gtNewCastNode(TYP_INT, op2, fncRealRetType);
15167                     }
15168                 }
15169
15170                 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15171                 {
15172                     assert(info.compRetNativeType != TYP_VOID &&
15173                            (fgMoreThanOneReturnBlock() || impInlineInfo->hasPinnedLocals));
15174
15175                     // This is a bit of a workaround...
15176                     // If we are inlining a call that returns a struct, where the actual "native" return type is
15177                     // not a struct (for example, the struct is composed of exactly one int, and the native
15178                     // return type is thus an int), and the inlinee has multiple return blocks (thus,
15179                     // lvaInlineeReturnSpillTemp is != BAD_VAR_NUM, and is the index of a local var that is set
15180                     // to the *native* return type), and at least one of the return blocks is the result of
15181                     // a call, then we have a problem. The situation is like this (from a failed test case):
15182                     //
15183                     // inliner:
15184                     //      // Note: valuetype plinq_devtests.LazyTests/LIX is a struct with only a single int
15185                     //      call !!0 [mscorlib]System.Threading.LazyInitializer::EnsureInitialized<valuetype
15186                     //      plinq_devtests.LazyTests/LIX>(!!0&, bool&, object&, class [mscorlib]System.Func`1<!!0>)
15187                     //
15188                     // inlinee:
15189                     //      ...
15190                     //      ldobj      !!T                 // this gets bashed to a GT_LCL_FLD, type TYP_INT
15191                     //      ret
15192                     //      ...
15193                     //      call       !!0 System.Threading.LazyInitializer::EnsureInitializedCore<!!0>(!!0&, bool&,
15194                     //      object&, class System.Func`1<!!0>)
15195                     //      ret
15196                     //
15197                     // In the code above, when we call impFixupStructReturnType(), we will change the op2 return type
15198                     // of the inlinee return node, but we don't do that for GT_CALL nodes, which we delay until
15199                     // morphing when we call fgFixupStructReturn(). We do this, apparently, to handle nested
15200                     // inlining properly by leaving the correct type on the GT_CALL node through importing.
15201                     //
15202                     // To fix this, for this case, we temporarily change the GT_CALL node type to the
15203                     // native return type, which is what it will be set to eventually. We generate the
15204                     // assignment to the return temp, using the correct type, and then restore the GT_CALL
15205                     // node type. During morphing, the GT_CALL will get the correct, final, native return type.
15206
15207                     bool restoreType = false;
15208                     if ((op2->OperGet() == GT_CALL) && (info.compRetType == TYP_STRUCT))
15209                     {
15210                         noway_assert(op2->TypeGet() == TYP_STRUCT);
15211                         op2->gtType = info.compRetNativeType;
15212                         restoreType = true;
15213                     }
15214
15215                     impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15216                                      (unsigned)CHECK_SPILL_ALL);
15217
15218                     GenTreePtr tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, op2->TypeGet());
15219
15220                     if (restoreType)
15221                     {
15222                         op2->gtType = TYP_STRUCT; // restore it to what it was
15223                     }
15224
15225                     op2 = tmpOp2;
15226
15227 #ifdef DEBUG
15228                     if (impInlineInfo->retExpr)
15229                     {
15230                         // Some other block(s) have seen the CEE_RET first.
15231                         // Better they spilled to the same temp.
15232                         assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR);
15233                         assert(impInlineInfo->retExpr->gtLclVarCommon.gtLclNum == op2->gtLclVarCommon.gtLclNum);
15234                     }
15235 #endif
15236                 }
15237
15238 #ifdef DEBUG
15239                 if (verbose)
15240                 {
15241                     printf("\n\n    Inlinee Return expression (after normalization) =>\n");
15242                     gtDispTree(op2);
15243                 }
15244 #endif
15245
15246                 // Report the return expression
15247                 impInlineInfo->retExpr = op2;
15248             }
15249             else
15250             {
15251                 // compRetNativeType is TYP_STRUCT.
15252                 // This implies that struct return via RetBuf arg or multi-reg struct return
15253
15254                 GenTreePtr iciCall = impInlineInfo->iciCall;
15255                 assert(iciCall->gtOper == GT_CALL);
15256
15257                 // Assign the inlinee return into a spill temp.
15258                 // spill temp only exists if there are multiple return points
15259                 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15260                 {
15261                     // in this case we have to insert multiple struct copies to the temp
15262                     // and the retexpr is just the temp.
15263                     assert(info.compRetNativeType != TYP_VOID);
15264                     assert(fgMoreThanOneReturnBlock() || impInlineInfo->hasPinnedLocals);
15265
15266                     impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15267                                      (unsigned)CHECK_SPILL_ALL);
15268                 }
15269
15270 #if defined(_TARGET_ARM_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15271 #if defined(_TARGET_ARM_)
15272                 // TODO-ARM64-NYI: HFA
15273                 // TODO-AMD64-Unix and TODO-ARM once the ARM64 functionality is implemented the
15274                 // next ifdefs could be refactored in a single method with the ifdef inside.
15275                 if (IsHfa(retClsHnd))
15276                 {
15277 // Same as !IsHfa but just don't bother with impAssignStructPtr.
15278 #else  // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15279                 ReturnTypeDesc retTypeDesc;
15280                 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15281                 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
15282
15283                 if (retRegCount != 0)
15284                 {
15285                     // If single eightbyte, the return type would have been normalized and there won't be a temp var.
15286                     // This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes -
15287                     // max allowed.)
15288                     assert(retRegCount == MAX_RET_REG_COUNT);
15289                     // Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr.
15290                     CLANG_FORMAT_COMMENT_ANCHOR;
15291 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15292
15293                     if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15294                     {
15295                         if (!impInlineInfo->retExpr)
15296                         {
15297 #if defined(_TARGET_ARM_)
15298                             impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType);
15299 #else  // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15300                             // The inlinee compiler has figured out the type of the temp already. Use it here.
15301                             impInlineInfo->retExpr =
15302                                 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
15303 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15304                         }
15305                     }
15306                     else
15307                     {
15308                         impInlineInfo->retExpr = op2;
15309                     }
15310                 }
15311                 else
15312 #elif defined(_TARGET_ARM64_)
15313                 ReturnTypeDesc retTypeDesc;
15314                 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15315                 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
15316
15317                 if (retRegCount != 0)
15318                 {
15319                     assert(!iciCall->AsCall()->HasRetBufArg());
15320                     assert(retRegCount >= 2);
15321                     if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15322                     {
15323                         if (!impInlineInfo->retExpr)
15324                         {
15325                             // The inlinee compiler has figured out the type of the temp already. Use it here.
15326                             impInlineInfo->retExpr =
15327                                 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
15328                         }
15329                     }
15330                     else
15331                     {
15332                         impInlineInfo->retExpr = op2;
15333                     }
15334                 }
15335                 else
15336 #endif // defined(_TARGET_ARM64_)
15337                 {
15338                     assert(iciCall->AsCall()->HasRetBufArg());
15339                     GenTreePtr dest = gtCloneExpr(iciCall->gtCall.gtCallArgs->gtOp.gtOp1);
15340                     // spill temp only exists if there are multiple return points
15341                     if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15342                     {
15343                         // if this is the first return we have seen, set the retExpr
15344                         if (!impInlineInfo->retExpr)
15345                         {
15346                             impInlineInfo->retExpr =
15347                                 impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType),
15348                                                    retClsHnd, (unsigned)CHECK_SPILL_ALL);
15349                         }
15350                     }
15351                     else
15352                     {
15353                         impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
15354                     }
15355                 }
15356             }
15357         }
15358     }
15359
15360     if (compIsForInlining())
15361     {
15362         return true;
15363     }
15364
15365     if (info.compRetType == TYP_VOID)
15366     {
15367         // return void
15368         op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
15369     }
15370     else if (info.compRetBuffArg != BAD_VAR_NUM)
15371     {
15372         // Assign value to return buff (first param)
15373         GenTreePtr retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF, impCurStmtOffs);
15374
15375         op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
15376         impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
15377
15378         // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX).
15379         CLANG_FORMAT_COMMENT_ANCHOR;
15380
15381 #if defined(_TARGET_AMD64_)
15382
15383         // The x64 (System V and Win64) calling conventions require the
15384         // implicit return buffer to be returned explicitly (in RAX).
15385         // Change the return type to be BYREF.
15386         op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
15387 #else  // !defined(_TARGET_AMD64_)
15388         // On non-AMD64 targets the profiler hook requires returning the implicit RetBuf explicitly (in RAX).
15389         // In that case the return type of the function is changed to BYREF.
15390         // If the profiler hook is not needed, the return type of the function is TYP_VOID.
15391         if (compIsProfilerHookNeeded())
15392         {
15393             op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
15394         }
15395         else
15396         {
15397             // return void
15398             op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
15399         }
15400 #endif // !defined(_TARGET_AMD64_)
15401     }
15402     else if (varTypeIsStruct(info.compRetType))
15403     {
15404 #if !FEATURE_MULTIREG_RET
15405         // For both ARM architectures the HFA native types are maintained as structs.
15406         // Also on System V AMD64 the multireg structs returns are also left as structs.
15407         noway_assert(info.compRetNativeType != TYP_STRUCT);
15408 #endif
15409         op2 = impFixupStructReturnType(op2, retClsHnd);
15410         // return op2
15411         op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetNativeType), op2);
15412     }
15413     else
15414     {
15415         // return op2
15416         op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2);
15417     }
15418
15419     // We must have imported a tailcall and jumped to RET
15420     if (prefixFlags & PREFIX_TAILCALL)
15421     {
15422 #ifndef _TARGET_AMD64_
15423         // Jit64 compat:
15424         // This cannot be asserted on Amd64 since we permit the following IL pattern:
15425         //      tail.call
15426         //      pop
15427         //      ret
15428         assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode));
15429 #endif
15430
15431         opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES
15432
15433         // impImportCall() would have already appended TYP_VOID calls
15434         if (info.compRetType == TYP_VOID)
15435         {
15436             return true;
15437         }
15438     }
15439
15440     impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
15441 #ifdef DEBUG
15442     // Remember at which BC offset the tree was finished
15443     impNoteLastILoffs();
15444 #endif
15445     return true;
15446 }
15447
15448 /*****************************************************************************
15449  *  Mark the block as unimported.
15450  *  Note that the caller is responsible for calling impImportBlockPending(),
15451  *  with the appropriate stack-state
15452  */
15453
15454 inline void Compiler::impReimportMarkBlock(BasicBlock* block)
15455 {
15456 #ifdef DEBUG
15457     if (verbose && (block->bbFlags & BBF_IMPORTED))
15458     {
15459         printf("\nBB%02u will be reimported\n", block->bbNum);
15460     }
15461 #endif
15462
15463     block->bbFlags &= ~BBF_IMPORTED;
15464 }
15465
15466 /*****************************************************************************
15467  *  Mark the successors of the given block as unimported.
15468  *  Note that the caller is responsible for calling impImportBlockPending()
15469  *  for all the successors, with the appropriate stack-state.
15470  */
15471
15472 void Compiler::impReimportMarkSuccessors(BasicBlock* block)
15473 {
15474     for (unsigned i = 0; i < block->NumSucc(); i++)
15475     {
15476         impReimportMarkBlock(block->GetSucc(i));
15477     }
15478 }
15479
15480 /*****************************************************************************
15481  *
15482  *  SEH filter wrapper that handles only the verification exception;
15483  *  all other exception codes continue the search.
15484  */
15485
15486 LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
15487 {
15488     if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION)
15489     {
15490         return EXCEPTION_EXECUTE_HANDLER;
15491     }
15492
15493     return EXCEPTION_CONTINUE_SEARCH;
15494 }
15495
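// For 'block' and each enclosing try region, queue the handler (and filter) blocks for importing,
// seeding each with the appropriate verification stack state: empty, or holding just the incoming
// exception object. For a try-entry block this also verifies that the evaluation stack is empty and
// that an instance constructor's 'this' pointer is initialized (except for try/fault regions).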
15496 void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
15497 {
15498     assert(block->hasTryIndex());
15499     assert(!compIsForInlining());
15500
15501     unsigned  tryIndex = block->getTryIndex();
15502     EHblkDsc* HBtab    = ehGetDsc(tryIndex);
15503
15504     if (isTryStart)
15505     {
15506         assert(block->bbFlags & BBF_TRY_BEG);
15507
15508         // The Stack must be empty
15509         //
15510         if (block->bbStkDepth != 0)
15511         {
15512             BADCODE("Evaluation stack must be empty on entry into a try block");
15513         }
15514     }
15515
15516     // Save the stack contents, we'll need to restore it later
15517     //
15518     SavedStack blockState;
15519     impSaveStackState(&blockState, false);
15520
15521     while (HBtab != nullptr)
15522     {
15523         if (isTryStart)
15524         {
15525             // Are we verifying that an instance constructor properly initializes its 'this' pointer once?
15526             //  We do not allow the 'this' pointer to be uninitialized when entering most kinds of try regions
15527             //
15528             if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
15529             {
15530                 // We trigger an invalid program exception here unless we have a try/fault region.
15531                 //
15532                 if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter())
15533                 {
15534                     BADCODE(
15535                         "The 'this' pointer of an instance constructor is not initialized upon entry to a try region");
15536                 }
15537                 else
15538                 {
15539                     // Allow a try/fault region to proceed.
15540                     assert(HBtab->HasFaultHandler());
15541                 }
15542             }
15543
15544             /* Recursively process the handler block */
15545             BasicBlock* hndBegBB = HBtab->ebdHndBeg;
15546
15547             //  Construct the proper verification stack state
15548             //   either empty or one that contains just
15549             //   the Exception Object that we are dealing with
15550             //
15551             verCurrentState.esStackDepth = 0;
15552
15553             if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp))
15554             {
15555                 CORINFO_CLASS_HANDLE clsHnd;
15556
15557                 if (HBtab->HasFilter())
15558                 {
15559                     clsHnd = impGetObjectClass();
15560                 }
15561                 else
15562                 {
15563                     CORINFO_RESOLVED_TOKEN resolvedToken;
15564
15565                     resolvedToken.tokenContext = impTokenLookupContextHandle;
15566                     resolvedToken.tokenScope   = info.compScopeHnd;
15567                     resolvedToken.token        = HBtab->ebdTyp;
15568                     resolvedToken.tokenType    = CORINFO_TOKENKIND_Class;
15569                     info.compCompHnd->resolveToken(&resolvedToken);
15570
15571                     clsHnd = resolvedToken.hClass;
15572                 }
15573
15574                 // push the catch arg on the stack, spilling to a temp if necessary
15575                 // Note: can update HBtab->ebdHndBeg!
15576                 hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd);
15577             }
15578
15579             // Queue up the handler for importing
15580             //
15581             impImportBlockPending(hndBegBB);
15582
15583             if (HBtab->HasFilter())
15584             {
15585                 /* @VERIFICATION : Ideally the end-of-filter state should be
15586                    propagated to the catch handler; this is an incompleteness,
15587                    but not a security/compliance issue, since the only
15588                    interesting state is the 'thisInit' state.
15589                    */
15590
15591                 verCurrentState.esStackDepth = 0;
15592
15593                 BasicBlock* filterBB = HBtab->ebdFilter;
15594
15595                 // push the catch arg on the stack, spilling to a temp if necessary
15596                 // Note: can update HBtab->ebdFilter!
15597                 filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass());
15598
15599                 impImportBlockPending(filterBB);
15600             }
15601         }
15602         else if (verTrackObjCtorInitState && HBtab->HasFaultHandler())
15603         {
15604             /* Recursively process the handler block */
15605
15606             verCurrentState.esStackDepth = 0;
15607
15608             // Queue up the fault handler for importing
15609             //
15610             impImportBlockPending(HBtab->ebdHndBeg);
15611         }
15612
15613         // Now process our enclosing try index (if any)
15614         //
15615         tryIndex = HBtab->ebdEnclosingTryIndex;
15616         if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
15617         {
15618             HBtab = nullptr;
15619         }
15620         else
15621         {
15622             HBtab = ehGetDsc(tryIndex);
15623         }
15624     }
15625
15626     // Restore the stack contents
15627     impRestoreStackState(&blockState);
15628 }
15629
15630 //***************************************************************
15631 // Import the instructions for the given basic block.  Perform
15632 // verification, throwing an exception on failure.  Push any successor blocks that are enabled for the first
15633 // time, or whose verification pre-state is changed.
15634
15635 #ifdef _PREFAST_
15636 #pragma warning(push)
15637 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
15638 #endif
15639 void Compiler::impImportBlock(BasicBlock* block)
15640 {
15641     // BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to
15642     // handle them specially. In particular, there is no IL to import for them, but we do need
15643     // to mark them as imported and put their successors on the pending import list.
15644     if (block->bbFlags & BBF_INTERNAL)
15645     {
15646         JITDUMP("Marking BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", block->bbNum);
15647         block->bbFlags |= BBF_IMPORTED;
15648
15649         for (unsigned i = 0; i < block->NumSucc(); i++)
15650         {
15651             impImportBlockPending(block->GetSucc(i));
15652         }
15653
15654         return;
15655     }
15656
15657     bool markImport;
15658
15659     assert(block);
15660
15661     /* Make the block globally available */
15662
15663     compCurBB = block;
15664
15665 #ifdef DEBUG
15666     /* Initialize the debug variables */
15667     impCurOpcName = "unknown";
15668     impCurOpcOffs = block->bbCodeOffs;
15669 #endif
15670
15671     /* Set the current stack state to the merged result */
15672     verResetCurrentState(block, &verCurrentState);
15673
15674     /* Now walk the code and import the IL into GenTrees */
15675
15676     struct FilterVerificationExceptionsParam
15677     {
15678         Compiler*   pThis;
15679         BasicBlock* block;
15680     };
15681     FilterVerificationExceptionsParam param;
15682
15683     param.pThis = this;
15684     param.block = block;
15685
15686     PAL_TRY(FilterVerificationExceptionsParam*, pParam, &param)
15687     {
15688         /* @VERIFICATION : For now, the only state propagation from try
15689            to its handler is "thisInit" state (stack is empty at start of try).
15690            In general, for state that we track in verification, we need to
15691            model the possibility that an exception might happen at any IL
15692            instruction, so we really need to merge all states that obtain
15693            between IL instructions in a try block into the start states of
15694            all handlers.
15695
15696            However, we do not allow the 'this' pointer to be uninitialized when
15697            entering most kinds of try regions (only try/fault are allowed to have
15698            an uninitialized 'this' pointer on entry to the try).
15699
15700            Fortunately, the stack is thrown away when an exception
15701            leads to a handler, so we don't have to worry about that.
15702            We DO, however, have to worry about the "thisInit" state.
15703            But only for the try/fault case.
15704
15705            The only allowed transition is from TIS_Uninit to TIS_Init.
15706
15707            So for a try/fault region for the fault handler block
15708            we will merge the start state of the try begin
15709            and the post-state of each block that is part of this try region
15710         */
15711
15712         // merge the start state of the try begin
15713         //
15714         if (pParam->block->bbFlags & BBF_TRY_BEG)
15715         {
15716             pParam->pThis->impVerifyEHBlock(pParam->block, true);
15717         }
15718
15719         pParam->pThis->impImportBlockCode(pParam->block);
15720
15721         // As discussed above:
15722         // merge the post-state of each block that is part of this try region
15723         //
15724         if (pParam->block->hasTryIndex())
15725         {
15726             pParam->pThis->impVerifyEHBlock(pParam->block, false);
15727         }
15728     }
15729     PAL_EXCEPT_FILTER(FilterVerificationExceptions)
15730     {
15731         verHandleVerificationFailure(block DEBUGARG(false));
15732     }
15733     PAL_ENDTRY
15734
15735     if (compDonotInline())
15736     {
15737         return;
15738     }
15739
15740     assert(!compDonotInline());
15741
15742     markImport = false;
15743
15744 SPILLSTACK:
15745
15746     unsigned    baseTmp             = NO_BASE_TMP; // input temps assigned to successor blocks
15747     bool        reimportSpillClique = false;
15748     BasicBlock* tgtBlock            = nullptr;
15749
15750     /* If the stack is non-empty, we might have to spill its contents */
15751
15752     if (verCurrentState.esStackDepth != 0)
15753     {
15754         impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something
15755                                   // on the stack, its lifetime is hard to determine, simply
15756                                   // don't reuse such temps.
15757
15758         GenTreePtr addStmt = nullptr;
15759
15760         /* Do the successors of 'block' have any other predecessors?
15761            We do not want to do some of the optimizations related to multiRef
15762            if we can reimport blocks */
15763
15764         unsigned multRef = impCanReimport ? unsigned(~0) : 0;
15765
15766         switch (block->bbJumpKind)
15767         {
15768             case BBJ_COND:
15769
15770                 /* Temporarily remove the 'jtrue' from the end of the tree list */
15771
15772                 assert(impTreeLast);
15773                 assert(impTreeLast->gtOper == GT_STMT);
15774                 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_JTRUE);
15775
15776                 addStmt     = impTreeLast;
15777                 impTreeLast = impTreeLast->gtPrev;
15778
15779                 /* Note if the next block has more than one ancestor */
15780
15781                 multRef |= block->bbNext->bbRefs;
15782
15783                 /* Does the next block have temps assigned? */
15784
15785                 baseTmp  = block->bbNext->bbStkTempsIn;
15786                 tgtBlock = block->bbNext;
15787
15788                 if (baseTmp != NO_BASE_TMP)
15789                 {
15790                     break;
15791                 }
15792
15793                 /* Try the target of the jump then */
15794
15795                 multRef |= block->bbJumpDest->bbRefs;
15796                 baseTmp  = block->bbJumpDest->bbStkTempsIn;
15797                 tgtBlock = block->bbJumpDest;
15798                 break;
15799
15800             case BBJ_ALWAYS:
15801                 multRef |= block->bbJumpDest->bbRefs;
15802                 baseTmp  = block->bbJumpDest->bbStkTempsIn;
15803                 tgtBlock = block->bbJumpDest;
15804                 break;
15805
15806             case BBJ_NONE:
15807                 multRef |= block->bbNext->bbRefs;
15808                 baseTmp  = block->bbNext->bbStkTempsIn;
15809                 tgtBlock = block->bbNext;
15810                 break;
15811
15812             case BBJ_SWITCH:
15813
15814                 BasicBlock** jmpTab;
15815                 unsigned     jmpCnt;
15816
15817                 /* Temporarily remove the GT_SWITCH from the end of the tree list */
15818
15819                 assert(impTreeLast);
15820                 assert(impTreeLast->gtOper == GT_STMT);
15821                 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_SWITCH);
15822
15823                 addStmt     = impTreeLast;
15824                 impTreeLast = impTreeLast->gtPrev;
15825
15826                 jmpCnt = block->bbJumpSwt->bbsCount;
15827                 jmpTab = block->bbJumpSwt->bbsDstTab;
15828
15829                 do
15830                 {
15831                     tgtBlock = (*jmpTab);
15832
15833                     multRef |= tgtBlock->bbRefs;
15834
15835                     // Thanks to spill cliques, we should have assigned all or none
15836                     assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn));
15837                     baseTmp = tgtBlock->bbStkTempsIn;
15838                     if (multRef > 1)
15839                     {
15840                         break;
15841                     }
15842                 } while (++jmpTab, --jmpCnt);
15843
15844                 break;
15845
15846             case BBJ_CALLFINALLY:
15847             case BBJ_EHCATCHRET:
15848             case BBJ_RETURN:
15849             case BBJ_EHFINALLYRET:
15850             case BBJ_EHFILTERRET:
15851             case BBJ_THROW:
15852                 NO_WAY("can't have 'unreached' end of BB with non-empty stack");
15853                 break;
15854
15855             default:
15856                 noway_assert(!"Unexpected bbJumpKind");
15857                 break;
15858         }
15859
15860         assert(multRef >= 1);
15861
15862         /* Do we have a base temp number? */
15863
15864         bool newTemps = (baseTmp == NO_BASE_TMP);
15865
15866         if (newTemps)
15867         {
15868             /* Grab enough temps for the whole stack */
15869             baseTmp = impGetSpillTmpBase(block);
15870         }
15871
15872         /* Spill all stack entries into temps */
15873         unsigned level, tempNum;
15874
15875         JITDUMP("\nSpilling stack entries into temps\n");
15876         for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++)
15877         {
15878             GenTreePtr tree = verCurrentState.esStack[level].val;
15879
15880             /* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from
15881                the other. This should merge to a byref in unverifiable code.
15882                However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the
15883                successor would be imported assuming there was a TYP_I_IMPL on
15884                the stack. Thus the value would not get GC-tracked. Hence,
15885                change the temp to TYP_BYREF and reimport the successors.
15886                Note: We should only allow this in unverifiable code.
15887             */
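            // A concrete (hedged, made-up) IL shape for this situation: one arm of a
            // conditional pushes an integer zero, the other pushes a managed pointer,
            // and both fall into the same join block, so they share a spill temp.
            //
            //      IL_0000:  brtrue.s  IL_0005
            //      IL_0002:  ldc.i4.0           // pushes an integer zero
            //      IL_0003:  br.s      IL_0007
            //      IL_0005:  ldloca.s  0        // pushes a managed pointer (TYP_BYREF)
            //      IL_0007:  ...                // join: the shared spill temp must be TYP_BYREF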
15888             if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL && !verNeedsVerification())
15889             {
15890                 lvaTable[tempNum].lvType = TYP_BYREF;
15891                 impReimportMarkSuccessors(block);
15892                 markImport = true;
15893             }
15894
15895 #ifdef _TARGET_64BIT_
15896             if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT)
15897             {
15898                 if (tiVerificationNeeded && tgtBlock->bbEntryState != nullptr &&
15899                     (tgtBlock->bbFlags & BBF_FAILED_VERIFICATION) == 0)
15900                 {
15901                     // Merge the current state into the entry state of block;
15902                     // the call to verMergeEntryStates must have changed
15903                     // the entry state of the block by merging the int local var
15904                     // and the native-int stack entry.
15905                     bool changed = false;
15906                     if (verMergeEntryStates(tgtBlock, &changed))
15907                     {
15908                         impRetypeEntryStateTemps(tgtBlock);
15909                         impReimportBlockPending(tgtBlock);
15910                         assert(changed);
15911                     }
15912                     else
15913                     {
15914                         tgtBlock->bbFlags |= BBF_FAILED_VERIFICATION;
15915                         break;
15916                     }
15917                 }
15918
15919                 // Some other block in the spill clique set this to "int", but now we have "native int".
15920                 // Change the type and go back to re-import any blocks that used the wrong type.
15921                 lvaTable[tempNum].lvType = TYP_I_IMPL;
15922                 reimportSpillClique      = true;
15923             }
15924             else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL)
15925             {
15926                 // Spill clique has decided this should be "native int", but this block only pushes an "int".
15927                 // Insert a sign-extension to "native int" so we match the clique.
15928                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
15929             }
15930
15931             // Consider the case where one branch left a 'byref' on the stack and the other leaves
15932             // an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same
15933             // size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64
15934             // behavior instead of asserting and then generating bad code (where we save/restore the
15935             // low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been
15936             // imported already, we need to change the type of the local and reimport the spill clique.
15937             // If the 'byref' side has imported, we insert a cast from int to 'native int' to match
15938             // the 'byref' size.
15939             if (!tiVerificationNeeded)
15940             {
15941                 if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT)
15942                 {
15943                     // Some other block in the spill clique set this to "int", but now we have "byref".
15944                     // Change the type and go back to re-import any blocks that used the wrong type.
15945                     lvaTable[tempNum].lvType = TYP_BYREF;
15946                     reimportSpillClique      = true;
15947                 }
15948                 else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF)
15949                 {
15950                     // Spill clique has decided this should be "byref", but this block only pushes an "int".
15951                     // Insert a sign-extension to "native int" so we match the clique size.
15952                     verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
15953                 }
15954             }
15955 #endif // _TARGET_64BIT_
15956
15957 #if FEATURE_X87_DOUBLES
15958             // X87 stack doesn't differentiate between float/double
15959             // so promoting is no big deal.
15960             // For everybody else keep it as float until we have a collision and then promote
15961             // Just like for x64's TYP_INT<->TYP_I_IMPL
15962
15963             if (multRef > 1 && tree->gtType == TYP_FLOAT)
15964             {
15965                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
15966             }
15967
15968 #else // !FEATURE_X87_DOUBLES
15969
15970             if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT)
15971             {
15972                 // Some other block in the spill clique set this to "float", but now we have "double".
15973                 // Change the type and go back to re-import any blocks that used the wrong type.
15974                 lvaTable[tempNum].lvType = TYP_DOUBLE;
15975                 reimportSpillClique      = true;
15976             }
15977             else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE)
15978             {
15979                 // Spill clique has decided this should be "double", but this block only pushes a "float".
15980                 // Insert a cast to "double" so we match the clique.
15981                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
15982             }
15983
15984 #endif // FEATURE_X87_DOUBLES
15985
15986             /* If addStmt has a reference to tempNum (can only happen if we
15987                are spilling to the temps already used by a previous block),
15988                we need to spill addStmt */
15989
15990             if (addStmt && !newTemps && gtHasRef(addStmt->gtStmt.gtStmtExpr, tempNum, false))
15991             {
15992                 GenTreePtr addTree = addStmt->gtStmt.gtStmtExpr;
15993
15994                 if (addTree->gtOper == GT_JTRUE)
15995                 {
15996                     GenTreePtr relOp = addTree->gtOp.gtOp1;
15997                     assert(relOp->OperIsCompare());
15998
15999                     var_types type = genActualType(relOp->gtOp.gtOp1->TypeGet());
16000
16001                     if (gtHasRef(relOp->gtOp.gtOp1, tempNum, false))
16002                     {
16003                         unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1"));
16004                         impAssignTempGen(temp, relOp->gtOp.gtOp1, level);
16005                         type              = genActualType(lvaTable[temp].TypeGet());
16006                         relOp->gtOp.gtOp1 = gtNewLclvNode(temp, type);
16007                     }
16008
16009                     if (gtHasRef(relOp->gtOp.gtOp2, tempNum, false))
16010                     {
16011                         unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2"));
16012                         impAssignTempGen(temp, relOp->gtOp.gtOp2, level);
16013                         type              = genActualType(lvaTable[temp].TypeGet());
16014                         relOp->gtOp.gtOp2 = gtNewLclvNode(temp, type);
16015                     }
16016                 }
16017                 else
16018                 {
16019                     assert(addTree->gtOper == GT_SWITCH && genActualType(addTree->gtOp.gtOp1->gtType) == TYP_I_IMPL);
16020
16021                     unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH"));
16022                     impAssignTempGen(temp, addTree->gtOp.gtOp1, level);
16023                     addTree->gtOp.gtOp1 = gtNewLclvNode(temp, TYP_I_IMPL);
16024                 }
16025             }
16026
16027             /* Spill the stack entry, and replace with the temp */
16028
16029             if (!impSpillStackEntry(level, tempNum
16030 #ifdef DEBUG
16031                                     ,
16032                                     true, "Spill Stack Entry"
16033 #endif
16034                                     ))
16035             {
16036                 if (markImport)
16037                 {
16038                     BADCODE("bad stack state");
16039                 }
16040
16041                 // Oops. Something went wrong when spilling. Bad code.
16042                 verHandleVerificationFailure(block DEBUGARG(true));
16043
16044                 goto SPILLSTACK;
16045             }
16046         }
16047
16048         /* Put back the 'jtrue'/'switch' if we removed it earlier */
16049
16050         if (addStmt)
16051         {
16052             impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE);
16053         }
16054     }
16055
16056     // Some of the append/spill logic works on compCurBB
16057
16058     assert(compCurBB == block);
16059
16060     /* Save the tree list in the block */
16061     impEndTreeList(block);
16062
16063     // impEndTreeList sets BBF_IMPORTED on the block
16064     // We do *NOT* want to set it later than this because
16065     // impReimportSpillClique might clear it if this block is both a
16066     // predecessor and successor in the current spill clique
16067     assert(block->bbFlags & BBF_IMPORTED);
16068
16069     // If we had an int/native int or float/double collision, we need to re-import
16070     if (reimportSpillClique)
16071     {
16072         // This will re-import all the successors of block (as well as each of their predecessors)
16073         impReimportSpillClique(block);
16074
16075         // For blocks that haven't been imported yet, we still need to mark them as pending import.
16076         for (unsigned i = 0; i < block->NumSucc(); i++)
16077         {
16078             BasicBlock* succ = block->GetSucc(i);
16079             if ((succ->bbFlags & BBF_IMPORTED) == 0)
16080             {
16081                 impImportBlockPending(succ);
16082             }
16083         }
16084     }
16085     else // the normal case
16086     {
16087         // otherwise just import the successors of block
16088
16089         /* Does this block jump to any other blocks? */
16090         for (unsigned i = 0; i < block->NumSucc(); i++)
16091         {
16092             impImportBlockPending(block->GetSucc(i));
16093         }
16094     }
16095 }
16096 #ifdef _PREFAST_
16097 #pragma warning(pop)
16098 #endif
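
// The spill loop above reconciles, pairwise, the type each block in a spill clique gives
// to a shared spill temp. The standalone sketch below (hedged; not JIT code, and the
// enum, struct, and function names are made up for illustration) restates the rule used
// for each int/native-int and float/double pair: when this block produces the wider
// type, widen the temp and re-import the clique; when the temp is already wider, cast
// this block's value up to match it.
#if 0
enum class Width
{
    Narrow, // e.g. TYP_INT or TYP_FLOAT
    Wide    // e.g. TYP_I_IMPL or TYP_DOUBLE
};

struct Outcome
{
    Width tempWidth;      // width the shared spill temp ends up with
    bool  reimportClique; // other blocks spilled the narrow type; re-import them
    bool  castValueUp;    // this block must cast its value up to match the temp
};

static Outcome reconcileSpillTempWidth(Width tempWidth, Width incoming)
{
    if ((incoming == Width::Wide) && (tempWidth == Width::Narrow))
    {
        return {Width::Wide, true, false};
    }
    if ((incoming == Width::Narrow) && (tempWidth == Width::Wide))
    {
        return {Width::Wide, false, true};
    }
    return {tempWidth, false, false};
}
#endif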
16099
16100 /*****************************************************************************/
16101 //
16102 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16103 // necessary (and ensures that it is a member of the set of BBs on the list, by setting its byte in
16104 // impPendingBlockMembers).  Merges the current verification state into the verification state of "block"
16105 // (its "pre-state").
16106
16107 void Compiler::impImportBlockPending(BasicBlock* block)
16108 {
16109 #ifdef DEBUG
16110     if (verbose)
16111     {
16112         printf("\nimpImportBlockPending for BB%02u\n", block->bbNum);
16113     }
16114 #endif
16115
16116     // We will add a block to the pending set if it has not already been imported (or needs to be re-imported),
16117     // or if it has, but merging in a predecessor's post-state changes the block's pre-state.
16118     // (When we're doing verification, we always attempt the merge to detect verification errors.)
16119
16120     // If the block has not been imported, add to pending set.
16121     bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0);
16122
16123     // Initialize bbEntryState just the first time we try to add this block to the pending list
16124     // Just because bbEntryState is NULL doesn't mean the pre-state wasn't previously set
16125     // We use NULL to indicate the 'common' state to avoid memory allocation
16126     if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) &&
16127         (impGetPendingBlockMember(block) == 0))
16128     {
16129         verInitBBEntryState(block, &verCurrentState);
16130         assert(block->bbStkDepth == 0);
16131         block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth);
16132         assert(addToPending);
16133         assert(impGetPendingBlockMember(block) == 0);
16134     }
16135     else
16136     {
16137         // The stack should have the same height on entry to the block from all its predecessors.
16138         if (block->bbStkDepth != verCurrentState.esStackDepth)
16139         {
16140 #ifdef DEBUG
16141             char buffer[400];
16142             sprintf_s(buffer, sizeof(buffer),
16143                       "Block at offset %4.4x to %4.4x in %s entered with different stack depths.\n"
16144                       "Previous depth was %d, current depth is %d",
16145                       block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth,
16146                       verCurrentState.esStackDepth);
16147             buffer[400 - 1] = 0;
16148             NO_WAY(buffer);
16149 #else
16150             NO_WAY("Block entered with different stack depths");
16151 #endif
16152         }
16153
16154         // Additionally, if we need to verify, merge the verification state.
16155         if (tiVerificationNeeded)
16156         {
16157             // Merge the current state into the entry state of block; if this does not change the entry state
16158             // by merging, do not add the block to the pending-list.
16159             bool changed = false;
16160             if (!verMergeEntryStates(block, &changed))
16161             {
16162                 block->bbFlags |= BBF_FAILED_VERIFICATION;
16163                 addToPending = true; // We will pop it off, and check the flag set above.
16164             }
16165             else if (changed)
16166             {
16167                 addToPending = true;
16168
16169                 JITDUMP("Adding BB%02u to pending set due to new merge result\n", block->bbNum);
16170             }
16171         }
16172
16173         if (!addToPending)
16174         {
16175             return;
16176         }
16177
16178         if (block->bbStkDepth > 0)
16179         {
16180             // We need to fix the types of any spill temps that might have changed:
16181             //   int->native int, float->double, int->byref, etc.
16182             impRetypeEntryStateTemps(block);
16183         }
16184
16185         // OK, we must add to the pending list, if it's not already in it.
16186         if (impGetPendingBlockMember(block) != 0)
16187         {
16188             return;
16189         }
16190     }
16191
16192     // Get an entry to add to the pending list
16193
16194     PendingDsc* dsc;
16195
16196     if (impPendingFree)
16197     {
16198         // We can reuse one of the freed up dscs.
16199         dsc            = impPendingFree;
16200         impPendingFree = dsc->pdNext;
16201     }
16202     else
16203     {
16204         // We have to create a new dsc
16205         dsc = new (this, CMK_Unknown) PendingDsc;
16206     }
16207
16208     dsc->pdBB                 = block;
16209     dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth;
16210     dsc->pdThisPtrInit        = verCurrentState.thisInitialized;
16211
16212     // Save the stack trees for later
16213
16214     if (verCurrentState.esStackDepth)
16215     {
16216         impSaveStackState(&dsc->pdSavedStack, false);
16217     }
16218
16219     // Add the entry to the pending list
16220
16221     dsc->pdNext    = impPendingList;
16222     impPendingList = dsc;
16223     impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16224
16225     // Various assertions require us to now consider the block as not imported (at least for
16226     // the final time...)
16227     block->bbFlags &= ~BBF_IMPORTED;
16228
16229 #ifdef DEBUG
16230     if (verbose && 0)
16231     {
16232         printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16233     }
16234 #endif
16235 }
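
// A minimal standalone sketch (hedged; not JIT code, and the container and names below
// are illustrative only) of the "pending list plus membership byte" pattern used by
// impImportBlockPending and impReimportBlockPending: the membership set makes the
// "already queued?" check O(1) and keeps duplicates off the list, while clearing the
// byte on removal lets a block be queued again later if a merge changes its pre-state.
#if 0
#include <deque>
#include <vector>

class PendingWorklist
{
public:
    explicit PendingWorklist(size_t blockCount) : m_isPending(blockCount, 0)
    {
    }

    void Push(unsigned blockNum)
    {
        if (m_isPending[blockNum])
        {
            return; // already on the list; nothing to do
        }
        m_isPending[blockNum] = 1;
        m_pending.push_back(blockNum);
    }

    unsigned Pop()
    {
        unsigned blockNum = m_pending.front();
        m_pending.pop_front();
        m_isPending[blockNum] = 0; // may be re-queued later if its entry state changes
        return blockNum;
    }

    bool Empty() const
    {
        return m_pending.empty();
    }

private:
    std::deque<unsigned> m_pending;   // plays the role of impPendingList
    std::vector<char>    m_isPending; // plays the role of impPendingBlockMembers
};
#endif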
16236
16237 /*****************************************************************************/
16238 //
16239 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16240 // necessary (and ensures that it is a member of the set of BBs on the list, by setting its byte in
16241 // impPendingBlockMembers).  Does *NOT* change the existing "pre-state" of the block.
16242
16243 void Compiler::impReimportBlockPending(BasicBlock* block)
16244 {
16245     JITDUMP("\nimpReimportBlockPending for BB%02u", block->bbNum);
16246
16247     assert(block->bbFlags & BBF_IMPORTED);
16248
16249     // OK, we must add to the pending list, if it's not already in it.
16250     if (impGetPendingBlockMember(block) != 0)
16251     {
16252         return;
16253     }
16254
16255     // Get an entry to add to the pending list
16256
16257     PendingDsc* dsc;
16258
16259     if (impPendingFree)
16260     {
16261         // We can reuse one of the freed up dscs.
16262         dsc            = impPendingFree;
16263         impPendingFree = dsc->pdNext;
16264     }
16265     else
16266     {
16267         // We have to create a new dsc
16268         dsc = new (this, CMK_ImpStack) PendingDsc;
16269     }
16270
16271     dsc->pdBB = block;
16272
16273     if (block->bbEntryState)
16274     {
16275         dsc->pdThisPtrInit        = block->bbEntryState->thisInitialized;
16276         dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth;
16277         dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack;
16278     }
16279     else
16280     {
16281         dsc->pdThisPtrInit        = TIS_Bottom;
16282         dsc->pdSavedStack.ssDepth = 0;
16283         dsc->pdSavedStack.ssTrees = nullptr;
16284     }
16285
16286     // Add the entry to the pending list
16287
16288     dsc->pdNext    = impPendingList;
16289     impPendingList = dsc;
16290     impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16291
16292     // Various assertions require us to now consider the block as not imported (at least for
16293     // the final time...)
16294     block->bbFlags &= ~BBF_IMPORTED;
16295
16296 #ifdef DEBUG
16297     if (verbose && 0)
16298     {
16299         printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16300     }
16301 #endif
16302 }
16303
16304 void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp)
16305 {
16306     if (comp->impBlockListNodeFreeList == nullptr)
16307     {
16308         return (BlockListNode*)comp->compGetMem(sizeof(BlockListNode), CMK_BasicBlock);
16309     }
16310     else
16311     {
16312         BlockListNode* res             = comp->impBlockListNodeFreeList;
16313         comp->impBlockListNodeFreeList = res->m_next;
16314         return res;
16315     }
16316 }
16317
16318 void Compiler::FreeBlockListNode(Compiler::BlockListNode* node)
16319 {
16320     node->m_next             = impBlockListNodeFreeList;
16321     impBlockListNodeFreeList = node;
16322 }
16323
16324 void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback)
16325 {
16326     bool toDo = true;
16327
16328     noway_assert(!fgComputePredsDone);
16329     if (!fgCheapPredsValid)
16330     {
16331         fgComputeCheapPreds();
16332     }
16333
16334     BlockListNode* succCliqueToDo = nullptr;
16335     BlockListNode* predCliqueToDo = new (this) BlockListNode(block);
16336     while (toDo)
16337     {
16338         toDo = false;
16339         // Look at the successors of every member of the predecessor to-do list.
16340         while (predCliqueToDo != nullptr)
16341         {
16342             BlockListNode* node = predCliqueToDo;
16343             predCliqueToDo      = node->m_next;
16344             BasicBlock* blk     = node->m_blk;
16345             FreeBlockListNode(node);
16346
16347             for (unsigned succNum = 0; succNum < blk->NumSucc(); succNum++)
16348             {
16349                 BasicBlock* succ = blk->GetSucc(succNum);
16350                 // If it's not already in the clique, add it, and also add it
16351                 // as a member of the successor "toDo" set.
16352                 if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0)
16353                 {
16354                     callback->Visit(SpillCliqueSucc, succ);
16355                     impSpillCliqueSetMember(SpillCliqueSucc, succ, 1);
16356                     succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo);
16357                     toDo           = true;
16358                 }
16359             }
16360         }
16361         // Look at the predecessors of every member of the successor to-do list.
16362         while (succCliqueToDo != nullptr)
16363         {
16364             BlockListNode* node = succCliqueToDo;
16365             succCliqueToDo      = node->m_next;
16366             BasicBlock* blk     = node->m_blk;
16367             FreeBlockListNode(node);
16368
16369             for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next)
16370             {
16371                 BasicBlock* predBlock = pred->block;
16372                 // If it's not already in the clique, add it, and also add it
16373                 // as a member of the predecessor "toDo" set.
16374                 if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0)
16375                 {
16376                     callback->Visit(SpillCliquePred, predBlock);
16377                     impSpillCliqueSetMember(SpillCliquePred, predBlock, 1);
16378                     predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo);
16379                     toDo           = true;
16380                 }
16381             }
16382         }
16383     }
16384
16385     // If this fails, it means we didn't walk the spill clique properly and somehow managed
16386     // to miss walking back to include the predecessor we started from.
16387     // The most likely cause is missing or out-of-date bbPreds.
16388     assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0);
16389 }
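
// A hedged standalone restatement of the walk above (not JIT code; the adjacency-list
// graph and all names are illustrative): starting from a single predecessor, alternately
// expand through successors and then predecessors until a fixed point is reached, so the
// resulting pred/succ sets together form the full spill clique.
#if 0
#include <vector>

struct FlowGraph
{
    std::vector<std::vector<unsigned>> succs; // successors of each block
    std::vector<std::vector<unsigned>> preds; // predecessors of each block
};

static void walkSpillClique(const FlowGraph&   g,
                            unsigned           startPred,
                            std::vector<char>& inPredClique,
                            std::vector<char>& inSuccClique)
{
    std::vector<unsigned> predToDo{startPred};
    std::vector<unsigned> succToDo;
    bool                  toDo = true;

    while (toDo)
    {
        toDo = false;

        // Look at the successors of every member of the predecessor to-do list.
        while (!predToDo.empty())
        {
            unsigned blk = predToDo.back();
            predToDo.pop_back();
            for (unsigned succ : g.succs[blk])
            {
                if (!inSuccClique[succ])
                {
                    inSuccClique[succ] = 1; // corresponds to Visit(SpillCliqueSucc, succ)
                    succToDo.push_back(succ);
                    toDo = true;
                }
            }
        }

        // Look at the predecessors of every member of the successor to-do list.
        while (!succToDo.empty())
        {
            unsigned blk = succToDo.back();
            succToDo.pop_back();
            for (unsigned pred : g.preds[blk])
            {
                if (!inPredClique[pred])
                {
                    inPredClique[pred] = 1; // corresponds to Visit(SpillCliquePred, pred)
                    predToDo.push_back(pred);
                    toDo = true;
                }
            }
        }
    }
}
#endif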
16390
16391 void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
16392 {
16393     if (predOrSucc == SpillCliqueSucc)
16394     {
16395         assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor.
16396         blk->bbStkTempsIn = m_baseTmp;
16397     }
16398     else
16399     {
16400         assert(predOrSucc == SpillCliquePred);
16401         assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor.
16402         blk->bbStkTempsOut = m_baseTmp;
16403     }
16404 }
16405
16406 void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
16407 {
16408     // For Preds we could be a little smarter and just find the existing store
16409     // and re-type it/add a cast, but that is complicated and hopefully very rare, so
16410     // just re-import the whole block (just like we do for successors)
16411
16412     if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0))
16413     {
16414         // If we haven't imported this block and we're not going to (because it isn't on
16415         // the pending list) then just ignore it for now.
16416
16417         // This block has either never been imported (EntryState == NULL) or it failed
16418         // verification. Neither state requires us to force it to be imported now.
16419         assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION));
16420         return;
16421     }
16422
16423     // For successors we have a valid verCurrentState, so just mark them for reimport
16424     // the 'normal' way
16425     // Unlike predecessors, we *DO* need to reimport the current block because the
16426     // initial import had the wrong entry state types.
16427     // Similarly, blocks that are currently on the pending list, still need to call
16428     // impImportBlockPending to fixup their entry state.
16429     if (predOrSucc == SpillCliqueSucc)
16430     {
16431         m_pComp->impReimportMarkBlock(blk);
16432
16433         // Set the current stack state to that of the blk->bbEntryState
16434         m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState);
16435         assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry());
16436
16437         m_pComp->impImportBlockPending(blk);
16438     }
16439     else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0))
16440     {
16441         // As described above, we are only visiting predecessors so they can
16442         // add the appropriate casts, since we have already done that for the current
16443         // block, it does not need to be reimported.
16444         // Nor do we need to reimport blocks that are still pending, but not yet
16445         // imported.
16446         //
16447         // For predecessors, we have no state to seed the EntryState, so we just have
16448         // to assume the existing one is correct.
16449         // If the block is also a successor, it will get the EntryState properly
16450         // updated when it is visited as a successor in the above "if" block.
16451         assert(predOrSucc == SpillCliquePred);
16452         m_pComp->impReimportBlockPending(blk);
16453     }
16454 }
16455
16456 // Re-type the incoming lclVar nodes to match the varDsc.
16457 void Compiler::impRetypeEntryStateTemps(BasicBlock* blk)
16458 {
16459     if (blk->bbEntryState != nullptr)
16460     {
16461         EntryState* es = blk->bbEntryState;
16462         for (unsigned level = 0; level < es->esStackDepth; level++)
16463         {
16464             GenTreePtr tree = es->esStack[level].val;
16465             if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD))
16466             {
16467                 unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
16468                 noway_assert(lclNum < lvaCount);
16469                 LclVarDsc* varDsc              = lvaTable + lclNum;
16470                 es->esStack[level].val->gtType = varDsc->TypeGet();
16471             }
16472         }
16473     }
16474 }
16475
16476 unsigned Compiler::impGetSpillTmpBase(BasicBlock* block)
16477 {
16478     if (block->bbStkTempsOut != NO_BASE_TMP)
16479     {
16480         return block->bbStkTempsOut;
16481     }
16482
16483 #ifdef DEBUG
16484     if (verbose)
16485     {
16486         printf("\n*************** In impGetSpillTmpBase(BB%02u)\n", block->bbNum);
16487     }
16488 #endif // DEBUG
16489
16490     // Otherwise, choose one, and propagate to all members of the spill clique.
16491     // Grab enough temps for the whole stack.
16492     unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries"));
16493     SetSpillTempsBase callback(baseTmp);
16494
16495     // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor
16496     // to one spill clique, and similarly can only be the successor to one spill clique
16497     impWalkSpillCliqueFromPred(block, &callback);
16498
16499     return baseTmp;
16500 }
16501
16502 void Compiler::impReimportSpillClique(BasicBlock* block)
16503 {
16504 #ifdef DEBUG
16505     if (verbose)
16506     {
16507         printf("\n*************** In impReimportSpillClique(BB%02u)\n", block->bbNum);
16508     }
16509 #endif // DEBUG
16510
16511     // If we get here, it is because this block is already part of a spill clique
16512     // and one predecessor had an outgoing live stack slot of type int, and this
16513     // block has an outgoing live stack slot of type native int.
16514     // We need to reset these before traversal because they have already been set
16515     // by the previous walk to determine all the members of the spill clique.
16516     impInlineRoot()->impSpillCliquePredMembers.Reset();
16517     impInlineRoot()->impSpillCliqueSuccMembers.Reset();
16518
16519     ReimportSpillClique callback(this);
16520
16521     impWalkSpillCliqueFromPred(block, &callback);
16522 }
16523
16524 // Set the pre-state of "block" (which should not have a pre-state allocated) to
16525 // a copy of "srcState", cloning tree pointers as required.
16526 void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState)
16527 {
16528     if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom)
16529     {
16530         block->bbEntryState = nullptr;
16531         return;
16532     }
16533
16534     block->bbEntryState = (EntryState*)compGetMemA(sizeof(EntryState));
16535
16536     // block->bbEntryState.esRefcount = 1;
16537
16538     block->bbEntryState->esStackDepth    = srcState->esStackDepth;
16539     block->bbEntryState->thisInitialized = TIS_Bottom;
16540
16541     if (srcState->esStackDepth > 0)
16542     {
16543         block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]);
16544         unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry);
16545
16546         memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize);
16547         for (unsigned level = 0; level < srcState->esStackDepth; level++)
16548         {
16549             GenTreePtr tree                         = srcState->esStack[level].val;
16550             block->bbEntryState->esStack[level].val = gtCloneExpr(tree);
16551         }
16552     }
16553
16554     if (verTrackObjCtorInitState)
16555     {
16556         verSetThisInit(block, srcState->thisInitialized);
16557     }
16558
16559     return;
16560 }
16561
16562 void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis)
16563 {
16564     assert(tis != TIS_Bottom); // Precondition.
16565     if (block->bbEntryState == nullptr)
16566     {
16567         block->bbEntryState = new (this, CMK_Unknown) EntryState();
16568     }
16569
16570     block->bbEntryState->thisInitialized = tis;
16571 }
16572
16573 /*
16574  * Resets the current state to the state at the start of the basic block
16575  */
16576 void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState)
16577 {
16578
16579     if (block->bbEntryState == nullptr)
16580     {
16581         destState->esStackDepth    = 0;
16582         destState->thisInitialized = TIS_Bottom;
16583         return;
16584     }
16585
16586     destState->esStackDepth = block->bbEntryState->esStackDepth;
16587
16588     if (destState->esStackDepth > 0)
16589     {
16590         unsigned stackSize = destState->esStackDepth * sizeof(StackEntry);
16591
16592         memcpy(destState->esStack, block->bbStackOnEntry(), stackSize);
16593     }
16594
16595     destState->thisInitialized = block->bbThisOnEntry();
16596
16597     return;
16598 }
16599
16600 ThisInitState BasicBlock::bbThisOnEntry()
16601 {
16602     return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom;
16603 }
16604
16605 unsigned BasicBlock::bbStackDepthOnEntry()
16606 {
16607     return (bbEntryState ? bbEntryState->esStackDepth : 0);
16608 }
16609
16610 void BasicBlock::bbSetStack(void* stackBuffer)
16611 {
16612     assert(bbEntryState);
16613     assert(stackBuffer);
16614     bbEntryState->esStack = (StackEntry*)stackBuffer;
16615 }
16616
16617 StackEntry* BasicBlock::bbStackOnEntry()
16618 {
16619     assert(bbEntryState);
16620     return bbEntryState->esStack;
16621 }
16622
16623 void Compiler::verInitCurrentState()
16624 {
16625     verTrackObjCtorInitState        = FALSE;
16626     verCurrentState.thisInitialized = TIS_Bottom;
16627
16628     if (tiVerificationNeeded)
16629     {
16630         // Track this ptr initialization
16631         if (!info.compIsStatic && (info.compFlags & CORINFO_FLG_CONSTRUCTOR) && lvaTable[0].lvVerTypeInfo.IsObjRef())
16632         {
16633             verTrackObjCtorInitState        = TRUE;
16634             verCurrentState.thisInitialized = TIS_Uninit;
16635         }
16636     }
16637
16638     // initialize stack info
16639
16640     verCurrentState.esStackDepth = 0;
16641     assert(verCurrentState.esStack != nullptr);
16642
16643     // copy current state to entry state of first BB
16644     verInitBBEntryState(fgFirstBB, &verCurrentState);
16645 }
16646
16647 Compiler* Compiler::impInlineRoot()
16648 {
16649     if (impInlineInfo == nullptr)
16650     {
16651         return this;
16652     }
16653     else
16654     {
16655         return impInlineInfo->InlineRoot;
16656     }
16657 }
16658
16659 BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk)
16660 {
16661     if (predOrSucc == SpillCliquePred)
16662     {
16663         return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd());
16664     }
16665     else
16666     {
16667         assert(predOrSucc == SpillCliqueSucc);
16668         return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd());
16669     }
16670 }
16671
16672 void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val)
16673 {
16674     if (predOrSucc == SpillCliquePred)
16675     {
16676         impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val);
16677     }
16678     else
16679     {
16680         assert(predOrSucc == SpillCliqueSucc);
16681         impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val);
16682     }
16683 }
16684
16685 /*****************************************************************************
16686  *
16687  *  Convert the instrs ("import") into our internal format (trees). The
16688  *  basic flowgraph has already been constructed and is passed in.
16689  */
16690
16691 void Compiler::impImport(BasicBlock* method)
16692 {
16693 #ifdef DEBUG
16694     if (verbose)
16695     {
16696         printf("*************** In impImport() for %s\n", info.compFullName);
16697     }
16698 #endif
16699
16700     /* Allocate the stack contents */
16701
16702     if (info.compMaxStack <= sizeof(impSmallStack) / sizeof(impSmallStack[0]))
16703     {
16704         /* Use local variable, don't waste time allocating on the heap */
16705
16706         impStkSize              = sizeof(impSmallStack) / sizeof(impSmallStack[0]);
16707         verCurrentState.esStack = impSmallStack;
16708     }
16709     else
16710     {
16711         impStkSize              = info.compMaxStack;
16712         verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
16713     }
16714
16715     // initialize the entry state at start of method
16716     verInitCurrentState();
16717
16718     // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase).
16719     Compiler* inlineRoot = impInlineRoot();
16720     if (this == inlineRoot) // These are only used on the root of the inlining tree.
16721     {
16722         // We have initialized these previously, but to size 0.  Make them larger.
16723         impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2);
16724         impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2);
16725         impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2);
16726     }
16727     inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2);
16728     inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2);
16729     inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2);
16730     impBlockListNodeFreeList = nullptr;
16731
16732 #ifdef DEBUG
16733     impLastILoffsStmt   = nullptr;
16734     impNestedStackSpill = false;
16735 #endif
16736     impBoxTemp = BAD_VAR_NUM;
16737
16738     impPendingList = impPendingFree = nullptr;
16739
16740     /* Add the entry-point to the worker-list */
16741
16742     // Skip leading internal blocks. There can be one as a leading scratch BB, and more
16743     // from EH normalization.
16744     // NOTE: It might be possible to always just put fgFirstBB on the pending list, and let everything else just fall
16745     // out.
16746     for (; method->bbFlags & BBF_INTERNAL; method = method->bbNext)
16747     {
16748         // Treat these as imported.
16749         assert(method->bbJumpKind == BBJ_NONE); // We assume all the leading ones are fallthrough.
16750         JITDUMP("Marking leading BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", method->bbNum);
16751         method->bbFlags |= BBF_IMPORTED;
16752     }
16753
16754     impImportBlockPending(method);
16755
16756     /* Import blocks in the worker-list until there are no more */
16757
16758     while (impPendingList)
16759     {
16760         /* Remove the entry at the front of the list */
16761
16762         PendingDsc* dsc = impPendingList;
16763         impPendingList  = impPendingList->pdNext;
16764         impSetPendingBlockMember(dsc->pdBB, 0);
16765
16766         /* Restore the stack state */
16767
16768         verCurrentState.thisInitialized = dsc->pdThisPtrInit;
16769         verCurrentState.esStackDepth    = dsc->pdSavedStack.ssDepth;
16770         if (verCurrentState.esStackDepth)
16771         {
16772             impRestoreStackState(&dsc->pdSavedStack);
16773         }
16774
16775         /* Add the entry to the free list for reuse */
16776
16777         dsc->pdNext    = impPendingFree;
16778         impPendingFree = dsc;
16779
16780         /* Now import the block */
16781
16782         if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION)
16783         {
16784
16785 #ifdef _TARGET_64BIT_
16786             // On AMD64, during verification we have to match JIT64 behavior since the VM is very tightly
16787             // coupled with the JIT64 IL Verification logic.  Look inside verHandleVerificationFailure
16788             // method for further explanation on why we raise this exception instead of making the jitted
16789             // code throw the verification exception during execution.
16790             if (tiVerificationNeeded && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
16791             {
16792                 BADCODE("Basic block marked as not verifiable");
16793             }
16794             else
16795 #endif // _TARGET_64BIT_
16796             {
16797                 verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true));
16798                 impEndTreeList(dsc->pdBB);
16799             }
16800         }
16801         else
16802         {
16803             impImportBlock(dsc->pdBB);
16804
16805             if (compDonotInline())
16806             {
16807                 return;
16808             }
16809             if (compIsForImportOnly() && !tiVerificationNeeded)
16810             {
16811                 return;
16812             }
16813         }
16814     }
16815
16816 #ifdef DEBUG
16817     if (verbose && info.compXcptnsCount)
16818     {
16819         printf("\nAfter impImport() added block for try,catch,finally");
16820         fgDispBasicBlocks();
16821         printf("\n");
16822     }
16823
16824     // Used in impImportBlockPending() for STRESS_CHK_REIMPORT
16825     for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
16826     {
16827         block->bbFlags &= ~BBF_VISITED;
16828     }
16829 #endif
16830
16831     assert(!compIsForInlining() || !tiVerificationNeeded);
16832 }
16833
16834 // Checks if a typeinfo (usually stored in the type stack) is a struct.
16835 // The invariant here is that if it's not a ref or a method and has a class handle,
16836 // it's a valuetype.
16837 bool Compiler::impIsValueType(typeInfo* pTypeInfo)
16838 {
16839     if (pTypeInfo && pTypeInfo->IsValueClassWithClsHnd())
16840     {
16841         return true;
16842     }
16843     else
16844     {
16845         return false;
16846     }
16847 }
16848
16849 /*****************************************************************************
16850  *  Check to see if the tree is the address of a local or
16851     the address of a field in a local.
16852
16853     *lclVarTreeOut will contain the GT_LCL_VAR tree when it returns TRUE.
16854
16855  */
16856
16857 BOOL Compiler::impIsAddressInLocal(GenTreePtr tree, GenTreePtr* lclVarTreeOut)
16858 {
16859     if (tree->gtOper != GT_ADDR)
16860     {
16861         return FALSE;
16862     }
16863
16864     GenTreePtr op = tree->gtOp.gtOp1;
16865     while (op->gtOper == GT_FIELD)
16866     {
16867         op = op->gtField.gtFldObj;
16868         if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL.
16869         {
16870             op = op->gtOp.gtOp1;
16871         }
16872         else
16873         {
16874             return false;
16875         }
16876     }
16877
16878     if (op->gtOper == GT_LCL_VAR)
16879     {
16880         *lclVarTreeOut = op;
16881         return TRUE;
16882     }
16883     else
16884     {
16885         return FALSE;
16886     }
16887 }
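
// For illustration (hedged; these are tree shapes accepted by the walk above, not
// verbatim dumps from the JIT), the function returns TRUE for addresses such as:
//
//     ADDR(LCL_VAR V02)                                  i.e. &local
//     ADDR(FIELD f1(ADDR(LCL_VAR V02)))                  i.e. &local.f1
//     ADDR(FIELD f2(ADDR(FIELD f1(ADDR(LCL_VAR V02)))))  i.e. &local.f1.f2
//
// and FALSE for a field chain that bottoms out in a static field (where gtFldObj is
// null) or in anything other than a local variable.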
16888
16889 //------------------------------------------------------------------------
16890 // impMakeDiscretionaryInlineObservations: make observations that help
16891 // determine the profitability of a discretionary inline
16892 //
16893 // Arguments:
16894 //    pInlineInfo -- InlineInfo for the inline, or null for the prejit root
16895 //    inlineResult -- InlineResult accumulating information about this inline
16896 //
16897 // Notes:
16898 //    If inlining or prejitting the root, this method also makes
16899 //    various observations about the method that factor into inline
16900 //    decisions. It sets `compNativeSizeEstimate` as a side effect.
16901
16902 void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult)
16903 {
16904     assert(pInlineInfo != nullptr && compIsForInlining() || // Perform the actual inlining.
16905            pInlineInfo == nullptr && !compIsForInlining()   // Calculate the static inlining hint for ngen.
16906            );
16907
16908     // If we're really inlining, we should just have one result in play.
16909     assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult));
16910
16911     // If this is a "forceinline" method, the JIT probably shouldn't have gone
16912     // to the trouble of estimating the native code size. Even if it did, it
16913     // shouldn't be relying on the result of this method.
16914     assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE);
16915
16916     // Note if the caller contains NEWOBJ or NEWARR.
16917     Compiler* rootCompiler = impInlineRoot();
16918
16919     if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0)
16920     {
16921         inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY);
16922     }
16923
16924     if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0)
16925     {
16926         inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ);
16927     }
16928
16929     bool calleeIsStatic  = (info.compFlags & CORINFO_FLG_STATIC) != 0;
16930     bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0;
16931
16932     if (isSpecialMethod)
16933     {
16934         if (calleeIsStatic)
16935         {
16936             inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR);
16937         }
16938         else
16939         {
16940             inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR);
16941         }
16942     }
16943     else if (!calleeIsStatic)
16944     {
16945         // Callee is an instance method.
16946         //
16947         // Check if the callee has the same 'this' as the root.
16948         if (pInlineInfo != nullptr)
16949         {
16950             GenTreePtr thisArg = pInlineInfo->iciCall->gtCall.gtCallObjp;
16951             assert(thisArg);
16952             bool isSameThis = impIsThis(thisArg);
16953             inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis);
16954         }
16955     }
16956
16957     // Note if the callee's class is a promotable struct
16958     if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0)
16959     {
16960         lvaStructPromotionInfo structPromotionInfo;
16961         lvaCanPromoteStructType(info.compClassHnd, &structPromotionInfo, false);
16962         if (structPromotionInfo.canPromote)
16963         {
16964             inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE);
16965         }
16966     }
16967
16968 #ifdef FEATURE_SIMD
16969
16970     // Note if this method has SIMD args or a SIMD return value
16971     if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn)
16972     {
16973         inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD);
16974     }
16975
16976 #endif // FEATURE_SIMD
16977
16978     // Roughly classify callsite frequency.
16979     InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED;
16980
16981     // If this is a prejit root, or a maximally hot block...
16982     if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->bbWeight >= BB_MAX_WEIGHT))
16983     {
16984         frequency = InlineCallsiteFrequency::HOT;
16985     }
16986     // No training data.  Look for loop-like things.
16987     // We consider a recursive call loop-like.  Do not give the inlining boost to the method itself.
16988     // However, give it to things nearby.
16989     else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) &&
16990              (pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle))
16991     {
16992         frequency = InlineCallsiteFrequency::LOOP;
16993     }
16994     else if ((pInlineInfo->iciBlock->bbFlags & BBF_PROF_WEIGHT) && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT))
16995     {
16996         frequency = InlineCallsiteFrequency::WARM;
16997     }
16998     // Now classify based on where we're called from.
16999     else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR))
17000     {
17001         frequency = InlineCallsiteFrequency::RARE;
17002     }
17003     else
17004     {
17005         frequency = InlineCallsiteFrequency::BORING;
17006     }
17007
17008     // Also capture the block weight of the call site.  In the prejit
17009     // root case, assume there's some hot call site for this method.
17010     unsigned weight = 0;
17011
17012     if (pInlineInfo != nullptr)
17013     {
17014         weight = pInlineInfo->iciBlock->bbWeight;
17015     }
17016     else
17017     {
17018         weight = BB_MAX_WEIGHT;
17019     }
17020
17021     inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency));
17022     inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, static_cast<int>(weight));
17023 }
17024
17025 /*****************************************************************************
17026  This method makes a STATIC inlining decision based on the IL code.
17027  It should not make any inlining decision based on the context.
17028  If forceInline is true, then the inlining decision should not depend on
17029  performance heuristics (code size, etc.).
17030  */
17031
17032 void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
17033                               CORINFO_METHOD_INFO*  methInfo,
17034                               bool                  forceInline,
17035                               InlineResult*         inlineResult)
17036 {
17037     unsigned codeSize = methInfo->ILCodeSize;
17038
17039     // We shouldn't have made up our minds yet...
17040     assert(!inlineResult->IsDecided());
17041
17042     if (methInfo->EHcount)
17043     {
17044         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH);
17045         return;
17046     }
17047
17048     if ((methInfo->ILCode == nullptr) || (codeSize == 0))
17049     {
17050         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY);
17051         return;
17052     }
17053
17054     // For now we don't inline varargs (import code can't handle it)
17055
17056     if (methInfo->args.isVarArg())
17057     {
17058         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
17059         return;
17060     }
17061
17062     // Reject if it has too many locals.
17063     // This is currently an implementation limit due to fixed-size arrays in the
17064     // inline info, rather than a performance heuristic.
17065
17066     inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs);
17067
17068     if (methInfo->locals.numArgs > MAX_INL_LCLS)
17069     {
17070         inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS);
17071         return;
17072     }
17073
17074     // Make sure there aren't too many arguments.
17075     // This is currently an implementation limit due to fixed-size arrays in the
17076     // inline info, rather than a performance heuristic.
17077
17078     inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs);
17079
17080     if (methInfo->args.numArgs > MAX_INL_ARGS)
17081     {
17082         inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS);
17083         return;
17084     }
17085
17086     // Note force inline state
17087
17088     inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline);
17089
17090     // Note IL code size
17091
17092     inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize);
17093
17094     if (inlineResult->IsFailure())
17095     {
17096         return;
17097     }
17098
17099     // Make sure maxstack is not too big
17100
17101     inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack);
17102
17103     if (inlineResult->IsFailure())
17104     {
17105         return;
17106     }
17107 }
17108
17109 /*****************************************************************************
 Check whether the call 'call' to the method 'fncHandle' can be inlined.
 Runs the IL-level checks, the speculative class init check, and the VM's
 canInline query under an error trap; on success allocates an
 InlineCandidateInfo and returns it via ppInlineCandidateInfo.
17110  */
17111
17112 void Compiler::impCheckCanInline(GenTreePtr             call,
17113                                  CORINFO_METHOD_HANDLE  fncHandle,
17114                                  unsigned               methAttr,
17115                                  CORINFO_CONTEXT_HANDLE exactContextHnd,
17116                                  InlineCandidateInfo**  ppInlineCandidateInfo,
17117                                  InlineResult*          inlineResult)
17118 {
17119     // Either EE or JIT might throw exceptions below.
17120     // If that happens, just don't inline the method.
17121
17122     struct Param
17123     {
17124         Compiler*              pThis;
17125         GenTreePtr             call;
17126         CORINFO_METHOD_HANDLE  fncHandle;
17127         unsigned               methAttr;
17128         CORINFO_CONTEXT_HANDLE exactContextHnd;
17129         InlineResult*          result;
17130         InlineCandidateInfo**  ppInlineCandidateInfo;
17131     } param = {nullptr};
17132
17133     param.pThis                 = this;
17134     param.call                  = call;
17135     param.fncHandle             = fncHandle;
17136     param.methAttr              = methAttr;
17137     param.exactContextHnd       = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle);
17138     param.result                = inlineResult;
17139     param.ppInlineCandidateInfo = ppInlineCandidateInfo;
17140
17141     bool success = eeRunWithErrorTrap<Param>(
17142         [](Param* pParam) {
17143             DWORD                  dwRestrictions = 0;
17144             CorInfoInitClassResult initClassResult;
17145
17146 #ifdef DEBUG
17147             const char* methodName;
17148             const char* className;
17149             methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className);
17150
17151             if (JitConfig.JitNoInline())
17152             {
17153                 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE);
17154                 goto _exit;
17155             }
17156 #endif
17157
17158             /* Try to get the code address/size for the method */
17159
17160             CORINFO_METHOD_INFO methInfo;
17161             if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo))
17162             {
17163                 pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO);
17164                 goto _exit;
17165             }
17166
17167             bool forceInline;
17168             forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE);
17169
17170             pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result);
17171
17172             if (pParam->result->IsFailure())
17173             {
17174                 assert(pParam->result->IsNever());
17175                 goto _exit;
17176             }
17177
17178             // Speculatively check if initClass() can be done.
17179             // If it can be done, we will try to inline the method. If inlining
17180             // succeeds, then we will do the non-speculative initClass() and commit it.
17181             // If this speculative call to initClass() fails, there is no point
17182             // trying to inline this method.
17183             initClassResult =
17184                 pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */,
17185                                                            pParam->exactContextHnd /* context */,
17186                                                            TRUE /* speculative */);
17187
17188             if (initClassResult & CORINFO_INITCLASS_DONT_INLINE)
17189             {
17190                 pParam->result->NoteFatal(InlineObservation::CALLSITE_CLASS_INIT_FAILURE_SPEC);
17191                 goto _exit;
17192             }
17193
17194             // Give the EE the final say in whether to inline or not.
17195             // This should be last since, for verifiable code, it can be expensive
17196
17197             /* VM Inline check also ensures that the method is verifiable if needed */
17198             CorInfoInline vmResult;
17199             vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle,
17200                                                                   &dwRestrictions);
17201
17202             if (vmResult == INLINE_FAIL)
17203             {
17204                 pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE);
17205             }
17206             else if (vmResult == INLINE_NEVER)
17207             {
17208                 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE);
17209             }
17210
17211             if (pParam->result->IsFailure())
17212             {
17213                 // Make sure not to report this one.  It was already reported by the VM.
17214                 pParam->result->SetReported();
17215                 goto _exit;
17216             }
17217
17218             // check for unsupported inlining restrictions
17219             assert((dwRestrictions & ~(INLINE_RESPECT_BOUNDARY | INLINE_NO_CALLEE_LDSTR | INLINE_SAME_THIS)) == 0);
17220
17221             if (dwRestrictions & INLINE_SAME_THIS)
17222             {
17223                 GenTreePtr thisArg = pParam->call->gtCall.gtCallObjp;
17224                 assert(thisArg);
17225
17226                 if (!pParam->pThis->impIsThis(thisArg))
17227                 {
17228                     pParam->result->NoteFatal(InlineObservation::CALLSITE_REQUIRES_SAME_THIS);
17229                     goto _exit;
17230                 }
17231             }
17232
17233             /* Get the method properties */
17234
17235             CORINFO_CLASS_HANDLE clsHandle;
17236             clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle);
17237             unsigned clsAttr;
17238             clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle);
17239
17240             /* Get the return type */
17241
17242             var_types fncRetType;
17243             fncRetType = pParam->call->TypeGet();
17244
17245 #ifdef DEBUG
17246             var_types fncRealRetType;
17247             fncRealRetType = JITtype2varType(methInfo.args.retType);
17248
17249             assert((genActualType(fncRealRetType) == genActualType(fncRetType)) ||
17250                    // <BUGNUM> VSW 288602 </BUGNUM>
17251                    // In case of IJW, we allow to assign a native pointer to a BYREF.
17252                    (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) ||
17253                    (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT)));
17254 #endif
17255
17256             //
17257             // Allocate an InlineCandidateInfo structure
17258             //
17259             InlineCandidateInfo* pInfo;
17260             pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo;
17261
17262             pInfo->dwRestrictions  = dwRestrictions;
17263             pInfo->methInfo        = methInfo;
17264             pInfo->methAttr        = pParam->methAttr;
17265             pInfo->clsHandle       = clsHandle;
17266             pInfo->clsAttr         = clsAttr;
17267             pInfo->fncRetType      = fncRetType;
17268             pInfo->exactContextHnd = pParam->exactContextHnd;
17269             pInfo->ilCallerHandle  = pParam->pThis->info.compMethodHnd;
17270             pInfo->initClassResult = initClassResult;
17271
17272             *(pParam->ppInlineCandidateInfo) = pInfo;
17273
17274         _exit:;
17275         },
17276         &param);
17277     if (!success)
17278     {
17279         param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR);
17280     }
17281 }
17282
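/*****************************************************************************
 *
 *  Record information about a single actual argument of an inline candidate:
 *  reject GT_MKREFANY arguments, and note by-refs to struct locals, global
 *  refs and other side effects, plain local vars, invariant values (a
 *  constant null 'this' aborts the inline), and arguments whose trees
 *  reference address-taken locals.
 */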
17283 void Compiler::impInlineRecordArgInfo(InlineInfo*   pInlineInfo,
17284                                       GenTreePtr    curArgVal,
17285                                       unsigned      argNum,
17286                                       InlineResult* inlineResult)
17287 {
17288     InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum];
17289
17290     if (curArgVal->gtOper == GT_MKREFANY)
17291     {
17292         inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY);
17293         return;
17294     }
17295
17296     inlCurArgInfo->argNode = curArgVal;
17297
17298     GenTreePtr lclVarTree;
17299     if (impIsAddressInLocal(curArgVal, &lclVarTree) && varTypeIsStruct(lclVarTree))
17300     {
17301         inlCurArgInfo->argIsByRefToStructLocal = true;
17302 #ifdef FEATURE_SIMD
17303         if (lvaTable[lclVarTree->AsLclVarCommon()->gtLclNum].lvSIMDType)
17304         {
17305             pInlineInfo->hasSIMDTypeArgLocalOrReturn = true;
17306         }
17307 #endif // FEATURE_SIMD
17308     }
17309
17310     if (curArgVal->gtFlags & GTF_ALL_EFFECT)
17311     {
17312         inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0;
17313         inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0;
17314     }
17315
17316     if (curArgVal->gtOper == GT_LCL_VAR)
17317     {
17318         inlCurArgInfo->argIsLclVar = true;
17319
17320         /* Remember the "original" argument number */
17321         curArgVal->gtLclVar.gtLclILoffs = argNum;
17322     }
17323
17324     if ((curArgVal->OperKind() & GTK_CONST) ||
17325         ((curArgVal->gtOper == GT_ADDR) && (curArgVal->gtOp.gtOp1->gtOper == GT_LCL_VAR)))
17326     {
17327         inlCurArgInfo->argIsInvariant = true;
17328         if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->gtIntCon.gtIconVal == 0))
17329         {
17330             /* Abort, but do not mark as not inlinable */
17331             inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS);
17332             return;
17333         }
17334     }
17335
17336     if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal))
17337     {
17338         inlCurArgInfo->argHasLdargaOp = true;
17339     }
17340
17341 #ifdef DEBUG
17342     if (verbose)
17343     {
17344         if (inlCurArgInfo->argIsThis)
17345         {
17346             printf("thisArg:");
17347         }
17348         else
17349         {
17350             printf("\nArgument #%u:", argNum);
17351         }
17352         if (inlCurArgInfo->argIsLclVar)
17353         {
17354             printf(" is a local var");
17355         }
17356         if (inlCurArgInfo->argIsInvariant)
17357         {
17358             printf(" is a constant");
17359         }
17360         if (inlCurArgInfo->argHasGlobRef)
17361         {
17362             printf(" has global refs");
17363         }
17364         if (inlCurArgInfo->argHasSideEff)
17365         {
17366             printf(" has side effects");
17367         }
17368         if (inlCurArgInfo->argHasLdargaOp)
17369         {
17370             printf(" has ldarga effect");
17371         }
17372         if (inlCurArgInfo->argHasStargOp)
17373         {
17374             printf(" has starg effect");
17375         }
17376         if (inlCurArgInfo->argIsByRefToStructLocal)
17377         {
17378             printf(" is byref to a struct local");
17379         }
17380
17381         printf("\n");
17382         gtDispTree(curArgVal);
17383         printf("\n");
17384     }
17385 #endif
17386 }
17387
17388 /*****************************************************************************
17389  *
 *  Initialize the inline candidate's argument and local tables (inlArgInfo and
 *  lclVarInfo) from the call site trees and the callee signature: record info
 *  about 'this' and each argument, reconcile tree types with signature types,
 *  and note pinned locals and struct locals that contain GC pointers.
17390  */
17391
17392 void Compiler::impInlineInitVars(InlineInfo* pInlineInfo)
17393 {
17394     assert(!compIsForInlining());
17395
17396     GenTreePtr           call         = pInlineInfo->iciCall;
17397     CORINFO_METHOD_INFO* methInfo     = &pInlineInfo->inlineCandidateInfo->methInfo;
17398     unsigned             clsAttr      = pInlineInfo->inlineCandidateInfo->clsAttr;
17399     InlArgInfo*          inlArgInfo   = pInlineInfo->inlArgInfo;
17400     InlLclVarInfo*       lclVarInfo   = pInlineInfo->lclVarInfo;
17401     InlineResult*        inlineResult = pInlineInfo->inlineResult;
17402
17403     const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo);
17404
17405     /* Init the argument struct */
17406
17407     memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0]));
17408
17409     /* Get hold of the 'this' pointer and the argument list proper */
17410
17411     GenTreePtr thisArg = call->gtCall.gtCallObjp;
17412     GenTreePtr argList = call->gtCall.gtCallArgs;
17413     unsigned   argCnt  = 0; // Count of the arguments
17414
17415     assert((methInfo->args.hasThis()) == (thisArg != nullptr));
17416
17417     if (thisArg)
17418     {
17419         inlArgInfo[0].argIsThis = true;
17420
17421         impInlineRecordArgInfo(pInlineInfo, thisArg, argCnt, inlineResult);
17422
17423         if (inlineResult->IsFailure())
17424         {
17425             return;
17426         }
17427
17428         /* Increment the argument count */
17429         argCnt++;
17430     }
17431
17432     /* Record some information about each of the arguments */
17433     bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0;
17434
17435 #if USER_ARGS_COME_LAST
17436     unsigned typeCtxtArg = thisArg ? 1 : 0;
17437 #else  // USER_ARGS_COME_LAST
17438     unsigned typeCtxtArg = methInfo->args.totalILArgs();
17439 #endif // USER_ARGS_COME_LAST
17440
17441     for (GenTreePtr argTmp = argList; argTmp; argTmp = argTmp->gtOp.gtOp2)
17442     {
17443         if (argTmp == argList && hasRetBuffArg)
17444         {
17445             continue;
17446         }
17447
17448         // Ignore the type context argument
17449         if (hasTypeCtxtArg && (argCnt == typeCtxtArg))
17450         {
17451             typeCtxtArg = 0xFFFFFFFF;
17452             continue;
17453         }
17454
17455         assert(argTmp->gtOper == GT_LIST);
17456         GenTreePtr argVal = argTmp->gtOp.gtOp1;
17457
17458         impInlineRecordArgInfo(pInlineInfo, argVal, argCnt, inlineResult);
17459
17460         if (inlineResult->IsFailure())
17461         {
17462             return;
17463         }
17464
17465         /* Increment the argument count */
17466         argCnt++;
17467     }
17468
17469     /* Make sure we got the arg number right */
17470     assert(argCnt == methInfo->args.totalILArgs());
17471
17472 #ifdef FEATURE_SIMD
17473     bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn;
17474 #endif // FEATURE_SIMD
17475
17476     /* We have typeless opcodes; get type information from the signature */
17477
17478     if (thisArg)
17479     {
17480         var_types sigType;
17481
17482         if (clsAttr & CORINFO_FLG_VALUECLASS)
17483         {
17484             sigType = TYP_BYREF;
17485         }
17486         else
17487         {
17488             sigType = TYP_REF;
17489         }
17490
17491         lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle);
17492         lclVarInfo[0].lclHasLdlocaOp = false;
17493
17494 #ifdef FEATURE_SIMD
17495         // We always want to check isSIMDClass, since we want to set foundSIMDType (to increase
17496         // the inlining multiplier) for anything in that assembly.
17497         // But we only need to normalize it if it is a TYP_STRUCT
17498         // (which we need to do even if we have already set foundSIMDType).
17499         if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
17500         {
17501             if (sigType == TYP_STRUCT)
17502             {
17503                 sigType = impNormStructType(lclVarInfo[0].lclVerTypeInfo.GetClassHandle());
17504             }
17505             foundSIMDType = true;
17506         }
17507 #endif // FEATURE_SIMD
17508         lclVarInfo[0].lclTypeInfo = sigType;
17509
17510         assert(varTypeIsGC(thisArg->gtType) ||   // "this" is managed
17511                (thisArg->gtType == TYP_I_IMPL && // "this" is unmanaged but the method's class doesn't care
17512                 (clsAttr & CORINFO_FLG_VALUECLASS)));
17513
17514         if (genActualType(thisArg->gtType) != genActualType(sigType))
17515         {
17516             if (sigType == TYP_REF)
17517             {
17518                 /* The argument cannot be bashed into a ref (see bug 750871) */
17519                 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF);
17520                 return;
17521             }
17522
17523             /* This can only happen with byrefs <-> ints/shorts */
17524
17525             assert(genActualType(sigType) == TYP_I_IMPL || sigType == TYP_BYREF);
17526             assert(genActualType(thisArg->gtType) == TYP_I_IMPL || thisArg->gtType == TYP_BYREF);
17527
17528             if (sigType == TYP_BYREF)
17529             {
17530                 lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17531             }
17532             else if (thisArg->gtType == TYP_BYREF)
17533             {
17534                 assert(sigType == TYP_I_IMPL);
17535
17536                 /* If possible change the BYREF to an int */
17537                 if (thisArg->IsVarAddr())
17538                 {
17539                     thisArg->gtType              = TYP_I_IMPL;
17540                     lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17541                 }
17542                 else
17543                 {
17544                     /* Arguments 'int <- byref' cannot be bashed */
17545                     inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
17546                     return;
17547                 }
17548             }
17549         }
17550     }
17551
17552     /* Init the types of the arguments and make sure the types
17553      * from the trees match the types in the signature */
17554
17555     CORINFO_ARG_LIST_HANDLE argLst;
17556     argLst = methInfo->args.args;
17557
17558     unsigned i;
17559     for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst))
17560     {
17561         var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args);
17562
17563         lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst);
17564
17565 #ifdef FEATURE_SIMD
17566         if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
17567         {
17568             // If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've
17569             // found a SIMD type, even if this may not be a type we recognize (the assumption is that
17570             // it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier).
17571             foundSIMDType = true;
17572             if (sigType == TYP_STRUCT)
17573             {
17574                 var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle());
17575                 sigType              = structType;
17576             }
17577         }
17578 #endif // FEATURE_SIMD
17579
17580         lclVarInfo[i].lclTypeInfo    = sigType;
17581         lclVarInfo[i].lclHasLdlocaOp = false;
17582
17583         /* Does the tree type match the signature type? */
17584
17585         GenTreePtr inlArgNode = inlArgInfo[i].argNode;
17586
17587         if (sigType != inlArgNode->gtType)
17588         {
17589             /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints,
17590                but in bad IL cases with caller-callee signature mismatches we can see other types.
17591                Intentionally reject such mismatched cases so the jit stays robust when
17592                encountering bad IL. */
17593
17594             bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) ||
17595                                         (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) ||
17596                                         (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType));
17597
17598             if (!isPlausibleTypeMatch)
17599             {
17600                 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE);
17601                 return;
17602             }
17603
17604             /* Is it a narrowing or widening cast?
17605              * Widening casts are ok since the value computed is already
17606              * normalized to an int (on the IL stack) */
17607
17608             if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType))
17609             {
17610                 if (sigType == TYP_BYREF)
17611                 {
17612                     lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17613                 }
17614                 else if (inlArgNode->gtType == TYP_BYREF)
17615                 {
17616                     assert(varTypeIsIntOrI(sigType));
17617
17618                     /* If possible bash the BYREF to an int */
17619                     if (inlArgNode->IsVarAddr())
17620                     {
17621                         inlArgNode->gtType           = TYP_I_IMPL;
17622                         lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17623                     }
17624                     else
17625                     {
17626                         /* Arguments 'int <- byref' cannot be changed */
17627                         inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
17628                         return;
17629                     }
17630                 }
17631                 else if (genTypeSize(sigType) < EA_PTRSIZE)
17632                 {
17633                     /* Narrowing cast */
17634
17635                     if (inlArgNode->gtOper == GT_LCL_VAR &&
17636                         !lvaTable[inlArgNode->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad() &&
17637                         sigType == lvaGetRealType(inlArgNode->gtLclVarCommon.gtLclNum))
17638                     {
17639                         /* We don't need to insert a cast here as the variable
17640                            was assigned a normalized value of the right type */
17641
17642                         continue;
17643                     }
17644
17645                     inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(TYP_INT, inlArgNode, sigType);
17646
17647                     inlArgInfo[i].argIsLclVar = false;
17648
17649                     /* Try to fold the node in case we have constant arguments */
17650
17651                     if (inlArgInfo[i].argIsInvariant)
17652                     {
17653                         inlArgNode            = gtFoldExprConst(inlArgNode);
17654                         inlArgInfo[i].argNode = inlArgNode;
17655                         assert(inlArgNode->OperIsConst());
17656                     }
17657                 }
17658 #ifdef _TARGET_64BIT_
17659                 else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType))
17660                 {
17661                     // This should only happen for int -> native int widening
17662                     inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(genActualType(sigType), inlArgNode, sigType);
17663
17664                     inlArgInfo[i].argIsLclVar = false;
17665
17666                     /* Try to fold the node in case we have constant arguments */
17667
17668                     if (inlArgInfo[i].argIsInvariant)
17669                     {
17670                         inlArgNode            = gtFoldExprConst(inlArgNode);
17671                         inlArgInfo[i].argNode = inlArgNode;
17672                         assert(inlArgNode->OperIsConst());
17673                     }
17674                 }
17675 #endif // _TARGET_64BIT_
17676             }
17677         }
17678     }
17679
17680     /* Init the types of the local variables */
17681
17682     CORINFO_ARG_LIST_HANDLE localsSig;
17683     localsSig = methInfo->locals.args;
17684
17685     for (i = 0; i < methInfo->locals.numArgs; i++)
17686     {
17687         bool      isPinned;
17688         var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned);
17689
17690         lclVarInfo[i + argCnt].lclHasLdlocaOp = false;
17691         lclVarInfo[i + argCnt].lclIsPinned    = isPinned;
17692         lclVarInfo[i + argCnt].lclTypeInfo    = type;
17693
17694         if (isPinned)
17695         {
17696             // Pinned locals may cause inlines to fail.
17697             inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS);
17698             if (inlineResult->IsFailure())
17699             {
17700                 return;
17701             }
17702         }
17703
17704         lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig);
17705
17706         // If this local is a struct type with GC fields, inform the inliner. It may choose to bail
17707         // out on the inline.
17708         if (type == TYP_STRUCT)
17709         {
17710             CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle();
17711             DWORD                typeFlags = info.compCompHnd->getClassAttribs(lclHandle);
17712             if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
17713             {
17714                 inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
17715                 if (inlineResult->IsFailure())
17716                 {
17717                     return;
17718                 }
17719
17720                 // Do further notification in the case where the call site is rare; some policies do
17721                 // not track the relative hotness of call sites for "always" inline cases.
17722                 if (pInlineInfo->iciBlock->isRunRarely())
17723                 {
17724                     inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
17725                     if (inlineResult->IsFailure())
17726                     {
17727
17728                         return;
17729                     }
17730                 }
17731             }
17732         }
17733
17734         localsSig = info.compCompHnd->getArgNext(localsSig);
17735
17736 #ifdef FEATURE_SIMD
17737         if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo)))
17738         {
17739             foundSIMDType = true;
17740             if (featureSIMD && type == TYP_STRUCT)
17741             {
17742                 var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle());
17743                 lclVarInfo[i + argCnt].lclTypeInfo = structType;
17744             }
17745         }
17746 #endif // FEATURE_SIMD
17747     }
17748
17749 #ifdef FEATURE_SIMD
17750     if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDClass(call->AsCall()->gtRetClsHnd))
17751     {
17752         foundSIMDType = true;
17753     }
17754     pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType;
17755 #endif // FEATURE_SIMD
17756 }
17757
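/*****************************************************************************
 *
 *  Map an inlinee local (lclNum) to a temp in the inliner's local table,
 *  allocating the temp on first use and propagating its type, ldloca,
 *  pinned, and struct information.  Returns the temp's local number.
 */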
17758 unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason))
17759 {
17760     assert(compIsForInlining());
17761
17762     unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum];
17763
17764     if (tmpNum == BAD_VAR_NUM)
17765     {
17766         var_types lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
17767
17768         // The lifetime of this local might span multiple BBs.
17769         // So it is a long lifetime local.
17770         impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason));
17771
17772         lvaTable[tmpNum].lvType = lclTyp;
17773         if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclHasLdlocaOp)
17774         {
17775             lvaTable[tmpNum].lvHasLdAddrOp = 1;
17776         }
17777
17778         if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclIsPinned)
17779         {
17780             lvaTable[tmpNum].lvPinned = 1;
17781
17782             if (!impInlineInfo->hasPinnedLocals)
17783             {
17784                 // If the inlinee returns a value, use a spill temp
17785                 // for the return value to ensure that even in case
17786                 // where the return expression refers to one of the
17787                 // pinned locals, we can unpin the local right after
17788                 // the inlined method body.
17789                 if ((info.compRetNativeType != TYP_VOID) && (lvaInlineeReturnSpillTemp == BAD_VAR_NUM))
17790                 {
17791                     lvaInlineeReturnSpillTemp =
17792                         lvaGrabTemp(false DEBUGARG("Inline candidate pinned local return spill temp"));
17793                     lvaTable[lvaInlineeReturnSpillTemp].lvType = info.compRetNativeType;
17794                 }
17795             }
17796
17797             impInlineInfo->hasPinnedLocals = true;
17798         }
17799
17800         if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo.IsStruct())
17801         {
17802             if (varTypeIsStruct(lclTyp))
17803             {
17804                 lvaSetStruct(tmpNum,
17805                              impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo.GetClassHandle(),
17806                              true /* unsafe value cls check */);
17807             }
17808             else
17809             {
17810                 // This is a wrapped primitive.  Make sure the verstate knows that
17811                 lvaTable[tmpNum].lvVerTypeInfo =
17812                     impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo;
17813             }
17814         }
17815     }
17816
17817     return tmpNum;
17818 }
17819
17820 // Returns the GenTree (usually a GT_LCL_VAR) representing the given argument of the inlined method.
17821 // Only use this method for the arguments of the inlinee method.
17822 // !!! Do not use it for the locals of the inlinee method. !!!!
17823
17824 GenTreePtr Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo)
17825 {
17826     /* Get the argument type */
17827     var_types lclTyp = lclVarInfo[lclNum].lclTypeInfo;
17828
17829     GenTreePtr op1 = nullptr;
17830
17831     // constant or address of local
17832     if (inlArgInfo[lclNum].argIsInvariant && !inlArgInfo[lclNum].argHasLdargaOp && !inlArgInfo[lclNum].argHasStargOp)
17833     {
17834         /* Clone the constant. Note that we cannot directly use argNode
17835         in the trees even if inlArgInfo[lclNum].argIsUsed==false as this
17836         would introduce aliasing between inlArgInfo[].argNode and
17837         impInlineExpr. Then gtFoldExpr() could change it, causing further
17838         references to the argument to work off of the bashed copy. */
17839
17840         op1 = gtCloneExpr(inlArgInfo[lclNum].argNode);
17841         PREFIX_ASSUME(op1 != nullptr);
17842         inlArgInfo[lclNum].argTmpNum = (unsigned)-1; // illegal temp
17843     }
17844     else if (inlArgInfo[lclNum].argIsLclVar && !inlArgInfo[lclNum].argHasLdargaOp && !inlArgInfo[lclNum].argHasStargOp)
17845     {
17846         /* Argument is a local variable (of the caller)
17847          * Can we re-use the passed argument node? */
17848
17849         op1                          = inlArgInfo[lclNum].argNode;
17850         inlArgInfo[lclNum].argTmpNum = op1->gtLclVarCommon.gtLclNum;
17851
17852         if (inlArgInfo[lclNum].argIsUsed)
17853         {
17854             assert(op1->gtOper == GT_LCL_VAR);
17855             assert(lclNum == op1->gtLclVar.gtLclILoffs);
17856
17857             if (!lvaTable[op1->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad())
17858             {
17859                 lclTyp = genActualType(lclTyp);
17860             }
17861
17862             /* Create a new lcl var node - remember the argument lclNum */
17863             op1 = gtNewLclvNode(op1->gtLclVarCommon.gtLclNum, lclTyp, op1->gtLclVar.gtLclILoffs);
17864         }
17865     }
17866     else if (inlArgInfo[lclNum].argIsByRefToStructLocal && !inlArgInfo[lclNum].argHasStargOp)
17867     {
17868         /* Argument is a by-ref address to a struct, a normed struct, or its field.
17869            In these cases, don't spill the byref to a local; simply clone the tree and use it.
17870            This way we will increase the chance for this byref to be optimized away by
17871            a subsequent "dereference" operation.
17872
17873            From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree
17874            (in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal.
17875            For example, if the caller is:
17876                 ldloca.s   V_1  // V_1 is a local struct
17877                 call       void Test.ILPart::RunLdargaOnPointerArg(int32*)
17878            and the callee being inlined has:
17879                 .method public static void  RunLdargaOnPointerArg(int32* ptrToInts) cil managed
17880                     ldarga.s   ptrToInts
17881                     call       void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**)
17882            then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll
17883            soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR.
17884         */
17885         assert(inlArgInfo[lclNum].argNode->TypeGet() == TYP_BYREF ||
17886                inlArgInfo[lclNum].argNode->TypeGet() == TYP_I_IMPL);
17887         op1 = gtCloneExpr(inlArgInfo[lclNum].argNode);
17888     }
17889     else
17890     {
17891         /* Argument is a complex expression - it must be evaluated into a temp */
17892
17893         if (inlArgInfo[lclNum].argHasTmp)
17894         {
17895             assert(inlArgInfo[lclNum].argIsUsed);
17896             assert(inlArgInfo[lclNum].argTmpNum < lvaCount);
17897
17898             /* Create a new lcl var node - remember the argument lclNum */
17899             op1 = gtNewLclvNode(inlArgInfo[lclNum].argTmpNum, genActualType(lclTyp));
17900
17901             /* This is the second or later use of the this argument,
17902             so we have to use the temp (instead of the actual arg) */
17903             inlArgInfo[lclNum].argBashTmpNode = nullptr;
17904         }
17905         else
17906         {
17907             /* First time use */
17908             assert(inlArgInfo[lclNum].argIsUsed == false);
17909
17910             /* Reserve a temp for the expression.
17911             * Use a large size node as we may change it later */
17912
17913             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg"));
17914
17915             lvaTable[tmpNum].lvType = lclTyp;
17916             assert(lvaTable[tmpNum].lvAddrExposed == 0);
17917             if (inlArgInfo[lclNum].argHasLdargaOp)
17918             {
17919                 lvaTable[tmpNum].lvHasLdAddrOp = 1;
17920             }
17921
17922             if (lclVarInfo[lclNum].lclVerTypeInfo.IsStruct())
17923             {
17924                 if (varTypeIsStruct(lclTyp))
17925                 {
17926                     lvaSetStruct(tmpNum, impInlineInfo->lclVarInfo[lclNum].lclVerTypeInfo.GetClassHandle(),
17927                                  true /* unsafe value cls check */);
17928                 }
17929                 else
17930                 {
17931                     // This is a wrapped primitive.  Make sure the verstate knows that
17932                     lvaTable[tmpNum].lvVerTypeInfo = impInlineInfo->lclVarInfo[lclNum].lclVerTypeInfo;
17933                 }
17934             }
17935
17936             inlArgInfo[lclNum].argHasTmp = true;
17937             inlArgInfo[lclNum].argTmpNum = tmpNum;
17938
17939             // If we require strict exception order, then arguments must
17940             // be evaluated in sequence before the body of the inlined method.
17941             // So we need to evaluate them to a temp.
17942             // Also, if arguments have global references, we need to
17943             // evaluate them to a temp before the inlined body as the
17944             // inlined body may be modifying the global ref.
17945             // TODO-1stClassStructs: We currently do not reuse an existing lclVar
17946             // if it is a struct, because it requires some additional handling.
17947
17948             if (!varTypeIsStruct(lclTyp) && (!inlArgInfo[lclNum].argHasSideEff) && (!inlArgInfo[lclNum].argHasGlobRef))
17949             {
17950                 /* Get a *LARGE* LCL_VAR node */
17951                 op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp), lclNum);
17952
17953                 /* Record op1 as the very first use of this argument.
17954                 If there are no further uses of the arg, we may be
17955                 able to use the actual arg node instead of the temp.
17956                 If we do see any further uses, we will clear this. */
17957                 inlArgInfo[lclNum].argBashTmpNode = op1;
17958             }
17959             else
17960             {
17961                 /* Get a small LCL_VAR node */
17962                 op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp));
17963                 /* No bashing of this argument */
17964                 inlArgInfo[lclNum].argBashTmpNode = nullptr;
17965             }
17966         }
17967     }
17968
17969     /* Mark the argument as used */
17970
17971     inlArgInfo[lclNum].argIsUsed = true;
17972
17973     return op1;
17974 }
17975
17976 /******************************************************************************
17977  Is this the original "this" argument to the call being inlined?
17978
17979  Note that we do not inline methods with "starg 0", and so we do not need to
17980  worry about the 'this' argument being reassigned.
17981 */
17982
17983 BOOL Compiler::impInlineIsThis(GenTreePtr tree, InlArgInfo* inlArgInfo)
17984 {
17985     assert(compIsForInlining());
17986     return (tree->gtOper == GT_LCL_VAR && tree->gtLclVarCommon.gtLclNum == inlArgInfo[0].argTmpNum);
17987 }
17988
17989 //-----------------------------------------------------------------------------
17990 // This function checks if a dereference in the inlinee can guarantee that
17991 // the "this" pointer is non-NULL.
17992 // If we haven't hit a branch or a side effect, and we are dereferencing
17993 // from 'this' to access a field or make a GTF_CALL_NULLCHECK call,
17994 // then we can avoid a separate null pointer check.
17995 //
17996 // "additionalTreesToBeEvaluatedBefore"
17997 // is the set of pending trees that have not yet been added to the statement list,
17998 // and which have been removed from verCurrentState.esStack[]
17999
18000 BOOL Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTreePtr  additionalTreesToBeEvaluatedBefore,
18001                                                                   GenTreePtr  variableBeingDereferenced,
18002                                                                   InlArgInfo* inlArgInfo)
18003 {
18004     assert(compIsForInlining());
18005     assert(opts.OptEnabled(CLFLG_INLINING));
18006
18007     BasicBlock* block = compCurBB;
18008
18009     GenTreePtr stmt;
18010     GenTreePtr expr;
18011
18012     if (block != fgFirstBB)
18013     {
18014         return FALSE;
18015     }
18016
18017     if (!impInlineIsThis(variableBeingDereferenced, inlArgInfo))
18018     {
18019         return FALSE;
18020     }
18021
18022     if (additionalTreesToBeEvaluatedBefore &&
18023         GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTreesToBeEvaluatedBefore->gtFlags))
18024     {
18025         return FALSE;
18026     }
18027
18028     for (stmt = impTreeList->gtNext; stmt; stmt = stmt->gtNext)
18029     {
18030         expr = stmt->gtStmt.gtStmtExpr;
18031
18032         if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags))
18033         {
18034             return FALSE;
18035         }
18036     }
18037
18038     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
18039     {
18040         unsigned stackTreeFlags = verCurrentState.esStack[level].val->gtFlags;
18041         if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags))
18042         {
18043             return FALSE;
18044         }
18045     }
18046
18047     return TRUE;
18048 }
18049
18050 /******************************************************************************/
18051 // Check the inlining eligibility of this GT_CALL node.
18052 // Mark GTF_CALL_INLINE_CANDIDATE on the GT_CALL node
18053
18054 // Todo: find a way to record the failure reasons in the IR (or
18055 // otherwise build tree context) so when we do the inlining pass we
18056 // can capture these reasons
18057
18058 void Compiler::impMarkInlineCandidate(GenTreePtr             callNode,
18059                                       CORINFO_CONTEXT_HANDLE exactContextHnd,
18060                                       CORINFO_CALL_INFO*     callInfo)
18061 {
18062     // Let the strategy know there's another call
18063     impInlineRoot()->m_inlineStrategy->NoteCall();
18064
18065     if (!opts.OptEnabled(CLFLG_INLINING))
18066     {
18067         /* XXX Mon 8/18/2008
18068          * This assert is misleading.  The caller does not ensure that we have CLFLG_INLINING set before
18069          * calling impMarkInlineCandidate.  However, if this assert trips it means that we're an inlinee and
18070          * CLFLG_MINOPT is set.  That doesn't make a lot of sense.  If you hit this assert, work back and
18071          * figure out why we did not set MAXOPT for this compile.
18072          */
18073         assert(!compIsForInlining());
18074         return;
18075     }
18076
18077     if (compIsForImportOnly())
18078     {
18079         // Don't bother creating the inline candidate during verification.
18080         // Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification
18081         // that leads to the creation of multiple instances of Compiler.
18082         return;
18083     }
18084
18085     GenTreeCall* call = callNode->AsCall();
18086     InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate");
18087
18088     // Don't inline if not optimizing root method
18089     if (opts.compDbgCode)
18090     {
18091         inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN);
18092         return;
18093     }
18094
18095     // Don't inline if inlining into root method is disabled.
18096     if (InlineStrategy::IsNoInline(info.compCompHnd, info.compMethodHnd))
18097     {
18098         inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE);
18099         return;
18100     }
18101
18102     // Inlining candidate determination needs to honor only the IL tail prefix.
18103     // Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive).
18104     if (call->IsTailPrefixedCall())
18105     {
18106         inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX);
18107         return;
18108     }
18109
18110     // Tail recursion elimination takes precedence over inlining.
18111     // TODO: We may want to do some of the additional checks from fgMorphCall
18112     // here to reduce the chance we don't inline a call that won't be optimized
18113     // as a fast tail call or turned into a loop.
18114     if (gtIsRecursiveCall(call) && call->IsImplicitTailCall())
18115     {
18116         inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL);
18117         return;
18118     }
18119
18120     if ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT)
18121     {
18122         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT);
18123         return;
18124     }
18125
18126     /* Ignore helper calls */
18127
18128     if (call->gtCallType == CT_HELPER)
18129     {
18130         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER);
18131         return;
18132     }
18133
18134     /* Ignore indirect calls */
18135     if (call->gtCallType == CT_INDIRECT)
18136     {
18137         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED);
18138         return;
18139     }
18140
18141     /* I removed the check for BBJ_THROW.  BBJ_THROW is usually marked as rarely run.  This more or less
18142      * restricts the inliner to non-expanding inlines.  I removed the check to allow for non-expanding
18143      * inlining in throw blocks.  I should consider the same thing for catch and filter regions. */
18144
18145     CORINFO_METHOD_HANDLE fncHandle = call->gtCallMethHnd;
18146     unsigned              methAttr;
18147
18148     // Reuse method flags from the original callInfo if possible
18149     if (fncHandle == callInfo->hMethod)
18150     {
18151         methAttr = callInfo->methodFlags;
18152     }
18153     else
18154     {
18155         methAttr = info.compCompHnd->getMethodAttribs(fncHandle);
18156     }
18157
18158 #ifdef DEBUG
18159     if (compStressCompile(STRESS_FORCE_INLINE, 0))
18160     {
18161         methAttr |= CORINFO_FLG_FORCEINLINE;
18162     }
18163 #endif
18164
18165     // Check for COMPlus_AggressiveInlining
18166     if (compDoAggressiveInlining)
18167     {
18168         methAttr |= CORINFO_FLG_FORCEINLINE;
18169     }
18170
18171     if (!(methAttr & CORINFO_FLG_FORCEINLINE))
18172     {
18173         /* Don't bother inlining call sites that are in a catch handler or filter region */
18174         if (bbInCatchHandlerILRange(compCurBB))
18175         {
18176 #ifdef DEBUG
18177             if (verbose)
18178             {
18179                 printf("\nWill not inline blocks that are in the catch handler region\n");
18180             }
18181
18182 #endif
18183
18184             inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH);
18185             return;
18186         }
18187
18188         if (bbInFilterILRange(compCurBB))
18189         {
18190 #ifdef DEBUG
18191             if (verbose)
18192             {
18193                 printf("\nWill not inline blocks that are in the filter region\n");
18194             }
18195 #endif
18196
18197             inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER);
18198             return;
18199         }
18200     }
18201
18202     /* If the caller's stack frame is marked, then we can't do any inlining. Period. */
18203
18204     if (opts.compNeedSecurityCheck)
18205     {
18206         inlineResult.NoteFatal(InlineObservation::CALLER_NEEDS_SECURITY_CHECK);
18207         return;
18208     }
18209
18210     /* Check if we tried to inline this method before */
18211
18212     if (methAttr & CORINFO_FLG_DONT_INLINE)
18213     {
18214         inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE);
18215         return;
18216     }
18217
18218     /* Cannot inline synchronized methods */
18219
18220     if (methAttr & CORINFO_FLG_SYNCH)
18221     {
18222         inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED);
18223         return;
18224     }
18225
18226     /* Do not inline if callee needs security checks (since they would then mark the wrong frame) */
18227
18228     if (methAttr & CORINFO_FLG_SECURITYCHECK)
18229     {
18230         inlineResult.NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
18231         return;
18232     }
18233
18234     InlineCandidateInfo* inlineCandidateInfo = nullptr;
18235     impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult);
18236
18237     if (inlineResult.IsFailure())
18238     {
18239         return;
18240     }
18241
18242     // The old value should be NULL
18243     assert(call->gtInlineCandidateInfo == nullptr);
18244
18245     call->gtInlineCandidateInfo = inlineCandidateInfo;
18246
18247     // Mark the call node as inline candidate.
18248     call->gtFlags |= GTF_CALL_INLINE_CANDIDATE;
18249
18250     // Let the strategy know there's another candidate.
18251     impInlineRoot()->m_inlineStrategy->NoteCandidate();
18252
18253     // Since we're not actually inlining yet, and this call site is
18254     // still just an inline candidate, there's nothing to report.
18255     inlineResult.SetReported();
18256 }
18257
18258 /******************************************************************************/
18259 // Returns true if the given intrinsic will be implemented by target-specific
18260 // instructions
18261
18262 bool Compiler::IsTargetIntrinsic(CorInfoIntrinsics intrinsicId)
18263 {
18264 #if defined(_TARGET_AMD64_) || (defined(_TARGET_X86_) && !defined(LEGACY_BACKEND))
18265     switch (intrinsicId)
18266     {
18267         // Amd64 only has SSE2 instructions to directly compute sqrt/abs.
18268         //
18269         // TODO: Because the x86 backend only targets SSE for floating-point code,
18270         //       it does not treat Sine, Cosine, or Round as intrinsics (JIT32
18271         //       implemented those intrinsics as x87 instructions). If this poses
18272         //       a CQ problem, it may be necessary to change the implementation of
18273         //       the helper calls to decrease call overhead or switch back to the
18274         //       x87 instructions. This is tracked by #7097.
18275         case CORINFO_INTRINSIC_Sqrt:
18276         case CORINFO_INTRINSIC_Abs:
18277             return true;
18278
18279         default:
18280             return false;
18281     }
18282 #elif defined(_TARGET_ARM64_)
18283     switch (intrinsicId)
18284     {
18285         case CORINFO_INTRINSIC_Sqrt:
18286         case CORINFO_INTRINSIC_Abs:
18287         case CORINFO_INTRINSIC_Round:
18288             return true;
18289
18290         default:
18291             return false;
18292     }
18293 #elif defined(_TARGET_ARM_)
18294     switch (intrinsicId)
18295     {
18296         case CORINFO_INTRINSIC_Sqrt:
18297         case CORINFO_INTRINSIC_Abs:
18298         case CORINFO_INTRINSIC_Round:
18299             return true;
18300
18301         default:
18302             return false;
18303     }
18304 #elif defined(_TARGET_X86_)
18305     switch (intrinsicId)
18306     {
18307         case CORINFO_INTRINSIC_Sin:
18308         case CORINFO_INTRINSIC_Cos:
18309         case CORINFO_INTRINSIC_Sqrt:
18310         case CORINFO_INTRINSIC_Abs:
18311         case CORINFO_INTRINSIC_Round:
18312             return true;
18313
18314         default:
18315             return false;
18316     }
18317 #else
18318     // TODO: This portion of logic is not implemented for other architectures.
18319     // The reason for returning true is that on all other architectures the only
18320     // intrinsics enabled are target intrinsics.
18321     return true;
18322 #endif //_TARGET_AMD64_
18323 }
18324
18325 /******************************************************************************/
18326 // Returns true if the given intrinsic will be implemented by calling System.Math
18327 // methods.
18328
18329 bool Compiler::IsIntrinsicImplementedByUserCall(CorInfoIntrinsics intrinsicId)
18330 {
18331     // Currently, if a math intrinsic is not implemented by target-specific
18332     // instructions, it will be implemented by a System.Math call. In the
18333     // future, if we turn to implementing some of them with helper calls,
18334     // this predicate needs to be revisited.
18335     return !IsTargetIntrinsic(intrinsicId);
18336 }
18337
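// Returns true if the given intrinsic id is one of the math intrinsics,
// whether implemented by target instructions or by a System.Math call.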
18338 bool Compiler::IsMathIntrinsic(CorInfoIntrinsics intrinsicId)
18339 {
18340     switch (intrinsicId)
18341     {
18342         case CORINFO_INTRINSIC_Sin:
18343         case CORINFO_INTRINSIC_Sqrt:
18344         case CORINFO_INTRINSIC_Abs:
18345         case CORINFO_INTRINSIC_Cos:
18346         case CORINFO_INTRINSIC_Round:
18347         case CORINFO_INTRINSIC_Cosh:
18348         case CORINFO_INTRINSIC_Sinh:
18349         case CORINFO_INTRINSIC_Tan:
18350         case CORINFO_INTRINSIC_Tanh:
18351         case CORINFO_INTRINSIC_Asin:
18352         case CORINFO_INTRINSIC_Acos:
18353         case CORINFO_INTRINSIC_Atan:
18354         case CORINFO_INTRINSIC_Atan2:
18355         case CORINFO_INTRINSIC_Log10:
18356         case CORINFO_INTRINSIC_Pow:
18357         case CORINFO_INTRINSIC_Exp:
18358         case CORINFO_INTRINSIC_Ceiling:
18359         case CORINFO_INTRINSIC_Floor:
18360             return true;
18361         default:
18362             return false;
18363     }
18364 }
18365
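// Returns true if the given tree is a GT_INTRINSIC node for one of the math intrinsics above.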
18366 bool Compiler::IsMathIntrinsic(GenTreePtr tree)
18367 {
18368     return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->gtIntrinsic.gtIntrinsicId);
18369 }
18370 /*****************************************************************************/