src/jit/importer.cpp
1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
4
5 /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
6 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
7 XX                                                                           XX
8 XX                           Importer                                        XX
9 XX                                                                           XX
10 XX   Imports the given method and converts it to semantic trees              XX
11 XX                                                                           XX
12 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
13 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
14 */
15
16 #include "jitpch.h"
17 #ifdef _MSC_VER
18 #pragma hdrstop
19 #endif
20
21 #include "corexcep.h"
22
23 #define Verify(cond, msg)                                                                                              \
24     do                                                                                                                 \
25     {                                                                                                                  \
26         if (!(cond))                                                                                                   \
27         {                                                                                                              \
28             verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
29         }                                                                                                              \
30     } while (0)
31
32 #define VerifyOrReturn(cond, msg)                                                                                      \
33     do                                                                                                                 \
34     {                                                                                                                  \
35         if (!(cond))                                                                                                   \
36         {                                                                                                              \
37             verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
38             return;                                                                                                    \
39         }                                                                                                              \
40     } while (0)
41
42 #define VerifyOrReturnSpeculative(cond, msg, speculative)                                                              \
43     do                                                                                                                 \
44     {                                                                                                                  \
45         if (speculative)                                                                                               \
46         {                                                                                                              \
47             if (!(cond))                                                                                               \
48             {                                                                                                          \
49                 return false;                                                                                          \
50             }                                                                                                          \
51         }                                                                                                              \
52         else                                                                                                           \
53         {                                                                                                              \
54             if (!(cond))                                                                                               \
55             {                                                                                                          \
56                 verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                   \
57                 return false;                                                                                          \
58             }                                                                                                          \
59         }                                                                                                              \
60     } while (0)
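// Illustrative usage sketch (not part of the original source): how the macros
// above are typically employed by the verifier routines later in this file.
// The function below is hypothetical; only the macros themselves are real.
//
//     void Compiler::verCheckExampleHypothetical(const typeInfo& ti)
//     {
//         // Record a verification failure but keep going.
//         Verify(ti.IsObjRef(), "expected an object reference");
//
//         // Record a verification failure and bail out of the caller.
//         VerifyOrReturn(!ti.IsDead(), "dead type on the evaluation stack");
//     }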
61
62 /*****************************************************************************/
63
64 void Compiler::impInit()
65 {
66
67 #ifdef DEBUG
68     impTreeList        = nullptr;
69     impTreeLast        = nullptr;
70     impInlinedCodeSize = 0;
71 #endif
72 }
73
74 /*****************************************************************************
75  *
76  *  Pushes the given tree on the stack.
77  */
78
79 void Compiler::impPushOnStack(GenTreePtr tree, typeInfo ti)
80 {
81     /* Check for overflow. If inlining, we may be using a bigger stack */
82
83     if ((verCurrentState.esStackDepth >= info.compMaxStack) &&
84         (verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0)))
85     {
86         BADCODE("stack overflow");
87     }
88
89 #ifdef DEBUG
90     // If we are pushing a struct, make certain we know the precise type!
91     if (tree->TypeGet() == TYP_STRUCT)
92     {
93         assert(ti.IsType(TI_STRUCT));
94         CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle();
95         assert(clsHnd != NO_CLASS_HANDLE);
96     }
97
98     if (tiVerificationNeeded && !ti.IsDead())
99     {
100         assert(typeInfo::AreEquivalent(NormaliseForStack(ti), ti)); // types are normalized
101
102         // The ti type is consistent with the tree type.
103         //
104
105         // On 64-bit systems, nodes whose "proper" type is "native int" get labeled TYP_LONG.
106         // In the verification type system, we always transform "native int" to "TI_INT".
107         // Ideally, we would keep track of which nodes labeled "TYP_LONG" are really "native int", but
108         // attempts to do that have proved too difficult.  Instead, we'll assume that in checks like this,
109         // when there's a mismatch, it's because of this reason -- the typeInfo::AreEquivalentModuloNativeInt
110         // method used in the last disjunct allows exactly this mismatch.
111         assert(ti.IsDead() || ti.IsByRef() && (tree->TypeGet() == TYP_I_IMPL || tree->TypeGet() == TYP_BYREF) ||
112                ti.IsUnboxedGenericTypeVar() && tree->TypeGet() == TYP_REF ||
113                ti.IsObjRef() && tree->TypeGet() == TYP_REF || ti.IsMethod() && tree->TypeGet() == TYP_I_IMPL ||
114                ti.IsType(TI_STRUCT) && tree->TypeGet() != TYP_REF ||
115                typeInfo::AreEquivalentModuloNativeInt(NormaliseForStack(ti),
116                                                       NormaliseForStack(typeInfo(tree->TypeGet()))));
117
118         // If it is a struct type, make certain we normalized the primitive types
119         assert(!ti.IsType(TI_STRUCT) ||
120                info.compCompHnd->getTypeForPrimitiveValueClass(ti.GetClassHandle()) == CORINFO_TYPE_UNDEF);
121     }
122
123 #if VERBOSE_VERIFY
124     if (VERBOSE && tiVerificationNeeded)
125     {
126         printf("\n");
127         printf(TI_DUMP_PADDING);
128         printf("About to push to stack: ");
129         ti.Dump();
130     }
131 #endif // VERBOSE_VERIFY
132
133 #endif // DEBUG
134
135     verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti;
136     verCurrentState.esStack[verCurrentState.esStackDepth++].val      = tree;
137
138     if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
139     {
140         compLongUsed = true;
141     }
142     else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
143     {
144         compFloatingPointUsed = true;
145     }
146 }
147
148 /******************************************************************************/
149 // Used in the inliner, where we can assume type-safe code. Please don't use in the importer!
150 inline void Compiler::impPushOnStackNoType(GenTreePtr tree)
151 {
152     assert(verCurrentState.esStackDepth < impStkSize);
153     INDEBUG(verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = typeInfo());
154     verCurrentState.esStack[verCurrentState.esStackDepth++].val              = tree;
155
156     if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
157     {
158         compLongUsed = true;
159     }
160     else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
161     {
162         compFloatingPointUsed = true;
163     }
164 }
165
166 inline void Compiler::impPushNullObjRefOnStack()
167 {
168     impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL));
169 }
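// Illustrative sketch (not part of the original source): pushing a value with
// an explicit verification type, mirroring what impPushNullObjRefOnStack does
// for null object references. The constant value here is arbitrary.
//
//     impPushOnStack(gtNewIconNode(42, TYP_INT), typeInfo(TI_INT));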
170
171 // This method gets called when we run into unverifiable code
172 // (and we are verifying the method)
173
174 inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file)
175                                                           DEBUGARG(unsigned line))
176 {
177     // Remember that the code is not verifiable
178     // Note that the method may yet pass canSkipMethodVerification(),
179     // and so the presence of unverifiable code may not be an issue.
180     tiIsVerifiableCode = FALSE;
181
182 #ifdef DEBUG
183     const char* tail = strrchr(file, '\\');
184     if (tail)
185     {
186         file = tail + 1;
187     }
188
189     if (JitConfig.JitBreakOnUnsafeCode())
190     {
191         assert(!"Unsafe code detected");
192     }
193 #endif
194
195     JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
196             msg, info.compFullName, impCurOpcName, impCurOpcOffs));
197
198     if (verNeedsVerification() || compIsForImportOnly())
199     {
200         JITLOG((LL_ERROR, "Verification failure:  %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
201                 msg, info.compFullName, impCurOpcName, impCurOpcOffs));
202         verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line));
203     }
204 }
205
206 inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file)
207                                                                     DEBUGARG(unsigned line))
208 {
209     JITLOG((LL_ERROR, "Verification failure:  %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
210             msg, info.compFullName, impCurOpcName, impCurOpcOffs));
211
212 #ifdef DEBUG
213     //    BreakIfDebuggerPresent();
214     if (getBreakOnBadCode())
215     {
216         assert(!"Typechecking error");
217     }
218 #endif
219
220     RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr);
221     UNREACHABLE();
222 }
223
224 // Helper function that tells us whether the IL instruction at the given address
225 // consumes an address at the top of the stack. We use it to avoid marking
226 // locals as lvAddrTaken unnecessarily.
227 bool Compiler::impILConsumesAddr(const BYTE* codeAddr, CORINFO_METHOD_HANDLE fncHandle, CORINFO_MODULE_HANDLE scpHandle)
228 {
229     assert(!compIsForInlining());
230
231     OPCODE opcode;
232
233     opcode = (OPCODE)getU1LittleEndian(codeAddr);
234
235     switch (opcode)
236     {
237         // case CEE_LDFLDA: We're taking this one out because if you have a sequence
238         // like
239         //
240         //          ldloca.0
241         //          ldflda whatever
242         //
243         // of a primitive-like struct, you end up after morphing with the address of a local
244         // that's not marked as address-taken, which is wrong. Also, ldflda is usually used
245         // for structs that contain other structs, which isn't a case we handle very
246         // well right now for other reasons.
247
248         case CEE_LDFLD:
249         {
250             // We won't collapse small fields. This is probably not the right place to have this
251             // check, but we're only using the function for this purpose, and it is easy to factor
252             // out if we need to do so.
253
254             CORINFO_RESOLVED_TOKEN resolvedToken;
255             impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field);
256
257             CORINFO_CLASS_HANDLE clsHnd;
258             var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField, &clsHnd));
259
260             // Preserve 'small' int types
261             if (lclTyp > TYP_INT)
262             {
263                 lclTyp = genActualType(lclTyp);
264             }
265
266             if (varTypeIsSmall(lclTyp))
267             {
268                 return false;
269             }
270
271             return true;
272         }
273         default:
274             break;
275     }
276
277     return false;
278 }
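// Illustrative IL sketch (not part of the original source): the kind of
// sequence impILConsumesAddr is consulted about. The field names are
// hypothetical.
//
//          ldloca.s   0                  // push the address of local 0
//          ldfld      int32 Foo::Bar     // a non-small field consumes the
//                                        // address, so the helper returns true
//
// For a small field (e.g. an int8 or int16 field), or for any other opcode,
// the helper returns false and the caller conservatively treats the local as
// address-taken.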
279
280 void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind)
281 {
282     pResolvedToken->tokenContext = impTokenLookupContextHandle;
283     pResolvedToken->tokenScope   = info.compScopeHnd;
284     pResolvedToken->token        = getU4LittleEndian(addr);
285     pResolvedToken->tokenType    = kind;
286
287     if (!tiVerificationNeeded)
288     {
289         info.compCompHnd->resolveToken(pResolvedToken);
290     }
291     else
292     {
293         Verify(eeTryResolveToken(pResolvedToken), "Token resolution failed");
294     }
295 }
296
297 /*****************************************************************************
298  *
299  *  Pop one tree from the stack.
300  */
301
302 StackEntry Compiler::impPopStack()
303 {
304     if (verCurrentState.esStackDepth == 0)
305     {
306         BADCODE("stack underflow");
307     }
308
309 #ifdef DEBUG
310 #if VERBOSE_VERIFY
311     if (VERBOSE && tiVerificationNeeded)
312     {
313         JITDUMP("\n");
314         printf(TI_DUMP_PADDING);
315         printf("About to pop from the stack: ");
316         const typeInfo& ti = verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo;
317         ti.Dump();
318     }
319 #endif // VERBOSE_VERIFY
320 #endif // DEBUG
321
322     return verCurrentState.esStack[--verCurrentState.esStackDepth];
323 }
324
325 StackEntry Compiler::impPopStack(CORINFO_CLASS_HANDLE& structType)
326 {
327     StackEntry ret = impPopStack();
328     structType     = verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo.GetClassHandle();
329     return (ret);
330 }
331
332 GenTreePtr Compiler::impPopStack(typeInfo& ti)
333 {
334     StackEntry ret = impPopStack();
335     ti             = ret.seTypeInfo;
336     return (ret.val);
337 }
338
339 /*****************************************************************************
340  *
341  *  Peek at the n'th (0-based) tree from the top of the stack.
342  */
343
344 StackEntry& Compiler::impStackTop(unsigned n)
345 {
346     if (verCurrentState.esStackDepth <= n)
347     {
348         BADCODE("stack underflow");
349     }
350
351     return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1];
352 }
353 /*****************************************************************************
354  *  Some trees are spilled in a special way and need corresponding special
355  *  handling when they are unspilled or copied. This function enumerates
356  *  the operators that are possible after spilling.
357  */
358
359 #ifdef DEBUG // only used in asserts
360 static bool impValidSpilledStackEntry(GenTreePtr tree)
361 {
362     if (tree->gtOper == GT_LCL_VAR)
363     {
364         return true;
365     }
366
367     if (tree->OperIsConst())
368     {
369         return true;
370     }
371
372     return false;
373 }
374 #endif
375
376 /*****************************************************************************
377  *
378  *  The following logic is used to save/restore stack contents.
379  *  If 'copy' is true, then we make a copy of the trees on the stack. These
380  *  have to all be cloneable/spilled values.
381  */
382
383 void Compiler::impSaveStackState(SavedStack* savePtr, bool copy)
384 {
385     savePtr->ssDepth = verCurrentState.esStackDepth;
386
387     if (verCurrentState.esStackDepth)
388     {
389         savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth];
390         size_t saveSize  = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees);
391
392         if (copy)
393         {
394             StackEntry* table = savePtr->ssTrees;
395
396             /* Make a fresh copy of all the stack entries */
397
398             for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++)
399             {
400                 table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo;
401                 GenTreePtr tree   = verCurrentState.esStack[level].val;
402
403                 assert(impValidSpilledStackEntry(tree));
404
405                 switch (tree->gtOper)
406                 {
407                     case GT_CNS_INT:
408                     case GT_CNS_LNG:
409                     case GT_CNS_DBL:
410                     case GT_CNS_STR:
411                     case GT_LCL_VAR:
412                         table->val = gtCloneExpr(tree);
413                         break;
414
415                     default:
416                         assert(!"Bad oper - Not covered by impValidSpilledStackEntry()");
417                         break;
418                 }
419             }
420         }
421         else
422         {
423             memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize);
424         }
425     }
426 }
427
428 void Compiler::impRestoreStackState(SavedStack* savePtr)
429 {
430     verCurrentState.esStackDepth = savePtr->ssDepth;
431
432     if (verCurrentState.esStackDepth)
433     {
434         memcpy(verCurrentState.esStack, savePtr->ssTrees,
435                verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack));
436     }
437 }
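// Illustrative sketch (not part of the original source): the save/restore pair
// above brackets work that must not disturb the evaluation stack. The snippet
// is hypothetical; real callers choose 'copy' based on whether the saved trees
// might be mutated while the snapshot is live (cloning requires every entry to
// be a constant or a local variable, per impValidSpilledStackEntry).
//
//     SavedStack savedStack;
//     impSaveStackState(&savedStack, true);  // snapshot (with clones) of the stack
//     ...                                    // work that may clobber the stack
//     impRestoreStackState(&savedStack);     // put the original entries back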
438
439 /*****************************************************************************
440  *
441  *  Get the tree list started for a new basic block.
442  */
443 inline void Compiler::impBeginTreeList()
444 {
445     assert(impTreeList == nullptr && impTreeLast == nullptr);
446
447     impTreeList = impTreeLast = new (this, GT_BEG_STMTS) GenTree(GT_BEG_STMTS, TYP_VOID);
448 }
449
450 /*****************************************************************************
451  *
452  *  Store the given start and end stmt in the given basic block. This is
453  *  mostly called by impEndTreeList(BasicBlock *block). It is called
454  *  directly only for handling CEE_LEAVEs out of finally-protected try's.
455  *  directly only for handling CEE_LEAVEs out of finally-protected tries.
456
457 inline void Compiler::impEndTreeList(BasicBlock* block, GenTreePtr firstStmt, GenTreePtr lastStmt)
458 {
459     assert(firstStmt->gtOper == GT_STMT);
460     assert(lastStmt->gtOper == GT_STMT);
461
462     /* Make the list circular, so that we can easily walk it backwards */
463
464     firstStmt->gtPrev = lastStmt;
465
466     /* Store the tree list in the basic block */
467
468     block->bbTreeList = firstStmt;
469
470     /* The block should not already be marked as imported */
471     assert((block->bbFlags & BBF_IMPORTED) == 0);
472
473     block->bbFlags |= BBF_IMPORTED;
474 }
475
476 /*****************************************************************************
477  *
478  *  Store the current tree list in the given basic block.
479  */
480
481 inline void Compiler::impEndTreeList(BasicBlock* block)
482 {
483     assert(impTreeList->gtOper == GT_BEG_STMTS);
484
485     GenTreePtr firstTree = impTreeList->gtNext;
486
487     if (!firstTree)
488     {
489         /* The block should not already be marked as imported */
490         assert((block->bbFlags & BBF_IMPORTED) == 0);
491
492         // Empty block. Just mark it as imported
493         block->bbFlags |= BBF_IMPORTED;
494     }
495     else
496     {
497         // Ignore the GT_BEG_STMTS
498         assert(firstTree->gtPrev == impTreeList);
499
500         impEndTreeList(block, firstTree, impTreeLast);
501     }
502
503 #ifdef DEBUG
504     if (impLastILoffsStmt != nullptr)
505     {
506         impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
507         impLastILoffsStmt                          = nullptr;
508     }
509
510     impTreeList = impTreeLast = nullptr;
511 #endif
512 }
513
514 /*****************************************************************************
515  *
516  *  Check that storing the given tree doesn't mess up the semantic order. Note
517  *  that this has only limited value as we can only check [0..chkLevel).
518  */
519
520 inline void Compiler::impAppendStmtCheck(GenTreePtr stmt, unsigned chkLevel)
521 {
522 #ifndef DEBUG
523     return;
524 #else
525     assert(stmt->gtOper == GT_STMT);
526
527     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
528     {
529         chkLevel = verCurrentState.esStackDepth;
530     }
531
532     if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE)
533     {
534         return;
535     }
536
537     GenTreePtr tree = stmt->gtStmt.gtStmtExpr;
538
539     // Calls can only be appended if there are no GTF_GLOB_EFFECT trees on the stack
540
541     if (tree->gtFlags & GTF_CALL)
542     {
543         for (unsigned level = 0; level < chkLevel; level++)
544         {
545             assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0);
546         }
547     }
548
549     if (tree->gtOper == GT_ASG)
550     {
551         // For an assignment to a local variable, all references of that
552         // variable have to be spilled. If it is aliased, all calls and
553         // indirect accesses have to be spilled
554
555         if (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR)
556         {
557             unsigned lclNum = tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
558             for (unsigned level = 0; level < chkLevel; level++)
559             {
560                 assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum, false));
561                 assert(!lvaTable[lclNum].lvAddrExposed ||
562                        (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0);
563             }
564         }
565
566         // If the access may be to global memory, all side effects have to be spilled.
567
568         else if (tree->gtOp.gtOp1->gtFlags & GTF_GLOB_REF)
569         {
570             for (unsigned level = 0; level < chkLevel; level++)
571             {
572                 assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0);
573             }
574         }
575     }
576 #endif
577 }
578
579 /*****************************************************************************
580  *
581  *  Append the given GT_STMT node to the current block's tree list.
582  *  [0..chkLevel) is the portion of the stack which we will check for
583  *    interference with stmt and spill if needed.
584  */
585
586 inline void Compiler::impAppendStmt(GenTreePtr stmt, unsigned chkLevel)
587 {
588     assert(stmt->gtOper == GT_STMT);
589     noway_assert(impTreeLast != nullptr);
590
591     /* If the statement being appended has any side-effects, check the stack
592        to see if anything needs to be spilled to preserve correct ordering. */
593
594     GenTreePtr expr  = stmt->gtStmt.gtStmtExpr;
595     unsigned   flags = expr->gtFlags & GTF_GLOB_EFFECT;
596
597     // Assignments to (unaliased) locals don't count as side effects, as
598     // we handle them specially using impSpillLclRefs(). Temp locals should
599     // be fine too.
600
601     if ((expr->gtOper == GT_ASG) && (expr->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
602         !(expr->gtOp.gtOp1->gtFlags & GTF_GLOB_REF) && !gtHasLocalsWithAddrOp(expr->gtOp.gtOp2))
603     {
604         unsigned op2Flags = expr->gtOp.gtOp2->gtFlags & GTF_GLOB_EFFECT;
605         assert(flags == (op2Flags | GTF_ASG));
606         flags = op2Flags;
607     }
608
609     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
610     {
611         chkLevel = verCurrentState.esStackDepth;
612     }
613
614     if (chkLevel && chkLevel != (unsigned)CHECK_SPILL_NONE)
615     {
616         assert(chkLevel <= verCurrentState.esStackDepth);
617
618         if (flags)
619         {
620             // If there is a call, we have to spill global refs
621             bool spillGlobEffects = (flags & GTF_CALL) ? true : false;
622
623             if (expr->gtOper == GT_ASG)
624             {
625                 GenTree* lhs = expr->gtGetOp1();
626                 // If we are assigning to a global ref, we have to spill global refs on stack.
627                 // TODO-1stClassStructs: Previously, spillGlobEffects was set to true for
628                 // GT_INITBLK and GT_COPYBLK, but this is overly conservative, and should be
629                 // revisited. (Note that it was NOT set to true for GT_COPYOBJ.)
630                 if (!expr->OperIsBlkOp())
631                 {
632                     // If we are assigning to a global ref, we have to spill global refs on stack
633                     if ((lhs->gtFlags & GTF_GLOB_REF) != 0)
634                     {
635                         spillGlobEffects = true;
636                     }
637                 }
638                 else if ((lhs->OperIsBlk() && !lhs->AsBlk()->HasGCPtr()) ||
639                          ((lhs->OperGet() == GT_LCL_VAR) &&
640                           (lvaTable[lhs->AsLclVarCommon()->gtLclNum].lvStructGcCount == 0)))
641                 {
642                     spillGlobEffects = true;
643                 }
644             }
645
646             impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt"));
647         }
648         else
649         {
650             impSpillSpecialSideEff();
651         }
652     }
653
654     impAppendStmtCheck(stmt, chkLevel);
655
656     /* Point 'prev' at the previous node, so that we can walk backwards */
657
658     stmt->gtPrev = impTreeLast;
659
660     /* Append the expression statement to the list */
661
662     impTreeLast->gtNext = stmt;
663     impTreeLast         = stmt;
664
665 #ifdef FEATURE_SIMD
666     impMarkContiguousSIMDFieldAssignments(stmt);
667 #endif
668
669     /* Once we set impCurStmtOffs in an appended tree, we are ready to
670        report the following offsets. So reset impCurStmtOffs */
671
672     if (impTreeLast->gtStmt.gtStmtILoffsx == impCurStmtOffs)
673     {
674         impCurStmtOffsSet(BAD_IL_OFFSET);
675     }
676
677 #ifdef DEBUG
678     if (impLastILoffsStmt == nullptr)
679     {
680         impLastILoffsStmt = stmt;
681     }
682
683     if (verbose)
684     {
685         printf("\n\n");
686         gtDispTree(stmt);
687     }
688 #endif
689 }
690
691 /*****************************************************************************
692  *
693  *  Insert the given GT_STMT "stmt" before GT_STMT "stmtBefore"
694  */
695
696 inline void Compiler::impInsertStmtBefore(GenTreePtr stmt, GenTreePtr stmtBefore)
697 {
698     assert(stmt->gtOper == GT_STMT);
699     assert(stmtBefore->gtOper == GT_STMT);
700
701     GenTreePtr stmtPrev = stmtBefore->gtPrev;
702     stmt->gtPrev        = stmtPrev;
703     stmt->gtNext        = stmtBefore;
704     stmtPrev->gtNext    = stmt;
705     stmtBefore->gtPrev  = stmt;
706 }
707
708 /*****************************************************************************
709  *
710  *  Append the given expression tree to the current block's tree list.
711  *  Return the newly created statement.
712  */
713
714 GenTreePtr Compiler::impAppendTree(GenTreePtr tree, unsigned chkLevel, IL_OFFSETX offset)
715 {
716     assert(tree);
717
718     /* Allocate an 'expression statement' node */
719
720     GenTreePtr expr = gtNewStmt(tree, offset);
721
722     /* Append the statement to the current block's stmt list */
723
724     impAppendStmt(expr, chkLevel);
725
726     return expr;
727 }
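// Illustrative sketch (not part of the original source): the chkLevel
// conventions used by impAppendStmt/impAppendTree above, restated with
// hypothetical arguments.
//
//     impAppendTree(tree, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);  // check (and spill) the whole stack
//     impAppendTree(tree, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs); // caller guarantees no interference
//     impAppendTree(tree, 2, impCurStmtOffs);                          // only stack entries [0..2) are checked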
728
729 /*****************************************************************************
730  *
731  *  Insert the given expression tree before GT_STMT "stmtBefore"
732  */
733
734 void Compiler::impInsertTreeBefore(GenTreePtr tree, IL_OFFSETX offset, GenTreePtr stmtBefore)
735 {
736     assert(stmtBefore->gtOper == GT_STMT);
737
738     /* Allocate an 'expression statement' node */
739
740     GenTreePtr expr = gtNewStmt(tree, offset);
741
742     /* Insert the statement before 'stmtBefore' in the current block's stmt list */
743
744     impInsertStmtBefore(expr, stmtBefore);
745 }
746
747 /*****************************************************************************
748  *
749  *  Append an assignment of the given value to a temp to the current tree list.
750  *  curLevel is the stack level for which the spill to the temp is being done.
751  */
752
753 void Compiler::impAssignTempGen(unsigned    tmp,
754                                 GenTreePtr  val,
755                                 unsigned    curLevel,
756                                 GenTreePtr* pAfterStmt, /* = NULL */
757                                 IL_OFFSETX  ilOffset,   /* = BAD_IL_OFFSET */
758                                 BasicBlock* block       /* = NULL */
759                                 )
760 {
761     GenTreePtr asg = gtNewTempAssign(tmp, val);
762
763     if (!asg->IsNothingNode())
764     {
765         if (pAfterStmt)
766         {
767             GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
768             *pAfterStmt        = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
769         }
770         else
771         {
772             impAppendTree(asg, curLevel, impCurStmtOffs);
773         }
774     }
775 }
776
777 /*****************************************************************************
778  * same as above, but handle the valueclass case too
779  */
780
781 void Compiler::impAssignTempGen(unsigned             tmpNum,
782                                 GenTreePtr           val,
783                                 CORINFO_CLASS_HANDLE structType,
784                                 unsigned             curLevel,
785                                 GenTreePtr*          pAfterStmt, /* = NULL */
786                                 IL_OFFSETX           ilOffset,   /* = BAD_IL_OFFSET */
787                                 BasicBlock*          block       /* = NULL */
788                                 )
789 {
790     GenTreePtr asg;
791
792     if (varTypeIsStruct(val))
793     {
794         assert(tmpNum < lvaCount);
795         assert(structType != NO_CLASS_HANDLE);
796
797         // if the method is non-verifiable the assert is not true
798         // so at least ignore it in the case when verification is turned on
799         // since any block that tries to use the temp would have failed verification.
800         var_types varType = lvaTable[tmpNum].lvType;
801         assert(tiVerificationNeeded || varType == TYP_UNDEF || varTypeIsStruct(varType));
802         lvaSetStruct(tmpNum, structType, false);
803
804         // Now, set the type of the struct value. Note that lvaSetStruct may modify the type
805         // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType)
806         // that has been passed in for the value being assigned to the temp, in which case we
807         // need to set 'val' to that same type.
808         // Note also that if we always normalized the types of any node that might be a struct
809         // type, this would not be necessary - but that requires additional JIT/EE interface
810         // calls that may not actually be required - e.g. if we only access a field of a struct.
811
812         val->gtType = lvaTable[tmpNum].lvType;
813
814         GenTreePtr dst = gtNewLclvNode(tmpNum, val->gtType);
815         asg            = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, block);
816     }
817     else
818     {
819         asg = gtNewTempAssign(tmpNum, val);
820     }
821
822     if (!asg->IsNothingNode())
823     {
824         if (pAfterStmt)
825         {
826             GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
827             *pAfterStmt        = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
828         }
829         else
830         {
831             impAppendTree(asg, curLevel, impCurStmtOffs);
832         }
833     }
834 }
835
836 /*****************************************************************************
837  *
838  *  Pop the given number of values from the stack and return a list node with
839  *  their values.
840  *  The 'prefixTree' argument may optionally contain an argument
841  *  list that is prepended to the list returned from this function.
842  *
843  *  The notion of prepended is a bit misleading in that the list is backwards
844  *  from what one might expect: the first element popped is at the end of
845  *  the returned list, and prefixTree is 'before' that, meaning closer to
846  *  the end of the list.  To get to prefixTree, you have to walk to the
847  *  end of the list.
848  *
849  *  For ARG_ORDER_R2L prefixTree is only used to insert extra arguments, as
850  *  such we reverse its meaning such that returnValue has a reversed
851  *  prefixTree at the head of the list.
852  */
853
854 GenTreeArgList* Compiler::impPopList(unsigned          count,
855                                      unsigned*         flagsPtr,
856                                      CORINFO_SIG_INFO* sig,
857                                      GenTreeArgList*   prefixTree)
858 {
859     assert(sig == nullptr || count == sig->numArgs);
860
861     unsigned             flags = 0;
862     CORINFO_CLASS_HANDLE structType;
863     GenTreeArgList*      treeList;
864
865     if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
866     {
867         treeList = nullptr;
868     }
869     else
870     { // ARG_ORDER_L2R
871         treeList = prefixTree;
872     }
873
874     while (count--)
875     {
876         StackEntry se   = impPopStack();
877         typeInfo   ti   = se.seTypeInfo;
878         GenTreePtr temp = se.val;
879
880         if (varTypeIsStruct(temp))
881         {
882             // Morph trees that aren't already OBJs or MKREFANY to be OBJs
883             assert(ti.IsType(TI_STRUCT));
884             structType = ti.GetClassHandleForValueClass();
885             temp       = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL);
886         }
887
888         /* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */
889         flags |= temp->gtFlags;
890         treeList = gtNewListNode(temp, treeList);
891     }
892
893     *flagsPtr = flags;
894
895     if (sig != nullptr)
896     {
897         if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
898             sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR)
899         {
900             // Make sure that all valuetypes (including enums) that we push are loaded.
901             // This is to guarantee that if a GC is triggered from the prestub of this method,
902             // all valuetypes in the method signature are already loaded.
903             // We need to be able to find the size of the valuetypes, but we cannot
904             // do a class-load from within GC.
905             info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass);
906         }
907
908         CORINFO_ARG_LIST_HANDLE argLst = sig->args;
909         CORINFO_CLASS_HANDLE    argClass;
910         CORINFO_CLASS_HANDLE    argRealClass;
911         GenTreeArgList*         args;
912         unsigned                sigSize;
913
914         for (args = treeList, count = sig->numArgs; count > 0; args = args->Rest(), count--)
915         {
916             PREFIX_ASSUME(args != nullptr);
917
918             CorInfoType corType = strip(info.compCompHnd->getArgType(sig, argLst, &argClass));
919
920             // insert implied casts (from float to double or double to float)
921
922             if (corType == CORINFO_TYPE_DOUBLE && args->Current()->TypeGet() == TYP_FLOAT)
923             {
924                 args->Current() = gtNewCastNode(TYP_DOUBLE, args->Current(), TYP_DOUBLE);
925             }
926             else if (corType == CORINFO_TYPE_FLOAT && args->Current()->TypeGet() == TYP_DOUBLE)
927             {
928                 args->Current() = gtNewCastNode(TYP_FLOAT, args->Current(), TYP_FLOAT);
929             }
930
931             // insert any widening or narrowing casts for backwards compatibility
932
933             args->Current() = impImplicitIorI4Cast(args->Current(), JITtype2varType(corType));
934
935             if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR &&
936                 corType != CORINFO_TYPE_VAR && (argRealClass = info.compCompHnd->getArgClass(sig, argLst)) != nullptr)
937             {
938                 // Everett MC++ could generate IL with mismatched valuetypes. It used to work with the Everett JIT,
939                 // but it stopped working in Whidbey when we started passing simple valuetypes as their underlying
940                 // primitive types.
941                 // We will try to adjust for this case here to avoid breaking customers code (see VSW 485789 for
942                 // details).
943                 if (corType == CORINFO_TYPE_VALUECLASS && !varTypeIsStruct(args->Current()))
944                 {
945                     args->Current() = impNormStructVal(args->Current(), argRealClass, (unsigned)CHECK_SPILL_ALL, true);
946                 }
947
948                 // Make sure that all valuetypes (including enums) that we push are loaded.
949                 // This is to guarantee that if a GC is triggered from the prestub of this method,
950                 // all valuetypes in the method signature are already loaded.
951                 // We need to be able to find the size of the valuetypes, but we cannot
952                 // do a class-load from within GC.
953                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass);
954             }
955
956             argLst = info.compCompHnd->getArgNext(argLst);
957         }
958     }
959
960     if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
961     {
962         // Prepend the prefixTree
963
964         // Simple in-place reversal to place treeList
965         // at the end of a reversed prefixTree
966         while (prefixTree != nullptr)
967         {
968             GenTreeArgList* next = prefixTree->Rest();
969             prefixTree->Rest()   = treeList;
970             treeList             = prefixTree;
971             prefixTree           = next;
972         }
973     }
974     return treeList;
975 }
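// Illustrative sketch (not part of the original source): the list ordering
// produced by impPopList for a call with three stack arguments a1, a2, a3
// (pushed in that order, so a3 is on top of the stack).
//
//     Popping yields a3, a2, a1, and each new node is prepended, so with
//     ARG_ORDER_R2L and no prefixTree the returned list reads:
//
//         a1 -> a2 -> a3                      // left-to-right argument order
//
//     The first element popped (a3) is therefore at the tail. A prefixTree
//     p1 -> p2 supplied with ARG_ORDER_R2L comes back reversed at the head:
//
//         p2 -> p1 -> a1 -> a2 -> a3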
976
977 /*****************************************************************************
978  *
979  *  Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.)
980  *  The first "skipReverseCount" items are not reversed.
981  */
982
983 GenTreeArgList* Compiler::impPopRevList(unsigned          count,
984                                         unsigned*         flagsPtr,
985                                         CORINFO_SIG_INFO* sig,
986                                         unsigned          skipReverseCount)
987
988 {
989     assert(skipReverseCount <= count);
990
991     GenTreeArgList* list = impPopList(count, flagsPtr, sig);
992
993     // reverse the list
994     if (list == nullptr || skipReverseCount == count)
995     {
996         return list;
997     }
998
999     GenTreeArgList* ptr          = nullptr; // Initialized to the first node that needs to be reversed
1000     GenTreeArgList* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed
1001
1002     if (skipReverseCount == 0)
1003     {
1004         ptr = list;
1005     }
1006     else
1007     {
1008         lastSkipNode = list;
1009         // Get to the first node that needs to be reversed
1010         for (unsigned i = 0; i < skipReverseCount - 1; i++)
1011         {
1012             lastSkipNode = lastSkipNode->Rest();
1013         }
1014
1015         PREFIX_ASSUME(lastSkipNode != nullptr);
1016         ptr = lastSkipNode->Rest();
1017     }
1018
1019     GenTreeArgList* reversedList = nullptr;
1020
1021     do
1022     {
1023         GenTreeArgList* tmp = ptr->Rest();
1024         ptr->Rest()         = reversedList;
1025         reversedList        = ptr;
1026         ptr                 = tmp;
1027     } while (ptr != nullptr);
1028
1029     if (skipReverseCount)
1030     {
1031         lastSkipNode->Rest() = reversedList;
1032         return list;
1033     }
1034     else
1035     {
1036         return reversedList;
1037     }
1038 }
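// Illustrative sketch (not part of the original source): impPopRevList with a
// nonzero skipReverseCount. For five arguments a1..a5 (a1 pushed first) and
// skipReverseCount == 2, impPopList first yields a1 -> a2 -> a3 -> a4 -> a5;
// the first two nodes are left in place and the remainder is reversed:
//
//         a1 -> a2 -> a5 -> a4 -> a3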
1039
1040 /*****************************************************************************
1041    Assign (copy) the structure from 'src' to 'dest'.  The structure is a value
1042    class of type 'clsHnd'.  It returns the tree that should be appended to the
1043    statement list that represents the assignment.
1044    Temp assignments may be appended to impTreeList if spilling is necessary.
1045    curLevel is the stack level for which a spill may be performed.
1046  */
1047
1048 GenTreePtr Compiler::impAssignStruct(GenTreePtr           dest,
1049                                      GenTreePtr           src,
1050                                      CORINFO_CLASS_HANDLE structHnd,
1051                                      unsigned             curLevel,
1052                                      GenTreePtr*          pAfterStmt, /* = NULL */
1053                                      BasicBlock*          block       /* = NULL */
1054                                      )
1055 {
1056     assert(varTypeIsStruct(dest));
1057
1058     while (dest->gtOper == GT_COMMA)
1059     {
1060         assert(varTypeIsStruct(dest->gtOp.gtOp2)); // Second thing is the struct
1061
1062         // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree.
1063         if (pAfterStmt)
1064         {
1065             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(dest->gtOp.gtOp1, impCurStmtOffs));
1066         }
1067         else
1068         {
1069             impAppendTree(dest->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1070         }
1071
1072         // set dest to the second thing
1073         dest = dest->gtOp.gtOp2;
1074     }
1075
1076     assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD ||
1077            dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX);
1078
1079     if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR &&
1080         src->gtLclVarCommon.gtLclNum == dest->gtLclVarCommon.gtLclNum)
1081     {
1082         // Make this a NOP
1083         return gtNewNothingNode();
1084     }
1085
1086     // TODO-1stClassStructs: Avoid creating an address if it is not needed,
1087     // or re-creating a Blk node if it is.
1088     GenTreePtr destAddr;
1089
1090     if (dest->gtOper == GT_IND || dest->OperIsBlk())
1091     {
1092         destAddr = dest->gtOp.gtOp1;
1093     }
1094     else
1095     {
1096         destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
1097     }
1098
1099     return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, block));
1100 }
1101
1102 /*****************************************************************************/
1103
1104 GenTreePtr Compiler::impAssignStructPtr(GenTreePtr           destAddr,
1105                                         GenTreePtr           src,
1106                                         CORINFO_CLASS_HANDLE structHnd,
1107                                         unsigned             curLevel,
1108                                         GenTreePtr*          pAfterStmt, /* = NULL */
1109                                         BasicBlock*          block       /* = NULL */
1110                                         )
1111 {
1112     var_types  destType;
1113     GenTreePtr dest      = nullptr;
1114     unsigned   destFlags = 0;
1115
1116 #if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1117     assert(varTypeIsStruct(src) || (src->gtOper == GT_ADDR && src->TypeGet() == TYP_BYREF));
1118     // TODO-ARM-BUG: Does ARM need this?
1119     // TODO-ARM64-BUG: Does ARM64 need this?
1120     assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1121            src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1122            src->gtOper == GT_COMMA || src->gtOper == GT_ADDR ||
1123            (src->TypeGet() != TYP_STRUCT && (GenTree::OperIsSIMD(src->gtOper) || src->gtOper == GT_LCL_FLD)));
1124 #else  // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1125     assert(varTypeIsStruct(src));
1126
1127     assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1128            src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1129            src->gtOper == GT_COMMA ||
1130            (src->TypeGet() != TYP_STRUCT && (GenTree::OperIsSIMD(src->gtOper) || src->gtOper == GT_LCL_FLD)));
1131 #endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1132     if (destAddr->OperGet() == GT_ADDR)
1133     {
1134         GenTree* destNode = destAddr->gtGetOp1();
1135         // If the actual destination is a local (for non-LEGACY_BACKEND), or already a block node, or is a node that
1136         // will be morphed, don't insert an OBJ(ADDR).
1137         if (destNode->gtOper == GT_INDEX || destNode->OperIsBlk()
1138 #ifndef LEGACY_BACKEND
1139             || ((destNode->OperGet() == GT_LCL_VAR) && (destNode->TypeGet() == src->TypeGet()))
1140 #endif // !LEGACY_BACKEND
1141                 )
1142         {
1143             dest = destNode;
1144         }
1145         destType = destNode->TypeGet();
1146     }
1147     else
1148     {
1149         destType = src->TypeGet();
1150     }
1151
1152     var_types asgType = src->TypeGet();
1153
1154     if (src->gtOper == GT_CALL)
1155     {
1156         if (src->AsCall()->TreatAsHasRetBufArg(this))
1157         {
1158             // Case of call returning a struct via hidden retbuf arg
1159
1160             // insert the return value buffer into the argument list as first byref parameter
1161             src->gtCall.gtCallArgs = gtNewListNode(destAddr, src->gtCall.gtCallArgs);
1162
1163             // now returns void, not a struct
1164             src->gtType = TYP_VOID;
1165
1166             // return the morphed call node
1167             return src;
1168         }
1169         else
1170         {
1171             // Case of call returning a struct in one or more registers.
1172
1173             var_types returnType = (var_types)src->gtCall.gtReturnType;
1174
1175             // We won't use a return buffer, so change the type of src->gtType to 'returnType'
1176             src->gtType = genActualType(returnType);
1177
1178             // First we try to change this to "LclVar/LclFld = call"
1179             //
1180             if ((destAddr->gtOper == GT_ADDR) && (destAddr->gtOp.gtOp1->gtOper == GT_LCL_VAR))
1181             {
1182                 // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD.
1183                 // That is, the IR will be of the form lclVar = call for multi-reg return
1184                 //
1185                 GenTreePtr lcl = destAddr->gtOp.gtOp1;
1186                 if (src->AsCall()->HasMultiRegRetVal())
1187                 {
1188                     // Mark the struct LclVar as used in a MultiReg return context
1189                     //  which currently makes it non promotable.
1190                     // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1191                     // handle multireg returns.
1192                     lcl->gtFlags |= GTF_DONT_CSE;
1193                     lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1194                 }
1195                 else // The call result is not a multireg return
1196                 {
1197                     // We change this to a GT_LCL_FLD (from a GT_ADDR of a GT_LCL_VAR)
1198                     lcl->ChangeOper(GT_LCL_FLD);
1199                     fgLclFldAssign(lcl->gtLclVarCommon.gtLclNum);
1200                 }
1201
1202                 lcl->gtType = src->gtType;
1203                 asgType     = src->gtType;
1204                 dest        = lcl;
1205
1206 #if defined(_TARGET_ARM_)
1207                 // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case,
1208                 // but that method has not been updated to include ARM.
1209                 impMarkLclDstNotPromotable(lcl->gtLclVarCommon.gtLclNum, src, structHnd);
1210                 lcl->gtFlags |= GTF_DONT_CSE;
1211 #elif defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1212                 // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
1213                 assert(!src->gtCall.IsVarargs() && "varargs not allowed for System V OSs.");
1214
1215                 // Make the struct non promotable. The eightbytes could contain multiple fields.
1216                 // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1217                 // handle multireg returns.
1218                 // TODO-Cleanup: Why is this needed here? This seems that it will set this even for
1219                 // non-multireg returns.
1220                 lcl->gtFlags |= GTF_DONT_CSE;
1221                 lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1222 #endif
1223             }
1224             else // we don't have a GT_ADDR of a GT_LCL_VAR
1225             {
1226                 // !!! The destination could be on stack. !!!
1227                 // This flag will let us choose the correct write barrier.
1228                 asgType   = returnType;
1229                 destFlags = GTF_IND_TGTANYWHERE;
1230             }
1231         }
1232     }
1233     else if (src->gtOper == GT_RET_EXPR)
1234     {
1235         GenTreePtr call = src->gtRetExpr.gtInlineCandidate;
1236         noway_assert(call->gtOper == GT_CALL);
1237
1238         if (call->AsCall()->HasRetBufArg())
1239         {
1240             // insert the return value buffer into the argument list as first byref parameter
1241             call->gtCall.gtCallArgs = gtNewListNode(destAddr, call->gtCall.gtCallArgs);
1242
1243             // now returns void, not a struct
1244             src->gtType  = TYP_VOID;
1245             call->gtType = TYP_VOID;
1246
1247             // We already have appended the write to 'dest' GT_CALL's args
1248             // So now we just return an empty node (pruning the GT_RET_EXPR)
1249             return src;
1250         }
1251         else
1252         {
1253             // Case of inline method returning a struct in one or more registers.
1254             //
1255             var_types returnType = (var_types)call->gtCall.gtReturnType;
1256
1257             // We won't need a return buffer
1258             asgType      = returnType;
1259             src->gtType  = genActualType(returnType);
1260             call->gtType = src->gtType;
1261
1262             // If we've changed the type, and it no longer matches a local destination,
1263             // we must use an indirection.
1264             if ((dest != nullptr) && (dest->OperGet() == GT_LCL_VAR) && (dest->TypeGet() != asgType))
1265             {
1266                 dest = nullptr;
1267             }
1268
1269             // !!! The destination could be on stack. !!!
1270             // This flag will let us choose the correct write barrier.
1271             destFlags = GTF_IND_TGTANYWHERE;
1272         }
1273     }
1274     else if (src->OperIsBlk())
1275     {
1276         asgType = impNormStructType(structHnd);
1277         if (src->gtOper == GT_OBJ)
1278         {
1279             assert(src->gtObj.gtClass == structHnd);
1280         }
1281     }
1282     else if (src->gtOper == GT_INDEX)
1283     {
1284         asgType = impNormStructType(structHnd);
1285         assert(src->gtIndex.gtStructElemClass == structHnd);
1286     }
1287     else if (src->gtOper == GT_MKREFANY)
1288     {
1289         // Since we are assigning the result of a GT_MKREFANY,
1290         // "destAddr" must point to a refany.
1291
1292         GenTreePtr destAddrClone;
1293         destAddr =
1294             impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment"));
1295
1296         assert(offsetof(CORINFO_RefAny, dataPtr) == 0);
1297         assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF);
1298         GetZeroOffsetFieldMap()->Set(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));
1299         GenTreePtr     ptrSlot         = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr);
1300         GenTreeIntCon* typeFieldOffset = gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL);
1301         typeFieldOffset->gtFieldSeq    = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField());
1302         GenTreePtr typeSlot =
1303             gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset));
1304
1305         // append the assign of the pointer value
1306         GenTreePtr asg = gtNewAssignNode(ptrSlot, src->gtOp.gtOp1);
1307         if (pAfterStmt)
1308         {
1309             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(asg, impCurStmtOffs));
1310         }
1311         else
1312         {
1313             impAppendTree(asg, curLevel, impCurStmtOffs);
1314         }
1315
1316         // return the assign of the type value, to be appended
1317         return gtNewAssignNode(typeSlot, src->gtOp.gtOp2);
1318     }
1319     else if (src->gtOper == GT_COMMA)
1320     {
1321         // The second thing is the struct or its address.
1322         assert(varTypeIsStruct(src->gtOp.gtOp2) || src->gtOp.gtOp2->gtType == TYP_BYREF);
1323         if (pAfterStmt)
1324         {
1325             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(src->gtOp.gtOp1, impCurStmtOffs));
1326         }
1327         else
1328         {
1329             impAppendTree(src->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1330         }
1331
1332         // Evaluate the second thing using recursion.
1333         return impAssignStructPtr(destAddr, src->gtOp.gtOp2, structHnd, curLevel, pAfterStmt, block);
1334     }
1335     else if (src->IsLocal())
1336     {
1337         asgType = src->TypeGet();
1338     }
1339     else if (asgType == TYP_STRUCT)
1340     {
1341         asgType     = impNormStructType(structHnd);
1342         src->gtType = asgType;
1343 #ifdef LEGACY_BACKEND
1344         if (asgType == TYP_STRUCT)
1345         {
1346             GenTree* srcAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, src);
1347             src              = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
1348         }
1349 #endif
1350     }
1351     if (dest == nullptr)
1352     {
1353         // TODO-1stClassStructs: We shouldn't really need a block node as the destination
1354         // if this is a known struct type.
1355         if (asgType == TYP_STRUCT)
1356         {
1357             dest = gtNewObjNode(structHnd, destAddr);
1358             gtSetObjGcInfo(dest->AsObj());
1359             // Although an obj as a call argument was always assumed to be a globRef
1360             // (which is itself overly conservative), that is not true of the operands
1361             // of a block assignment.
1362             dest->gtFlags &= ~GTF_GLOB_REF;
1363             dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF);
1364         }
1365         else if (varTypeIsStruct(asgType))
1366         {
1367             dest = new (this, GT_BLK) GenTreeBlk(GT_BLK, asgType, destAddr, genTypeSize(asgType));
1368         }
1369         else
1370         {
1371             dest = gtNewOperNode(GT_IND, asgType, destAddr);
1372         }
1373     }
1374     else
1375     {
1376         dest->gtType = asgType;
1377     }
1378
1379     dest->gtFlags |= destFlags;
1380     destFlags = dest->gtFlags;
1381
1382     // return an assignment node, to be appended
1383     GenTree* asgNode = gtNewAssignNode(dest, src);
1384     gtBlockOpInit(asgNode, dest, src, false);
1385
1386     // TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs
1387     // of assignments.
1388     if ((destFlags & GTF_DONT_CSE) == 0)
1389     {
1390         dest->gtFlags &= ~(GTF_DONT_CSE);
1391     }
1392     return asgNode;
1393 }
1394
1395 /*****************************************************************************
1396    Given a struct value and the class handle for that structure, return
1397    the expression for the address of that structure value.
1398
1399    willDeref - whether the caller guarantees that it will dereference the returned pointer.
1400 */
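// A rough sketch of the main cases handled below (illustrative only, not exhaustive):
//
//   GT_OBJ(addr), willDeref              ->  addr                        (reuse the existing address)
//   GT_CALL / GT_RET_EXPR / GT_MKREFANY  ->  GT_ADDR(LCL_VAR<tmp>)       (spill the value to a temp first)
//   GT_COMMA(sideEffect, structVal2)     ->  GT_COMMA(sideEffect, addr)  (recurse on the second operand)
//   anything else                        ->  GT_ADDR(structVal)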
1401
1402 GenTreePtr Compiler::impGetStructAddr(GenTreePtr           structVal,
1403                                       CORINFO_CLASS_HANDLE structHnd,
1404                                       unsigned             curLevel,
1405                                       bool                 willDeref)
1406 {
1407     assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd));
1408
1409     var_types type = structVal->TypeGet();
1410
1411     genTreeOps oper = structVal->gtOper;
1412
1413     if (oper == GT_OBJ && willDeref)
1414     {
1415         assert(structVal->gtObj.gtClass == structHnd);
1416         return (structVal->gtObj.Addr());
1417     }
1418     else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY)
1419     {
1420         unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1421
1422         impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1423
1424         // The 'return value' is now the temp itself
1425
1426         type            = genActualType(lvaTable[tmpNum].TypeGet());
1427         GenTreePtr temp = gtNewLclvNode(tmpNum, type);
1428         temp            = gtNewOperNode(GT_ADDR, TYP_BYREF, temp);
1429         return temp;
1430     }
1431     else if (oper == GT_COMMA)
1432     {
1433         assert(structVal->gtOp.gtOp2->gtType == type); // Second thing is the struct
1434
1435         GenTreePtr oldTreeLast = impTreeLast;
1436         structVal->gtOp.gtOp2  = impGetStructAddr(structVal->gtOp.gtOp2, structHnd, curLevel, willDeref);
1437         structVal->gtType      = TYP_BYREF;
1438
1439         if (oldTreeLast != impTreeLast)
1440         {
1441             // Some temp assignment statement was placed on the statement list
1442             // for Op2, but that would be out of order with op1, so we need to
1443             // spill op1 onto the statement list after whatever was last
1444             // before we recursed on Op2 (i.e. before whatever Op2 appended).
1445             impInsertTreeBefore(structVal->gtOp.gtOp1, impCurStmtOffs, oldTreeLast->gtNext);
1446             structVal->gtOp.gtOp1 = gtNewNothingNode();
1447         }
1448
1449         return (structVal);
1450     }
1451
1452     return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1453 }
1454
1455 //------------------------------------------------------------------------
1456 // impNormStructType: Given a (known to be) struct class handle structHnd, normalize its type,
1457 //                    and optionally determine the GC layout of the struct.
1458 //
1459 // Arguments:
1460 //    structHnd       - The class handle for the struct type of interest.
1461 //    gcLayout        - (optional, default nullptr) - a BYTE pointer, allocated by the caller,
1462 //                      into which the gcLayout will be written.
1463 //    pNumGCVars      - (optional, default nullptr) - if non-null, a pointer to an unsigned,
1464 //                      which will be set to the number of GC fields in the struct.
1465 //    pSimdBaseType   - (optional, default nullptr) - if non-null, and the struct is a SIMD
1466 //                      type, set to the SIMD base type
1467 //
1468 // Return Value:
1469 //    The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*).
1470 //    The gcLayout will be returned using the pointers provided by the caller, if non-null.
1471 //    It may also modify the compFloatingPointUsed flag if the type is a SIMD type.
1472 //
1473 // Assumptions:
1474 //    The caller must set gcLayout to nullptr OR ensure that it is large enough
1475 //    (see ICorStaticInfo::getClassGClayout in corinfo.h).
1476 //
1477 // Notes:
1478 //    Normalizing the type involves examining the struct type to determine if it should
1479 //    be modified to one that is handled specially by the JIT, possibly being a candidate
1480 //    for full enregistration, e.g. TYP_SIMD16.
1481
1482 var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
1483                                       BYTE*                gcLayout,
1484                                       unsigned*            pNumGCVars,
1485                                       var_types*           pSimdBaseType)
1486 {
1487     assert(structHnd != NO_CLASS_HANDLE);
1488
1489     const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd);
1490     var_types   structType  = TYP_STRUCT;
1491
1492     // On CoreCLR the check for GC pointers includes a "may" to account for the special
1493     // ByRef-like span structs. The added check for "CONTAINS_STACK_PTR" is the particular bit:
1494     // when it is set, the struct contains a ByRef that could be either a GC pointer or a native
1495     // pointer.
1496     const bool mayContainGCPtrs =
1497         ((structFlags & CORINFO_FLG_CONTAINS_STACK_PTR) != 0 || ((structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0));
1498
1499 #ifdef FEATURE_SIMD
1500     // Check to see if this is a SIMD type.
1501     if (featureSIMD && !mayContainGCPtrs)
1502     {
1503         unsigned originalSize = info.compCompHnd->getClassSize(structHnd);
1504
1505         if ((originalSize >= minSIMDStructBytes()) && (originalSize <= maxSIMDStructBytes()))
1506         {
1507             unsigned int sizeBytes;
1508             var_types    simdBaseType = getBaseTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
1509             if (simdBaseType != TYP_UNKNOWN)
1510             {
1511                 assert(sizeBytes == originalSize);
1512                 structType = getSIMDTypeForSize(sizeBytes);
1513                 if (pSimdBaseType != nullptr)
1514                 {
1515                     *pSimdBaseType = simdBaseType;
1516                 }
1517                 // Also indicate that we use floating point registers.
1518                 compFloatingPointUsed = true;
1519             }
1520         }
1521     }
1522 #endif // FEATURE_SIMD
1523
1524     // Fetch GC layout info if requested
1525     if (gcLayout != nullptr)
1526     {
1527         unsigned numGCVars = info.compCompHnd->getClassGClayout(structHnd, gcLayout);
1528
1529         // Verify that the quick test up above via the class attributes gave a
1530         // safe view of the type's GCness.
1531         //
1532         // Note there are cases where mayContainGCPtrs is true but getClassGClayout
1533         // does not report any gc fields.
1534
1535         assert(mayContainGCPtrs || (numGCVars == 0));
1536
1537         if (pNumGCVars != nullptr)
1538         {
1539             *pNumGCVars = numGCVars;
1540         }
1541     }
1542     else
1543     {
1544         // Can't safely ask for number of GC pointers without also
1545         // asking for layout.
1546         assert(pNumGCVars == nullptr);
1547     }
1548
1549     return structType;
1550 }
1551
1552 //****************************************************************************
1553 //  Given a TYP_STRUCT value 'structVal', make sure it is 'canonical'; that is,
1554 //  it is either an OBJ or a MKREFANY node, or a node (e.g. GT_INDEX) that will be morphed.
1555 //
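//  A rough sketch of the normalization performed (illustrative only):
//
//    GT_LCL_VAR / GT_LCL_FLD / GT_FIELD  ->  OBJ(ADDR(node))
//    GT_CALL / GT_RET_EXPR               ->  spill to a temp, then OBJ(ADDR(LCL_VAR<tmp>))
//    GT_IND                              ->  OBJ(addr)
//    GT_OBJ / GT_BLK / GT_MKREFANY       ->  left as-is (already canonical)
//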
1556 GenTreePtr Compiler::impNormStructVal(GenTreePtr           structVal,
1557                                       CORINFO_CLASS_HANDLE structHnd,
1558                                       unsigned             curLevel,
1559                                       bool                 forceNormalization /*=false*/)
1560 {
1561     assert(forceNormalization || varTypeIsStruct(structVal));
1562     assert(structHnd != NO_CLASS_HANDLE);
1563     var_types structType = structVal->TypeGet();
1564     bool      makeTemp   = false;
1565     if (structType == TYP_STRUCT)
1566     {
1567         structType = impNormStructType(structHnd);
1568     }
1569     bool                 alreadyNormalized = false;
1570     GenTreeLclVarCommon* structLcl         = nullptr;
1571
1572     genTreeOps oper = structVal->OperGet();
1573     switch (oper)
1574     {
1575         // GT_RETURN and GT_MKREFANY don't capture the handle.
1576         case GT_RETURN:
1577             break;
1578         case GT_MKREFANY:
1579             alreadyNormalized = true;
1580             break;
1581
1582         case GT_CALL:
1583             structVal->gtCall.gtRetClsHnd = structHnd;
1584             makeTemp                      = true;
1585             break;
1586
1587         case GT_RET_EXPR:
1588             structVal->gtRetExpr.gtRetClsHnd = structHnd;
1589             makeTemp                         = true;
1590             break;
1591
1592         case GT_ARGPLACE:
1593             structVal->gtArgPlace.gtArgPlaceClsHnd = structHnd;
1594             break;
1595
1596         case GT_INDEX:
1597             // This will be transformed to an OBJ later.
1598             alreadyNormalized                    = true;
1599             structVal->gtIndex.gtStructElemClass = structHnd;
1600             structVal->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(structHnd);
1601             break;
1602
1603         case GT_FIELD:
1604             // Wrap it in a GT_OBJ.
1605             structVal->gtType = structType;
1606             structVal         = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1607             break;
1608
1609         case GT_LCL_VAR:
1610         case GT_LCL_FLD:
1611             structLcl = structVal->AsLclVarCommon();
1612             // Wrap it in a GT_OBJ.
1613             structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1614             __fallthrough;
1615
1616         case GT_OBJ:
1617         case GT_BLK:
1618         case GT_DYN_BLK:
1619         case GT_ASG:
1620             // These should already have the appropriate type.
1621             assert(structVal->gtType == structType);
1622             alreadyNormalized = true;
1623             break;
1624
1625         case GT_IND:
1626             assert(structVal->gtType == structType);
1627             structVal         = gtNewObjNode(structHnd, structVal->gtGetOp1());
1628             alreadyNormalized = true;
1629             break;
1630
1631 #ifdef FEATURE_SIMD
1632         case GT_SIMD:
1633             assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
1634             break;
1635 #endif // FEATURE_SIMD
1636
1637         case GT_COMMA:
1638         {
1639             // The second thing could either be a block node or a GT_SIMD or a GT_COMMA node.
1640             GenTree* blockNode = structVal->gtOp.gtOp2;
1641             assert(blockNode->gtType == structType);
1642
1643             // Is this GT_COMMA(op1, GT_COMMA())?
1644             GenTree* parent = structVal;
1645             if (blockNode->OperGet() == GT_COMMA)
1646             {
1647                 // Find the last node in the comma chain.
1648                 do
1649                 {
1650                     assert(blockNode->gtType == structType);
1651                     parent    = blockNode;
1652                     blockNode = blockNode->gtOp.gtOp2;
1653                 } while (blockNode->OperGet() == GT_COMMA);
1654             }
1655
1656 #ifdef FEATURE_SIMD
1657             if (blockNode->OperGet() == GT_SIMD)
1658             {
1659                 parent->gtOp.gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization);
1660                 alreadyNormalized  = true;
1661             }
1662             else
1663 #endif
1664             {
1665                 assert(blockNode->OperIsBlk());
1666
1667                 // Sink the GT_COMMA below the blockNode addr.
1668                 // That is, GT_COMMA(op1, op2=blockNode) is transformed into
1669                 // blockNode(GT_COMMA(TYP_BYREF, op1, op2's op1)).
1670                 //
1671                 // In case of a chained GT_COMMA case, we sink the last
1672                 // GT_COMMA below the blockNode addr.
1673                 GenTree* blockNodeAddr = blockNode->gtOp.gtOp1;
1674                 assert(blockNodeAddr->gtType == TYP_BYREF);
1675                 GenTree* commaNode    = parent;
1676                 commaNode->gtType     = TYP_BYREF;
1677                 commaNode->gtOp.gtOp2 = blockNodeAddr;
1678                 blockNode->gtOp.gtOp1 = commaNode;
1679                 if (parent == structVal)
1680                 {
1681                     structVal = blockNode;
1682                 }
1683                 alreadyNormalized = true;
1684             }
1685         }
1686         break;
1687
1688         default:
1689             assert(!"Unexpected node in impNormStructVal()");
1690             break;
1691     }
1692     structVal->gtType  = structType;
1693     GenTree* structObj = structVal;
1694
1695     if (!alreadyNormalized || forceNormalization)
1696     {
1697         if (makeTemp)
1698         {
1699             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1700
1701             impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1702
1703             // The structVal is now the temp itself
1704
1705             structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon();
1706             // TODO-1stClassStructs: Avoid always wrapping in GT_OBJ.
1707             structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structLcl));
1708         }
1709         else if (varTypeIsStruct(structType) && !structVal->OperIsBlk())
1710         {
1711             // Wrap it in a GT_OBJ
1712             structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1713         }
1714     }
1715
1716     if (structLcl != nullptr)
1717     {
1718         // An OBJ on an ADDR(LCL_VAR) can never raise an exception
1719         // so we don't set GTF_EXCEPT here.
1720         if (!lvaIsImplicitByRefLocal(structLcl->gtLclNum))
1721         {
1722             structObj->gtFlags &= ~GTF_GLOB_REF;
1723         }
1724     }
1725     else
1726     {
1727         // In general an OBJ is an indirection and could raise an exception.
1728         structObj->gtFlags |= GTF_EXCEPT;
1729     }
1730     return (structObj);
1731 }
1732
1733 /******************************************************************************/
1734 // Given a type token, generate code that will evaluate to the correct
1735 // handle representation of that token (type handle, field handle, or method handle)
1736 //
1737 // For most cases, the handle is determined at compile-time, and the code
1738 // generated is simply an embedded handle.
1739 //
1740 // Run-time lookup is required if the enclosing method is shared between instantiations
1741 // and the token refers to formal type parameters whose instantiation is not known
1742 // at compile-time.
1743 //
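// A rough sketch of the shapes produced (illustrative only):
//
//   handle known at compile time       ->  icon node embedding the handle (GTF_ICON_*_HDL)
//   handle behind a fixed indirection  ->  indirection of an icon node embedding the indirection cell
//   run-time lookup required           ->  dictionary walk / helper call (see impRuntimeLookupToTree)
//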
1744 GenTreePtr Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1745                                       BOOL*                   pRuntimeLookup /* = NULL */,
1746                                       BOOL                    mustRestoreHandle /* = FALSE */,
1747                                       BOOL                    importParent /* = FALSE */)
1748 {
1749     assert(!fgGlobalMorph);
1750
1751     CORINFO_GENERICHANDLE_RESULT embedInfo;
1752     info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo);
1753
1754     if (pRuntimeLookup)
1755     {
1756         *pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup;
1757     }
1758
1759     if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup)
1760     {
1761         switch (embedInfo.handleType)
1762         {
1763             case CORINFO_HANDLETYPE_CLASS:
1764                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle);
1765                 break;
1766
1767             case CORINFO_HANDLETYPE_METHOD:
1768                 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle);
1769                 break;
1770
1771             case CORINFO_HANDLETYPE_FIELD:
1772                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
1773                     info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle));
1774                 break;
1775
1776             default:
1777                 break;
1778         }
1779     }
1780
1781     return impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
1782                            embedInfo.compileTimeHandle);
1783 }
1784
1785 GenTreePtr Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1786                                      CORINFO_LOOKUP*         pLookup,
1787                                      unsigned                handleFlags,
1788                                      void*                   compileTimeHandle)
1789 {
1790     if (!pLookup->lookupKind.needsRuntimeLookup)
1791     {
1792         // No runtime lookup is required.
1793         // Access is direct or memory-indirect (of a fixed address) reference
1794
1795         CORINFO_GENERIC_HANDLE handle       = nullptr;
1796         void*                  pIndirection = nullptr;
1797         assert(pLookup->constLookup.accessType != IAT_PPVALUE);
1798
1799         if (pLookup->constLookup.accessType == IAT_VALUE)
1800         {
1801             handle = pLookup->constLookup.handle;
1802         }
1803         else if (pLookup->constLookup.accessType == IAT_PVALUE)
1804         {
1805             pIndirection = pLookup->constLookup.addr;
1806         }
1807         return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, 0, nullptr, compileTimeHandle);
1808     }
1809     else if (compIsForInlining())
1810     {
1811         // Don't import runtime lookups when inlining
1812         // Inlining has to be aborted in such a case
1813         compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1814         return nullptr;
1815     }
1816     else
1817     {
1818         // Need to use dictionary-based access which depends on the typeContext
1819         // which is only available at runtime, not at compile-time.
1820
1821         return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle);
1822     }
1823 }
1824
1825 #ifdef FEATURE_READYTORUN_COMPILER
1826 GenTreePtr Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup,
1827                                                unsigned              handleFlags,
1828                                                void*                 compileTimeHandle)
1829 {
1830     CORINFO_GENERIC_HANDLE handle       = nullptr;
1831     void*                  pIndirection = nullptr;
1832     assert(pLookup->accessType != IAT_PPVALUE);
1833
1834     if (pLookup->accessType == IAT_VALUE)
1835     {
1836         handle = pLookup->handle;
1837     }
1838     else if (pLookup->accessType == IAT_PVALUE)
1839     {
1840         pIndirection = pLookup->addr;
1841     }
1842     return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, 0, nullptr, compileTimeHandle);
1843 }
1844
1845 GenTreePtr Compiler::impReadyToRunHelperToTree(
1846     CORINFO_RESOLVED_TOKEN* pResolvedToken,
1847     CorInfoHelpFunc         helper,
1848     var_types               type,
1849     GenTreeArgList*         args /* =NULL*/,
1850     CORINFO_LOOKUP_KIND*    pGenericLookupKind /* =NULL. Only used with generics */)
1851 {
1852     CORINFO_CONST_LOOKUP lookup;
1853 #if COR_JIT_EE_VERSION > 460
1854     if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup))
1855     {
1856         return nullptr;
1857     }
1858 #else
1859     info.compCompHnd->getReadyToRunHelper(pResolvedToken, helper, &lookup);
1860 #endif
1861
1862     GenTreePtr op1 = gtNewHelperCallNode(helper, type, GTF_EXCEPT, args);
1863
1864     op1->gtCall.setEntryPoint(lookup);
1865
1866     return op1;
1867 }
1868 #endif
1869
1870 GenTreePtr Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
1871 {
1872     GenTreePtr op1 = nullptr;
1873
1874     switch (pCallInfo->kind)
1875     {
1876         case CORINFO_CALL:
1877             op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod);
1878
1879 #ifdef FEATURE_READYTORUN_COMPILER
1880             if (opts.IsReadyToRun())
1881             {
1882                 op1->gtFptrVal.gtEntryPoint          = pCallInfo->codePointerLookup.constLookup;
1883                 op1->gtFptrVal.gtLdftnResolvedToken  = new (this, CMK_Unknown) CORINFO_RESOLVED_TOKEN;
1884                 *op1->gtFptrVal.gtLdftnResolvedToken = *pResolvedToken;
1885             }
1886             else
1887             {
1888                 op1->gtFptrVal.gtEntryPoint.addr = nullptr;
1889             }
1890 #endif
1891             break;
1892
1893         case CORINFO_CALL_CODE_POINTER:
1894             if (compIsForInlining())
1895             {
1896                 // Don't import runtime lookups when inlining
1897                 // Inlining has to be aborted in such a case
1898                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1899                 return nullptr;
1900             }
1901
1902             op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod);
1903             break;
1904
1905         default:
1906             noway_assert(!"unknown call kind");
1907             break;
1908     }
1909
1910     return op1;
1911 }
1912
1913 //------------------------------------------------------------------------
1914 // getRuntimeContextTree: find pointer to context for runtime lookup.
1915 //
1916 // Arguments:
1917 //    kind - lookup kind.
1918 //
1919 // Return Value:
1920 //    Return GenTree pointer to generic shared context.
1921 //
1922 // Notes:
1923 //    Reports that the generic context is used.
1924
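// A rough sketch of the trees produced (illustrative only):
//
//   CORINFO_LOOKUP_THISOBJ                   ->  GT_IND(LCL_VAR<this>)   (the method table of 'this')
//   CORINFO_LOOKUP_METHODPARAM / CLASSPARAM  ->  LCL_VAR<typeCtxtArg>    (the hidden context argument)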
1925 GenTreePtr Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind)
1926 {
1927     GenTreePtr ctxTree = nullptr;
1928
1929     // Collectible types require that, for shared generic code, if we use the generic context
1930     // parameter we report it. (This is a conservative approach; we could detect some cases,
1931     // particularly when the context parameter is 'this', where we don't need the eager reporting logic.)
1932     lvaGenericsContextUsed = true;
1933
1934     if (kind == CORINFO_LOOKUP_THISOBJ)
1935     {
1936         // this Object
1937         ctxTree = gtNewLclvNode(info.compThisArg, TYP_REF);
1938
1939         // Vtable pointer of this object
1940         ctxTree = gtNewOperNode(GT_IND, TYP_I_IMPL, ctxTree);
1941         ctxTree->gtFlags |= GTF_EXCEPT; // Null-pointer exception
1942         ctxTree->gtFlags |= GTF_IND_INVARIANT;
1943     }
1944     else
1945     {
1946         assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM);
1947
1948         ctxTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); // Exact method descriptor as passed in as last arg
1949     }
1950     return ctxTree;
1951 }
1952
1953 /*****************************************************************************/
1954 /* Import a dictionary lookup to access a handle in code shared between
1955    generic instantiations.
1956    The lookup depends on the typeContext which is only available at
1957    runtime, and not at compile-time.
1958    pLookup->token1 and pLookup->token2 specify the handle that is needed.
1959    The cases are:
1960
1961    1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the
1962       instantiation-specific handle, and the tokens to look up the handle.
1963    2. pLookup->indirections != CORINFO_USEHELPER :
1964       2a. pLookup->testForNull == false : Dereference the instantiation-specific handle
1965           to get the handle.
1966       2b. pLookup->testForNull == true : Dereference the instantiation-specific handle.
1967           If it is non-NULL, it is the required handle. Else, call a helper
1968           to look up the handle.
1969  */
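// A rough sketch of the tree built for case 2b (testForNull, illustrative only):
//
//   slotPtr = ctx [+ offsets, with an IND between successive indirections]
//   handle  = IND(slotPtr)
//   tmp     = QMARK(handle != 0, /* nonnull: */ NOP, /* null: */ CALL helper(ctx, signature))
//   result  = LCL_VAR<tmp>       (tmp reuses the local holding 'handle' when possible)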
1970
1971 GenTreePtr Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1972                                             CORINFO_LOOKUP*         pLookup,
1973                                             void*                   compileTimeHandle)
1974 {
1975
1976     // This method can only be called from the importer instance of the Compiler.
1977     // In other words, it cannot be called by the instance of the Compiler for the inlinee.
1978     assert(!compIsForInlining());
1979
1980     GenTreePtr ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);
1981
1982 #ifdef FEATURE_READYTORUN_COMPILER
1983     if (opts.IsReadyToRun())
1984     {
1985         return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
1986                                          gtNewArgList(ctxTree), &pLookup->lookupKind);
1987     }
1988 #endif
1989
1990     CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;
1991     // It's available only via the run-time helper function
1992     if (pRuntimeLookup->indirections == CORINFO_USEHELPER)
1993     {
1994         GenTreeArgList* helperArgs =
1995             gtNewArgList(ctxTree, gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, 0,
1996                                                       nullptr, compileTimeHandle));
1997
1998         return gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, GTF_EXCEPT, helperArgs);
1999     }
2000
2001     // Slot pointer
2002     GenTreePtr slotPtrTree = ctxTree;
2003
2004     if (pRuntimeLookup->testForNull)
2005     {
2006         slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2007                                    nullptr DEBUGARG("impRuntimeLookup slot"));
2008     }
2009
2010     // Apply repeated indirections
2011     for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
2012     {
2013         if (i != 0)
2014         {
2015             slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2016             slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2017             slotPtrTree->gtFlags |= GTF_IND_INVARIANT;
2018         }
2019         if (pRuntimeLookup->offsets[i] != 0)
2020         {
2021             slotPtrTree =
2022                 gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
2023         }
2024     }
2025
2026     // No null test required
2027     if (!pRuntimeLookup->testForNull)
2028     {
2029         if (pRuntimeLookup->indirections == 0)
2030         {
2031             return slotPtrTree;
2032         }
2033
2034         slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2035         slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2036
2037         if (!pRuntimeLookup->testForFixup)
2038         {
2039             return slotPtrTree;
2040         }
2041
2042         impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0"));
2043
2044         GenTreePtr op1 = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2045                                       nullptr DEBUGARG("impRuntimeLookup test"));
2046         op1 = impImplicitIorI4Cast(op1, TYP_INT); // downcast the pointer to a TYP_INT on 64-bit targets
2047
2048         // Use a GT_AND to check for the lowest bit and indirect if it is set
2049         GenTreePtr testTree = gtNewOperNode(GT_AND, TYP_INT, op1, gtNewIconNode(1));
2050         GenTreePtr relop    = gtNewOperNode(GT_EQ, TYP_INT, testTree, gtNewIconNode(0));
2051         relop->gtFlags |= GTF_RELOP_QMARK;
2052
2053         op1 = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2054                            nullptr DEBUGARG("impRuntimeLookup indir"));
2055         op1 = gtNewOperNode(GT_ADD, TYP_I_IMPL, op1, gtNewIconNode(-1, TYP_I_IMPL)); // subtract 1 from the pointer
2056         GenTreePtr indirTree = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
2057         GenTreePtr colon     = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL, slotPtrTree, indirTree);
2058
2059         GenTreePtr qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2060
2061         unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark0"));
2062         impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2063         return gtNewLclvNode(tmp, TYP_I_IMPL);
2064     }
2065
2066     assert(pRuntimeLookup->indirections != 0);
2067
2068     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1"));
2069
2070     // Extract the handle
2071     GenTreePtr handle = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2072     handle->gtFlags |= GTF_IND_NONFAULTING;
2073
2074     GenTreePtr handleCopy = impCloneExpr(handle, &handle, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2075                                          nullptr DEBUGARG("impRuntimeLookup typehandle"));
2076
2077     // Call to helper
2078     GenTreeArgList* helperArgs =
2079         gtNewArgList(ctxTree, gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, 0, nullptr,
2080                                                   compileTimeHandle));
2081     GenTreePtr helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, GTF_EXCEPT, helperArgs);
2082
2083     // Check for null and possibly call helper
2084     GenTreePtr relop = gtNewOperNode(GT_NE, TYP_INT, handle, gtNewIconNode(0, TYP_I_IMPL));
2085     relop->gtFlags |= GTF_RELOP_QMARK;
2086
2087     GenTreePtr colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL,
2088                                                          gtNewNothingNode(), // do nothing if nonnull
2089                                                          helperCall);
2090
2091     GenTreePtr qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2092
2093     unsigned tmp;
2094     if (handleCopy->IsLocal())
2095     {
2096         tmp = handleCopy->gtLclVarCommon.gtLclNum;
2097     }
2098     else
2099     {
2100         tmp = lvaGrabTemp(true DEBUGARG("spilling QMark1"));
2101     }
2102
2103     impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2104     return gtNewLclvNode(tmp, TYP_I_IMPL);
2105 }
2106
2107 /******************************************************************************
2108  *  Spills the stack at verCurrentState.esStack[level] and replaces it with a temp.
2109  *  If tnum != BAD_VAR_NUM, the temp var used to replace the tree is tnum;
2110  *     else, grab a new temp.
2111  *  For structs (which can be pushed on the stack using obj, etc.),
2112  *  special handling is needed.
2113  */
2114
2115 struct RecursiveGuard
2116 {
2117 public:
2118     RecursiveGuard()
2119     {
2120         m_pAddress = nullptr;
2121     }
2122
2123     ~RecursiveGuard()
2124     {
2125         if (m_pAddress)
2126         {
2127             *m_pAddress = false;
2128         }
2129     }
2130
2131     void Init(bool* pAddress, bool bInitialize)
2132     {
2133         assert(pAddress && *pAddress == false && "Recursive guard violation");
2134         m_pAddress = pAddress;
2135
2136         if (bInitialize)
2137         {
2138             *m_pAddress = true;
2139         }
2140     }
2141
2142 protected:
2143     bool* m_pAddress;
2144 };
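// A minimal usage sketch for RecursiveGuard (illustrative only):
//
//   {
//       RecursiveGuard guard;
//       guard.Init(&impNestedStackSpill, true); // asserts that the flag is not already set, then sets it
//       ...                                     // the flag is cleared again when 'guard' goes out of scope
//   }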
2145
2146 bool Compiler::impSpillStackEntry(unsigned level,
2147                                   unsigned tnum
2148 #ifdef DEBUG
2149                                   ,
2150                                   bool        bAssertOnRecursion,
2151                                   const char* reason
2152 #endif
2153                                   )
2154 {
2155
2156 #ifdef DEBUG
2157     RecursiveGuard guard;
2158     guard.Init(&impNestedStackSpill, bAssertOnRecursion);
2159 #endif
2160
2161     GenTreePtr tree = verCurrentState.esStack[level].val;
2162
2163     /* Allocate a temp if we haven't been asked to use a particular one */
2164
2165     if (tiVerificationNeeded)
2166     {
2167         // Ignore bad temp requests (they will happen with bad code and will be
2168         // caught when importing the destblock)
2169         if ((tnum != BAD_VAR_NUM && tnum >= lvaCount) && verNeedsVerification())
2170         {
2171             return false;
2172         }
2173     }
2174     else
2175     {
2176         if (tnum != BAD_VAR_NUM && (tnum >= lvaCount))
2177         {
2178             return false;
2179         }
2180     }
2181
2182     if (tnum == BAD_VAR_NUM)
2183     {
2184         tnum = lvaGrabTemp(true DEBUGARG(reason));
2185     }
2186     else if (tiVerificationNeeded && lvaTable[tnum].TypeGet() != TYP_UNDEF)
2187     {
2188         // If verification is needed and tnum's type is incompatible with the
2189         // type on the stack, we grab a new temp. This is safe since
2190         // we will throw a verification exception in the dest block.
2191
2192         var_types valTyp = tree->TypeGet();
2193         var_types dstTyp = lvaTable[tnum].TypeGet();
2194
2195         // If the two types are different, we return. This will only happen with bad code and will
2196         // be caught when importing the destblock. We still allow int/byref and float/double differences.
2197         if ((genActualType(valTyp) != genActualType(dstTyp)) &&
2198             !(
2199 #ifndef _TARGET_64BIT_
2200                 (valTyp == TYP_I_IMPL && dstTyp == TYP_BYREF) || (valTyp == TYP_BYREF && dstTyp == TYP_I_IMPL) ||
2201 #endif // !_TARGET_64BIT_
2202                 (varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp))))
2203         {
2204             if (verNeedsVerification())
2205             {
2206                 return false;
2207             }
2208         }
2209     }
2210
2211     /* Assign the spilled entry to the temp */
2212     impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level);
2213
2214     // The tree type may be modified by impAssignTempGen, so use the type of the lclVar.
2215     var_types  type                    = genActualType(lvaTable[tnum].TypeGet());
2216     GenTreePtr temp                    = gtNewLclvNode(tnum, type);
2217     verCurrentState.esStack[level].val = temp;
2218
2219     return true;
2220 }
2221
2222 /*****************************************************************************
2223  *
2224  *  Ensure that the stack has only spilled values
2225  */
2226
2227 void Compiler::impSpillStackEnsure(bool spillLeaves)
2228 {
2229     assert(!spillLeaves || opts.compDbgCode);
2230
2231     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2232     {
2233         GenTreePtr tree = verCurrentState.esStack[level].val;
2234
2235         if (!spillLeaves && tree->OperIsLeaf())
2236         {
2237             continue;
2238         }
2239
2240         // Temps introduced by the importer itself don't need to be spilled
2241
2242         bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) && (tree->gtLclVarCommon.gtLclNum >= info.compLocalsCount);
2243
2244         if (isTempLcl)
2245         {
2246             continue;
2247         }
2248
2249         impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure"));
2250     }
2251 }
2252
2253 void Compiler::impSpillEvalStack()
2254 {
2255     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2256     {
2257         impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack"));
2258     }
2259 }
2260
2261 /*****************************************************************************
2262  *
2263  *  If the stack contains any trees with side effects in them, assign those
2264  *  trees to temps and append the assignments to the statement list.
2265  *  On return the stack is guaranteed to be empty.
2266  */
2267
2268 inline void Compiler::impEvalSideEffects()
2269 {
2270     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects"));
2271     verCurrentState.esStackDepth = 0;
2272 }
2273
2274 /*****************************************************************************
2275  *
2276  *  If the stack contains any trees with side effects in them, assign those
2277  *  trees to temps and replace them on the stack with refs to their temps.
2278  *  [0..chkLevel) is the portion of the stack which will be checked and spilled.
2279  */
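// A minimal usage sketch (illustrative only): spill everything with global effects before
// appending a tree that reads or writes global state:
//
//   impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("caller's reason"));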
2280
2281 inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason))
2282 {
2283     assert(chkLevel != (unsigned)CHECK_SPILL_NONE);
2284
2285     /* Before we make any appends to the tree list we must spill the
2286      * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */
2287
2288     impSpillSpecialSideEff();
2289
2290     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
2291     {
2292         chkLevel = verCurrentState.esStackDepth;
2293     }
2294
2295     assert(chkLevel <= verCurrentState.esStackDepth);
2296
2297     unsigned spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT;
2298
2299     for (unsigned i = 0; i < chkLevel; i++)
2300     {
2301         GenTreePtr tree = verCurrentState.esStack[i].val;
2302
2303         GenTreePtr lclVarTree;
2304
2305         if ((tree->gtFlags & spillFlags) != 0 ||
2306             (spillGlobEffects &&                        // Only consider the following when spillGlobEffects == TRUE
2307              !impIsAddressInLocal(tree, &lclVarTree) && // No need to spill the GT_ADDR node on a local.
2308              gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or
2309                                            // lvAddrTaken flag.
2310         {
2311             impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason));
2312         }
2313     }
2314 }
2315
2316 /*****************************************************************************
2317  *
2318  *  If the stack contains any trees with special side effects in them, assign
2319  *  those trees to temps and replace them on the stack with refs to their temps.
2320  */
2321
2322 inline void Compiler::impSpillSpecialSideEff()
2323 {
2324     // Only exception objects need to be carefully handled
2325
2326     if (!compCurBB->bbCatchTyp)
2327     {
2328         return;
2329     }
2330
2331     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2332     {
2333         GenTreePtr tree = verCurrentState.esStack[level].val;
2334         // If there is an exception object anywhere in the sub tree, spill this stack entry.
2335         if (gtHasCatchArg(tree))
2336         {
2337             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff"));
2338         }
2339     }
2340 }
2341
2342 /*****************************************************************************
2343  *
2344  *  Spill all stack references to value classes (TYP_STRUCT nodes)
2345  */
2346
2347 void Compiler::impSpillValueClasses()
2348 {
2349     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2350     {
2351         GenTreePtr tree = verCurrentState.esStack[level].val;
2352
2353         if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT)
2354         {
2355             // Tree walk was aborted, which means that we found a
2356             // value class on the stack.  Need to spill that
2357             // stack entry.
2358
2359             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses"));
2360         }
2361     }
2362 }
2363
2364 /*****************************************************************************
2365  *
2366  *  Callback that checks if a tree node is TYP_STRUCT
2367  */
2368
2369 Compiler::fgWalkResult Compiler::impFindValueClasses(GenTreePtr* pTree, fgWalkData* data)
2370 {
2371     fgWalkResult walkResult = WALK_CONTINUE;
2372
2373     if ((*pTree)->gtType == TYP_STRUCT)
2374     {
2375         // Abort the walk and indicate that we found a value class
2376
2377         walkResult = WALK_ABORT;
2378     }
2379
2380     return walkResult;
2381 }
2382
2383 /*****************************************************************************
2384  *
2385  *  If the stack contains any trees with references to local #lclNum, assign
2386  *  those trees to temps and replace their place on the stack with refs to
2387  *  their temps.
2388  */
2389
2390 void Compiler::impSpillLclRefs(ssize_t lclNum)
2391 {
2392     /* Before we make any appends to the tree list we must spill the
2393      * "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */
2394
2395     impSpillSpecialSideEff();
2396
2397     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2398     {
2399         GenTreePtr tree = verCurrentState.esStack[level].val;
2400
2401         /* If the tree may throw an exception, and the block has a handler,
2402            then we need to spill assignments to the local if the local is
2403            live on entry to the handler.
2404            Just spill them all without considering liveness */
2405
2406         bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT));
2407
2408         /* Skip the tree if it doesn't have an affected reference,
2409            unless xcptnCaught */
2410
2411         if (xcptnCaught || gtHasRef(tree, lclNum, false))
2412         {
2413             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs"));
2414         }
2415     }
2416 }
2417
2418 /*****************************************************************************
2419  *
2420  *  Push catch arg onto the stack.
2421  *  If there are jumps to the beginning of the handler, insert basic block
2422  *  and spill catch arg to a temp. Update the handler block if necessary.
2423  *
2424  *  Returns the basic block of the actual handler.
2425  */
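// A rough sketch of the spill block inserted when the handler start has multiple
// predecessors (illustrative only):
//
//   newBlk (BBJ_NONE, placed before hndBlk):  LCL_VAR<tmp> = GT_CATCH_ARG
//   hndBlk:                                   LCL_VAR<tmp> is pushed as the exception object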
2426
2427 BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd)
2428 {
2429     // Do not inject the basic block twice on reimport. This should be
2430     // hit only under JIT stress. See if the block is the one we injected.
2431     // Note that EH canonicalization can inject internal blocks here. We might
2432     // be able to re-use such a block (but we don't, right now).
2433     if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET)) ==
2434         (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET))
2435     {
2436         GenTreePtr tree = hndBlk->bbTreeList;
2437
2438         if (tree != nullptr && tree->gtOper == GT_STMT)
2439         {
2440             tree = tree->gtStmt.gtStmtExpr;
2441             assert(tree != nullptr);
2442
2443             if ((tree->gtOper == GT_ASG) && (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
2444                 (tree->gtOp.gtOp2->gtOper == GT_CATCH_ARG))
2445             {
2446                 tree = gtNewLclvNode(tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum, TYP_REF);
2447
2448                 impPushOnStack(tree, typeInfo(TI_REF, clsHnd));
2449
2450                 return hndBlk->bbNext;
2451             }
2452         }
2453
2454         // If we get here, it must have been some other kind of internal block. It's possible that
2455         // someone prepended something to our injected block, but that's unlikely.
2456     }
2457
2458     /* Push the exception address value on the stack */
2459     GenTreePtr arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF);
2460
2461     /* Mark the node as having a side-effect - i.e. cannot be
2462      * moved around since it is tied to a fixed location (EAX) */
2463     arg->gtFlags |= GTF_ORDER_SIDEEFF;
2464
2465     /* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */
2466     if (hndBlk->bbRefs > 1 || compStressCompile(STRESS_CATCH_ARG, 5))
2467     {
2468         if (hndBlk->bbRefs == 1)
2469         {
2470             hndBlk->bbRefs++;
2471         }
2472
2473         /* Create extra basic block for the spill */
2474         BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true);
2475         newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET;
2476         newBlk->setBBWeight(hndBlk->bbWeight);
2477         newBlk->bbCodeOffs = hndBlk->bbCodeOffs;
2478
2479         /* Account for the new link we are about to create */
2480         hndBlk->bbRefs++;
2481
2482         /* Spill into a temp */
2483         unsigned tempNum         = lvaGrabTemp(false DEBUGARG("SpillCatchArg"));
2484         lvaTable[tempNum].lvType = TYP_REF;
2485         arg                      = gtNewTempAssign(tempNum, arg);
2486
2487         hndBlk->bbStkTempsIn = tempNum;
2488
2489         /* Report the debug info. impImportBlockCode won't treat
2490          * the actual handler as an exception block and thus won't do it for us. */
2491         if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
2492         {
2493             impCurStmtOffs = newBlk->bbCodeOffs | IL_OFFSETX_STKBIT;
2494             arg            = gtNewStmt(arg, impCurStmtOffs);
2495         }
2496
2497         fgInsertStmtAtEnd(newBlk, arg);
2498
2499         arg = gtNewLclvNode(tempNum, TYP_REF);
2500     }
2501
2502     impPushOnStack(arg, typeInfo(TI_REF, clsHnd));
2503
2504     return hndBlk;
2505 }
2506
2507 /*****************************************************************************
2508  *
2509  *  Given a tree, clone it. *pClone is set to the cloned tree.
2510  *  Returns the original tree if the cloning was easy,
2511  *   else returns the temp to which the tree had to be spilled.
2512  *  If the tree has side-effects, it will be spilled to a temp.
2513  */
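// A minimal usage sketch (illustrative only; 'addr' stands for a hypothetical tree that
// is needed twice without re-evaluating its side effects):
//
//   GenTreePtr addrUse2;
//   GenTreePtr addrUse1 = impCloneExpr(addr, &addrUse2, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
//                                      nullptr DEBUGARG("two uses of addr"));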
2514
2515 GenTreePtr Compiler::impCloneExpr(GenTreePtr           tree,
2516                                   GenTreePtr*          pClone,
2517                                   CORINFO_CLASS_HANDLE structHnd,
2518                                   unsigned             curLevel,
2519                                   GenTreePtr* pAfterStmt DEBUGARG(const char* reason))
2520 {
2521     if (!(tree->gtFlags & GTF_GLOB_EFFECT))
2522     {
2523         GenTreePtr clone = gtClone(tree, true);
2524
2525         if (clone)
2526         {
2527             *pClone = clone;
2528             return tree;
2529         }
2530     }
2531
2532     /* Store the operand in a temp and return the temp */
2533
2534     unsigned temp = lvaGrabTemp(true DEBUGARG(reason));
2535
2536     // impAssignTempGen() may change tree->gtType to TYP_VOID for calls which
2537     // return a struct type. It also may modify the struct type to a more
2538     // specialized type (e.g. a SIMD type).  So we will get the type from
2539     // the lclVar AFTER calling impAssignTempGen().
2540
2541     impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtOffs);
2542     var_types type = genActualType(lvaTable[temp].TypeGet());
2543
2544     *pClone = gtNewLclvNode(temp, type);
2545     return gtNewLclvNode(temp, type);
2546 }
2547
2548 /*****************************************************************************
2549  * Remember the IL offset (including stack-empty info) for the trees we will
2550  * generate now.
2551  */
2552
2553 inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs)
2554 {
2555     if (compIsForInlining())
2556     {
2557         GenTreePtr callStmt = impInlineInfo->iciStmt;
2558         assert(callStmt->gtOper == GT_STMT);
2559         impCurStmtOffs = callStmt->gtStmt.gtStmtILoffsx;
2560     }
2561     else
2562     {
2563         assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2564         IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2565         impCurStmtOffs    = offs | stkBit;
2566     }
2567 }
2568
2569 /*****************************************************************************
2570  * Returns current IL offset with stack-empty and call-instruction info incorporated
2571  */
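// For example (illustrative only; the exact bit values are defined by IL_OFFSETX_*):
// with offs == 0x40, a non-empty stack, and callInstruction == true, the result is
// 0x40 | IL_OFFSETX_STKBIT | IL_OFFSETX_CALLINSTRUCTIONBIT.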
2572 inline IL_OFFSETX Compiler::impCurILOffset(IL_OFFSET offs, bool callInstruction)
2573 {
2574     if (compIsForInlining())
2575     {
2576         return BAD_IL_OFFSET;
2577     }
2578     else
2579     {
2580         assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2581         IL_OFFSETX stkBit             = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2582         IL_OFFSETX callInstructionBit = callInstruction ? IL_OFFSETX_CALLINSTRUCTIONBIT : 0;
2583         return offs | stkBit | callInstructionBit;
2584     }
2585 }
2586
2587 /*****************************************************************************
2588  *
2589  *  Remember the instr offset for the statements
2590  *
2591  *  When we do impAppendTree(tree), we can't set tree->gtStmtLastILoffs to
2592  *  impCurOpcOffs, if the append was done because of a partial stack spill,
2593  *  as some of the trees corresponding to code up to impCurOpcOffs might
2594  *  still be sitting on the stack.
2595  *  So we delay marking of gtStmtLastILoffs until impNoteLastILoffs().
2596  *  This should be called when an opcode finally/explicitly causes
2597  *  impAppendTree(tree) to be called (as opposed to being called because of
2598  *  a spill caused by the opcode)
2599  */
2600
2601 #ifdef DEBUG
2602
2603 void Compiler::impNoteLastILoffs()
2604 {
2605     if (impLastILoffsStmt == nullptr)
2606     {
2607         // We should have added a statement for the current basic block
2608         // Is this assert correct ?
2609
2610         assert(impTreeLast);
2611         assert(impTreeLast->gtOper == GT_STMT);
2612
2613         impTreeLast->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2614     }
2615     else
2616     {
2617         impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2618         impLastILoffsStmt                          = nullptr;
2619     }
2620 }
2621
2622 #endif // DEBUG
2623
2624 /*****************************************************************************
2625  * We don't create any GenTree (excluding spills) for a branch.
2626  * For debugging info, we need a placeholder so that we can note
2627  * the IL offset in gtStmt.gtStmtOffs. So append an empty statement.
2628  */
2629
2630 void Compiler::impNoteBranchOffs()
2631 {
2632     if (opts.compDbgCode)
2633     {
2634         impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
2635     }
2636 }
2637
2638 /*****************************************************************************
2639  * Locate the next stmt boundary for which we need to record info.
2640  * We will have to spill the stack at such boundaries if it is not
2641  * already empty.
2642  * Returns the next stmt boundary (after the start of the block)
2643  */
2644
2645 unsigned Compiler::impInitBlockLineInfo()
2646 {
2647     /* Assume the block does not correspond with any IL offset. This prevents
2648        us from reporting extra offsets. Extra mappings can cause confusing
2649        stepping, especially if the extra mapping is a jump-target, and the
2650        debugger does not ignore extra mappings, but instead rewinds to the
2651        nearest known offset */
2652
2653     impCurStmtOffsSet(BAD_IL_OFFSET);
2654
2655     if (compIsForInlining())
2656     {
2657         return ~0;
2658     }
2659
2660     IL_OFFSET blockOffs = compCurBB->bbCodeOffs;
2661
2662     if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES))
2663     {
2664         impCurStmtOffsSet(blockOffs);
2665     }
2666
2667     if (false && (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES))
2668     {
2669         impCurStmtOffsSet(blockOffs);
2670     }
2671
2672     /* Always report IL offset 0 or some tests get confused.
2673        Probably a good idea anyway */
2674
2675     if (blockOffs == 0)
2676     {
2677         impCurStmtOffsSet(blockOffs);
2678     }
2679
2680     if (!info.compStmtOffsetsCount)
2681     {
2682         return ~0;
2683     }
2684
2685     /* Find the lowest explicit stmt boundary within the block */
2686
2687     /* Start looking at an entry that is based on our instr offset */
2688
2689     unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize;
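
    // For example (illustrative numbers only): with 10 recorded boundaries, an IL code size of
    // 200 bytes, and blockOffs == 60, the initial guess is (10 * 60) / 200 == 3; the loops
    // below then adjust the guess in either direction.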
2690
2691     if (index >= info.compStmtOffsetsCount)
2692     {
2693         index = info.compStmtOffsetsCount - 1;
2694     }
2695
2696     /* If we've guessed too far, back up */
2697
2698     while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs)
2699     {
2700         index--;
2701     }
2702
2703     /* If we guessed short, advance ahead */
2704
2705     while (info.compStmtOffsets[index] < blockOffs)
2706     {
2707         index++;
2708
2709         if (index == info.compStmtOffsetsCount)
2710         {
2711             return info.compStmtOffsetsCount;
2712         }
2713     }
2714
2715     assert(index < info.compStmtOffsetsCount);
2716
2717     if (info.compStmtOffsets[index] == blockOffs)
2718     {
2719         /* There is an explicit boundary for the start of this basic block.
2720            So we will start with bbCodeOffs. Else we will wait until we
2721            get to the next explicit boundary */
2722
2723         impCurStmtOffsSet(blockOffs);
2724
2725         index++;
2726     }
2727
2728     return index;
2729 }
2730
2731 /*****************************************************************************/
2732
2733 static inline bool impOpcodeIsCallOpcode(OPCODE opcode)
2734 {
2735     switch (opcode)
2736     {
2737         case CEE_CALL:
2738         case CEE_CALLI:
2739         case CEE_CALLVIRT:
2740             return true;
2741
2742         default:
2743             return false;
2744     }
2745 }
2746
2747 /*****************************************************************************/
2748
2749 static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode)
2750 {
2751     switch (opcode)
2752     {
2753         case CEE_CALL:
2754         case CEE_CALLI:
2755         case CEE_CALLVIRT:
2756         case CEE_JMP:
2757         case CEE_NEWOBJ:
2758         case CEE_NEWARR:
2759             return true;
2760
2761         default:
2762             return false;
2763     }
2764 }
2765
2766 /*****************************************************************************/
2767
2768 // One might think it is worth caching these values, but results indicate
2769 // that it isn't.
2770 // In addition, caching them causes SuperPMI to be unable to completely
2771 // encapsulate an individual method context.
2772 CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass()
2773 {
2774     CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
2775     assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr);
2776     return refAnyClass;
2777 }
2778
2779 CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass()
2780 {
2781     CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE);
2782     assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr);
2783     return typeHandleClass;
2784 }
2785
2786 CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle()
2787 {
2788     CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE);
2789     assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr);
2790     return argIteratorClass;
2791 }
2792
2793 CORINFO_CLASS_HANDLE Compiler::impGetStringClass()
2794 {
2795     CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING);
2796     assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr);
2797     return stringClass;
2798 }
2799
2800 CORINFO_CLASS_HANDLE Compiler::impGetObjectClass()
2801 {
2802     CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT);
2803     assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr);
2804     return objectClass;
2805 }
2806
2807 /*****************************************************************************
2808  *  "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we
2809  *  set its type to TYP_BYREF when we create it. We know whether it can be
2810  *  changed to TYP_I_IMPL only at the point where we use it.
2811  */
2812
2813 /* static */
2814 void Compiler::impBashVarAddrsToI(GenTreePtr tree1, GenTreePtr tree2)
2815 {
2816     if (tree1->IsVarAddr())
2817     {
2818         tree1->gtType = TYP_I_IMPL;
2819     }
2820
2821     if (tree2 && tree2->IsVarAddr())
2822     {
2823         tree2->gtType = TYP_I_IMPL;
2824     }
2825 }
2826
2827 /*****************************************************************************
2828  *  TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want
2829  *  to make that an explicit cast in our trees, so any implicit casts that
2830  *  exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are
2831  *  turned into explicit casts here.
2832  *  We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0)
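 *
 *  For example (illustrative), on a 64-bit target importing
 *      ldc.i4.1
 *      stloc.0     // local typed as native int
 *  would otherwise leave a TYP_INT value directly under the TYP_I_IMPL store;
 *  this helper either retypes a constant in place or wraps the value in a
 *  GT_CAST to TYP_I_IMPL so that the widening is explicit in the tree.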
2833  */
2834
2835 GenTreePtr Compiler::impImplicitIorI4Cast(GenTreePtr tree, var_types dstTyp)
2836 {
2837     var_types currType   = genActualType(tree->gtType);
2838     var_types wantedType = genActualType(dstTyp);
2839
2840     if (wantedType != currType)
2841     {
2842         // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
2843         if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp))
2844         {
2845             if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->gtIntCon.gtIconVal == 0)))
2846             {
2847                 tree->gtType = TYP_I_IMPL;
2848             }
2849         }
2850 #ifdef _TARGET_64BIT_
2851         else if (varTypeIsI(wantedType) && (currType == TYP_INT))
2852         {
2853             // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF
2854             tree = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
2855         }
2856         else if ((wantedType == TYP_INT) && varTypeIsI(currType))
2857         {
2858             // Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT
2859             tree = gtNewCastNode(TYP_INT, tree, TYP_INT);
2860         }
2861 #endif // _TARGET_64BIT_
2862     }
2863
2864     return tree;
2865 }
2866
2867 /*****************************************************************************
2868  *  TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases,
2869  *  but we want to make that an explicit cast in our trees, so any implicit casts
2870  *  that exist in the IL are turned into explicit casts here.
2871  */
2872
2873 GenTreePtr Compiler::impImplicitR4orR8Cast(GenTreePtr tree, var_types dstTyp)
2874 {
2875 #ifndef LEGACY_BACKEND
2876     if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType))
2877     {
2878         tree = gtNewCastNode(dstTyp, tree, dstTyp);
2879     }
2880 #endif // !LEGACY_BACKEND
2881
2882     return tree;
2883 }
2884
2885 //------------------------------------------------------------------------
2886 // impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray
2887 //    with a GT_COPYBLK node.
2888 //
2889 // Arguments:
2890 //    sig - The InitializeArray signature.
2891 //
2892 // Return Value:
2893 //    A pointer to the newly created GT_COPYBLK node if the replacement succeeds or
2894 //    nullptr otherwise.
2895 //
2896 // Notes:
2897 //    The function recognizes the following IL pattern:
2898 //      ldc <length> or a list of ldc <lower bound>/<length>
2899 //      newarr or newobj
2900 //      dup
2901 //      ldtoken <field handle>
2902 //      call InitializeArray
2903 //    The lower bounds need not be constant except when the array rank is 1.
2904 //    The function recognizes all kinds of arrays thus enabling a small runtime
2905 //    such as CoreRT to skip providing an implementation for InitializeArray.
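//
//    For example (illustrative, not taken from a specific test), a C# array field
//    initializer such as
//      static readonly int[] Primes = { 2, 3, 5, 7 };
//    typically compiles to IL of this shape:
//      ldc.i4.4
//      newarr   int32
//      dup
//      ldtoken  <PrivateImplementationDetails> field holding the raw data
//      call     System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray
//    which is the pattern this function matches and replaces with a block copy.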
2906
2907 GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
2908 {
2909     assert(sig->numArgs == 2);
2910
2911     GenTreePtr fieldTokenNode = impStackTop(0).val;
2912     GenTreePtr arrayLocalNode = impStackTop(1).val;
2913
2914     //
2915     // Verify that the field token is known and valid.  Note that it's also
2916     // possible for the token to come from reflection, in which case we cannot do
2917     // the optimization and must therefore revert to calling the helper.  You can
2918     // see an example of this in bvt\DynIL\initarray2.exe (in Main).
2919     //
2920
2921     // Check to see if the ldtoken helper call is what we see here.
2922     if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->gtCall.gtCallType != CT_HELPER) ||
2923         (fieldTokenNode->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
2924     {
2925         return nullptr;
2926     }
2927
2928     // Strip helper call away
2929     fieldTokenNode = fieldTokenNode->gtCall.gtCallArgs->Current();
2930
2931     if (fieldTokenNode->gtOper == GT_IND)
2932     {
2933         fieldTokenNode = fieldTokenNode->gtOp.gtOp1;
2934     }
2935
2936     // Check for constant
2937     if (fieldTokenNode->gtOper != GT_CNS_INT)
2938     {
2939         return nullptr;
2940     }
2941
2942     CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->gtIntCon.gtCompileTimeHandle;
2943     if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
2944     {
2945         return nullptr;
2946     }
2947
2948     //
2949     // We need to get the number of elements in the array and the size of each element.
2950     // We verify that the newarr statement is exactly what we expect it to be.
2951     // If it's not, then we just return nullptr and don't optimize this call.
2952     //
2953
2954     //
2955     // It is possible that we don't have any statements in the block yet
2956     //
2957     if (impTreeLast->gtOper != GT_STMT)
2958     {
2959         assert(impTreeLast->gtOper == GT_BEG_STMTS);
2960         return nullptr;
2961     }
2962
2963     //
2964     // We start by looking at the last statement, making sure it's an assignment, and
2965     // that the target of the assignment is the array passed to InitializeArray.
2966     //
2967     GenTreePtr arrayAssignment = impTreeLast->gtStmt.gtStmtExpr;
2968     if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->gtOp.gtOp1->gtOper != GT_LCL_VAR) ||
2969         (arrayLocalNode->gtOper != GT_LCL_VAR) ||
2970         (arrayAssignment->gtOp.gtOp1->gtLclVarCommon.gtLclNum != arrayLocalNode->gtLclVarCommon.gtLclNum))
2971     {
2972         return nullptr;
2973     }
2974
2975     //
2976     // Make sure that the object being assigned is a helper call.
2977     //
2978
2979     GenTreePtr newArrayCall = arrayAssignment->gtOp.gtOp2;
2980     if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->gtCall.gtCallType != CT_HELPER))
2981     {
2982         return nullptr;
2983     }
2984
2985     //
2986     // Verify that it is one of the new array helpers.
2987     //
2988
2989     bool isMDArray = false;
2990
2991     if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) &&
2992         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) &&
2993         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) &&
2994         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8)
2995 #ifdef FEATURE_READYTORUN_COMPILER
2996         && newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)
2997 #endif
2998             )
2999     {
3000 #if COR_JIT_EE_VERSION > 460
3001         if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR_NONVARARG))
3002         {
3003             return nullptr;
3004         }
3005
3006         isMDArray = true;
3007 #endif
3008     }
3009
3010     CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->gtCall.compileTimeHelperArgumentHandle;
3011
3012     //
3013     // Make sure we found a compile time handle to the array
3014     //
3015
3016     if (!arrayClsHnd)
3017     {
3018         return nullptr;
3019     }
3020
3021     unsigned rank = 0;
3022     S_UINT32 numElements;
3023
3024     if (isMDArray)
3025     {
3026         rank = info.compCompHnd->getArrayRank(arrayClsHnd);
3027
3028         if (rank == 0)
3029         {
3030             return nullptr;
3031         }
3032
3033         GenTreeArgList* tokenArg = newArrayCall->gtCall.gtCallArgs;
3034         assert(tokenArg != nullptr);
3035         GenTreeArgList* numArgsArg = tokenArg->Rest();
3036         assert(numArgsArg != nullptr);
3037         GenTreeArgList* argsArg = numArgsArg->Rest();
3038         assert(argsArg != nullptr);
3039
3040         //
3041         // The number of arguments should be a constant between 1 and 64. The rank can't be 0
3042         // so at least one length must be present and the rank can't exceed 32 so there can
3043         // be at most 64 arguments - 32 lengths and 32 lower bounds.
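        //
        // For example (illustrative), "new int[2, 3, 4]" has rank 3 and passes
        // numArgs == 3 (lengths only), while a rank-2 array type constructed with
        // explicit lower bounds passes numArgs == 4, one lower bound/length pair
        // per dimension.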
3044         //
3045
3046         if ((!numArgsArg->Current()->IsCnsIntOrI()) || (numArgsArg->Current()->AsIntCon()->IconValue() < 1) ||
3047             (numArgsArg->Current()->AsIntCon()->IconValue() > 64))
3048         {
3049             return nullptr;
3050         }
3051
3052         unsigned numArgs = static_cast<unsigned>(numArgsArg->Current()->AsIntCon()->IconValue());
3053         bool     lowerBoundsSpecified;
3054
3055         if (numArgs == rank * 2)
3056         {
3057             lowerBoundsSpecified = true;
3058         }
3059         else if (numArgs == rank)
3060         {
3061             lowerBoundsSpecified = false;
3062
3063             //
3064             // If the rank is 1 and a lower bound isn't specified then the runtime creates
3065             // a SDArray. Note that even if a lower bound is specified it can be 0 and then
3066             // we get a SDArray as well, see the for loop below.
3067             //
3068
3069             if (rank == 1)
3070             {
3071                 isMDArray = false;
3072             }
3073         }
3074         else
3075         {
3076             return nullptr;
3077         }
3078
3079         //
3080         // The rank is known to be at least 1 so we can start with numElements being 1
3081         // to avoid the need to special case the first dimension.
3082         //
3083
3084         numElements = S_UINT32(1);
3085
3086         struct Match
3087         {
3088             static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3089             {
3090                 return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) &&
3091                        IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3092             }
3093
3094             static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3095             {
3096                 return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) &&
3097                        (tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) &&
3098                        IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3099             }
3100
3101             static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs)
3102             {
3103                 return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) &&
3104                        (tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs);
3105             }
3106
3107             static bool IsComma(GenTree* tree)
3108             {
3109                 return (tree != nullptr) && (tree->OperGet() == GT_COMMA);
3110             }
3111         };
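
        // The helper's args argument is expected to be a comma chain that stores each
        // dimension's (optional lower bound and) length into the lvaNewObjArrayArgs
        // temp and finally yields its address; roughly (illustrative, "argsTemp"
        // stands for that temp):
        //   COMMA(ASG(IND(ADD(ADDR(argsTemp), 0)), <len0>),
        //     COMMA(ASG(IND(ADD(ADDR(argsTemp), 4)), <len1>),
        //       ...
        //         ADDR(argsTemp)))
        // The Match helpers above check exactly this shape as the loop below walks it.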
3112
3113         unsigned argIndex = 0;
3114         GenTree* comma;
3115
3116         for (comma = argsArg->Current(); Match::IsComma(comma); comma = comma->gtGetOp2())
3117         {
3118             if (lowerBoundsSpecified)
3119             {
3120                 //
3121                 // In general lower bounds can be ignored because they're not needed to
3122                 // calculate the total number of elements. But for single dimensional arrays
3123                 // we need to know if the lower bound is 0 because in this case the runtime
3124                 // creates a SDArray and this affects the way the array data offset is calculated.
3125                 //
3126
3127                 if (rank == 1)
3128                 {
3129                     GenTree* lowerBoundAssign = comma->gtGetOp1();
3130                     assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs));
3131                     GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2();
3132
3133                     if (lowerBoundNode->IsIntegralConst(0))
3134                     {
3135                         isMDArray = false;
3136                     }
3137                 }
3138
3139                 comma = comma->gtGetOp2();
3140                 argIndex++;
3141             }
3142
3143             GenTree* lengthNodeAssign = comma->gtGetOp1();
3144             assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs));
3145             GenTree* lengthNode = lengthNodeAssign->gtGetOp2();
3146
3147             if (!lengthNode->IsCnsIntOrI())
3148             {
3149                 return nullptr;
3150             }
3151
3152             numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue());
3153             argIndex++;
3154         }
3155
3156         assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs));
3157
3158         if (argIndex != numArgs)
3159         {
3160             return nullptr;
3161         }
3162     }
3163     else
3164     {
3165         //
3166         // Make sure there are exactly two arguments:  the array class and
3167         // the number of elements.
3168         //
3169
3170         GenTreePtr arrayLengthNode;
3171
3172         GenTreeArgList* args = newArrayCall->gtCall.gtCallArgs;
3173 #ifdef FEATURE_READYTORUN_COMPILER
3174         if (newArrayCall->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1))
3175         {
3176             // Array length is 1st argument for readytorun helper
3177             arrayLengthNode = args->Current();
3178         }
3179         else
3180 #endif
3181         {
3182             // Array length is 2nd argument for regular helper
3183             arrayLengthNode = args->Rest()->Current();
3184         }
3185
3186         //
3187     // Make sure that the number of elements looks valid.
3188         //
3189         if (arrayLengthNode->gtOper != GT_CNS_INT)
3190         {
3191             return nullptr;
3192         }
3193
3194         numElements = S_SIZE_T(arrayLengthNode->gtIntCon.gtIconVal);
3195
3196         if (!info.compCompHnd->isSDArray(arrayClsHnd))
3197         {
3198             return nullptr;
3199         }
3200     }
3201
3202     CORINFO_CLASS_HANDLE elemClsHnd;
3203     var_types            elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd));
3204
3205     //
3206     // Note that genTypeSize will return zero for non primitive types, which is exactly
3207     // what we want (size will then be 0, and we will catch this in the conditional below).
3208     // Note that we don't expect this to fail for valid binaries, so we assert in the
3209     // non-verification case (the verification case should not assert but rather correctly
3210     // handle bad binaries).  This assert is not guarding any specific invariant, but rather
3211     // saying that we don't expect this to happen, and if it is hit, we need to investigate
3212     // why.
3213     //
3214
3215     S_UINT32 elemSize(genTypeSize(elementType));
3216     S_UINT32 size = elemSize * S_UINT32(numElements);
3217
3218     if (size.IsOverflow())
3219     {
3220         return nullptr;
3221     }
3222
3223     if ((size.Value() == 0) || (varTypeIsGC(elementType)))
3224     {
3225         assert(verNeedsVerification());
3226         return nullptr;
3227     }
3228
3229     void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value());
3230     if (!initData)
3231     {
3232         return nullptr;
3233     }
3234
3235     //
3236     // At this point we are ready to commit to implementing the InitializeArray
3237     // intrinsic using a struct assignment.  Pop the arguments from the stack and
3238     // return the struct assignment node.
3239     //
3240
3241     impPopStack();
3242     impPopStack();
3243
3244     const unsigned blkSize = size.Value();
3245     GenTreePtr     dst;
3246
3247     if (isMDArray)
3248     {
3249         unsigned dataOffset = eeGetMDArrayDataOffset(elementType, rank);
3250
3251         dst = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
3252     }
3253     else
3254     {
3255         dst = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewIndexRef(elementType, arrayLocalNode, gtNewIconNode(0)));
3256     }
3257     GenTreePtr blk     = gtNewBlockVal(dst, blkSize);
3258     GenTreePtr srcAddr = gtNewIconHandleNode((size_t)initData, GTF_ICON_STATIC_HDL);
3259     GenTreePtr src     = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
3260
3261     return gtNewBlkOpNode(blk,     // dst
3262                           src,     // src
3263                           blkSize, // size
3264                           false,   // volatil
3265                           true);   // copyBlock
3266 }
3267
3268 /*****************************************************************************/
3269 // Returns the GenTree that should be used to do the intrinsic instead of the call.
3270 // Returns NULL if an intrinsic cannot be used
3271
3272 GenTreePtr Compiler::impIntrinsic(GenTreePtr            newobjThis,
3273                                   CORINFO_CLASS_HANDLE  clsHnd,
3274                                   CORINFO_METHOD_HANDLE method,
3275                                   CORINFO_SIG_INFO*     sig,
3276                                   int                   memberRef,
3277                                   bool                  readonlyCall,
3278                                   bool                  tailCall,
3279                                   CorInfoIntrinsics*    pIntrinsicID)
3280 {
3281     bool mustExpand = false;
3282 #if COR_JIT_EE_VERSION > 460
3283     CorInfoIntrinsics intrinsicID = info.compCompHnd->getIntrinsicID(method, &mustExpand);
3284 #else
3285     CorInfoIntrinsics intrinsicID                                      = info.compCompHnd->getIntrinsicID(method);
3286 #endif
3287     *pIntrinsicID = intrinsicID;
3288
3289 #ifndef _TARGET_ARM_
3290     genTreeOps interlockedOperator;
3291 #endif
3292
3293     if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContext)
3294     {
3295         // must be done regardless of DbgCode and MinOpts
3296         return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL);
3297     }
3298 #ifdef _TARGET_64BIT_
3299     if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr)
3300     {
3301         // must be done regardless of DbgCode and MinOpts
3302         return gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL));
3303     }
3304 #else
3305     assert(intrinsicID != CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr);
3306 #endif
3307
3308     GenTreePtr retNode = nullptr;
3309
3310     //
3311     // We disable the inlining of intrinsics for MinOpts.
3312     //
3313     if (!mustExpand && (opts.compDbgCode || opts.MinOpts()))
3314     {
3315         *pIntrinsicID = CORINFO_INTRINSIC_Illegal;
3316         return retNode;
3317     }
3318
3319     // Currently we don't have CORINFO_INTRINSIC_Exp because it does not
3320     // seem to work properly for Infinity values; we don't do
3321     // CORINFO_INTRINSIC_Pow because it needs a helper that we currently don't have.
3322
3323     var_types callType = JITtype2varType(sig->retType);
3324
3325     /* First do the intrinsics which are always smaller than a call */
3326
3327     switch (intrinsicID)
3328     {
3329         GenTreePtr op1, op2;
3330
3331         case CORINFO_INTRINSIC_Sin:
3332         case CORINFO_INTRINSIC_Sqrt:
3333         case CORINFO_INTRINSIC_Abs:
3334         case CORINFO_INTRINSIC_Cos:
3335         case CORINFO_INTRINSIC_Round:
3336         case CORINFO_INTRINSIC_Cosh:
3337         case CORINFO_INTRINSIC_Sinh:
3338         case CORINFO_INTRINSIC_Tan:
3339         case CORINFO_INTRINSIC_Tanh:
3340         case CORINFO_INTRINSIC_Asin:
3341         case CORINFO_INTRINSIC_Acos:
3342         case CORINFO_INTRINSIC_Atan:
3343         case CORINFO_INTRINSIC_Atan2:
3344         case CORINFO_INTRINSIC_Log10:
3345         case CORINFO_INTRINSIC_Pow:
3346         case CORINFO_INTRINSIC_Exp:
3347         case CORINFO_INTRINSIC_Ceiling:
3348         case CORINFO_INTRINSIC_Floor:
3349
3350             // These are math intrinsics
3351
3352             assert(callType != TYP_STRUCT);
3353
3354             op1 = nullptr;
3355
3356 #if defined(LEGACY_BACKEND)
3357             if (IsTargetIntrinsic(intrinsicID))
3358 #elif !defined(_TARGET_X86_)
3359             // Intrinsics that are not implemented directly by target instructions will
3360             // be re-materialized as user calls in rationalizer. For prefixed tail calls,
3361             // don't do this optimization, because
3362             //  a) for backward compatibility reasons on desktop .NET 4.6 / 4.6.1, and
3363             //  b) it would be a non-trivial task, or too late, to re-materialize a surviving
3364             //     tail-prefixed GT_INTRINSIC as a tail call in rationalizer.
3365             if (!IsIntrinsicImplementedByUserCall(intrinsicID) || !tailCall)
3366 #else
3367             // On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation
3368             // of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad
3369             // code generation for certain EH constructs.
3370             if (!IsIntrinsicImplementedByUserCall(intrinsicID))
3371 #endif
3372             {
3373                 switch (sig->numArgs)
3374                 {
3375                     case 1:
3376                         op1 = impPopStack().val;
3377
3378 #if FEATURE_X87_DOUBLES
3379
3380                         // X87 stack doesn't differentiate between float/double
3381                         // so it doesn't need a cast, but everybody else does
3382                         // Just double check it is at least a FP type
3383                         noway_assert(varTypeIsFloating(op1));
3384
3385 #else // FEATURE_X87_DOUBLES
3386
3387                         if (op1->TypeGet() != callType)
3388                         {
3389                             op1 = gtNewCastNode(callType, op1, callType);
3390                         }
3391
3392 #endif // FEATURE_X87_DOUBLES
3393
3394                         op1 = new (this, GT_INTRINSIC)
3395                             GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3396                         break;
3397
3398                     case 2:
3399                         op2 = impPopStack().val;
3400                         op1 = impPopStack().val;
3401
3402 #if FEATURE_X87_DOUBLES
3403
3404                         // X87 stack doesn't differentiate between float/double
3405                         // so it doesn't need a cast, but everybody else does
3406                         // Just double check it is at least a FP type
3407                         noway_assert(varTypeIsFloating(op2));
3408                         noway_assert(varTypeIsFloating(op1));
3409
3410 #else // FEATURE_X87_DOUBLES
3411
3412                         if (op2->TypeGet() != callType)
3413                         {
3414                             op2 = gtNewCastNode(callType, op2, callType);
3415                         }
3416                         if (op1->TypeGet() != callType)
3417                         {
3418                             op1 = gtNewCastNode(callType, op1, callType);
3419                         }
3420
3421 #endif // FEATURE_X87_DOUBLES
3422
3423                         op1 = new (this, GT_INTRINSIC)
3424                             GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicID, method);
3425                         break;
3426
3427                     default:
3428                         NO_WAY("Unsupported number of args for Math Intrinsic");
3429                 }
3430
3431 #ifndef LEGACY_BACKEND
3432                 if (IsIntrinsicImplementedByUserCall(intrinsicID))
3433                 {
3434                     op1->gtFlags |= GTF_CALL;
3435                 }
3436 #endif
3437             }
3438
3439             retNode = op1;
3440             break;
3441
3442 #ifdef _TARGET_XARCH_
3443         // TODO-ARM-CQ: reenable treating Interlocked operation as intrinsic
3444         case CORINFO_INTRINSIC_InterlockedAdd32:
3445             interlockedOperator = GT_LOCKADD;
3446             goto InterlockedBinOpCommon;
3447         case CORINFO_INTRINSIC_InterlockedXAdd32:
3448             interlockedOperator = GT_XADD;
3449             goto InterlockedBinOpCommon;
3450         case CORINFO_INTRINSIC_InterlockedXchg32:
3451             interlockedOperator = GT_XCHG;
3452             goto InterlockedBinOpCommon;
3453
3454 #ifdef _TARGET_AMD64_
3455         case CORINFO_INTRINSIC_InterlockedAdd64:
3456             interlockedOperator = GT_LOCKADD;
3457             goto InterlockedBinOpCommon;
3458         case CORINFO_INTRINSIC_InterlockedXAdd64:
3459             interlockedOperator = GT_XADD;
3460             goto InterlockedBinOpCommon;
3461         case CORINFO_INTRINSIC_InterlockedXchg64:
3462             interlockedOperator = GT_XCHG;
3463             goto InterlockedBinOpCommon;
3464 #endif // _TARGET_AMD64_
3465
3466         InterlockedBinOpCommon:
3467             assert(callType != TYP_STRUCT);
3468             assert(sig->numArgs == 2);
3469
3470             op2 = impPopStack().val;
3471             op1 = impPopStack().val;
3472
3473             // This creates:
3474             //   val
3475             // XAdd
3476             //   addr
3477             //     field (for example)
3478             //
3479             // In the case where the first argument is the address of a local, we might
3480             // want to make this *not* make the var address-taken -- but atomic instructions
3481             // on a local are probably pretty useless anyway, so we probably don't care.
3482
3483             op1 = gtNewOperNode(interlockedOperator, genActualType(callType), op1, op2);
3484             op1->gtFlags |= GTF_GLOB_EFFECT;
3485             retNode = op1;
3486             break;
3487 #endif // _TARGET_XARCH_
3488
3489         case CORINFO_INTRINSIC_MemoryBarrier:
3490
3491             assert(sig->numArgs == 0);
3492
3493             op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
3494             op1->gtFlags |= GTF_GLOB_EFFECT;
3495             retNode = op1;
3496             break;
3497
3498 #ifdef _TARGET_XARCH_
3499         // TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic
3500         case CORINFO_INTRINSIC_InterlockedCmpXchg32:
3501 #ifdef _TARGET_AMD64_
3502         case CORINFO_INTRINSIC_InterlockedCmpXchg64:
3503 #endif
3504         {
3505             assert(callType != TYP_STRUCT);
3506             assert(sig->numArgs == 3);
3507             GenTreePtr op3;
3508
3509             op3 = impPopStack().val; // comparand
3510             op2 = impPopStack().val; // value
3511             op1 = impPopStack().val; // location
3512
3513             GenTreePtr node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3);
3514
3515             node->gtCmpXchg.gtOpLocation->gtFlags |= GTF_DONT_CSE;
3516             retNode = node;
3517             break;
3518         }
3519 #endif
3520
3521         case CORINFO_INTRINSIC_StringLength:
3522             op1 = impPopStack().val;
3523             if (!opts.MinOpts() && !opts.compDbgCode)
3524             {
3525                 GenTreeArrLen* arrLen =
3526                     new (this, GT_ARR_LENGTH) GenTreeArrLen(TYP_INT, op1, offsetof(CORINFO_String, stringLen));
3527                 op1 = arrLen;
3528             }
3529             else
3530             {
3531                 /* Create the expression "*(str_addr + stringLengthOffset)" */
3532                 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
3533                                     gtNewIconNode(offsetof(CORINFO_String, stringLen), TYP_I_IMPL));
3534                 op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
3535             }
3536             retNode = op1;
3537             break;
3538
3539         case CORINFO_INTRINSIC_StringGetChar:
3540             op2 = impPopStack().val;
3541             op1 = impPopStack().val;
3542             op1 = gtNewIndexRef(TYP_CHAR, op1, op2);
3543             op1->gtFlags |= GTF_INX_STRING_LAYOUT;
3544             retNode = op1;
3545             break;
3546
3547         case CORINFO_INTRINSIC_InitializeArray:
3548             retNode = impInitializeArrayIntrinsic(sig);
3549             break;
3550
3551         case CORINFO_INTRINSIC_Array_Address:
3552         case CORINFO_INTRINSIC_Array_Get:
3553         case CORINFO_INTRINSIC_Array_Set:
3554             retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, intrinsicID);
3555             break;
3556
3557         case CORINFO_INTRINSIC_GetTypeFromHandle:
3558             op1 = impStackTop(0).val;
3559             if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3560                 gtIsTypeHandleToRuntimeTypeHelper(op1))
3561             {
3562                 op1 = impPopStack().val;
3563                 // Change call to return RuntimeType directly.
3564                 op1->gtType = TYP_REF;
3565                 retNode     = op1;
3566             }
3567             // Call the regular function.
3568             break;
3569
3570         case CORINFO_INTRINSIC_RTH_GetValueInternal:
3571             op1 = impStackTop(0).val;
3572             if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3573                 gtIsTypeHandleToRuntimeTypeHelper(op1))
3574             {
3575                 // Old tree
3576                 // Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle
3577                 //
3578                 // New tree
3579                 // TreeToGetNativeTypeHandle
3580
3581                 // Remove call to helper and return the native TypeHandle pointer that was the parameter
3582                 // to that helper.
3583
3584                 op1 = impPopStack().val;
3585
3586                 // Get native TypeHandle argument to old helper
3587                 op1 = op1->gtCall.gtCallArgs;
3588                 assert(op1->OperIsList());
3589                 assert(op1->gtOp.gtOp2 == nullptr);
3590                 op1     = op1->gtOp.gtOp1;
3591                 retNode = op1;
3592             }
3593             // Call the regular function.
3594             break;
3595
3596 #ifndef LEGACY_BACKEND
3597         case CORINFO_INTRINSIC_Object_GetType:
3598
3599             op1 = impPopStack().val;
3600             op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3601
3602             // Set the CALL flag to indicate that the operator is implemented by a call.
3603             // Set also the EXCEPTION flag because the native implementation of
3604             // CORINFO_INTRINSIC_Object_GetType intrinsic can throw NullReferenceException.
3605             op1->gtFlags |= (GTF_CALL | GTF_EXCEPT);
3606             retNode = op1;
3607             break;
3608 #endif
3609         // Implement ByReference Ctor.  This wraps the assignment of the ref into a byref-like field
3610         // in a value type.  The canonical example of this is Span<T>. In effect this is just a
3611         // substitution.  The parameter byref will be assigned into the newly allocated object.
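        // For example (illustrative), "new ByReference<int>(ref x)" is imported as an
        // assignment of the incoming byref to the struct's single pointer-sized field,
        // and the struct value is pushed back onto the stack in place of the ctor call.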
3612         case CORINFO_INTRINSIC_ByReference_Ctor:
3613         {
3614             // Remove call to constructor and directly assign the byref passed
3615             // to the call to the first slot of the ByReference struct.
3616             op1                                    = impPopStack().val;
3617             GenTreePtr           thisptr           = newobjThis;
3618             CORINFO_FIELD_HANDLE fldHnd            = info.compCompHnd->getFieldInClass(clsHnd, 0);
3619             GenTreePtr           field             = gtNewFieldRef(TYP_BYREF, fldHnd, thisptr, 0, false);
3620             GenTreePtr           assign            = gtNewAssignNode(field, op1);
3621             GenTreePtr           byReferenceStruct = gtCloneExpr(thisptr->gtGetOp1());
3622             assert(byReferenceStruct != nullptr);
3623             impPushOnStack(byReferenceStruct, typeInfo(TI_STRUCT, clsHnd));
3624             retNode = assign;
3625             break;
3626         }
3627         // Implement ptr value getter for ByReference struct.
3628         case CORINFO_INTRINSIC_ByReference_Value:
3629         {
3630             op1                         = impPopStack().val;
3631             CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
3632             GenTreePtr           field  = gtNewFieldRef(TYP_BYREF, fldHnd, op1, 0, false);
3633             retNode                     = field;
3634             break;
3635         }
3636         default:
3637             /* Unknown intrinsic */
3638             break;
3639     }
3640
3641     if (mustExpand)
3642     {
3643         if (retNode == nullptr)
3644         {
3645             NO_WAY("JIT must expand the intrinsic!");
3646         }
3647     }
3648
3649     return retNode;
3650 }
3651
3652 /*****************************************************************************/
3653
3654 GenTreePtr Compiler::impArrayAccessIntrinsic(
3655     CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, CorInfoIntrinsics intrinsicID)
3656 {
3657     /* If we are generating SMALL_CODE, we don't want to use intrinsics for
3658        the following, as it generates fatter code.
3659     */
3660
3661     if (compCodeOpt() == SMALL_CODE)
3662     {
3663         return nullptr;
3664     }
3665
3666     /* These intrinsics generate fatter (but faster) code and are only
3667        done if we don't need SMALL_CODE */
3668
3669     unsigned rank = (intrinsicID == CORINFO_INTRINSIC_Array_Set) ? (sig->numArgs - 1) : sig->numArgs;
3670
3671     // The rank 1 case is special because it has to handle two array formats;
3672     // we simply don't handle that case.
3673     if (rank > GT_ARR_MAX_RANK || rank <= 1)
3674     {
3675         return nullptr;
3676     }
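
    // For the ranks we do handle, the expansion built at the end of this function is a
    // GT_ARR_ELEM element-address computation. For example (illustrative), a rank-2
    // Array.Get(i, j) becomes IND(ARR_ELEM(arr, i, j)), Array.Set(i, j, v) becomes
    // ASG(IND(ARR_ELEM(arr, i, j)), v), and Array.Address(i, j) returns the ARR_ELEM
    // byref directly.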
3677
3678     CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr;
3679     var_types            elemType      = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd));
3680
3681     // For the ref case, we will only be able to inline if the types match
3682     // (the verifier checks for this; we don't care for the nonverified case) and the
3683     // type is final (so we don't need to do the cast).
3684     if ((intrinsicID != CORINFO_INTRINSIC_Array_Get) && !readonlyCall && varTypeIsGC(elemType))
3685     {
3686         // Get the call site signature
3687         CORINFO_SIG_INFO LocalSig;
3688         eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig);
3689         assert(LocalSig.hasThis());
3690
3691         CORINFO_CLASS_HANDLE actualElemClsHnd;
3692
3693         if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3694         {
3695             // Fetch the last argument, the one that indicates the type we are setting.
3696             CORINFO_ARG_LIST_HANDLE argType = LocalSig.args;
3697             for (unsigned r = 0; r < rank; r++)
3698             {
3699                 argType = info.compCompHnd->getArgNext(argType);
3700             }
3701
3702             typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType);
3703             actualElemClsHnd = argInfo.GetClassHandle();
3704         }
3705         else
3706         {
3707             assert(intrinsicID == CORINFO_INTRINSIC_Array_Address);
3708
3709             // Fetch the return type
3710             typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass);
3711             assert(retInfo.IsByRef());
3712             actualElemClsHnd = retInfo.GetClassHandle();
3713         }
3714
3715         // if it's not final, we can't do the optimization
3716         if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL))
3717         {
3718             return nullptr;
3719         }
3720     }
3721
3722     unsigned arrayElemSize;
3723     if (elemType == TYP_STRUCT)
3724     {
3725         assert(arrElemClsHnd);
3726
3727         arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd);
3728     }
3729     else
3730     {
3731         arrayElemSize = genTypeSize(elemType);
3732     }
3733
3734     if ((unsigned char)arrayElemSize != arrayElemSize)
3735     {
3736         // arrayElemSize would be truncated as an unsigned char.
3737         // This means the array element is too large. Don't do the optimization.
3738         return nullptr;
3739     }
3740
3741     GenTreePtr val = nullptr;
3742
3743     if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3744     {
3745         // Assignment of a struct is more work, and there are more gets than sets.
3746         if (elemType == TYP_STRUCT)
3747         {
3748             return nullptr;
3749         }
3750
3751         val = impPopStack().val;
3752         assert(genActualType(elemType) == genActualType(val->gtType) ||
3753                (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) ||
3754                (elemType == TYP_INT && val->gtType == TYP_BYREF) ||
3755                (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT));
3756     }
3757
3758     noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK);
3759
3760     GenTreePtr inds[GT_ARR_MAX_RANK];
3761     for (unsigned k = rank; k > 0; k--)
3762     {
3763         inds[k - 1] = impPopStack().val;
3764     }
3765
3766     GenTreePtr arr = impPopStack().val;
3767     assert(arr->gtType == TYP_REF);
3768
3769     GenTreePtr arrElem =
3770         new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank),
3771                                                static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]);
3772
3773     if (intrinsicID != CORINFO_INTRINSIC_Array_Address)
3774     {
3775         arrElem = gtNewOperNode(GT_IND, elemType, arrElem);
3776     }
3777
3778     if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3779     {
3780         assert(val != nullptr);
3781         return gtNewAssignNode(arrElem, val);
3782     }
3783     else
3784     {
3785         return arrElem;
3786     }
3787 }
3788
3789 BOOL Compiler::verMergeEntryStates(BasicBlock* block, bool* changed)
3790 {
3791     unsigned i;
3792
3793     // do some basic checks first
3794     if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth)
3795     {
3796         return FALSE;
3797     }
3798
3799     if (verCurrentState.esStackDepth > 0)
3800     {
3801         // merge stack types
3802         StackEntry* parentStack = block->bbStackOnEntry();
3803         StackEntry* childStack  = verCurrentState.esStack;
3804
3805         for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++)
3806         {
3807             if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == FALSE)
3808             {
3809                 return FALSE;
3810             }
3811         }
3812     }
3813
3814     // merge initialization status of this ptr
3815
3816     if (verTrackObjCtorInitState)
3817     {
3818         // If we're tracking the CtorInitState, then it must not be unknown in the current state.
3819         assert(verCurrentState.thisInitialized != TIS_Bottom);
3820
3821         // If the successor block's thisInit state is unknown, copy it from the current state.
3822         if (block->bbThisOnEntry() == TIS_Bottom)
3823         {
3824             *changed = true;
3825             verSetThisInit(block, verCurrentState.thisInitialized);
3826         }
3827         else if (verCurrentState.thisInitialized != block->bbThisOnEntry())
3828         {
3829             if (block->bbThisOnEntry() != TIS_Top)
3830             {
3831                 *changed = true;
3832                 verSetThisInit(block, TIS_Top);
3833
3834                 if (block->bbFlags & BBF_FAILED_VERIFICATION)
3835                 {
3836                     // The block is bad. Control can flow through the block to any handler that catches the
3837                     // verification exception, but the importer ignores bad blocks and therefore won't model
3838                     // this flow in the normal way. To complete the merge into the bad block, the new state
3839                     // needs to be manually pushed to the handlers that may be reached after the verification
3840                     // exception occurs.
3841                     //
3842                     // Usually, the new state was already propagated to the relevant handlers while processing
3843                     // the predecessors of the bad block. The exception is when the bad block is at the start
3844                     // of a try region, meaning it is protected by additional handlers that do not protect its
3845                     // predecessors.
3846                     //
3847                     if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0))
3848                     {
3849                         // Push TIS_Top to the handlers that protect the bad block. Note that this can cause
3850                         // recursive calls back into this code path (if successors of the current bad block are
3851                         // also bad blocks).
3852                         //
3853                         ThisInitState origTIS           = verCurrentState.thisInitialized;
3854                         verCurrentState.thisInitialized = TIS_Top;
3855                         impVerifyEHBlock(block, true);
3856                         verCurrentState.thisInitialized = origTIS;
3857                     }
3858                 }
3859             }
3860         }
3861     }
3862     else
3863     {
3864         assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom);
3865     }
3866
3867     return TRUE;
3868 }
3869
3870 /*****************************************************************************
3871  * 'logMsg' is true if a log message needs to be logged, false if the caller has
3872  *   already logged it (presumably in a more detailed fashion than done here)
3873  * 'bVerificationException' is true for a verification exception, false for a
3874  *   "call unauthorized by host" exception.
3875  */
3876
3877 void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg))
3878 {
3879     block->bbJumpKind = BBJ_THROW;
3880     block->bbFlags |= BBF_FAILED_VERIFICATION;
3881
3882     impCurStmtOffsSet(block->bbCodeOffs);
3883
3884 #ifdef DEBUG
3885     // we need this since BeginTreeList asserts otherwise
3886     impTreeList = impTreeLast = nullptr;
3887     block->bbFlags &= ~BBF_IMPORTED;
3888
3889     if (logMsg)
3890     {
3891         JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName,
3892                 block->bbCodeOffs, block->bbCodeOffsEnd));
3893         if (verbose)
3894         {
3895             printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs);
3896         }
3897     }
3898
3899     if (JitConfig.DebugBreakOnVerificationFailure())
3900     {
3901         DebugBreak();
3902     }
3903 #endif
3904
3905     impBeginTreeList();
3906
3907     // if the stack is non-empty evaluate all the side-effects
3908     if (verCurrentState.esStackDepth > 0)
3909     {
3910         impEvalSideEffects();
3911     }
3912     assert(verCurrentState.esStackDepth == 0);
3913
3914     GenTreePtr op1 = gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, GTF_EXCEPT,
3915                                          gtNewArgList(gtNewIconNode(block->bbCodeOffs)));
3916     // verCurrentState.esStackDepth = 0;
3917     impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
3918
3919     // The inliner is not able to handle methods that require a throw block, so
3920     // make sure this method never gets inlined.
3921     info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE);
3922 }
3923
3924 /*****************************************************************************
3925  *
3926  */
3927 void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg))
3928
3929 {
3930     // In AMD64, for historical reasons involving design limitations of JIT64, the VM has a
3931     // slightly different mechanism in which it calls the JIT to perform IL verification:
3932     // in the case of transparent methods the VM calls for a predicate IsVerifiable()
3933     // that consists of calling the JIT with the IMPORT_ONLY flag and with the IL verify flag on.
3934     // If the JIT determines the method is not verifiable, it should raise the exception to the VM and let
3935     // it bubble up until reported by the runtime.  Currently in RyuJIT, this method doesn't bubble
3936     // up the exception, instead it embeds a throw inside the offending basic block and lets this
3937     // to fail upon runtime of the jitted method.
3938     //
3939     // For AMD64 we don't want this behavior when the JIT has been called only for verification (i.e.
3940     // with the IMPORT_ONLY and IL Verification flag set) because this won't actually generate code,
3941     // just try to find out whether to fail this method before even actually jitting it.  So, in case
3942     // we detect these two conditions, instead of generating a throw statement inside the offending
3943     // basic block, we immediately fail to JIT and notify the VM to make the IsVerifiable() predicate
3944     // to return false and make RyuJIT behave the same way JIT64 does.
3945     //
3946     // The rationale behind this workaround is to avoid modifying the VM and maintain compatibility between JIT64 and
3947     // RyuJIT for the time being until we completely replace JIT64.
3948     // TODO-ARM64-Cleanup:  We probably want to actually modify the VM in the future to avoid the unnecessary two passes.
3949
3950     // In AMD64 we must make sure we're behaving the same way as JIT64, meaning we should only raise the verification
3951     // exception if we are only importing and verifying.  The method verNeedsVerification() can also modify the
3952     // tiVerificationNeeded flag in the case it determines it can 'skip verification' during importation and defer it
3953     // to a runtime check. That's why we must assert one or the other (since the flag tiVerificationNeeded can
3954     // be turned off during importation).
3955     CLANG_FORMAT_COMMENT_ANCHOR;
3956
3957 #ifdef _TARGET_64BIT_
3958
3959 #ifdef DEBUG
3960     bool canSkipVerificationResult =
3961         info.compCompHnd->canSkipMethodVerification(info.compMethodHnd) != CORINFO_VERIFICATION_CANNOT_SKIP;
3962     assert(tiVerificationNeeded || canSkipVerificationResult);
3963 #endif // DEBUG
3964
3965     // Add the non verifiable flag to the compiler
3966     if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
3967     {
3968         tiIsVerifiableCode = FALSE;
3969     }
3970 #endif //_TARGET_64BIT_
3971     verResetCurrentState(block, &verCurrentState);
3972     verConvertBBToThrowVerificationException(block DEBUGARG(logMsg));
3973
3974 #ifdef DEBUG
3975     impNoteLastILoffs(); // Remember at which BC offset the tree was finished
3976 #endif                   // DEBUG
3977 }
3978
3979 /******************************************************************************/
3980 typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd)
3981 {
3982     assert(ciType < CORINFO_TYPE_COUNT);
3983
3984     typeInfo tiResult;
3985     switch (ciType)
3986     {
3987         case CORINFO_TYPE_STRING:
3988         case CORINFO_TYPE_CLASS:
3989             tiResult = verMakeTypeInfo(clsHnd);
3990             if (!tiResult.IsType(TI_REF))
3991             { // type must be consistent with element type
3992                 return typeInfo();
3993             }
3994             break;
3995
3996 #ifdef _TARGET_64BIT_
3997         case CORINFO_TYPE_NATIVEINT:
3998         case CORINFO_TYPE_NATIVEUINT:
3999             if (clsHnd)
4000             {
4001                 // If we have more precise information, use it
4002                 return verMakeTypeInfo(clsHnd);
4003             }
4004             else
4005             {
4006                 return typeInfo::nativeInt();
4007             }
4008             break;
4009 #endif // _TARGET_64BIT_
4010
4011         case CORINFO_TYPE_VALUECLASS:
4012         case CORINFO_TYPE_REFANY:
4013             tiResult = verMakeTypeInfo(clsHnd);
4014             // type must be consistent with element type;
4015             if (!tiResult.IsValueClass())
4016             {
4017                 return typeInfo();
4018             }
4019             break;
4020         case CORINFO_TYPE_VAR:
4021             return verMakeTypeInfo(clsHnd);
4022
4023         case CORINFO_TYPE_PTR: // for now, pointers are treated as an error
4024         case CORINFO_TYPE_VOID:
4025             return typeInfo();
4026             break;
4027
4028         case CORINFO_TYPE_BYREF:
4029         {
4030             CORINFO_CLASS_HANDLE childClassHandle;
4031             CorInfoType          childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle);
4032             return ByRef(verMakeTypeInfo(childType, childClassHandle));
4033         }
4034         break;
4035
4036         default:
4037             if (clsHnd)
4038             { // If we have more precise information, use it
4039                 return typeInfo(TI_STRUCT, clsHnd);
4040             }
4041             else
4042             {
4043                 return typeInfo(JITtype2tiType(ciType));
4044             }
4045     }
4046     return tiResult;
4047 }
4048
4049 /******************************************************************************/
4050
4051 typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */)
4052 {
4053     if (clsHnd == nullptr)
4054     {
4055         return typeInfo();
4056     }
4057
4058     // Byrefs should only occur in method and local signatures, which are accessed
4059     // using ICorClassInfo and ICorClassInfo.getChildType.
4060     // So findClass() and getClassAttribs() should not be called for byrefs
4061
4062     if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF)
4063     {
4064         assert(!"Did findClass() return a Byref?");
4065         return typeInfo();
4066     }
4067
4068     unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd);
4069
4070     if (attribs & CORINFO_FLG_VALUECLASS)
4071     {
4072         CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd);
4073
4074         // Meta-data validation should ensure that CORINFO_TYPE_BYREF does
4075         // not occur here, so we may want to change this to an assert instead.
4076         if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR)
4077         {
4078             return typeInfo();
4079         }
4080
4081 #ifdef _TARGET_64BIT_
4082         if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT)
4083         {
4084             return typeInfo::nativeInt();
4085         }
4086 #endif // _TARGET_64BIT_
4087
4088         if (t != CORINFO_TYPE_UNDEF)
4089         {
4090             return (typeInfo(JITtype2tiType(t)));
4091         }
4092         else if (bashStructToRef)
4093         {
4094             return (typeInfo(TI_REF, clsHnd));
4095         }
4096         else
4097         {
4098             return (typeInfo(TI_STRUCT, clsHnd));
4099         }
4100     }
4101     else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE)
4102     {
4103         // See comment in _typeInfo.h for why we do it this way.
4104         return (typeInfo(TI_REF, clsHnd, true));
4105     }
4106     else
4107     {
4108         return (typeInfo(TI_REF, clsHnd));
4109     }
4110 }
4111
4112 /******************************************************************************/
4113 BOOL Compiler::verIsSDArray(typeInfo ti)
4114 {
4115     if (ti.IsNullObjRef())
4116     { // nulls are SD arrays
4117         return TRUE;
4118     }
4119
4120     if (!ti.IsType(TI_REF))
4121     {
4122         return FALSE;
4123     }
4124
4125     if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef()))
4126     {
4127         return FALSE;
4128     }
4129     return TRUE;
4130 }
4131
4132 /******************************************************************************/
4133 /* Given 'arrayObjectType' which is an array type, fetch the element type. */
4134 /* Returns an error type if anything goes wrong */
4135
4136 typeInfo Compiler::verGetArrayElemType(typeInfo arrayObjectType)
4137 {
4138     assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case
4139
4140     if (!verIsSDArray(arrayObjectType))
4141     {
4142         return typeInfo();
4143     }
4144
4145     CORINFO_CLASS_HANDLE childClassHandle = nullptr;
4146     CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle);
4147
4148     return verMakeTypeInfo(ciType, childClassHandle);
4149 }
4150
4151 /*****************************************************************************
4152  */
4153 typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args)
4154 {
4155     CORINFO_CLASS_HANDLE classHandle;
4156     CorInfoType          ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle));
4157
4158     var_types type = JITtype2varType(ciType);
4159     if (varTypeIsGC(type))
4160     {
4161         // For efficiency, getArgType only returns something in classHandle for
4162         // value types.  For other types that have additional type info, you
4163         // have to call back explicitly
4164         classHandle = info.compCompHnd->getArgClass(sig, args);
4165         if (!classHandle)
4166         {
4167             NO_WAY("Could not figure out Class specified in argument or local signature");
4168         }
4169     }
4170
4171     return verMakeTypeInfo(ciType, classHandle);
4172 }
4173
4174 /*****************************************************************************/
4175
4176 // This does the expensive check to figure out whether the method
4177 // needs to be verified. It is called only when we fail verification,
4178 // just before throwing the verification exception.
4179
4180 BOOL Compiler::verNeedsVerification()
4181 {
4182     // If we have previously determined that verification is NOT needed
4183     // (for example in Compiler::compCompile), that means verification is really not needed.
4184     // Return the same decision we made before.
4185     // (Note: This literally means that tiVerificationNeeded can never go from 0 to 1.)
4186
4187     if (!tiVerificationNeeded)
4188     {
4189         return tiVerificationNeeded;
4190     }
4191
4192     assert(tiVerificationNeeded);
4193
4194     // Ok, we haven't concluded that verification is NOT needed. Consult the EE now to
4195     // obtain the answer.
4196     CorInfoCanSkipVerificationResult canSkipVerificationResult =
4197         info.compCompHnd->canSkipMethodVerification(info.compMethodHnd);
4198
4199     // canSkipVerification will return one of the following values:
4200     //    CORINFO_VERIFICATION_CANNOT_SKIP = 0,       // Cannot skip verification during jit time.
4201     //    CORINFO_VERIFICATION_CAN_SKIP = 1,          // Can skip verification during jit time.
4202     //    CORINFO_VERIFICATION_RUNTIME_CHECK = 2,     // Skip verification during jit time, but insert a
4203     //     callout to the VM to ask at runtime whether to skip verification or not.
4204     //    CORINFO_VERIFICATION_DONT_JIT,              // Do not JIT the method; treated as bad code (see below).
4205
4206     // Set tiRuntimeCalloutNeeded if canSkipVerification() instructs us to insert a callout for runtime check
4207     if (canSkipVerificationResult == CORINFO_VERIFICATION_RUNTIME_CHECK)
4208     {
4209         tiRuntimeCalloutNeeded = true;
4210     }
4211
4212     if (canSkipVerificationResult == CORINFO_VERIFICATION_DONT_JIT)
4213     {
4214         // Dev10 706080 - Testers don't like the assert, so just silence it
4215         // by not using the macros that invoke debugAssert.
4216         badCode();
4217     }
4218
4219     // When tiVerificationNeeded is true, JIT will do the verification during JIT time.
4220     // The following line means we will NOT do jit time verification if canSkipVerification
4221     // returns CORINFO_VERIFICATION_CAN_SKIP or CORINFO_VERIFICATION_RUNTIME_CHECK.
4222     tiVerificationNeeded = (canSkipVerificationResult == CORINFO_VERIFICATION_CANNOT_SKIP);
4223     return tiVerificationNeeded;
4224 }
4225
4226 BOOL Compiler::verIsByRefLike(const typeInfo& ti)
4227 {
4228     if (ti.IsByRef())
4229     {
4230         return TRUE;
4231     }
4232     if (!ti.IsType(TI_STRUCT))
4233     {
4234         return FALSE;
4235     }
4236     return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR;
4237 }
4238
4239 BOOL Compiler::verIsSafeToReturnByRef(const typeInfo& ti)
4240 {
4241     if (ti.IsPermanentHomeByRef())
4242     {
4243         return TRUE;
4244     }
4245     else
4246     {
4247         return FALSE;
4248     }
4249 }
4250
4251 BOOL Compiler::verIsBoxable(const typeInfo& ti)
4252 {
4253     return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables
4254             || ti.IsUnboxedGenericTypeVar() ||
4255             (ti.IsType(TI_STRUCT) &&
4256              // exclude byreflike structs
4257              !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR)));
4258 }
4259
4260 // Is it a boxed value type?
4261 bool Compiler::verIsBoxedValueType(typeInfo ti)
4262 {
4263     if (ti.GetType() == TI_REF)
4264     {
4265         CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef();
4266         return !!eeIsValueClass(clsHnd);
4267     }
4268     else
4269     {
4270         return false;
4271     }
4272 }
4273
4274 /*****************************************************************************
4275  *
4276  *  Check if a TailCall is legal.
4277  */
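// For example (illustrative C#, not taken from this codebase):
//
//     static int Callee(ref int p) { ... }
//     static int Caller() { int x = 0; return Callee(ref x); }   // emitted with a "tail." prefix
//
// honoring the tail call would pop Caller's frame before Callee runs, leaving
// 'ref x' pointing at a dead stack slot. The checks below therefore reject
// byref and byref-like arguments and 'this', pointer arguments, constrained
// calls, and callee/caller return-type mismatches.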
4278
4279 bool Compiler::verCheckTailCallConstraint(
4280     OPCODE                  opcode,
4281     CORINFO_RESOLVED_TOKEN* pResolvedToken,
4282     CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter?
4283     bool                    speculative                // If true, won't throw if verification fails. Instead it will
4284                                                        // return false to the caller.
4285                                                        // If false, it will throw.
4286     )
4287 {
4288     DWORD            mflags;
4289     CORINFO_SIG_INFO sig;
4290     unsigned int     popCount = 0; // we can't pop the stack since impImportCall needs it, so
4291                                    // this counter is used to keep track of how many items have been
4292                                    // virtually popped
4293
4294     CORINFO_METHOD_HANDLE methodHnd       = nullptr;
4295     CORINFO_CLASS_HANDLE  methodClassHnd  = nullptr;
4296     unsigned              methodClassFlgs = 0;
4297
4298     assert(impOpcodeIsCallOpcode(opcode));
4299
4300     if (compIsForInlining())
4301     {
4302         return false;
4303     }
4304
4305     // for calli, VerifyOrReturn that this is not a virtual method
4306     if (opcode == CEE_CALLI)
4307     {
4308         /* Get the call sig */
4309         eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4310
4311         // We don't know the target method, so we have to infer the flags, or
4312         // assume the worst-case.
4313         mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
4314     }
4315     else
4316     {
4317         methodHnd = pResolvedToken->hMethod;
4318
4319         mflags = info.compCompHnd->getMethodAttribs(methodHnd);
4320
4321         // When verifying generic code we pair the method handle with its
4322         // owning class to get the exact method signature.
4323         methodClassHnd = pResolvedToken->hClass;
4324         assert(methodClassHnd);
4325
4326         eeGetMethodSig(methodHnd, &sig, methodClassHnd);
4327
4328         // opcode specific check
4329         methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd);
4330     }
4331
4332     // We must have got the methodClassHnd if opcode is not CEE_CALLI
4333     assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI);
4334
4335     if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4336     {
4337         eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4338     }
4339
4340     // check compatibility of the arguments
4341     unsigned int argCount;
4342     argCount = sig.numArgs;
4343     CORINFO_ARG_LIST_HANDLE args;
4344     args = sig.args;
4345     while (argCount--)
4346     {
4347         typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack();
4348
4349         // check that the argument is not a byref for tailcalls
4350         VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative);
4351
4352         // For unsafe code, we might have parameters containing pointer to the stack location.
4353         // Disallow the tailcall for this kind.
4354         CORINFO_CLASS_HANDLE classHandle;
4355         CorInfoType          ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle));
4356         VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative);
4357
4358         args = info.compCompHnd->getArgNext(args);
4359     }
4360
4361     // update popCount
4362     popCount += sig.numArgs;
4363
4364     // check for 'this' which is on non-static methods, not called via NEWOBJ
4365     if (!(mflags & CORINFO_FLG_STATIC))
4366     {
4367         // Always update the popCount.
4368         // This is crucial for the stack calculation to be correct.
4369         typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4370         popCount++;
4371
4372         if (opcode == CEE_CALLI)
4373         {
4374             // For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object
4375             // on the stack.
4376             if (tiThis.IsValueClass())
4377             {
4378                 tiThis.MakeByRef();
4379             }
4380             VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative);
4381         }
4382         else
4383         {
4384             // Check type compatibility of the this argument
4385             typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd);
4386             if (tiDeclaredThis.IsValueClass())
4387             {
4388                 tiDeclaredThis.MakeByRef();
4389             }
4390
4391             VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative);
4392         }
4393     }
4394
4395     // Tail calls on constrained calls should be illegal too:
4396     // when instantiated at a value type, a constrained call may pass the address of a stack allocated value
4397     VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative);
4398
4399     // Get the exact view of the signature for an array method
4400     if (sig.retType != CORINFO_TYPE_VOID)
4401     {
4402         if (methodClassFlgs & CORINFO_FLG_ARRAY)
4403         {
4404             assert(opcode != CEE_CALLI);
4405             eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4406         }
4407     }
4408
4409     typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass);
4410     typeInfo tiCallerRetType =
4411         verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
4412
4413     // void return type gets morphed into the error type, so we have to treat them specially here
4414     if (sig.retType == CORINFO_TYPE_VOID)
4415     {
4416         VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch",
4417                                   speculative);
4418     }
4419     else
4420     {
4421         VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType),
4422                                                    NormaliseForStack(tiCallerRetType), true),
4423                                   "tailcall return mismatch", speculative);
4424     }
4425
4426     // for tailcall, stack must be empty
4427     VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative);
4428
4429     return true; // Yes, tailcall is legal
4430 }
4431
4432 /*****************************************************************************
4433  *
4434  *  Checks the IL verification rules for the call
4435  */
4436
4437 void Compiler::verVerifyCall(OPCODE                  opcode,
4438                              CORINFO_RESOLVED_TOKEN* pResolvedToken,
4439                              CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
4440                              bool                    tailCall,
4441                              bool                    readonlyCall,
4442                              const BYTE*             delegateCreateStart,
4443                              const BYTE*             codeAddr,
4444                              CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName))
4445 {
4446     DWORD             mflags;
4447     CORINFO_SIG_INFO* sig      = nullptr;
4448     unsigned int      popCount = 0; // we can't pop the stack since impImportCall needs it, so
4449                                     // this counter is used to keep track of how many items have been
4450                                     // virtually popped
4451
4452     // for calli, VerifyOrReturn that this is not a virtual method
4453     if (opcode == CEE_CALLI)
4454     {
4455         Verify(false, "Calli not verifiable");
4456         return;
4457     }
4458
4459     //<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item.
4460     mflags = callInfo->verMethodFlags;
4461
4462     sig = &callInfo->verSig;
4463
4464     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4465     {
4466         eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
4467     }
4468
4469     // opcode specific check
4470     unsigned methodClassFlgs = callInfo->classFlags;
4471     switch (opcode)
4472     {
4473         case CEE_CALLVIRT:
4474             // cannot do callvirt on valuetypes
4475             VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class");
4476             VerifyOrReturn(sig->hasThis(), "CallVirt on static method");
4477             break;
4478
4479         case CEE_NEWOBJ:
4480         {
4481             assert(!tailCall); // Importer should not allow this
4482             VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC),
4483                            "newobj must be on instance");
4484
4485             if (methodClassFlgs & CORINFO_FLG_DELEGATE)
4486             {
4487                 VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor");
4488                 typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
4489                 typeInfo tiDeclaredFtn =
4490                     verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack();
4491                 VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type");
4492
4493                 assert(popCount == 0);
4494                 typeInfo tiActualObj = impStackTop(1).seTypeInfo;
4495                 typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
4496
4497                 VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg");
4498                 VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch");
4499                 VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF),
4500                                "delegate object type mismatch");
4501
4502                 CORINFO_CLASS_HANDLE objTypeHandle =
4503                     tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef();
4504
4505                 // the method signature must be compatible with the delegate's invoke method
4506
4507                 // check that for virtual functions, the type of the object used to get the
4508                 // ftn ptr is the same as the type of the object passed to the delegate ctor.
4509                 // since this is a bit of work to determine in general, we pattern match stylized
4510                 // code sequences
4511
4512                 // the delegate creation code check, which used to be done later, is now done here
4513                 // so we can read delegateMethodRef directly from
4514                 // the preceding LDFTN or CEE_LDVIRTFN instruction sequence;
4515                 // we then use it in our call to isCompatibleDelegate().
4516
4517                 mdMemberRef delegateMethodRef = mdMemberRefNil;
4518                 VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef),
4519                                "must create delegates with certain IL");
4520
4521                 CORINFO_RESOLVED_TOKEN delegateResolvedToken;
4522                 delegateResolvedToken.tokenContext = impTokenLookupContextHandle;
4523                 delegateResolvedToken.tokenScope   = info.compScopeHnd;
4524                 delegateResolvedToken.token        = delegateMethodRef;
4525                 delegateResolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
4526                 info.compCompHnd->resolveToken(&delegateResolvedToken);
4527
4528                 CORINFO_CALL_INFO delegateCallInfo;
4529                 eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */,
4530                               addVerifyFlag(CORINFO_CALLINFO_SECURITYCHECKS), &delegateCallInfo);
4531
4532                 BOOL isOpenDelegate = FALSE;
4533                 VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass,
4534                                                                       tiActualFtn.GetMethod(), pResolvedToken->hClass,
4535                                                                       &isOpenDelegate),
4536                                "function incompatible with delegate");
4537
4538                 // check the constraints on the target method
4539                 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass),
4540                                "delegate target has unsatisfied class constraints");
4541                 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass,
4542                                                                             tiActualFtn.GetMethod()),
4543                                "delegate target has unsatisfied method constraints");
4544
4545                 // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch)
4546                 // for additional verification rules for delegates
4547                 CORINFO_METHOD_HANDLE actualMethodHandle  = tiActualFtn.GetMethod();
4548                 DWORD                 actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle);
4549                 if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
4550                 {
4551
4552                     if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0)
4553 #ifdef DEBUG
4554                         && StrictCheckForNonVirtualCallToVirtualMethod()
4555 #endif
4556                             )
4557                     {
4558                         if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
4559                         {
4560                             VerifyOrReturn(tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly() ||
4561                                                verIsBoxedValueType(tiActualObj),
4562                                            "The 'this' parameter to the call must be either the calling method's "
4563                                            "'this' parameter or "
4564                                            "a boxed value type.");
4565                         }
4566                     }
4567                 }
4568
4569                 if (actualMethodAttribs & CORINFO_FLG_PROTECTED)
4570                 {
4571                     BOOL targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC;
4572
4573                     Verify(targetIsStatic || !isOpenDelegate,
4574                            "Unverifiable creation of an open instance delegate for a protected member.");
4575
4576                     CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic)
4577                                                                 ? info.compClassHnd
4578                                                                 : tiActualObj.GetClassHandleForObjRef();
4579
4580                     // In the case of protected methods, it is a requirement that the 'this'
4581                     // pointer be a subclass of the current context.  Perform this check.
4582                     Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
4583                            "Accessing protected method through wrong type.");
4584                 }
4585                 goto DONE_ARGS;
4586             }
4587         }
4588         // fall thru to default checks
4589         default:
4590             VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract");
4591     }
4592     VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)),
4593                    "can only newobj a delegate constructor");
4594
4595     // check compatibility of the arguments
4596     unsigned int argCount;
4597     argCount = sig->numArgs;
4598     CORINFO_ARG_LIST_HANDLE args;
4599     args = sig->args;
4600     while (argCount--)
4601     {
4602         typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo;
4603
4604         typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack();
4605         VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch");
4606
4607         args = info.compCompHnd->getArgNext(args);
4608     }
4609
4610 DONE_ARGS:
4611
4612     // update popCount
4613     popCount += sig->numArgs;
4614
4615     // check for 'this' which is on non-static methods, not called via NEWOBJ
4616     CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
4617     if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ))
4618     {
4619         typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4620         popCount++;
4621
4622         // If it is null, we assume we can access it (since it will AV shortly)
4623         // If it is anything but a reference class, there is no hierarchy, so
4624         // again, we don't need the precise instance class to compute 'protected' access
4625         if (tiThis.IsType(TI_REF))
4626         {
4627             instanceClassHnd = tiThis.GetClassHandleForObjRef();
4628         }
4629
4630         // Check type compatibility of the this argument
4631         typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass);
4632         if (tiDeclaredThis.IsValueClass())
4633         {
4634             tiDeclaredThis.MakeByRef();
4635         }
4636
4637         // If this is a call to the base class .ctor, set thisPtr Init for
4638         // this block.
4639         if (mflags & CORINFO_FLG_CONSTRUCTOR)
4640         {
4641             if (verTrackObjCtorInitState && tiThis.IsThisPtr() &&
4642                 verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass))
4643             {
4644                 assert(verCurrentState.thisInitialized !=
4645                        TIS_Bottom); // This should never be the case just from the logic of the verifier.
4646                 VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit,
4647                                "Call to base class constructor when 'this' is possibly initialized");
4648                 // Otherwise, 'this' is now initialized.
4649                 verCurrentState.thisInitialized = TIS_Init;
4650                 tiThis.SetInitialisedObjRef();
4651             }
4652             else
4653             {
4654                 // We allow direct calls to value type constructors
4655                 // NB: we have to check that the contents of tiThis is a value type, otherwise we could use a
4656                 // constrained callvirt to illegally re-enter a .ctor on a value of reference type.
4657                 VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(),
4658                                "Bad call to a constructor");
4659             }
4660         }
4661
4662         if (pConstrainedResolvedToken != nullptr)
4663         {
4664             VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call");
4665
4666             typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass);
4667
4668             // We just dereference this and test for equality
4669             tiThis.DereferenceByRef();
4670             VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint),
4671                            "this type mismatch with constrained type operand");
4672
4673             // Now pretend the this type is the boxed constrained type, for the sake of subsequent checks
4674             tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass);
4675         }
4676
4677         // To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too
4678         if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef())
4679         {
4680             tiDeclaredThis.SetIsReadonlyByRef();
4681         }
4682
4683         VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch");
4684
4685         if (tiThis.IsByRef())
4686         {
4687             // Find the actual type where the method exists (as opposed to what is declared
4688             // in the metadata). This is to prevent passing a byref as the "this" argument
4689             // while calling methods like System.ValueType.GetHashCode() which expect boxed objects.
4690
4691             CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod);
4692             VerifyOrReturn(eeIsValueClass(actualClassHnd),
4693                            "Call to base type of valuetype (which is never a valuetype)");
4694         }
4695
4696         // Rules for non-virtual call to a non-final virtual method:
4697
4698         // Define:
4699         // The "this" pointer is considered to be "possibly written" if
4700         //   1. Its address has been taken (LDARGA 0) anywhere in the method.
4701         //   (or)
4702         //   2. It has been stored to (STARG.0) anywhere in the method.
4703
4704         // A non-virtual call to a non-final virtual method is only allowed if
4705         //   1. The this pointer passed to the callee is an instance of a boxed value type.
4706         //   (or)
4707         //   2. The this pointer passed to the callee is the current method's this pointer.
4708         //      (and) The current method's this pointer is not "possibly written".
4709
4710         // Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to
4711         // virtual methods.  (Luckily this does not affect .ctors, since they are not virtual).
4712         // This is stronger than is strictly needed, but implementing a laxer rule is significantly
4713         // harder and more error prone.
4714
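        // For example (illustrative C#): "base.ToString()" compiles to a non-virtual
        // "call" of the virtual Object::ToString. Under the rule above it is accepted
        // only when the receiver is the caller's own, never-reassigned 'this' pointer
        // or a boxed value type.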
4715         if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0)
4716 #ifdef DEBUG
4717             && StrictCheckForNonVirtualCallToVirtualMethod()
4718 #endif
4719                 )
4720         {
4721             if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
4722             {
4723                 VerifyOrReturn(
4724                     tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly() || verIsBoxedValueType(tiThis),
4725                     "The 'this' parameter to the call must be either the calling method's 'this' parameter or "
4726                     "a boxed value type.");
4727             }
4728         }
4729     }
4730
4731     // check any constraints on the callee's class and type parameters
4732     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass),
4733                    "method has unsatisfied class constraints");
4734     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod),
4735                    "method has unsatisfied method constraints");
4736
4737     if (mflags & CORINFO_FLG_PROTECTED)
4738     {
4739         VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
4740                        "Can't access protected method");
4741     }
4742
4743     // Get the exact view of the signature for an array method
4744     if (sig->retType != CORINFO_TYPE_VOID)
4745     {
4746         eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass);
4747     }
4748
4749     // "readonly." prefixed calls only allowed for the Address operation on arrays.
4750     // The methods supported by array types are under the control of the EE
4751     // so we can trust that only the Address operation returns a byref.
4752     if (readonlyCall)
4753     {
4754         typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass);
4755         VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(),
4756                        "unexpected use of readonly prefix");
4757     }
4758
4759     // Verify the tailcall
4760     if (tailCall)
4761     {
4762         verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false);
4763     }
4764 }
4765
4766 /*****************************************************************************
4767  *  Checks that a delegate creation is done using the following pattern:
4768  *     dup
4769  *     ldvirtftn targetMemberRef
4770  *  OR
4771  *     ldftn targetMemberRef
4772  *
4773  * 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if
4774  *  not in this basic block)
4775  *
4776  *  targetMemberRef is read from the code sequence.
4777  *  targetMemberRef is validated iff verificationNeeded.
4778  */
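/*  An illustrative (not from this file) verifiable creation site looks like:
 *
 *     ldarg.0
 *     dup
 *     ldvirtftn  instance void C::M()        // or just: ldftn void C::M()
 *     newobj     instance void D::.ctor(object, native int)
 *
 *  The member token follows the two-byte ldftn (FE 06) or ldvirtftn (FE 07)
 *  opcode, which is why it is read at offset 2 (ldftn) or offset 3
 *  (dup + ldvirtftn) from 'delegateCreateStart' below.
 */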
4779
4780 BOOL Compiler::verCheckDelegateCreation(const BYTE*  delegateCreateStart,
4781                                         const BYTE*  codeAddr,
4782                                         mdMemberRef& targetMemberRef)
4783 {
4784     if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
4785     {
4786         targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]);
4787         return TRUE;
4788     }
4789     else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr))
4790     {
4791         targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]);
4792         return TRUE;
4793     }
4794
4795     return FALSE;
4796 }
4797
4798 typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType)
4799 {
4800     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
4801     typeInfo ptrVal     = verVerifyLDIND(tiTo, instrType);
4802     typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack();
4803     if (!tiCompatibleWith(value, normPtrVal, true))
4804     {
4805         Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch");
4806         compUnsafeCastUsed = true;
4807     }
4808     return ptrVal;
4809 }
4810
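// For example (illustrative IL): for "ldind.i4" the 'instrType' below is int32 and
// the operand on the stack must be a byref to an int32-compatible location; for the
// "stind" forms, verVerifySTIND above additionally checks that the value being
// stored is compatible with the pointee type.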
4811 typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType)
4812 {
4813     assert(!instrType.IsStruct());
4814
4815     typeInfo ptrVal;
4816     if (ptr.IsByRef())
4817     {
4818         ptrVal = DereferenceByRef(ptr);
4819         if (instrType.IsObjRef() && !ptrVal.IsObjRef())
4820         {
4821             Verify(false, "bad pointer");
4822             compUnsafeCastUsed = true;
4823         }
4824         else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal))
4825         {
4826             Verify(false, "pointer not consistent with instr");
4827             compUnsafeCastUsed = true;
4828         }
4829     }
4830     else
4831     {
4832         Verify(false, "pointer not byref");
4833         compUnsafeCastUsed = true;
4834     }
4835
4836     return ptrVal;
4837 }
4838
4839 // Verify that the field is used properly.  'tiThis' is NULL for statics,
4840 // 'fieldFlags' is the field's attributes, and mutator is TRUE if it is a
4841 // ld*flda or a st*fld.
4842 // 'enclosingClass' is given if we are accessing a field in some specific type.
4843
4844 void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN*   pResolvedToken,
4845                               const CORINFO_FIELD_INFO& fieldInfo,
4846                               const typeInfo*           tiThis,
4847                               BOOL                      mutator,
4848                               BOOL                      allowPlainStructAsThis)
4849 {
4850     CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass;
4851     unsigned             fieldFlags     = fieldInfo.fieldFlags;
4852     CORINFO_CLASS_HANDLE instanceClass =
4853         info.compClassHnd; // for statics, we imagine the instance is the current class.
4854
4855     bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0);
4856     if (mutator)
4857     {
4858         Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA based static");
4859         if ((fieldFlags & CORINFO_FLG_FIELD_FINAL))
4860         {
4861             Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd &&
4862                        info.compIsStatic == isStaticField,
4863                    "bad use of initonly field (set or address taken)");
4864         }
4865     }
4866
4867     if (tiThis == nullptr)
4868     {
4869         Verify(isStaticField, "used static opcode with non-static field");
4870     }
4871     else
4872     {
4873         typeInfo tThis = *tiThis;
4874
4875         if (allowPlainStructAsThis && tThis.IsValueClass())
4876         {
4877             tThis.MakeByRef();
4878         }
4879
4880         // If it is null, we assume we can access it (since it will AV shortly)
4881         // If it is anything but a reference class, there is no hierarchy, so
4882         // again, we don't need the precise instance class to compute 'protected' access
4883         if (tiThis->IsType(TI_REF))
4884         {
4885             instanceClass = tiThis->GetClassHandleForObjRef();
4886         }
4887
4888         // Note that even if the field is static, we require that the this pointer
4889         // satisfy the same constraints as a non-static field.  This happens to
4890         // be simpler and seems reasonable.
4891         typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass);
4892         if (tiDeclaredThis.IsValueClass())
4893         {
4894             tiDeclaredThis.MakeByRef();
4895
4896             // we allow read-only tThis, on any field access (even stores!), because if the
4897             // class implementor wants to prohibit stores he should make the field private.
4898             // we do this by setting the read-only bit on the type we compare tThis to.
4899             tiDeclaredThis.SetIsReadonlyByRef();
4900         }
4901         else if (verTrackObjCtorInitState && tThis.IsThisPtr())
4902         {
4903             // Any field access is legal on "uninitialized" this pointers.
4904             // The easiest way to implement this is to simply set the
4905             // initialized bit for the duration of the type check on the
4906             // field access only.  It does not change the state of the "this"
4907             // for the function as a whole. Note that the "tThis" is a copy
4908             // of the original "this" type (*tiThis) passed in.
4909             tThis.SetInitialisedObjRef();
4910         }
4911
4912         Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch");
4913     }
4914
4915     // Presently the JIT does not check that we don't store or take the address of init-only fields
4916     // since we cannot guarantee their immutability and it is not a security issue.
4917
4918     // check any constraints on the field's class --- accessing the field might cause a class constructor to run.
4919     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass),
4920                    "field has unsatisfied class constraints");
4921     if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED)
4922     {
4923         Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass),
4924                "Accessing protected method through wrong type.");
4925     }
4926 }
4927
4928 void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode)
4929 {
4930     if (tiOp1.IsNumberType())
4931     {
4932 #ifdef _TARGET_64BIT_
4933         Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch");
4934 #else  // _TARGET_64BIT
4935         // [10/17/2013] Consider changing this: to put on my verification lawyer hat,
4936         // this is non-conforming to the ECMA Spec: types don't have to be equivalent,
4937         // but compatible, since we can coalesce native int with int32 (see section III.1.5).
4938         Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch");
4939 #endif // !_TARGET_64BIT_
4940     }
4941     else if (tiOp1.IsObjRef())
4942     {
4943         switch (opcode)
4944         {
4945             case CEE_BEQ_S:
4946             case CEE_BEQ:
4947             case CEE_BNE_UN_S:
4948             case CEE_BNE_UN:
4949             case CEE_CEQ:
4950             case CEE_CGT_UN:
4951                 break;
4952             default:
4953                 Verify(FALSE, "Cond not allowed on object types");
4954         }
4955         Verify(tiOp2.IsObjRef(), "Cond type mismatch");
4956     }
4957     else if (tiOp1.IsByRef())
4958     {
4959         Verify(tiOp2.IsByRef(), "Cond type mismatch");
4960     }
4961     else
4962     {
4963         Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch");
4964     }
4965 }
4966
4967 void Compiler::verVerifyThisPtrInitialised()
4968 {
4969     if (verTrackObjCtorInitState)
4970     {
4971         Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized");
4972     }
4973 }
4974
4975 BOOL Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target)
4976 {
4977     // Either target == context, in this case calling an alternate .ctor
4978     // Or target is the immediate parent of context
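    // e.g. (illustrative C#): in "class D : B { public D() : this(42) { } }" the
    // chained call targets D itself (target == context), while "public D() : base() { }"
    // targets the immediate parent B.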
4979
4980     return ((target == context) || (target == info.compCompHnd->getParentType(context)));
4981 }
4982
4983 GenTreePtr Compiler::impImportLdvirtftn(GenTreePtr              thisPtr,
4984                                         CORINFO_RESOLVED_TOKEN* pResolvedToken,
4985                                         CORINFO_CALL_INFO*      pCallInfo)
4986 {
4987     if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE))
4988     {
4989         NO_WAY("Virtual call to a function added via EnC is not supported");
4990     }
4991
4992 #ifdef FEATURE_READYTORUN_COMPILER
4993     if (opts.IsReadyToRun())
4994     {
4995         if (!pCallInfo->exactContextNeedsRuntimeLookup)
4996         {
4997             GenTreeCall* call = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, GTF_EXCEPT,
4998                                                     gtNewArgList(thisPtr));
4999
5000             call->setEntryPoint(pCallInfo->codePointerLookup.constLookup);
5001
5002             return call;
5003         }
5004
5005         // We need a runtime lookup. CoreRT has a ReadyToRun helper for that too.
5006         if (IsTargetAbi(CORINFO_CORERT_ABI))
5007         {
5008             GenTreePtr ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind);
5009
5010             return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
5011                                              gtNewArgList(ctxTree), &pCallInfo->codePointerLookup.lookupKind);
5012         }
5013     }
5014 #endif
5015
5016     // Get the exact descriptor for the static callsite
5017     GenTreePtr exactTypeDesc = impParentClassTokenToHandle(pResolvedToken);
5018     if (exactTypeDesc == nullptr)
5019     { // compDonotInline()
5020         return nullptr;
5021     }
5022
5023     GenTreePtr exactMethodDesc = impTokenToHandle(pResolvedToken);
5024     if (exactMethodDesc == nullptr)
5025     { // compDonotInline()
5026         return nullptr;
5027     }
5028
5029     GenTreeArgList* helpArgs = gtNewArgList(exactMethodDesc);
5030
5031     helpArgs = gtNewListNode(exactTypeDesc, helpArgs);
5032
5033     helpArgs = gtNewListNode(thisPtr, helpArgs);
5034
5035     // Call helper function.  This gets the target address of the final destination callsite.
5036
5037     return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, GTF_EXCEPT, helpArgs);
5038 }
5039
5040 /*****************************************************************************
5041  *
5042  *  Build and import a box node
5043  */
5044
5045 void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
5046 {
5047     // Get the tree for the type handle for the boxed object.  In the case
5048     // of shared generic code or ngen'd code this might be an embedded
5049     // computation.
5050     // Note we can only do it if the class constructor has been called.
5051     // We can always do it on primitive types
5052
5053     GenTreePtr op1 = nullptr;
5054     GenTreePtr op2 = nullptr;
5055     var_types  lclTyp;
5056
5057     impSpillSpecialSideEff();
5058
5059     // Now get the expression to box from the stack.
5060     CORINFO_CLASS_HANDLE operCls;
5061     GenTreePtr           exprToBox = impPopStack(operCls).val;
5062
5063     CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
5064     if (boxHelper == CORINFO_HELP_BOX)
5065     {
5066         // we are doing 'normal' boxing.  This means that we can inline the box operation
5067         // Box(expr) gets morphed into
5068         // temp = new(clsHnd)
5069         // cpobj(temp+4, expr, clsHnd)
5070         // push temp
5071         // The code paths differ slightly below for structs and primitives because
5072         // "cpobj" differs in these cases.  In one case you get
5073         //    impAssignStructPtr(temp+4, expr, clsHnd)
5074         // and the other you get
5075         //    *(temp+4) = expr
5076
5077         if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM)
5078         {
5079             impBoxTemp = lvaGrabTemp(true DEBUGARG("Box Helper"));
5080         }
5081
5082         // needs to stay in use until this box expression is appended to
5083         // some other node.  We approximate this by keeping it alive until
5084         // the opcode stack becomes empty
5085         impBoxTempInUse = true;
5086
5087 #ifdef FEATURE_READYTORUN_COMPILER
5088         bool usingReadyToRunHelper = false;
5089
5090         if (opts.IsReadyToRun())
5091         {
5092             op1                   = impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
5093             usingReadyToRunHelper = (op1 != nullptr);
5094         }
5095
5096         if (!usingReadyToRunHelper)
5097 #endif
5098         {
5099             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
5100             // and the newfast call with a single call to a dynamic R2R cell that will:
5101             //      1) Load the context
5102             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
5103             //      3) Allocate and return the new object for boxing
5104             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
5105
5106             // Ensure that the value class is restored
5107             op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5108             if (op2 == nullptr)
5109             { // compDonotInline()
5110                 return;
5111             }
5112
5113             op1 = gtNewHelperCallNode(info.compCompHnd->getNewHelper(pResolvedToken, info.compMethodHnd), TYP_REF, 0,
5114                                       gtNewArgList(op2));
5115         }
5116
5117         /* Remember that this basic block contains 'new' of an object */
5118         compCurBB->bbFlags |= BBF_HAS_NEWOBJ;
5119
5120         GenTreePtr asg = gtNewTempAssign(impBoxTemp, op1);
5121
5122         GenTreePtr asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
5123
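        // Compute the address of the boxed payload: the new object plus one
        // pointer-sized slot, which skips the object's method table pointer at
        // offset 0 (the "temp+4" in the sketch above, pointer-sized in general).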
5124         op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
5125         op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
5126         op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2);
5127
5128         if (varTypeIsStruct(exprToBox))
5129         {
5130             assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls));
5131             op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL);
5132         }
5133         else
5134         {
5135             lclTyp = exprToBox->TypeGet();
5136             if (lclTyp == TYP_BYREF)
5137             {
5138                 lclTyp = TYP_I_IMPL;
5139             }
5140             CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass);
5141             if (impIsPrimitive(jitType))
5142             {
5143                 lclTyp = JITtype2varType(jitType);
5144             }
5145             assert(genActualType(exprToBox->TypeGet()) == genActualType(lclTyp) ||
5146                    varTypeIsFloating(lclTyp) == varTypeIsFloating(exprToBox->TypeGet()));
5147             var_types srcTyp = exprToBox->TypeGet();
5148             var_types dstTyp = lclTyp;
5149
5150             if (srcTyp != dstTyp)
5151             {
5152                 assert((varTypeIsFloating(srcTyp) && varTypeIsFloating(dstTyp)) ||
5153                        (varTypeIsIntegral(srcTyp) && varTypeIsIntegral(dstTyp)));
5154                 exprToBox = gtNewCastNode(dstTyp, exprToBox, dstTyp);
5155             }
5156             op1 = gtNewAssignNode(gtNewOperNode(GT_IND, lclTyp, op1), exprToBox);
5157         }
5158
5159         op2 = gtNewLclvNode(impBoxTemp, TYP_REF);
5160         op1 = gtNewOperNode(GT_COMMA, TYP_REF, op1, op2);
5161
5162         // Record that this is a "box" node.
5163         op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt);
5164
5165         // If it is a value class, mark the "box" node.  We can use this information
5166         // to optimise several cases:
5167         //    "box(x) == null" --> false
5168         //    "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod"
5169         //    "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod"
5170
5171         op1->gtFlags |= GTF_BOX_VALUE;
5172         assert(op1->IsBoxedValue());
5173         assert(asg->gtOper == GT_ASG);
5174     }
5175     else
5176     {
5177         // Don't optimize, just call the helper and be done with it
5178
5179         // Ensure that the value class is restored
5180         op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5181         if (op2 == nullptr)
5182         { // compDonotInline()
5183             return;
5184         }
5185
5186         GenTreeArgList* args = gtNewArgList(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true));
5187         op1                  = gtNewHelperCallNode(boxHelper, TYP_REF, GTF_EXCEPT, args);
5188     }
5189
5190     /* Push the result back on the stack, */
5191     /* even if clsHnd is a value class we want the TI_REF */
5192     typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass));
5193     impPushOnStack(op1, tiRetVal);
5194 }
5195
5196 //------------------------------------------------------------------------
5197 // impImportNewObjArray: Build and import `new` of multi-dimensional array
5198 //
5199 // Arguments:
5200 //    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5201 //                     by a call to CEEInfo::resolveToken().
5202 //    pCallInfo - The CORINFO_CALL_INFO that has been initialized
5203 //                by a call to CEEInfo::getCallInfo().
5204 //
5205 // Assumptions:
5206 //    The multi-dimensional array constructor arguments (array dimensions) are
5207 //    pushed on the IL stack on entry to this method.
5208 //
5209 // Notes:
5210 //    Multi-dimensional array constructors are imported as calls to a JIT
5211 //    helper, not as regular calls.
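//    For example (illustrative C#), "new int[2,3]" compiles to
//        newobj instance void int32[0...,0...]::.ctor(int32, int32)
//    and is imported here as a call to CORINFO_HELP_NEW_MDARR(_NONVARARG) with
//    the array class handle, the argument count, and the dimension values.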
5212
5213 void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
5214 {
5215     GenTreePtr classHandle = impParentClassTokenToHandle(pResolvedToken);
5216     if (classHandle == nullptr)
5217     { // compDonotInline()
5218         return;
5219     }
5220
5221     assert(pCallInfo->sig.numArgs);
5222
5223     GenTreePtr      node;
5224     GenTreeArgList* args;
5225
5226     //
5227     // There are two different JIT helpers that can be used to allocate
5228     // multi-dimensional arrays:
5229     //
5230     // - CORINFO_HELP_NEW_MDARR - takes the array dimensions as varargs.
5231     //      This variant is deprecated. It should be eventually removed.
5232     //
5233     // - CORINFO_HELP_NEW_MDARR_NONVARARG - takes the array dimensions as
5234     //      pointer to block of int32s. This variant is more portable.
5235     //
5236     // The non-varargs helper is enabled for CoreRT only for now. Enabling this
5237     // unconditionally would require ReadyToRun version bump.
5238     //
5239     CLANG_FORMAT_COMMENT_ANCHOR;
5240
5241 #if COR_JIT_EE_VERSION > 460
5242     if (!opts.IsReadyToRun() || IsTargetAbi(CORINFO_CORERT_ABI))
5243     {
5244         LclVarDsc* newObjArrayArgsVar;
5245
5246         // Reuse the temp used to pass the array dimensions to avoid bloating
5247         // the stack frame in case there are multiple calls to multi-dim array
5248         // constructors within a single method.
5249         if (lvaNewObjArrayArgs == BAD_VAR_NUM)
5250         {
5251             lvaNewObjArrayArgs                       = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs"));
5252             lvaTable[lvaNewObjArrayArgs].lvType      = TYP_BLK;
5253             lvaTable[lvaNewObjArrayArgs].lvExactSize = 0;
5254         }
5255
5256         // Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers
5257         // for our call to CORINFO_HELP_NEW_MDARR_NONVARARG.
5258         lvaTable[lvaNewObjArrayArgs].lvExactSize =
5259             max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32));
5260
5261         // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects
5262         // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments
5263         // to one allocation at a time.
5264         impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray"));
5265
5266         //
5267         // The arguments of the CORINFO_HELP_NEW_MDARR_NONVARARG helper are:
5268         //  - Array class handle
5269         //  - Number of dimension arguments
5270         //  - Pointer to block of int32 dimensions - address  of lvaNewObjArrayArgs temp.
5271         //
5272
5273         node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5274         node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node);
5275
5276         // Pop dimension arguments from the stack one at a time and store it
5277         // into lvaNewObjArrayArgs temp.
5278         for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--)
5279         {
5280             GenTreePtr arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT);
5281
5282             GenTreePtr dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5283             dest            = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest);
5284             dest            = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest,
5285                                  new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i));
5286             dest = gtNewOperNode(GT_IND, TYP_INT, dest);
5287
5288             node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node);
5289         }
5290
5291         args = gtNewArgList(node);
5292
5293         // pass number of arguments to the helper
5294         args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5295
5296         args = gtNewListNode(classHandle, args);
5297
5298         node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR_NONVARARG, TYP_REF, 0, args);
5299     }
5300     else
5301 #endif
5302     {
5303         //
5304         // The varargs helper needs the type and method handles as last
5305         // and  last-1 param (this is a cdecl call, so args will be
5306         // pushed in reverse order on the CPU stack)
5307         //
5308
5309         args = gtNewArgList(classHandle);
5310
5311         // pass number of arguments to the helper
5312         args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5313
5314         unsigned argFlags = 0;
5315         args              = impPopList(pCallInfo->sig.numArgs, &argFlags, &pCallInfo->sig, args);
5316
5317         node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, 0, args);
5318
5319         // varargs, so we pop the arguments
5320         node->gtFlags |= GTF_CALL_POP_ARGS;
5321
5322 #ifdef DEBUG
5323         // At the present time we don't track Caller pop arguments
5324         // that have GC references in them
5325         for (GenTreeArgList* temp = args; temp; temp = temp->Rest())
5326         {
5327             assert(temp->Current()->gtType != TYP_REF);
5328         }
5329 #endif
5330     }
5331
5332     node->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5333     node->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass;
5334
5335     // Remember that this basic block contains 'new' of a md array
5336     compCurBB->bbFlags |= BBF_HAS_NEWARRAY;
5337
5338     impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass));
5339 }
5340
5341 GenTreePtr Compiler::impTransformThis(GenTreePtr              thisPtr,
5342                                       CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
5343                                       CORINFO_THIS_TRANSFORM  transform)
5344 {
5345     switch (transform)
5346     {
5347         case CORINFO_DEREF_THIS:
5348         {
5349             GenTreePtr obj = thisPtr;
5350
5351             // This does a LDIND on the obj, which should be a byref pointing to a ref
5352             impBashVarAddrsToI(obj);
5353             assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF);
5354             CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5355
5356             obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj);
5357             // ldind could point anywhere, for example a boxed class static int
5358             obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
5359
5360             return obj;
5361         }
5362
5363         case CORINFO_BOX_THIS:
5364         {
5365             // Constraint calls where there might be no
5366             // unboxed entry point require us to implement the call via helper.
5367             // These only occur when a possible target of the call
5368             // may have inherited an implementation of an interface
5369             // method from System.Object or System.ValueType.  The EE does not provide us with
5370             // "unboxed" versions of these methods.
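            // For example (illustrative C#): calling ToString() through a
            // "constrained." prefix on a struct that does not override it ends up
            // targeting System.Object::ToString, so the receiver must be boxed
            // before the virtual call can be made.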
5371
5372             GenTreePtr obj = thisPtr;
5373
5374             assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL);
5375             obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj);
5376             obj->gtFlags |= GTF_EXCEPT;
5377
5378             CorInfoType jitTyp  = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5379             var_types   objType = JITtype2varType(jitTyp);
5380             if (impIsPrimitive(jitTyp))
5381             {
5382                 if (obj->OperIsBlk())
5383                 {
5384                     obj->ChangeOperUnchecked(GT_IND);
5385
5386                     // Obj could point anywhere, for example a boxed class static int
5387                     obj->gtFlags |= GTF_IND_TGTANYWHERE;
5388                     obj->gtOp.gtOp2 = nullptr; // must be zero for tree walkers
5389                 }
5390
5391                 obj->gtType = JITtype2varType(jitTyp);
5392                 assert(varTypeIsArithmetic(obj->gtType));
5393             }
5394
5395             // This pushes on the dereferenced byref
5396             // This is then used immediately to box.
5397             impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack());
5398
5399             // This pops off the byref-to-a-value-type remaining on the stack and
5400             // replaces it with a boxed object.
5401             // This is then used as the object to the virtual call immediately below.
5402             impImportAndPushBox(pConstrainedResolvedToken);
5403             if (compDonotInline())
5404             {
5405                 return nullptr;
5406             }
5407
5408             obj = impPopStack().val;
5409             return obj;
5410         }
5411         case CORINFO_NO_THIS_TRANSFORM:
5412         default:
5413             return thisPtr;
5414     }
5415 }
5416
5417 //------------------------------------------------------------------------
5418 // impCanPInvokeInline: check whether PInvoke inlining should be enabled in the current method.
5419 //
5420 // Return Value:
5421 //    true if PInvoke inlining should be enabled in current method, false otherwise
5422 //
5423 // Notes:
5424 //    Checks a number of ambient conditions where we could pinvoke but choose not to
5425
5426 bool Compiler::impCanPInvokeInline()
5427 {
5428     return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (compCodeOpt() != SMALL_CODE) &&
5429            (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
5430         ;
5431 }
5432
5433 //------------------------------------------------------------------------
5434 // impCanPInvokeInlineCallSite: basic legality checks using information
5435 // from a call to see if the call qualifies as an inline pinvoke.
5436 //
5437 // Arguments:
5438 //    block      - block containing the call, or for inlinees, block
5439 //                 containing the call being inlined
5440 //
5441 // Return Value:
5442 //    true if this call can legally qualify as an inline pinvoke, false otherwise
5443 //
5444 // Notes:
5445 //    For runtimes that support exception handling interop there are
5446 //    restrictions on using inline pinvoke in handler regions.
5447 //
5448 //    * We have to disable pinvoke inlining inside of filters because
5449 //    in case the main execution (i.e. in the try block) is inside
5450 //    unmanaged code, we cannot reuse the inlined stub (we still need
5451 //    the original state until we are in the catch handler)
5452 //
5453 //    * We disable pinvoke inlining inside handlers since the GSCookie
5454 //    is in the inlined Frame (see
5455 //    CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but
5456 //    this would not protect framelets/return-address of handlers.
5457 //
5458 //    These restrictions are currently also in place for CoreCLR but
5459 //    can be relaxed when coreclr/#8459 is addressed.
5460
5461 bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block)
5462 {
5463     if (block->hasHndIndex())
5464     {
5465         return false;
5466     }
5467
5468     // The remaining limitations do not apply to CoreRT
5469     if (IsTargetAbi(CORINFO_CORERT_ABI))
5470     {
5471         return true;
5472     }
5473
5474 #ifdef _TARGET_AMD64_
5475     // On x64, we disable pinvoke inlining inside of try regions.
5476     // Here is the comment from JIT64 explaining why:
5477     //
5478     //   [VSWhidbey: 611015] - because the jitted code links in the
5479     //   Frame (instead of the stub) we rely on the Frame not being
5480     //   'active' until inside the stub.  This normally happens by the
5481     //   stub setting the return address pointer in the Frame object
5482     //   inside the stub.  On a normal return, the return address
5483     //   pointer is zeroed out so the Frame can be safely re-used, but
5484     //   if an exception occurs, nobody zeros out the return address
5485     //   pointer.  Thus if we re-used the Frame object, it would go
5486     //   'active' as soon as we link it into the Frame chain.
5487     //
5488     //   Technically we only need to disable PInvoke inlining if we're
5489     //   in a handler or if we're in a try body with a catch or
5490     //   filter/except where other non-handler code in this method
5491     //   might run and try to re-use the dirty Frame object.
5492     //
5493     //   A desktop test case where this seems to matter is
5494     //   jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe
5495     if (block->hasTryIndex())
5496     {
5497         return false;
5498     }
5499 #endif // _TARGET_AMD64_
5500
5501     return true;
5502 }
5503
5504 //------------------------------------------------------------------------
5505 // impCheckForPInvokeCall: examine a call to see if it is a pinvoke and, if so,
5506 // whether it can be expressed as an inline pinvoke.
5507 //
5508 // Arguments:
5509 //    call       - tree for the call
5510 //    methHnd    - handle for the method being called (may be null)
5511 //    sig        - signature of the method being called
5512 //    mflags     - method flags for the method being called
5513 //    block      - block containing the call, or for inlinees, block
5514 //                 containing the call being inlined
5515 //
5516 // Notes:
5517 //   Sets GTF_CALL_M_PINVOKE on the call for pinvokes.
5518 //
5519 //   Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the
5520 //   call passes a combination of legality and profitability checks.
5521 //
5522 //   If GTF_CALL_UNMANAGED is set, increments info.compCallUnmanaged
5523
5524 void Compiler::impCheckForPInvokeCall(
5525     GenTreePtr call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block)
5526 {
5527     CorInfoUnmanagedCallConv unmanagedCallConv;
5528
5529     // If VM flagged it as Pinvoke, flag the call node accordingly
5530     if ((mflags & CORINFO_FLG_PINVOKE) != 0)
5531     {
5532         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_PINVOKE;
5533     }
5534
5535     if (methHnd)
5536     {
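        // Bail out unless the method is a pinvoke that does not need a security
        // interception wrapper (CORINFO_FLG_NOSECURITYWRAP must be set).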
5537         if ((mflags & CORINFO_FLG_PINVOKE) == 0 || (mflags & CORINFO_FLG_NOSECURITYWRAP) == 0)
5538         {
5539             return;
5540         }
5541
5542         unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd);
5543     }
5544     else
5545     {
5546         CorInfoCallConv callConv = CorInfoCallConv(sig->callConv & CORINFO_CALLCONV_MASK);
5547         if (callConv == CORINFO_CALLCONV_NATIVEVARARG)
5548         {
5549             // Used by the IL Stubs.
5550             callConv = CORINFO_CALLCONV_C;
5551         }
5552         static_assert_no_msg((unsigned)CORINFO_CALLCONV_C == (unsigned)CORINFO_UNMANAGED_CALLCONV_C);
5553         static_assert_no_msg((unsigned)CORINFO_CALLCONV_STDCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_STDCALL);
5554         static_assert_no_msg((unsigned)CORINFO_CALLCONV_THISCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_THISCALL);
5555         unmanagedCallConv = CorInfoUnmanagedCallConv(callConv);
5556
5557         assert(!call->gtCall.gtCallCookie);
5558     }
5559
5560     if (unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_C && unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_STDCALL &&
5561         unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_THISCALL)
5562     {
5563         return;
5564     }
5565     optNativeCallCount++;
5566
5567     if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && methHnd == nullptr)
5568     {
5569         // PInvoke CALLI in IL stubs must be inlined
5570     }
5571     else
5572     {
5573         // Check legality
5574         if (!impCanPInvokeInlineCallSite(block))
5575         {
5576             return;
5577         }
5578
5579         // PInvoke CALL in IL stubs must be inlined on CoreRT. Skip the ambient conditions checks and
5580         // profitability checks
5581         if (!(opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && IsTargetAbi(CORINFO_CORERT_ABI)))
5582         {
5583             if (!impCanPInvokeInline())
5584             {
5585                 return;
5586             }
5587
5588             // Size-speed tradeoff: don't use inline pinvoke at rarely
5589             // executed call sites.  The non-inline version is more
5590             // compact.
5591             if (block->isRunRarely())
5592             {
5593                 return;
5594             }
5595         }
5596
5597         // The expensive check should be last
5598         if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig))
5599         {
5600             return;
5601         }
5602     }
5603
5604     JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s", info.compFullName));
5605
5606     call->gtFlags |= GTF_CALL_UNMANAGED;
5607     info.compCallUnmanaged++;
5608
5609     // AMD64 convention is same for native and managed
5610     if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_C)
5611     {
5612         call->gtFlags |= GTF_CALL_POP_ARGS;
5613     }
5614
5615     if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_THISCALL)
5616     {
5617         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL;
5618     }
5619 }
5620
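//------------------------------------------------------------------------
// impImportIndirectCall: create the call node for a CALLI (indirect call)
//
// Arguments:
//    sig      - call site signature
//    ilOffset - IL offset of the call
//
// Return Value:
//    The newly created indirect GT_CALL node. The function pointer is popped
//    from the stack (after being spilled to a temp unless it is already a
//    simple local); the call's arguments are attached later by the caller.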
5621 GenTreePtr Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, IL_OFFSETX ilOffset)
5622 {
5623     var_types callRetTyp = JITtype2varType(sig->retType);
5624
5625     /* The function pointer is on top of the stack - It may be a
5626      * complex expression. As it is evaluated after the args,
5627      * it may cause registered args to be spilled. Simply spill it.
5628      */
5629
5630     // Ignore this trivial case.
5631     if (impStackTop().val->gtOper != GT_LCL_VAR)
5632     {
5633         impSpillStackEntry(verCurrentState.esStackDepth - 1,
5634                            BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall"));
5635     }
5636
5637     /* Get the function pointer */
5638
5639     GenTreePtr fptr = impPopStack().val;
5640     assert(genActualType(fptr->gtType) == TYP_I_IMPL);
5641
5642 #ifdef DEBUG
5643     // This temporary must never be converted to a double in stress mode,
5644     // because that can introduce a call to the cast helper after the
5645     // arguments have already been evaluated.
5646
5647     if (fptr->OperGet() == GT_LCL_VAR)
5648     {
5649         lvaTable[fptr->gtLclVarCommon.gtLclNum].lvKeepType = 1;
5650     }
5651 #endif
5652
5653     /* Create the call node */
5654
5655     GenTreePtr call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
5656
5657     call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
5658
5659     return call;
5660 }
5661
5662 /*****************************************************************************/
5663
5664 void Compiler::impPopArgsForUnmanagedCall(GenTreePtr call, CORINFO_SIG_INFO* sig)
5665 {
5666     assert(call->gtFlags & GTF_CALL_UNMANAGED);
5667
5668     /* Since we push the arguments in reverse order (i.e. right -> left),
5669      * spill any side effects from the stack
5670      *
5671      * OBS: If there is only one side effect we do not need to spill it;
5672      *      thus we have to spill all side effects except the last one
5673      */
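    // For example (illustrative): on targets where the outgoing args are reversed (x86), for an
    // unmanaged call foo(f(), g()) where both f() and g() have side effects, f()'s value is spilled
    // to a temp below so that building the reversed argument list does not reorder its side effect
    // relative to g()'s; only the last side effect may stay on the stack.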
5674
5675     unsigned lastLevelWithSideEffects = UINT_MAX;
5676
5677     unsigned argsToReverse = sig->numArgs;
5678
5679     // For "thiscall", the first argument goes in a register. Since its
5680     // order does not need to be changed, we do not need to spill it
5681
5682     if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
5683     {
5684         assert(argsToReverse);
5685         argsToReverse--;
5686     }
5687
5688 #ifndef _TARGET_X86_
5689     // Don't reverse args on ARM or x64 - first four args always placed in regs in order
5690     argsToReverse = 0;
5691 #endif
5692
5693     for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++)
5694     {
5695         if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF)
5696         {
5697             assert(lastLevelWithSideEffects == UINT_MAX);
5698
5699             impSpillStackEntry(level,
5700                                BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect"));
5701         }
5702         else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT)
5703         {
5704             if (lastLevelWithSideEffects != UINT_MAX)
5705             {
5706                 /* We had a previous side effect - must spill it */
5707                 impSpillStackEntry(lastLevelWithSideEffects,
5708                                    BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect"));
5709
5710                 /* Record the level for the current side effect in case we need to spill it later */
5711                 lastLevelWithSideEffects = level;
5712             }
5713             else
5714             {
5715                 /* This is the first side effect encountered - record its level */
5716
5717                 lastLevelWithSideEffects = level;
5718             }
5719         }
5720     }
5721
5722     /* The argument list is now "clean" - no out-of-order side effects
5723      * Pop the argument list in reverse order */
5724
5725     unsigned   argFlags = 0;
5726     GenTreePtr args     = call->gtCall.gtCallArgs =
5727         impPopRevList(sig->numArgs, &argFlags, sig, sig->numArgs - argsToReverse);
5728
5729     if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
5730     {
5731         GenTreePtr thisPtr = args->Current();
5732         impBashVarAddrsToI(thisPtr);
5733         assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF);
5734     }
5735
5736     if (args)
5737     {
5738         call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5739     }
5740 }
5741
5742 //------------------------------------------------------------------------
5743 // impInitClass: Build a node to initialize the class before accessing the
5744 //               field if necessary
5745 //
5746 // Arguments:
5747 //    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5748 //                     by a call to CEEInfo::resolveToken().
5749 //
5750 // Return Value: If needed, a pointer to the node that will perform the class
5751 //               initialization.  Otherwise, nullptr.
5752 //
5753
5754 GenTreePtr Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken)
5755 {
5756     CorInfoInitClassResult initClassResult =
5757         info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle);
5758
5759     if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0)
5760     {
5761         return nullptr;
5762     }
5763     BOOL runtimeLookup;
5764
5765     GenTreePtr node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup);
5766
5767     if (node == nullptr)
5768     {
5769         assert(compDonotInline());
5770         return nullptr;
5771     }
5772
5773     if (runtimeLookup)
5774     {
5775         node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, 0, gtNewArgList(node));
5776     }
5777     else
5778     {
5779         // Call the shared non-gc static helper, as it's the fastest
5780         node = fgGetSharedCCtor(pResolvedToken->hClass);
5781     }
5782
5783     return node;
5784 }
5785
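//------------------------------------------------------------------------
// impImportStaticReadOnlyField: read the current value of a static read-only
//    field whose address is known at jit time and materialize it as a constant.
//
// Arguments:
//    fldAddr - address of the field's value
//    lclTyp  - type of the field
//
// Return Value:
//    An integer, long, or double constant node holding the field's value.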
5786 GenTreePtr Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp)
5787 {
5788     GenTreePtr op1 = nullptr;
5789
5790     switch (lclTyp)
5791     {
5792         int     ival;
5793         __int64 lval;
5794         double  dval;
5795
5796         case TYP_BOOL:
5797             ival = *((bool*)fldAddr);
5798             goto IVAL_COMMON;
5799
5800         case TYP_BYTE:
5801             ival = *((signed char*)fldAddr);
5802             goto IVAL_COMMON;
5803
5804         case TYP_UBYTE:
5805             ival = *((unsigned char*)fldAddr);
5806             goto IVAL_COMMON;
5807
5808         case TYP_SHORT:
5809             ival = *((short*)fldAddr);
5810             goto IVAL_COMMON;
5811
5812         case TYP_CHAR:
5813         case TYP_USHORT:
5814             ival = *((unsigned short*)fldAddr);
5815             goto IVAL_COMMON;
5816
5817         case TYP_UINT:
5818         case TYP_INT:
5819             ival = *((int*)fldAddr);
5820         IVAL_COMMON:
5821             op1 = gtNewIconNode(ival);
5822             break;
5823
5824         case TYP_LONG:
5825         case TYP_ULONG:
5826             lval = *((__int64*)fldAddr);
5827             op1  = gtNewLconNode(lval);
5828             break;
5829
5830         case TYP_FLOAT:
5831             dval = *((float*)fldAddr);
5832             op1  = gtNewDconNode(dval);
5833 #if !FEATURE_X87_DOUBLES
5834             // The X87 stack doesn't differentiate between float/double (R4 is treated as R8),
5835             // but everybody else does, so keep the node typed as TYP_FLOAT here.
5836             op1->gtType = TYP_FLOAT;
5837 #endif // FEATURE_X87_DOUBLES
5838             break;
5839
5840         case TYP_DOUBLE:
5841             dval = *((double*)fldAddr);
5842             op1  = gtNewDconNode(dval);
5843             break;
5844
5845         default:
5846             assert(!"Unexpected lclTyp");
5847             break;
5848     }
5849
5850     return op1;
5851 }
5852
5853 GenTreePtr Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
5854                                                 CORINFO_ACCESS_FLAGS    access,
5855                                                 CORINFO_FIELD_INFO*     pFieldInfo,
5856                                                 var_types               lclTyp)
5857 {
5858     GenTreePtr op1;
5859
5860     switch (pFieldInfo->fieldAccessor)
5861     {
5862         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
5863         {
5864             assert(!compIsForInlining());
5865
5866             // We first call a special helper to get the statics base pointer
5867             op1 = impParentClassTokenToHandle(pResolvedToken);
5868
5869             // compIsForInlining() is false so we should never get NULL here
5870             assert(op1 != nullptr);
5871
5872             var_types type = TYP_BYREF;
5873
5874             switch (pFieldInfo->helper)
5875             {
5876                 case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
5877                     type = TYP_I_IMPL;
5878                     break;
5879                 case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
5880                 case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
5881                 case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
5882                     break;
5883                 default:
5884                     assert(!"unknown generic statics helper");
5885                     break;
5886             }
5887
5888             op1 = gtNewHelperCallNode(pFieldInfo->helper, type, 0, gtNewArgList(op1));
5889
5890             FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5891             op1              = gtNewOperNode(GT_ADD, type, op1,
5892                                 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
5893         }
5894         break;
5895
5896         case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
5897         {
5898 #ifdef FEATURE_READYTORUN_COMPILER
5899             if (opts.IsReadyToRun())
5900             {
5901                 unsigned callFlags = 0;
5902
5903                 if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
5904                 {
5905                     callFlags |= GTF_CALL_HOISTABLE;
5906                 }
5907
5908                 op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF, callFlags);
5909
5910                 op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
5911             }
5912             else
5913 #endif
5914             {
5915                 op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper);
5916             }
5917
5918             {
5919                 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5920                 op1              = gtNewOperNode(GT_ADD, op1->TypeGet(), op1,
5921                                     new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, pFieldInfo->offset, fs));
5922             }
5923             break;
5924         }
5925 #if COR_JIT_EE_VERSION > 460
5926         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
5927         {
5928 #ifdef FEATURE_READYTORUN_COMPILER
5929             noway_assert(opts.IsReadyToRun());
5930             CORINFO_LOOKUP_KIND kind = info.compCompHnd->getLocationOfThisType(info.compMethodHnd);
5931             assert(kind.needsRuntimeLookup);
5932
5933             GenTreePtr      ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);
5934             GenTreeArgList* args    = gtNewArgList(ctxTree);
5935
5936             unsigned callFlags = 0;
5937
5938             if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
5939             {
5940                 callFlags |= GTF_CALL_HOISTABLE;
5941             }
5942             var_types type = TYP_BYREF;
5943             op1            = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, callFlags, args);
5944
5945             op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
5946             FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5947             op1              = gtNewOperNode(GT_ADD, type, op1,
5948                                 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
5949 #else
5950             unreached();
5951 #endif // FEATURE_READYTORUN_COMPILER
5952         }
5953         break;
5954 #endif // COR_JIT_EE_VERSION > 460
5955         default:
5956         {
5957             if (!(access & CORINFO_ACCESS_ADDRESS))
5958             {
5959                 // In future, it may be better to just create the right tree here instead of folding it later.
5960                 op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField);
5961
5962                 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
5963                 {
5964                     op1->gtType = TYP_REF; // points at boxed object
5965                     FieldSeqNode* firstElemFldSeq =
5966                         GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
5967                     op1 =
5968                         gtNewOperNode(GT_ADD, TYP_BYREF, op1,
5969                                       new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), firstElemFldSeq));
5970
5971                     if (varTypeIsStruct(lclTyp))
5972                     {
5973                         // Constructor adds GTF_GLOB_REF.  Note that this is *not* GTF_EXCEPT.
5974                         op1 = gtNewObjNode(pFieldInfo->structType, op1);
5975                     }
5976                     else
5977                     {
5978                         op1 = gtNewOperNode(GT_IND, lclTyp, op1);
5979                         op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING;
5980                     }
5981                 }
5982
5983                 return op1;
5984             }
5985             else
5986             {
5987                 void** pFldAddr = nullptr;
5988                 void*  fldAddr  = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr);
5989
5990                 FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5991
5992                 /* Create the data member node */
5993                 if (pFldAddr == nullptr)
5994                 {
5995                     op1 = gtNewIconHandleNode((size_t)fldAddr, GTF_ICON_STATIC_HDL, fldSeq);
5996                 }
5997                 else
5998                 {
5999                     op1 = gtNewIconHandleNode((size_t)pFldAddr, GTF_ICON_STATIC_HDL, fldSeq);
6000
6001                     // There are two cases here: either the static is RVA-based,
6002                     // in which case the type of the FIELD node is not a GC type
6003                     // and the handle to the RVA is a TYP_I_IMPL; or the FIELD node is
6004                     // a GC type and the handle to it is a TYP_BYREF into the GC heap,
6005                     // because handles to statics now go into the large object heap.
6006
6007                     var_types handleTyp = (var_types)(varTypeIsGC(lclTyp) ? TYP_BYREF : TYP_I_IMPL);
6008                     op1                 = gtNewOperNode(GT_IND, handleTyp, op1);
6009                     op1->gtFlags |= GTF_IND_INVARIANT | GTF_IND_NONFAULTING;
6010                 }
6011             }
6012             break;
6013         }
6014     }
6015
6016     if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
6017     {
6018         op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
6019
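        // The static lives in a boxed object on the GC heap; skip over the box's method table
        // pointer (sizeof(void*)) to get a byref to the unboxed payload.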
6020         FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
6021
6022         op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
6023                             new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), fldSeq));
6024     }
6025
6026     if (!(access & CORINFO_ACCESS_ADDRESS))
6027     {
6028         op1 = gtNewOperNode(GT_IND, lclTyp, op1);
6029         op1->gtFlags |= GTF_GLOB_REF;
6030     }
6031
6032     return op1;
6033 }
6034
6035 // In general, try to call this before most of the verification work.  Most people expect the access
6036 // exceptions before the verification exceptions.  If you do this afterwards, that usually doesn't happen: it turns
6037 // out that if you can't access something, we also think that you're unverifiable for other reasons.
6038 void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6039 {
6040     if (result != CORINFO_ACCESS_ALLOWED)
6041     {
6042         impHandleAccessAllowedInternal(result, helperCall);
6043     }
6044 }
6045
6046 void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6047 {
6048     switch (result)
6049     {
6050         case CORINFO_ACCESS_ALLOWED:
6051             break;
6052         case CORINFO_ACCESS_ILLEGAL:
6053             // if we're verifying, then we need to reject the illegal access to ensure that we don't think the
6054             // method is verifiable.  Otherwise, delay the exception to runtime.
6055             if (compIsForImportOnly())
6056             {
6057                 info.compCompHnd->ThrowExceptionForHelper(helperCall);
6058             }
6059             else
6060             {
6061                 impInsertHelperCall(helperCall);
6062             }
6063             break;
6064         case CORINFO_ACCESS_RUNTIME_CHECK:
6065             impInsertHelperCall(helperCall);
6066             break;
6067     }
6068 }
6069
6070 void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo)
6071 {
6072     // Construct the argument list
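    // Note that the helper args are walked from last to first and prepended to the list,
    // so the resulting argument list ends up in left-to-right order.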
6073     GenTreeArgList* args = nullptr;
6074     assert(helperInfo->helperNum != CORINFO_HELP_UNDEF);
6075     for (unsigned i = helperInfo->numArgs; i > 0; --i)
6076     {
6077         const CORINFO_HELPER_ARG& helperArg  = helperInfo->args[i - 1];
6078         GenTreePtr                currentArg = nullptr;
6079         switch (helperArg.argType)
6080         {
6081             case CORINFO_HELPER_ARG_TYPE_Field:
6082                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
6083                     info.compCompHnd->getFieldClass(helperArg.fieldHandle));
6084                 currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle);
6085                 break;
6086             case CORINFO_HELPER_ARG_TYPE_Method:
6087                 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle);
6088                 currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle);
6089                 break;
6090             case CORINFO_HELPER_ARG_TYPE_Class:
6091                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle);
6092                 currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle);
6093                 break;
6094             case CORINFO_HELPER_ARG_TYPE_Module:
6095                 currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle);
6096                 break;
6097             case CORINFO_HELPER_ARG_TYPE_Const:
6098                 currentArg = gtNewIconNode(helperArg.constant);
6099                 break;
6100             default:
6101                 NO_WAY("Illegal helper arg type");
6102         }
6103         args = (currentArg == nullptr) ? gtNewArgList(currentArg) : gtNewListNode(currentArg, args);
6104     }
6105
6106     /* TODO-Review:
6107      * Mark as CSE'able, and hoistable.  Consider marking hoistable unless you're in the inlinee.
6108      * Also, consider sticking this in the first basic block.
6109      */
6110     GenTreePtr callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, GTF_EXCEPT, args);
6111     impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6112 }
6113
6114 void Compiler::impInsertCalloutForDelegate(CORINFO_METHOD_HANDLE callerMethodHnd,
6115                                            CORINFO_METHOD_HANDLE calleeMethodHnd,
6116                                            CORINFO_CLASS_HANDLE  delegateTypeHnd)
6117 {
6118 #ifdef FEATURE_CORECLR
6119     if (!info.compCompHnd->isDelegateCreationAllowed(delegateTypeHnd, calleeMethodHnd))
6120     {
6121         // Call the JIT_DelegateSecurityCheck helper before calling the actual function.
6122         // This helper throws an exception if the CLR host disallows the call.
6123
6124         GenTreePtr helper = gtNewHelperCallNode(CORINFO_HELP_DELEGATE_SECURITY_CHECK, TYP_VOID, GTF_EXCEPT,
6125                                                 gtNewArgList(gtNewIconEmbClsHndNode(delegateTypeHnd),
6126                                                              gtNewIconEmbMethHndNode(calleeMethodHnd)));
6127         // Append the callout statement
6128         impAppendTree(helper, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6129     }
6130 #endif // FEATURE_CORECLR
6131 }
6132
6133 // Checks whether the return types of the caller and callee are compatible
6134 // so that the callee can be tail called. Note that here we don't check
6135 // compatibility in the IL Verifier sense, but rather whether the return type
6136 // sizes are equal and the values get returned in the same return register.
6137 bool Compiler::impTailCallRetTypeCompatible(var_types            callerRetType,
6138                                             CORINFO_CLASS_HANDLE callerRetTypeClass,
6139                                             var_types            calleeRetType,
6140                                             CORINFO_CLASS_HANDLE calleeRetTypeClass)
6141 {
6142     // Note that we can not relax this condition with genActualType() as the
6143     // calling convention dictates that the caller of a function with a small
6144     // typed return value is responsible for normalizing the return val.
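    // For example (illustrative): a caller returning TYP_INT cannot tail call a callee returning
    // TYP_SHORT, because the caller would normally be responsible for widening the callee's small
    // return value, and a tail call would skip that normalization.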
6145     if (callerRetType == calleeRetType)
6146     {
6147         return true;
6148     }
6149
6150 #if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
6151     // Jit64 compat:
6152     if (callerRetType == TYP_VOID)
6153     {
6154         // This needs to be allowed to support the following IL pattern that Jit64 allows:
6155         //     tail.call
6156         //     pop
6157         //     ret
6158         //
6159         // Note that the above IL pattern is not valid as per IL verification rules.
6160         // Therefore, only full trust code can take advantage of this pattern.
6161         return true;
6162     }
6163
6164     // These checks return true if the return value type sizes are the same and
6165     // get returned in the same return register i.e. caller doesn't need to normalize
6166     // return value. Some of the tail calls permitted by below checks would have
6167     // been rejected by IL Verifier before we reached here.  Therefore, only full
6168     // trust code can make those tail calls.
6169     unsigned callerRetTypeSize = 0;
6170     unsigned calleeRetTypeSize = 0;
6171     bool     isCallerRetTypMBEnreg =
6172         VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize, true);
6173     bool isCalleeRetTypMBEnreg =
6174         VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize, true);
6175
6176     if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg)
6177     {
6178         return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
6179     }
6180 #endif // _TARGET_AMD64_ || _TARGET_ARM64_
6181
6182     return false;
6183 }
6184
6185 // For prefixFlags
6186 enum
6187 {
6188     PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix
6189     PREFIX_TAILCALL_IMPLICIT =
6190         0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix
6191     PREFIX_TAILCALL    = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT),
6192     PREFIX_VOLATILE    = 0x00000100,
6193     PREFIX_UNALIGNED   = 0x00001000,
6194     PREFIX_CONSTRAINED = 0x00010000,
6195     PREFIX_READONLY    = 0x00100000
6196 };
6197
6198 /********************************************************************************
6199  *
6200  * Returns true if the current opcode and the opcodes following it correspond
6201  * to a supported tail call IL pattern.
6202  *
6203  */
6204 bool Compiler::impIsTailCallILPattern(bool        tailPrefixed,
6205                                       OPCODE      curOpcode,
6206                                       const BYTE* codeAddrOfNextOpcode,
6207                                       const BYTE* codeEnd,
6208                                       bool        isRecursive,
6209                                       bool*       isCallPopAndRet /* = nullptr */)
6210 {
6211     // Bail out if the current opcode is not a call.
6212     if (!impOpcodeIsCallOpcode(curOpcode))
6213     {
6214         return false;
6215     }
6216
6217 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6218     // If shared ret tail opt is not enabled, we will enable
6219     // it for recursive methods.
6220     if (isRecursive)
6221 #endif
6222     {
6223         // We can actually handle the case where the ret is in a fallthrough block, as long as that is the only
6224         // part of the sequence. Make sure we don't go past the end of the IL, however.
6225         codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize);
6226     }
6227
6228     // Bail out if there is no next opcode after call
6229     if (codeAddrOfNextOpcode >= codeEnd)
6230     {
6231         return false;
6232     }
6233
6234     // Scan the opcodes to look for the following IL patterns if either
6235     //   i) the call is not tail prefixed (i.e. implicit tail call) or
6236     //  ii) if tail prefixed, IL verification is not needed for the method.
6237     //
6238     // Only in the above two cases we can allow the below tail call patterns
6239     // violating ECMA spec.
6240     //
6241     // Pattern1:
6242     //       call
6243     //       nop*
6244     //       ret
6245     //
6246     // Pattern2:
6247     //       call
6248     //       nop*
6249     //       pop
6250     //       nop*
6251     //       ret
6252     int    cntPop = 0;
6253     OPCODE nextOpcode;
6254
6255 #ifdef _TARGET_AMD64_
6256     do
6257     {
6258         nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6259         codeAddrOfNextOpcode += sizeof(__int8);
6260     } while ((codeAddrOfNextOpcode < codeEnd) &&         // Haven't reached end of method
6261              (!tailPrefixed || !tiVerificationNeeded) && // Not ".tail" prefixed or method requires no IL verification
6262              ((nextOpcode == CEE_NOP) || ((nextOpcode == CEE_POP) && (++cntPop == 1)))); // Next opcode = nop or exactly
6263                                                                                          // one pop seen so far.
6264 #else
6265     nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6266 #endif
6267
6268     if (isCallPopAndRet)
6269     {
6270         // Allow call+pop+ret to be tail call optimized if caller ret type is void
6271         *isCallPopAndRet = (nextOpcode == CEE_RET) && (cntPop == 1);
6272     }
6273
6274 #ifdef _TARGET_AMD64_
6275     // Jit64 Compat:
6276     // Tail call IL pattern could be either of the following
6277     // 1) call/callvirt/calli + ret
6278     // 2) call/callvirt/calli + pop + ret in a method returning void.
6279     return (nextOpcode == CEE_RET) && ((cntPop == 0) || ((cntPop == 1) && (info.compRetType == TYP_VOID)));
6280 #else //!_TARGET_AMD64_
6281     return (nextOpcode == CEE_RET) && (cntPop == 0);
6282 #endif
6283 }
6284
6285 /*****************************************************************************
6286  *
6287  * Determine whether the call could be converted to an implicit tail call
6288  *
6289  */
6290 bool Compiler::impIsImplicitTailCallCandidate(
6291     OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive)
6292 {
6293
6294 #if FEATURE_TAILCALL_OPT
6295     if (!opts.compTailCallOpt)
6296     {
6297         return false;
6298     }
6299
6300     if (opts.compDbgCode || opts.MinOpts())
6301     {
6302         return false;
6303     }
6304
6305     // must not be tail prefixed
6306     if (prefixFlags & PREFIX_TAILCALL_EXPLICIT)
6307     {
6308         return false;
6309     }
6310
6311 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6312     // The block containing the call must be marked as BBJ_RETURN.
6313     // We allow shared ret tail call optimization on recursive calls even under
6314     // !FEATURE_TAILCALL_OPT_SHARED_RETURN.
6315     if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN))
6316         return false;
6317 #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN
6318
6319     // must be call+ret or call+pop+ret
6320     if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive))
6321     {
6322         return false;
6323     }
6324
6325     return true;
6326 #else
6327     return false;
6328 #endif // FEATURE_TAILCALL_OPT
6329 }
6330
6331 //------------------------------------------------------------------------
6332 // impImportCall: import a call-inspiring opcode
6333 //
6334 // Arguments:
6335 //    opcode                    - opcode that inspires the call
6336 //    pResolvedToken            - resolved token for the call target
6337 //    pConstrainedResolvedToken - resolved constraint token (or nullptr)
6338 //    newObjThis                - tree for this pointer or uninitialized newobj temp (or nullptr)
6339 //    prefixFlags               - IL prefix flags for the call
6340 //    callInfo                  - EE supplied info for the call
6341 //    rawILOffset               - IL offset of the opcode
6342 //
6343 // Returns:
6344 //    Type of the call's return value.
6345 //
6346 // Notes:
6347 //    opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ.
6348 //
6349 //    For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated
6350 //    uninitialized object.
6351
6352 #ifdef _PREFAST_
6353 #pragma warning(push)
6354 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
6355 #endif
6356
6357 var_types Compiler::impImportCall(OPCODE                  opcode,
6358                                   CORINFO_RESOLVED_TOKEN* pResolvedToken,
6359                                   CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
6360                                   GenTreePtr              newobjThis,
6361                                   int                     prefixFlags,
6362                                   CORINFO_CALL_INFO*      callInfo,
6363                                   IL_OFFSET               rawILOffset)
6364 {
6365     assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI);
6366
6367     IL_OFFSETX             ilOffset                       = impCurILOffset(rawILOffset, true);
6368     var_types              callRetTyp                     = TYP_COUNT;
6369     CORINFO_SIG_INFO*      sig                            = nullptr;
6370     CORINFO_METHOD_HANDLE  methHnd                        = nullptr;
6371     CORINFO_CLASS_HANDLE   clsHnd                         = nullptr;
6372     unsigned               clsFlags                       = 0;
6373     unsigned               mflags                         = 0;
6374     unsigned               argFlags                       = 0;
6375     GenTreePtr             call                           = nullptr;
6376     GenTreeArgList*        args                           = nullptr;
6377     CORINFO_THIS_TRANSFORM constraintCallThisTransform    = CORINFO_NO_THIS_TRANSFORM;
6378     CORINFO_CONTEXT_HANDLE exactContextHnd                = nullptr;
6379     BOOL                   exactContextNeedsRuntimeLookup = FALSE;
6380     bool                   canTailCall                    = true;
6381     const char*            szCanTailCallFailReason        = nullptr;
6382     int                    tailCall                       = prefixFlags & PREFIX_TAILCALL;
6383     bool                   readonlyCall                   = (prefixFlags & PREFIX_READONLY) != 0;
6384
6385     // Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could
6386     // do that before tailcalls, but that is probably not the intended
6387     // semantic. So just disallow tailcalls from synchronized methods.
6388     // Also, popping arguments in a varargs function is more work and NYI
6389     // If we have a security object, we have to keep our frame around for callers
6390     // to see any imperative security.
6391     if (info.compFlags & CORINFO_FLG_SYNCH)
6392     {
6393         canTailCall             = false;
6394         szCanTailCallFailReason = "Caller is synchronized";
6395     }
6396 #if !FEATURE_FIXED_OUT_ARGS
6397     else if (info.compIsVarArgs)
6398     {
6399         canTailCall             = false;
6400         szCanTailCallFailReason = "Caller is varargs";
6401     }
6402 #endif // FEATURE_FIXED_OUT_ARGS
6403     else if (opts.compNeedSecurityCheck)
6404     {
6405         canTailCall             = false;
6406         szCanTailCallFailReason = "Caller requires a security check.";
6407     }
6408
6409     // We only need to cast the return value of pinvoke inlined calls that return small types
6410
6411     // TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop
6412     // widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there.
6413     // The existing x64 JIT doesn't bother widening all types to int, so we have to assume for
6414     // the time being that the callee might be compiled by the other JIT and thus the return
6415     // value will need to be widened by us (or not widened at all...)
6416
6417     // ReadyToRun code sticks with default calling convention that does not widen small return types.
6418
6419     bool checkForSmallType  = opts.IsJit64Compat() || opts.IsReadyToRun();
6420     bool bIntrinsicImported = false;
6421
6422     CORINFO_SIG_INFO calliSig;
6423     GenTreeArgList*  extraArg = nullptr;
6424
6425     /*-------------------------------------------------------------------------
6426      * First create the call node
6427      */
6428
6429     if (opcode == CEE_CALLI)
6430     {
6431         /* Get the call site sig */
6432         eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &calliSig);
6433
6434         callRetTyp = JITtype2varType(calliSig.retType);
6435         clsHnd     = calliSig.retTypeClass;
6436
6437         call = impImportIndirectCall(&calliSig, ilOffset);
6438
6439         // We don't know the target method, so we have to infer the flags, or
6440         // assume the worst-case.
6441         mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
6442
6443 #ifdef DEBUG
6444         if (verbose)
6445         {
6446             unsigned structSize =
6447                 (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0;
6448             printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
6449                    opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
6450         }
6451 #endif
6452         // This should be checked in impImportBlockCode.
6453         assert(!compIsForInlining() || !(impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY));
6454
6455         sig = &calliSig;
6456
6457 #ifdef DEBUG
6458         // We cannot lazily obtain the signature of a CALLI call because it has no method
6459         // handle that we can use, so we need to save its full call signature here.
6460         assert(call->gtCall.callSig == nullptr);
6461         call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
6462         *call->gtCall.callSig = calliSig;
6463 #endif // DEBUG
6464
6465         if (IsTargetAbi(CORINFO_CORERT_ABI))
6466         {
6467             bool managedCall = (calliSig.callConv & GTF_CALL_UNMANAGED) == 0;
6468             if (managedCall)
6469             {
6470                 call->AsCall()->SetFatPointerCandidate();
6471                 setMethodHasFatPointer();
6472             }
6473         }
6474     }
6475     else // (opcode != CEE_CALLI)
6476     {
6477         CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Count;
6478
6479         // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to
6480         // supply the instantiation parameters necessary to make direct calls to underlying
6481         // shared generic code, rather than calling through instantiating stubs.  If the
6482         // returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT
6483         // must indeed pass an instantiation parameter.
6484
6485         methHnd = callInfo->hMethod;
6486
6487         sig        = &(callInfo->sig);
6488         callRetTyp = JITtype2varType(sig->retType);
6489
6490         mflags = callInfo->methodFlags;
6491
6492 #ifdef DEBUG
6493         if (verbose)
6494         {
6495             unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0;
6496             printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
6497                    opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
6498         }
6499 #endif
6500         if (compIsForInlining())
6501         {
6502             /* Does this call site have security boundary restrictions? */
6503
6504             if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
6505             {
6506                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
6507                 return callRetTyp;
6508             }
6509
6510             /* Does the inlinee need a security check token on the frame */
6511
6512             if (mflags & CORINFO_FLG_SECURITYCHECK)
6513             {
6514                 compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
6515                 return callRetTyp;
6516             }
6517
6518             /* Does the inlinee use StackCrawlMark */
6519
6520             if (mflags & CORINFO_FLG_DONT_INLINE_CALLER)
6521             {
6522                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NOINLINE_CALLEE);
6523                 return callRetTyp;
6524             }
6525
6526             /* For now ignore delegate invoke */
6527
6528             if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
6529             {
6530                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_DELEGATE_INVOKE);
6531                 return callRetTyp;
6532             }
6533
6534             /* For now ignore varargs */
6535             if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6536             {
6537                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS);
6538                 return callRetTyp;
6539             }
6540
6541             if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
6542             {
6543                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
6544                 return callRetTyp;
6545             }
6546
6547             if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT))
6548             {
6549                 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL);
6550                 return callRetTyp;
6551             }
6552         }
6553
6554         clsHnd = pResolvedToken->hClass;
6555
6556         clsFlags = callInfo->classFlags;
6557
6558 #ifdef DEBUG
6559         // If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute.
6560
6561         // This recognition should really be done by knowing the methHnd of the relevant Mark method(s).
6562         // These should be in mscorlib.h, and available through a JIT/EE interface call.
6563         const char* modName;
6564         const char* className;
6565         const char* methodName;
6566         if ((className = eeGetClassName(clsHnd)) != nullptr &&
6567             strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 &&
6568             (methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0)
6569         {
6570             return impImportJitTestLabelMark(sig->numArgs);
6571         }
6572 #endif // DEBUG
6573
6574         // <NICE> Factor this into getCallInfo </NICE>
6575         if ((mflags & CORINFO_FLG_INTRINSIC) && !pConstrainedResolvedToken)
6576         {
6577             call = impIntrinsic(newobjThis, clsHnd, methHnd, sig, pResolvedToken->token, readonlyCall,
6578                                 (canTailCall && (tailCall != 0)), &intrinsicID);
6579
6580             if (call != nullptr)
6581             {
6582                 assert(!(mflags & CORINFO_FLG_VIRTUAL) || (mflags & CORINFO_FLG_FINAL) ||
6583                        (clsFlags & CORINFO_FLG_FINAL));
6584
6585 #ifdef FEATURE_READYTORUN_COMPILER
6586                 if (call->OperGet() == GT_INTRINSIC)
6587                 {
6588                     if (opts.IsReadyToRun())
6589                     {
6590                         noway_assert(callInfo->kind == CORINFO_CALL);
6591                         call->gtIntrinsic.gtEntryPoint = callInfo->codePointerLookup.constLookup;
6592                     }
6593                     else
6594                     {
6595                         call->gtIntrinsic.gtEntryPoint.addr = nullptr;
6596                     }
6597                 }
6598 #endif
6599
6600                 bIntrinsicImported = true;
6601                 goto DONE_CALL;
6602             }
6603         }
6604
6605 #ifdef FEATURE_SIMD
6606         if (featureSIMD)
6607         {
6608             call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, pResolvedToken->token);
6609             if (call != nullptr)
6610             {
6611                 bIntrinsicImported = true;
6612                 goto DONE_CALL;
6613             }
6614         }
6615 #endif // FEATURE_SIMD
6616
6617         if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT))
6618         {
6619             NO_WAY("Virtual call to a function added via EnC is not supported");
6620         }
6621
6622         if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT &&
6623             (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6624             (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG)
6625         {
6626             BADCODE("Bad calling convention");
6627         }
6628
6629         //-------------------------------------------------------------------------
6630         //  Construct the call node
6631         //
6632         // Work out what sort of call we're making.
6633         // Dispense with virtual calls implemented via LDVIRTFTN immediately.
6634
6635         constraintCallThisTransform = callInfo->thisTransform;
6636
6637         exactContextHnd                = callInfo->contextHandle;
6638         exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup;
6639
6640         // A recursive call is treated as a loop to the beginning of the method.
6641         if (methHnd == info.compMethodHnd)
6642         {
6643 #ifdef DEBUG
6644             if (verbose)
6645             {
6646                 JITDUMP("\nFound recursive call in the method. Mark BB%02u to BB%02u as having a backward branch.\n",
6647                         fgFirstBB->bbNum, compCurBB->bbNum);
6648             }
6649 #endif
6650             fgMarkBackwardJump(fgFirstBB, compCurBB);
6651         }
6652
6653         switch (callInfo->kind)
6654         {
6655
6656             case CORINFO_VIRTUALCALL_STUB:
6657             {
6658                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6659                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6660                 if (callInfo->stubLookup.lookupKind.needsRuntimeLookup)
6661                 {
6662
6663                     if (compIsForInlining())
6664                     {
6665                         // Don't import runtime lookups when inlining
6666                         // Inlining has to be aborted in such a case
6667                         /* XXX Fri 3/20/2009
6668                          * By the way, this would never succeed.  If the handle lookup is into the generic
6669                          * dictionary for a candidate, you'll generate different dictionary offsets and the
6670                          * inlined code will crash.
6671                          *
6672                          * To anyone reviewing this code: when could this ever succeed in the future?  It'll
6673                          * always have a handle lookup.  These lookups are safe intra-module, but we're just
6674                          * failing here.
6675                          */
6676                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE);
6677                         return callRetTyp;
6678                     }
6679
6680                     GenTreePtr stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd);
6681                     assert(!compDonotInline());
6682
6683                     // This is the rough code to set up an indirect stub call
6684                     assert(stubAddr != nullptr);
6685
6686                     // The stubAddr may be a
6687                     // complex expression. As it is evaluated after the args,
6688                     // it may cause registered args to be spilled. Simply spill it.
6689
6690                     unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup"));
6691                     impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_ALL);
6692                     stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6693
6694                     // Create the actual call node
6695
6696                     assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6697                            (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6698
6699                     call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr);
6700
6701                     call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT);
6702                     call->gtFlags |= GTF_CALL_VIRT_STUB;
6703
6704 #ifdef _TARGET_X86_
6705                     // No tailcalls allowed for these yet...
6706                     canTailCall             = false;
6707                     szCanTailCallFailReason = "VirtualCall with runtime lookup";
6708 #endif
6709                 }
6710                 else
6711                 {
6712                     // OK, the stub is available at compile time.
6713
6714                     call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6715                     call->gtCall.gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr;
6716                     call->gtFlags |= GTF_CALL_VIRT_STUB;
6717                     assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE);
6718                     if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE)
6719                     {
6720                         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
6721                     }
6722                 }
6723
6724 #ifdef FEATURE_READYTORUN_COMPILER
6725                 if (opts.IsReadyToRun())
6726                 {
6727                     // Null check is sometimes needed for ready to run to handle
6728                     // non-virtual <-> virtual changes between versions
6729                     if (callInfo->nullInstanceCheck)
6730                     {
6731                         call->gtFlags |= GTF_CALL_NULLCHECK;
6732                     }
6733                 }
6734 #endif
6735
6736                 break;
6737             }
6738
6739             case CORINFO_VIRTUALCALL_VTABLE:
6740             {
6741                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6742                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6743                 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6744                 call->gtFlags |= GTF_CALL_VIRT_VTABLE;
6745                 break;
6746             }
6747
6748             case CORINFO_VIRTUALCALL_LDVIRTFTN:
6749             {
6750                 if (compIsForInlining())
6751                 {
6752                     compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN);
6753                     return callRetTyp;
6754                 }
6755
6756                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6757                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6758                 // OK, we've been told to call via LDVIRTFTN, so just
6759                 // take the call now....
6760
6761                 args = impPopList(sig->numArgs, &argFlags, sig);
6762
6763                 GenTreePtr thisPtr = impPopStack().val;
6764                 thisPtr            = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
6765                 if (compDonotInline())
6766                 {
6767                     return callRetTyp;
6768                 }
6769
6770                 // Clone the (possibly transformed) "this" pointer
6771                 GenTreePtr thisPtrCopy;
6772                 thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
6773                                        nullptr DEBUGARG("LDVIRTFTN this pointer"));
6774
6775                 GenTreePtr fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
6776                 if (compDonotInline())
6777                 {
6778                     return callRetTyp;
6779                 }
6780
6781                 thisPtr = nullptr; // can't reuse it
6782
6783                 // Now make an indirect call through the function pointer
6784
6785                 unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer"));
6786                 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
6787                 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6788
6789                 // Create the actual call node
6790
6791                 call                    = gtNewIndCallNode(fptr, callRetTyp, args, ilOffset);
6792                 call->gtCall.gtCallObjp = thisPtrCopy;
6793                 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
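                // Added commentary (not in the original source): conceptually this path builds
                //     this1 = <this>;  this2 = clone(this1)
                //     tmpN  = ldvirtftn(this1, method)
                //     call ind [V_tmpN](this2, args)
                // so the virtual slot is resolved explicitly and then invoked through an
                // ordinary indirect call.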
6794
6795 #ifdef FEATURE_READYTORUN_COMPILER
6796                 if (opts.IsReadyToRun())
6797                 {
6798                     // Null check is needed for ready to run to handle
6799                     // non-virtual <-> virtual changes between versions
6800                     call->gtFlags |= GTF_CALL_NULLCHECK;
6801                 }
6802 #endif
6803
6804                 // Since we are jumping over some code, check that it's OK to skip that code
6805                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6806                        (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6807                 goto DONE;
6808             }
6809
6810             case CORINFO_CALL:
6811             {
6812                 // This is for a non-virtual, non-interface etc. call
6813                 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6814
6815                 // We remove the nullcheck for the GetType call intrinsic.
6816                 // TODO-CQ: JIT64 does not introduce the null check for many more helper calls
6817                 // and intrinsics.
6818                 if (callInfo->nullInstanceCheck &&
6819                     !((mflags & CORINFO_FLG_INTRINSIC) != 0 && (intrinsicID == CORINFO_INTRINSIC_Object_GetType)))
6820                 {
6821                     call->gtFlags |= GTF_CALL_NULLCHECK;
6822                 }
6823
6824 #ifdef FEATURE_READYTORUN_COMPILER
6825                 if (opts.IsReadyToRun())
6826                 {
6827                     call->gtCall.setEntryPoint(callInfo->codePointerLookup.constLookup);
6828                 }
6829 #endif
6830                 break;
6831             }
6832
6833             case CORINFO_CALL_CODE_POINTER:
6834             {
6835                 // The EE has asked us to call by computing a code pointer and then doing an
6836                 // indirect call.  This is because a runtime lookup is required to get the code entry point.
6837
6838                 // These calls always follow a uniform calling convention, i.e. no extra hidden params
6839                 assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0);
6840
6841                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG);
6842                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6843
6844                 GenTreePtr fptr =
6845                     impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod);
6846
6847                 if (compDonotInline())
6848                 {
6849                     return callRetTyp;
6850                 }
6851
6852                 // Now make an indirect call through the function pointer
6853
6854                 unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer"));
6855                 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
6856                 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6857
6858                 call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
6859                 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6860                 if (callInfo->nullInstanceCheck)
6861                 {
6862                     call->gtFlags |= GTF_CALL_NULLCHECK;
6863                 }
6864
6865                 break;
6866             }
6867
6868             default:
6869                 assert(!"unknown call kind");
6870                 break;
6871         }
6872
6873         //-------------------------------------------------------------------------
6874         // Set more flags
6875
6876         PREFIX_ASSUME(call != nullptr);
6877
6878         if (mflags & CORINFO_FLG_NOGCCHECK)
6879         {
6880             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK;
6881         }
6882
6883         // Mark the call if it's one of the ones we may later treat as an intrinsic
6884         if (intrinsicID == CORINFO_INTRINSIC_Object_GetType || intrinsicID == CORINFO_INTRINSIC_TypeEQ ||
6885             intrinsicID == CORINFO_INTRINSIC_TypeNEQ || intrinsicID == CORINFO_INTRINSIC_GetCurrentManagedThread ||
6886             intrinsicID == CORINFO_INTRINSIC_GetManagedThreadId)
6887         {
6888             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC;
6889         }
6890     }
6891     assert(sig);
6892     assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set.
6893
6894     /* Some sanity checks */
6895
6896     // CALL_VIRT and NEWOBJ must have a THIS pointer
6897     assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS));
6898     // static bit and hasThis are negations of one another
6899     assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0));
6900     assert(call != nullptr);
6901
6902     /*-------------------------------------------------------------------------
6903      * Check special-cases etc
6904      */
6905
6906     /* Special case - Check if it is a call to Delegate.Invoke(). */
6907
6908     if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
6909     {
6910         assert(!compIsForInlining());
6911         assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6912         assert(mflags & CORINFO_FLG_FINAL);
6913
6914         /* Set the delegate flag */
6915         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV;
6916
6917         if (callInfo->secureDelegateInvoke)
6918         {
6919             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SECURE_DELEGATE_INV;
6920         }
6921
6922         if (opcode == CEE_CALLVIRT)
6923         {
6924             assert(mflags & CORINFO_FLG_FINAL);
6925
6926             /* It should have the GTF_CALL_NULLCHECK flag set. Reset it */
6927             assert(call->gtFlags & GTF_CALL_NULLCHECK);
6928             call->gtFlags &= ~GTF_CALL_NULLCHECK;
6929         }
6930     }
6931
6932     CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass;
6933     actualMethodRetTypeSigClass = sig->retTypeSigClass;
6934     if (varTypeIsStruct(callRetTyp))
6935     {
6936         callRetTyp   = impNormStructType(actualMethodRetTypeSigClass);
6937         call->gtType = callRetTyp;
6938     }
6939
6940 #if !FEATURE_VARARG
6941     /* Check for varargs */
6942     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
6943         (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6944     {
6945         BADCODE("Varargs not supported.");
6946     }
6947 #endif // !FEATURE_VARARG
6948
6949     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
6950         (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6951     {
6952         assert(!compIsForInlining());
6953
6954         /* Set the right flags */
6955
6956         call->gtFlags |= GTF_CALL_POP_ARGS;
6957         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VARARGS;
6958
6959         /* Can't allow tailcall for varargs as it is caller-pop. The caller
6960            will be expecting to pop a certain number of arguments, but if we
6961            tailcall to a function with a different number of arguments, we
6962            are hosed. There are ways around this (caller remembers esp value,
6963            varargs is not caller-pop, etc), but not worth it. */
6964         CLANG_FORMAT_COMMENT_ANCHOR;
6965
6966 #ifdef _TARGET_X86_
6967         if (canTailCall)
6968         {
6969             canTailCall             = false;
6970             szCanTailCallFailReason = "Callee is varargs";
6971         }
6972 #endif
6973
6974         /* Get the total number of arguments - this is already correct
6975          * for CALLI - for methods we have to get it from the call site */
6976
6977         if (opcode != CEE_CALLI)
6978         {
6979 #ifdef DEBUG
6980             unsigned numArgsDef = sig->numArgs;
6981 #endif
6982             eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, sig);
6983
6984 #ifdef DEBUG
6985             // We cannot lazily obtain the signature of a vararg call because using its method
6986             // handle will give us only the declared argument list, not the full argument list.
6987             assert(call->gtCall.callSig == nullptr);
6988             call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
6989             *call->gtCall.callSig = *sig;
6990 #endif
6991
6992             // For vararg calls we must be sure to load the return type of the
6993             // method actually being called, as well as the return type
6994             // specified in the vararg signature. With type equivalency, these types
6995             // may not be the same.
6996             if (sig->retTypeSigClass != actualMethodRetTypeSigClass)
6997             {
6998                 if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
6999                     sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR &&
7000                     sig->retType != CORINFO_TYPE_VAR)
7001                 {
7002                     // Make sure that all valuetypes (including enums) that we push are loaded.
7003                     // This is to guarantee that if a GC is triggered from the prestub of this method,
7004                     // all valuetypes in the method signature are already loaded.
7005                     // We need to be able to find the size of the valuetypes, but we cannot
7006                     // do a class-load from within GC.
7007                     info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass);
7008                 }
7009             }
7010
7011             assert(numArgsDef <= sig->numArgs);
7012         }
7013
7014         /* We will have "cookie" as the last argument but we cannot push
7015          * it on the operand stack because we may overflow, so we append it
7016          * to the arg list right after we pop the other arguments */
7017     }
7018
7019     if (mflags & CORINFO_FLG_SECURITYCHECK)
7020     {
7021         assert(!compIsForInlining());
7022
7023         // Need security prolog/epilog callouts when there is
7024         // imperative security in the method. This is to give security a
7025         // chance to do any setup in the prolog and cleanup in the epilog if needed.
7026
7027         if (compIsForInlining())
7028         {
7029             // Cannot handle this if the method being imported is itself an inlinee,
7030             // because an inlinee method does not have its own frame.
7031
7032             compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
7033             return callRetTyp;
7034         }
7035         else
7036         {
7037             tiSecurityCalloutNeeded = true;
7038
7039             // If the current method calls a method which needs a security check,
7040             // (i.e. the method being compiled has imperative security)
7041             // we need to reserve a slot for the security object in
7042             // the current method's stack frame
7043             opts.compNeedSecurityCheck = true;
7044         }
7045     }
7046
7047     //--------------------------- Inline NDirect ------------------------------
7048
7049     // For inline cases we technically should look at both the current
7050     // block and the call site block (or just the latter if we've
7051     // fused the EH trees). However the block-related checks pertain to
7052     // EH and we currently won't inline a method with EH. So for
7053     // inlinees, just checking the call site block is sufficient.
7054     {
7055         // New lexical block here to avoid compilation errors because of GOTOs.
7056         BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
7057         impCheckForPInvokeCall(call, methHnd, sig, mflags, block);
7058     }
7059
7060     if (call->gtFlags & GTF_CALL_UNMANAGED)
7061     {
7062         // We set up the unmanaged call by linking the frame, disabling GC, etc
7063         // This needs to be cleaned up on return
7064         if (canTailCall)
7065         {
7066             canTailCall             = false;
7067             szCanTailCallFailReason = "Callee is native";
7068         }
7069
7070         checkForSmallType = true;
7071
7072         impPopArgsForUnmanagedCall(call, sig);
7073
7074         goto DONE;
7075     }
7076     else if ((opcode == CEE_CALLI) && (((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_STDCALL) ||
7077                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_C) ||
7078                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_THISCALL) ||
7079                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_FASTCALL)))
7080     {
7081         if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig))
7082         {
7083             // Normally this only happens with inlining.
7084             // However, a generic method (or type) being NGENd into another module
7085                 // can run into this issue as well.  There's no easy fall-back for NGEN,
7086                 // so instead we fall back to JIT.
7087             if (compIsForInlining())
7088             {
7089                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE);
7090             }
7091             else
7092             {
7093                 IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)");
7094             }
7095
7096             return callRetTyp;
7097         }
7098
7099         GenTreePtr cookie = eeGetPInvokeCookie(sig);
7100
7101         // This cookie is required to be either a simple GT_CNS_INT or
7102         // an indirection of a GT_CNS_INT
7103         //
7104         GenTreePtr cookieConst = cookie;
7105         if (cookie->gtOper == GT_IND)
7106         {
7107             cookieConst = cookie->gtOp.gtOp1;
7108         }
7109         assert(cookieConst->gtOper == GT_CNS_INT);
7110
7111         // Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that
7112         // we won't allow this tree to participate in any CSE logic
7113         //
7114         cookie->gtFlags |= GTF_DONT_CSE;
7115         cookieConst->gtFlags |= GTF_DONT_CSE;
7116
7117         call->gtCall.gtCallCookie = cookie;
7118
7119         if (canTailCall)
7120         {
7121             canTailCall             = false;
7122             szCanTailCallFailReason = "PInvoke calli";
7123         }
7124     }
7125
7126     /*-------------------------------------------------------------------------
7127      * Create the argument list
7128      */
7129
7130     //-------------------------------------------------------------------------
7131     // Special case - for varargs we have an implicit last argument
7132
7133     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
7134     {
7135         assert(!compIsForInlining());
7136
7137         void *varCookie, *pVarCookie;
7138         if (!info.compCompHnd->canGetVarArgsHandle(sig))
7139         {
7140             compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE);
7141             return callRetTyp;
7142         }
7143
7144         varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie);
7145         assert((!varCookie) != (!pVarCookie));
7146         GenTreePtr cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL);
7147
7148         assert(extraArg == nullptr);
7149         extraArg = gtNewArgList(cookie);
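        // Added commentary (not in the original source): 'extraArg' is handed to impPopList()
        // below, so the varargs cookie travels as a hidden extra entry of the call's argument
        // list rather than being pushed on the IL operand stack (see the earlier varargs comment).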
7150     }
7151
7152     //-------------------------------------------------------------------------
7153     // Extra arg for shared generic code and array methods
7154     //
7155     // Extra argument containing instantiation information is passed in the
7156     // following circumstances:
7157     // (a) To the "Address" method on array classes; the extra parameter is
7158     //     the array's type handle (a TypeDesc)
7159     // (b) To shared-code instance methods in generic structs; the extra parameter
7160     //     is the struct's type handle (a vtable ptr)
7161     // (c) To shared-code per-instantiation non-generic static methods in generic
7162     //     classes and structs; the extra parameter is the type handle
7163     // (d) To shared-code generic methods; the extra parameter is an
7164     //     exact-instantiation MethodDesc
7165     //
7166     // We also set the exact type context associated with the call so we can
7167     // inline the call correctly later on.
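    // Added commentary (not in the original source), illustrating case (d): for
    //     static void Print<T>(T x) { Console.WriteLine(typeof(T)); }
    // all reference-type instantiations share one code body, so a call such as Print<string>("s")
    // passes an extra hidden argument identifying the exact instantiation (here the MethodDesc
    // for Print<string>).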
7168
7169     if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE)
7170     {
7171         assert(call->gtCall.gtCallType == CT_USER_FUNC);
7172         if (clsHnd == nullptr)
7173         {
7174             NO_WAY("CALLI on parameterized type");
7175         }
7176
7177         assert(opcode != CEE_CALLI);
7178
7179         GenTreePtr instParam;
7180         BOOL       runtimeLookup;
7181
7182         // Instantiated generic method
7183         if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD)
7184         {
7185             CORINFO_METHOD_HANDLE exactMethodHandle =
7186                 (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7187
7188             if (!exactContextNeedsRuntimeLookup)
7189             {
7190 #ifdef FEATURE_READYTORUN_COMPILER
7191                 if (opts.IsReadyToRun())
7192                 {
7193                     instParam =
7194                         impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle);
7195                     if (instParam == nullptr)
7196                     {
7197                         return callRetTyp;
7198                     }
7199                 }
7200                 else
7201 #endif
7202                 {
7203                     instParam = gtNewIconEmbMethHndNode(exactMethodHandle);
7204                     info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle);
7205                 }
7206             }
7207             else
7208             {
7209                 instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7210                 if (instParam == nullptr)
7211                 {
7212                     return callRetTyp;
7213                 }
7214             }
7215         }
7216
7217         // otherwise must be an instance method in a generic struct,
7218         // a static method in a generic type, or a runtime-generated array method
7219         else
7220         {
7221             assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS);
7222             CORINFO_CLASS_HANDLE exactClassHandle =
7223                 (CORINFO_CLASS_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7224
7225             if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0)
7226             {
7227                 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD);
7228                 return callRetTyp;
7229             }
7230
7231             if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall)
7232             {
7233                 // We indicate "readonly" to the Address operation by using a null
7234                 // instParam.
7235                 instParam = gtNewIconNode(0, TYP_REF);
7236             }
7237
7238             if (!exactContextNeedsRuntimeLookup)
7239             {
7240 #ifdef FEATURE_READYTORUN_COMPILER
7241                 if (opts.IsReadyToRun())
7242                 {
7243                     instParam =
7244                         impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle);
7245                     if (instParam == nullptr)
7246                     {
7247                         return callRetTyp;
7248                     }
7249                 }
7250                 else
7251 #endif
7252                 {
7253                     instParam = gtNewIconEmbClsHndNode(exactClassHandle);
7254                     info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle);
7255                 }
7256             }
7257             else
7258             {
7259                 instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7260                 if (instParam == nullptr)
7261                 {
7262                     return callRetTyp;
7263                 }
7264             }
7265         }
7266
7267         assert(extraArg == nullptr);
7268         extraArg = gtNewArgList(instParam);
7269     }
7270
7271     // Inlining may need the exact type context (exactContextHnd) if we're inlining shared generic code, in particular
7272     // to inline 'polytypic' operations such as static field accesses, type tests and method calls which
7273     // rely on the exact context. The exactContextHnd is passed back to the JitInterface at appropriate points.
7274     // exactContextHnd is not currently required when inlining shared generic code into shared
7275     // generic code, since the inliner aborts whenever shared code polytypic operations are encountered
7276     // (e.g. anything marked needsRuntimeLookup)
7277     if (exactContextNeedsRuntimeLookup)
7278     {
7279         exactContextHnd = nullptr;
7280     }
7281
7282     //-------------------------------------------------------------------------
7283     // The main group of arguments
7284
7285     args = call->gtCall.gtCallArgs = impPopList(sig->numArgs, &argFlags, sig, extraArg);
7286
7287     if (args)
7288     {
7289         call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
7290     }
7291
7292     //-------------------------------------------------------------------------
7293     // The "this" pointer
7294
7295     if (!(mflags & CORINFO_FLG_STATIC) && !((opcode == CEE_NEWOBJ) && (newobjThis == nullptr)))
7296     {
7297         GenTreePtr obj;
7298
7299         if (opcode == CEE_NEWOBJ)
7300         {
7301             obj = newobjThis;
7302         }
7303         else
7304         {
7305             obj = impPopStack().val;
7306             obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform);
7307             if (compDonotInline())
7308             {
7309                 return callRetTyp;
7310             }
7311         }
7312
7313         /* Is this a virtual or interface call? */
7314
7315         if ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT)
7316         {
7317             /* only true object pointers can be virtual */
7318
7319             assert(obj->gtType == TYP_REF);
7320         }
7321         else
7322         {
7323             if (impIsThis(obj))
7324             {
7325                 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS;
7326             }
7327         }
7328
7329         /* Store the "this" value in the call */
7330
7331         call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT;
7332         call->gtCall.gtCallObjp = obj;
7333     }
7334
7335     //-------------------------------------------------------------------------
7336     // The "this" pointer for "newobj"
7337
7338     if (opcode == CEE_NEWOBJ)
7339     {
7340         if (clsFlags & CORINFO_FLG_VAROBJSIZE)
7341         {
7342             assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately
7343             // This is a 'new' of a variable sized object, where
7344             // the constructor is to return the object.  In this case
7345             // the constructor claims to return VOID but we know it
7346             // actually returns the new object.
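            // Added commentary (not in the original source): System.String is the classic
            // variable-sized case -- e.g. "new string('a', n)" -- where the "constructor"
            // call itself produces the new object reference.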
7347             assert(callRetTyp == TYP_VOID);
7348             callRetTyp   = TYP_REF;
7349             call->gtType = TYP_REF;
7350             impSpillSpecialSideEff();
7351
7352             impPushOnStack(call, typeInfo(TI_REF, clsHnd));
7353         }
7354         else
7355         {
7356             if (clsFlags & CORINFO_FLG_DELEGATE)
7357             {
7358                 // The new inliner morphs it in impImportCall.
7359                 // This will allow us to inline the call to the delegate constructor.
7360                 call = fgOptimizeDelegateConstructor(call, &exactContextHnd);
7361             }
7362
7363             if (!bIntrinsicImported)
7364             {
7365
7366 #if defined(DEBUG) || defined(INLINE_DATA)
7367
7368                 // Keep track of the raw IL offset of the call
7369                 call->gtCall.gtRawILOffset = rawILOffset;
7370
7371 #endif // defined(DEBUG) || defined(INLINE_DATA)
7372
7373                 // Is it an inline candidate?
7374                 impMarkInlineCandidate(call, exactContextHnd, callInfo);
7375             }
7376
7377             // append the call node.
7378             impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7379
7380             // Now push the value of the 'new' onto the stack
7381
7382             // This is a 'new' of a non-variable sized object.
7383             // Append the new node (op1) to the statement list,
7384             // and then push the local holding the value of this
7385             // new instruction on the stack.
7386
7387             if (clsFlags & CORINFO_FLG_VALUECLASS)
7388             {
7389                 assert(newobjThis->gtOper == GT_ADDR && newobjThis->gtOp.gtOp1->gtOper == GT_LCL_VAR);
7390
7391                 unsigned tmp = newobjThis->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
7392                 impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack());
7393             }
7394             else
7395             {
7396                 if (newobjThis->gtOper == GT_COMMA)
7397                 {
7398                     // In coreclr the callout can be inserted even if verification is disabled
7399                     // so we cannot rely on tiVerificationNeeded alone
7400
7401                     // We must have inserted the callout. Get the real newobj.
7402                     newobjThis = newobjThis->gtOp.gtOp2;
7403                 }
7404
7405                 assert(newobjThis->gtOper == GT_LCL_VAR);
7406                 impPushOnStack(gtNewLclvNode(newobjThis->gtLclVarCommon.gtLclNum, TYP_REF), typeInfo(TI_REF, clsHnd));
7407             }
7408         }
7409         return callRetTyp;
7410     }
7411
7412 DONE:
7413
7414     if (tailCall)
7415     {
7416         // This check cannot be performed for implicit tail calls for the reason
7417         // that impIsImplicitTailCallCandidate() is not checking whether return
7418         // types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT.
7419         // As a result it is possible that in the following case, we find that
7420         // the type stack is non-empty if Callee() is considered for implicit
7421         // tail calling.
7422         //      int Caller(..) { .... void Callee(); ret val; ... }
7423         //
7424         // Note that we cannot check return type compatibility before ImpImportCall()
7425         // as we don't have required info or need to duplicate some of the logic of
7426         // ImpImportCall().
7427         //
7428         // For implicit tail calls, we perform this check after return types are
7429         // known to be compatible.
7430         if ((tailCall & PREFIX_TAILCALL_EXPLICIT) && (verCurrentState.esStackDepth != 0))
7431         {
7432             BADCODE("Stack should be empty after tailcall");
7433         }
7434
7435         // Note that we cannot relax this condition with genActualType() as
7436         // the calling convention dictates that the caller of a function with
7437         // a small-typed return value is responsible for normalizing the return value.
7438
7439         if (canTailCall &&
7440             !impTailCallRetTypeCompatible(info.compRetType, info.compMethodInfo->args.retTypeClass, callRetTyp,
7441                                           callInfo->sig.retTypeClass))
7442         {
7443             canTailCall             = false;
7444             szCanTailCallFailReason = "Return types are not tail call compatible";
7445         }
7446
7447         // Stack empty check for implicit tail calls.
7448         if (canTailCall && (tailCall & PREFIX_TAILCALL_IMPLICIT) && (verCurrentState.esStackDepth != 0))
7449         {
7450 #ifdef _TARGET_AMD64_
7451             // JIT64 Compatibility:  Opportunistic tail call stack mismatch throws a VerificationException
7452             // in JIT64, not an InvalidProgramException.
7453             Verify(false, "Stack should be empty after tailcall");
7454 #else  // !_TARGET_AMD64_
7455             BADCODE("Stack should be empty after tailcall");
7456 #endif // !_TARGET_AMD64_
7457         }
7458
7459         // assert(compCurBB is not a catch, finally or filter block);
7460         // assert(compCurBB is not a try block protected by a finally block);
7461
7462         // Check for permission to tailcall
7463         bool explicitTailCall = (tailCall & PREFIX_TAILCALL_EXPLICIT) != 0;
7464
7465         assert(!explicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN);
7466
7467         if (canTailCall)
7468         {
7469             // True virtual or indirect calls shouldn't pass in a callee handle.
7470             CORINFO_METHOD_HANDLE exactCalleeHnd = ((call->gtCall.gtCallType != CT_USER_FUNC) ||
7471                                                     ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT))
7472                                                        ? nullptr
7473                                                        : methHnd;
7474             GenTreePtr thisArg = call->gtCall.gtCallObjp;
7475
7476             if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, explicitTailCall))
7477             {
7478                 canTailCall = true;
7479                 if (explicitTailCall)
7480                 {
7481                     // In case of explicit tail calls, mark it so that it is not considered
7482                     // for in-lining.
7483                     call->gtCall.gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL;
7484 #ifdef DEBUG
7485                     if (verbose)
7486                     {
7487                         printf("\nGTF_CALL_M_EXPLICIT_TAILCALL bit set for call ");
7488                         printTreeID(call);
7489                         printf("\n");
7490                     }
7491 #endif
7492                 }
7493                 else
7494                 {
7495 #if FEATURE_TAILCALL_OPT
7496                     // Must be an implicit tail call.
7497                     assert((tailCall & PREFIX_TAILCALL_IMPLICIT) != 0);
7498
7499                     // It is possible that a call node is both an inline candidate and marked
7500                     // for opportunistic tail calling.  In-lining happens before morphing of
7501                     // trees.  If in-lining of an in-line candidate gets aborted for whatever
7502                     // reason, it will survive to the morphing stage at which point it will be
7503                     // transformed into a tail call after performing additional checks.
7504
7505                     call->gtCall.gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL;
7506 #ifdef DEBUG
7507                     if (verbose)
7508                     {
7509                         printf("\nGTF_CALL_M_IMPLICIT_TAILCALL bit set for call ");
7510                         printTreeID(call);
7511                         printf("\n");
7512                     }
7513 #endif
7514
7515 #else //! FEATURE_TAILCALL_OPT
7516                     NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls");
7517
7518 #endif // FEATURE_TAILCALL_OPT
7519                 }
7520
7521                 // we can't report success just yet...
7522             }
7523             else
7524             {
7525                 canTailCall = false;
7526 // canTailCall reported its reasons already
7527 #ifdef DEBUG
7528                 if (verbose)
7529                 {
7530                     printf("\ninfo.compCompHnd->canTailCall returned false for call ");
7531                     printTreeID(call);
7532                     printf("\n");
7533                 }
7534 #endif
7535             }
7536         }
7537         else
7538         {
7539             // If this assert fires it means that canTailCall was set to false without setting a reason!
7540             assert(szCanTailCallFailReason != nullptr);
7541
7542 #ifdef DEBUG
7543             if (verbose)
7544             {
7545                 printf("\nRejecting %splicit tail call for call ", explicitTailCall ? "ex" : "im");
7546                 printTreeID(call);
7547                 printf(": %s\n", szCanTailCallFailReason);
7548             }
7549 #endif
7550             info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, explicitTailCall, TAILCALL_FAIL,
7551                                                      szCanTailCallFailReason);
7552         }
7553     }
7554
7555     // Note: we assume that small return types are already normalized by the managed callee
7556     // or by the pinvoke stub for calls to unmanaged code.
7557
7558     if (!bIntrinsicImported)
7559     {
7560         //
7561         // Things that need to be checked when bIntrinsicImported is false.
7562         //
7563
7564         assert(call->gtOper == GT_CALL);
7565         assert(sig != nullptr);
7566
7567         // Tail calls require us to save the call site's sig info so we can obtain an argument
7568         // copying thunk from the EE later on.
7569         if (call->gtCall.callSig == nullptr)
7570         {
7571             call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7572             *call->gtCall.callSig = *sig;
7573         }
7574
7575         if (compIsForInlining() && opcode == CEE_CALLVIRT)
7576         {
7577             GenTreePtr callObj = call->gtCall.gtCallObjp;
7578             assert(callObj != nullptr);
7579
7580             unsigned callKind = call->gtFlags & GTF_CALL_VIRT_KIND_MASK;
7581
7582             if (((callKind != GTF_CALL_NONVIRT) || (call->gtFlags & GTF_CALL_NULLCHECK)) &&
7583                 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(call->gtCall.gtCallArgs, callObj,
7584                                                                    impInlineInfo->inlArgInfo))
7585             {
7586                 impInlineInfo->thisDereferencedFirst = true;
7587             }
7588         }
7589
7590 #if defined(DEBUG) || defined(INLINE_DATA)
7591
7592         // Keep track of the raw IL offset of the call
7593         call->gtCall.gtRawILOffset = rawILOffset;
7594
7595 #endif // defined(DEBUG) || defined(INLINE_DATA)
7596
7597         // Is it an inline candidate?
7598         impMarkInlineCandidate(call, exactContextHnd, callInfo);
7599     }
7600
7601 DONE_CALL:
7602     // Push or append the result of the call
7603     if (callRetTyp == TYP_VOID)
7604     {
7605         if (opcode == CEE_NEWOBJ)
7606         {
7607             // we actually did push something, so don't spill the thing we just pushed.
7608             assert(verCurrentState.esStackDepth > 0);
7609             impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtOffs);
7610         }
7611         else
7612         {
7613             impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7614         }
7615     }
7616     else
7617     {
7618         impSpillSpecialSideEff();
7619
7620         if (clsFlags & CORINFO_FLG_ARRAY)
7621         {
7622             eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
7623         }
7624
7625         // Find the return type used for verification by interpreting the method signature.
7626         // NB: we are clobbering the already established sig.
7627         if (tiVerificationNeeded)
7628         {
7629             // Actually, we never get the sig for the original method.
7630             sig = &(callInfo->verSig);
7631         }
7632
7633         typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass);
7634         tiRetVal.NormaliseForStack();
7635
7636         // The CEE_READONLY prefix modifies the verification semantics of an Address
7637         // operation on an array type.
7638         if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall && tiRetVal.IsByRef())
7639         {
7640             tiRetVal.SetIsReadonlyByRef();
7641         }
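        // Added commentary (not in the original source): in IL this corresponds to the
        // "readonly." prefix on the multi-dimensional array Address helper, roughly
        //     readonly. call instance int32& int32[0...,0...]::Address(int32, int32)
        // The prefix relaxes the usual exact-element-type check because the returned byref
        // is restricted to read-style uses, which is what SetIsReadonlyByRef() records above.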
7642
7643         if (tiVerificationNeeded)
7644         {
7645             // We assume all calls return permanent home byrefs. If they
7646             // didn't, they wouldn't be verifiable. This also covers
7647             // the Address() helper for multidimensional arrays.
7648             if (tiRetVal.IsByRef())
7649             {
7650                 tiRetVal.SetIsPermanentHomeByRef();
7651             }
7652         }
7653
7654         if (call->IsCall())
7655         {
7656             // Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call)
7657
7658             bool fatPointerCandidate = call->AsCall()->IsFatPointerCandidate();
7659             if (varTypeIsStruct(callRetTyp))
7660             {
7661                 call = impFixupCallStructReturn(call, sig->retTypeClass);
7662             }
7663
7664             if ((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0)
7665             {
7666                 assert(opts.OptEnabled(CLFLG_INLINING));
7667                 assert(!fatPointerCandidate); // We should not try to inline calli.
7668
7669                 // Make the call its own tree (spill the stack if needed).
7670                 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7671
7672                 // TODO: Still using the widened type.
7673                 call = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp));
7674             }
7675             else
7676             {
7677                 if (fatPointerCandidate)
7678                 {
7679                     // fatPointer candidates should be in statements of the form call() or var = call().
7680                     // Such a form allows us to find statements with fat calls without walking whole trees
7681                     // and avoids problems with splitting trees.
7682                     assert(!bIntrinsicImported);
7683                     assert(IsTargetAbi(CORINFO_CORERT_ABI));
7684                     if (call->OperGet() != GT_LCL_VAR) // can be already converted by impFixupCallStructReturn.
7685                     {
7686                         unsigned   calliSlot  = lvaGrabTemp(true DEBUGARG("calli"));
7687                         LclVarDsc* varDsc     = &lvaTable[calliSlot];
7688                         varDsc->lvVerTypeInfo = tiRetVal;
7689                         impAssignTempGen(calliSlot, call, clsHnd, (unsigned)CHECK_SPILL_NONE);
7690                         // impAssignTempGen can change the src arg list and return type for a call that returns a struct.
7691                         var_types type = genActualType(lvaTable[calliSlot].TypeGet());
7692                         call           = gtNewLclvNode(calliSlot, type);
7693                     }
7694                 }
7695                 // For non-candidates we must also spill, since we
7696                 // might have locals live on the eval stack that this
7697                 // call can modify.
7698                 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call"));
7699             }
7700         }
7701
7702         if (!bIntrinsicImported)
7703         {
7704             //-------------------------------------------------------------------------
7705             //
7706             /* If the call is of a small type and the callee is managed, the callee will normalize the result
7707                 before returning.
7708                 However, we need to normalize small type values returned by unmanaged
7709                 functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here
7710                 if we use the shorter inlined pinvoke stub. */
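            // Added commentary (not in the original source): e.g. for an inlined pinvoke whose
            // native signature returns an 8-bit value, the raw return arrives widened in a
            // 32-bit register with undefined upper bits, so the cast inserted below
            // (CAST(<small type>, call)) re-normalizes it before it is pushed on the stack.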
7711
7712             if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT))
7713             {
7714                 call = gtNewCastNode(genActualType(callRetTyp), call, callRetTyp);
7715             }
7716         }
7717
7718         impPushOnStack(call, tiRetVal);
7719     }
7720
7721     // VSD functions get a new call target each time we getCallInfo, so clear the cache.
7722     // Also, the call info cache for CALLI instructions is largely incomplete, so clear it out.
7723     // if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB))
7724     //  callInfoCache.uncacheCallInfo();
7725
7726     return callRetTyp;
7727 }
7728 #ifdef _PREFAST_
7729 #pragma warning(pop)
7730 #endif
7731
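//-----------------------------------------------------------------------------------
//  impMethodInfo_hasRetBuffArg: Check whether the method described by 'methInfo'
//  returns its struct value through a hidden return buffer argument.
//
//  Arguments:
//    methInfo  -  method info of the callee
//
//  Return Value:
//    Returns true if the return type is a struct (or refany) that getReturnTypeForStruct
//    classifies as SPK_ByReference; false otherwise.
//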
7732 bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo)
7733 {
7734     CorInfoType corType = methInfo->args.retType;
7735
7736     if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY))
7737     {
7738         // We have some kind of STRUCT being returned
7739
7740         structPassingKind howToReturnStruct = SPK_Unknown;
7741
7742         var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, &howToReturnStruct);
7743
7744         if (howToReturnStruct == SPK_ByReference)
7745         {
7746             return true;
7747         }
7748     }
7749
7750     return false;
7751 }
7752
7753 #ifdef DEBUG
7754 //
7755 var_types Compiler::impImportJitTestLabelMark(int numArgs)
7756 {
7757     TestLabelAndNum tlAndN;
7758     if (numArgs == 2)
7759     {
7760         tlAndN.m_num  = 0;
7761         StackEntry se = impPopStack();
7762         assert(se.seTypeInfo.GetType() == TI_INT);
7763         GenTreePtr val = se.val;
7764         assert(val->IsCnsIntOrI());
7765         tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
7766     }
7767     else if (numArgs == 3)
7768     {
7769         StackEntry se = impPopStack();
7770         assert(se.seTypeInfo.GetType() == TI_INT);
7771         GenTreePtr val = se.val;
7772         assert(val->IsCnsIntOrI());
7773         tlAndN.m_num = val->AsIntConCommon()->IconValue();
7774         se           = impPopStack();
7775         assert(se.seTypeInfo.GetType() == TI_INT);
7776         val = se.val;
7777         assert(val->IsCnsIntOrI());
7778         tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
7779     }
7780     else
7781     {
7782         assert(false);
7783     }
7784
7785     StackEntry expSe = impPopStack();
7786     GenTreePtr node  = expSe.val;
7787
7788     // There are a small number of special cases, where we actually put the annotation on a subnode.
7789     if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100)
7790     {
7791         // A loop hoist annotation with value >= 100 means that the expression should be a static field access,
7792         // a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some
7793         // offset within the static field block whose address is returned by the helper call.
7794         // The annotation is saying that this address calculation, but not the entire access, should be hoisted.
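        // Added commentary (not in the original source): the expected shape is roughly
        //     GT_IND( GT_ADD( CALL <static-base helper>, <field offset> ) )
        // (the offset may be absent), and the annotation is moved from the GT_IND onto its
        // address operand so that only the address computation is marked as hoistable.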
7795         GenTreePtr helperCall = nullptr;
7796         assert(node->OperGet() == GT_IND);
7797         tlAndN.m_num -= 100;
7798         GetNodeTestData()->Set(node->gtOp.gtOp1, tlAndN);
7799         GetNodeTestData()->Remove(node);
7800     }
7801     else
7802     {
7803         GetNodeTestData()->Set(node, tlAndN);
7804     }
7805
7806     impPushOnStack(node, expSe.seTypeInfo);
7807     return node->TypeGet();
7808 }
7809 #endif // DEBUG
7810
7811 //-----------------------------------------------------------------------------------
7812 //  impFixupCallStructReturn: For a call node that returns a struct type either
7813 //  adjust the return type to an enregisterable type, or set the flag to indicate
7814 //  struct return via retbuf arg.
7815 //
7816 //  Arguments:
7817 //    call       -  GT_CALL GenTree node
7818 //    retClsHnd  -  Class handle of return type of the call
7819 //
7820 //  Return Value:
7821 //    Returns new GenTree node after fixing struct return of call node
7822 //
7823 GenTreePtr Compiler::impFixupCallStructReturn(GenTreePtr call, CORINFO_CLASS_HANDLE retClsHnd)
7824 {
7825     assert(call->gtOper == GT_CALL);
7826
7827     if (!varTypeIsStruct(call))
7828     {
7829         return call;
7830     }
7831
7832     call->gtCall.gtRetClsHnd = retClsHnd;
7833
7834     GenTreeCall* callNode = call->AsCall();
7835
7836 #if FEATURE_MULTIREG_RET
7837     // Initialize Return type descriptor of call node
7838     ReturnTypeDesc* retTypeDesc = callNode->GetReturnTypeDesc();
7839     retTypeDesc->InitializeStructReturnType(this, retClsHnd);
7840 #endif // FEATURE_MULTIREG_RET
7841
7842 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
7843
7844     // Not allowed for FEATURE_CORECLR, which is the only SKU available for System V OSs.
7845     assert(!callNode->IsVarargs() && "varargs not allowed for System V OSs.");
7846
7847     // The return type will remain as the incoming struct type unless normalized to a
7848     // single eightbyte return type below.
7849     callNode->gtReturnType = call->gtType;
7850
7851     unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7852     if (retRegCount != 0)
7853     {
7854         if (retRegCount == 1)
7855         {
7856             // struct returned in a single register
7857             callNode->gtReturnType = retTypeDesc->GetReturnRegType(0);
7858         }
7859         else
7860         {
7861             // must be a struct returned in two registers
7862             assert(retRegCount == 2);
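            // Added commentary (not in the original source): under the System V AMD64 ABI a
            // struct such as { long a; long b; } comes back in two registers (RAX/RDX), while
            // { double d; } comes back in a single XMM register and is handled by the
            // retRegCount == 1 case above.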
7863
7864             if ((!callNode->CanTailCall()) && (!callNode->IsInlineCandidate()))
7865             {
7866                 // Force a call returning multi-reg struct to be always of the IR form
7867                 //   tmp = call
7868                 //
7869                 // No need to assign a multi-reg struct to a local var if:
7870                 //  - It is a tail call or
7871                 //  - The call is marked for in-lining later
7872                 return impAssignMultiRegTypeToVar(call, retClsHnd);
7873             }
7874         }
7875     }
7876     else
7877     {
7878         // struct not returned in registers, i.e. returned via hidden retbuf arg.
7879         callNode->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
7880     }
7881
7882 #else // not FEATURE_UNIX_AMD64_STRUCT_PASSING
7883
7884 #if FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
7885     // There is no fixup necessary if the return type is an HFA struct.
7886     // HFA structs are returned in registers for ARM32 and ARM64.
7887     //
7888     if (!call->gtCall.IsVarargs() && IsHfa(retClsHnd))
7889     {
7890         if (call->gtCall.CanTailCall())
7891         {
7892             if (info.compIsVarArgs)
7893             {
7894                 // We cannot tail call because control needs to return to fixup the calling
7895                 // convention for result return.
7896                 call->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
7897             }
7898             else
7899             {
7900                 // If we can tail call returning HFA, then don't assign it to
7901                 // a variable back and forth.
7902                 return call;
7903             }
7904         }
7905
7906         if (call->gtFlags & GTF_CALL_INLINE_CANDIDATE)
7907         {
7908             return call;
7909         }
7910
7911         unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7912         if (retRegCount >= 2)
7913         {
7914             return impAssignMultiRegTypeToVar(call, retClsHnd);
7915         }
7916     }
7917 #endif // _TARGET_ARM_
7918
7919     // Check for TYP_STRUCT type that wraps a primitive type
7920     // Such structs are returned using a single register
7921     // and we change the return type on those calls here.
7922     //
7923     structPassingKind howToReturnStruct;
7924     var_types         returnType = getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
7925
7926     if (howToReturnStruct == SPK_ByReference)
7927     {
7928         assert(returnType == TYP_UNKNOWN);
7929         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
7930     }
7931     else
7932     {
7933         assert(returnType != TYP_UNKNOWN);
7934         call->gtCall.gtReturnType = returnType;
7935
7936         // ToDo: Refactor this common code sequence into its own method as it is used 4+ times
7937         if ((returnType == TYP_LONG) && (compLongUsed == false))
7938         {
7939             compLongUsed = true;
7940         }
7941         else if (((returnType == TYP_FLOAT) || (returnType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
7942         {
7943             compFloatingPointUsed = true;
7944         }
7945
7946 #if FEATURE_MULTIREG_RET
7947         unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7948         assert(retRegCount != 0);
7949
7950         if (retRegCount >= 2)
7951         {
7952             if ((!callNode->CanTailCall()) && (!callNode->IsInlineCandidate()))
7953             {
7954                 // Force a call returning multi-reg struct to be always of the IR form
7955                 //   tmp = call
7956                 //
7957                 // No need to assign a multi-reg struct to a local var if:
7958                 //  - It is a tail call or
7959                 //  - The call is marked for in-lining later
7960                 return impAssignMultiRegTypeToVar(call, retClsHnd);
7961             }
7962         }
7963 #endif // FEATURE_MULTIREG_RET
7964     }
7965
7966 #endif // not FEATURE_UNIX_AMD64_STRUCT_PASSING
7967
7968     return call;
7969 }
7970
7971 /*****************************************************************************
7972    For struct return values, re-type the operand in the case where the ABI
7973    does not use a struct return buffer
7974    Note that this method is only called for !_TARGET_X86_
7975  */
7976
7977 GenTreePtr Compiler::impFixupStructReturnType(GenTreePtr op, CORINFO_CLASS_HANDLE retClsHnd)
7978 {
7979     assert(varTypeIsStruct(info.compRetType));
7980     assert(info.compRetBuffArg == BAD_VAR_NUM);
7981
7982 #if defined(_TARGET_XARCH_)
7983
7984 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
7985     // No VarArgs for CoreCLR on x64 Unix
7986     assert(!info.compIsVarArgs);
7987
7988     // Is method returning a multi-reg struct?
7989     if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd))
7990     {
7991         // In case of multi-reg struct return, we force IR to be one of the following:
7992         // GT_RETURN(lclvar) or GT_RETURN(call).  If op is anything other than a
7993         // lclvar or call, it is assigned to a temp to create: temp = op and GT_RETURN(tmp).
7994
7995         if (op->gtOper == GT_LCL_VAR)
7996         {
7997             // Make sure that this struct stays in memory and doesn't get promoted.
7998             unsigned lclNum                  = op->gtLclVarCommon.gtLclNum;
7999             lvaTable[lclNum].lvIsMultiRegRet = true;
8000
8001             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8002             op->gtFlags |= GTF_DONT_CSE;
8003
8004             return op;
8005         }
8006
8007         if (op->gtOper == GT_CALL)
8008         {
8009             return op;
8010         }
8011
8012         return impAssignMultiRegTypeToVar(op, retClsHnd);
8013     }
8014 #else  // !FEATURE_UNIX_AMD64_STRUCT_PASSING
8015     assert(info.compRetNativeType != TYP_STRUCT);
8016 #endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
8017
8018 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
8019
8020     if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd))
8021     {
8022         if (op->gtOper == GT_LCL_VAR)
8023         {
8024             // This LCL_VAR is an HFA return value, it stays as a TYP_STRUCT
8025             unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8026             // Make sure this struct type stays as struct so that we can return it as an HFA
8027             lvaTable[lclNum].lvIsMultiRegRet = true;
8028
8029             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8030             op->gtFlags |= GTF_DONT_CSE;
8031
8032             return op;
8033         }
8034
8035         if (op->gtOper == GT_CALL)
8036         {
8037             if (op->gtCall.IsVarargs())
8038             {
8039                 // We cannot tail call because control needs to return to fixup the calling
8040                 // convention for result return.
8041                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
8042                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8043             }
8044             else
8045             {
8046                 return op;
8047             }
8048         }
8049         return impAssignMultiRegTypeToVar(op, retClsHnd);
8050     }
8051
8052 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM64_)
8053
8054     // Is method returning a multi-reg struct?
8055     if (IsMultiRegReturnedType(retClsHnd))
8056     {
8057         if (op->gtOper == GT_LCL_VAR)
8058         {
8059             // This LCL_VAR stays as a TYP_STRUCT
8060             unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8061
8062             // Make sure this struct type is not struct promoted
8063             lvaTable[lclNum].lvIsMultiRegRet = true;
8064
8065             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8066             op->gtFlags |= GTF_DONT_CSE;
8067
8068             return op;
8069         }
8070
8071         if (op->gtOper == GT_CALL)
8072         {
8073             if (op->gtCall.IsVarargs())
8074             {
8075                 // We cannot tail call because control needs to return to fixup the calling
8076                 // convention for result return.
8077                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
8078                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8079             }
8080             else
8081             {
8082                 return op;
8083             }
8084         }
8085         return impAssignMultiRegTypeToVar(op, retClsHnd);
8086     }
8087
8088 #endif //  FEATURE_MULTIREG_RET && FEATURE_HFA
8089
8090 REDO_RETURN_NODE:
8091     // Adjust the type away from struct to integral,
8092     // with no normalizing.
8093     if (op->gtOper == GT_LCL_VAR)
8094     {
8095         op->ChangeOper(GT_LCL_FLD);
8096     }
8097     else if (op->gtOper == GT_OBJ)
8098     {
8099         GenTreePtr op1 = op->AsObj()->Addr();
8100
8101         // We will fold away OBJ/ADDR,
8102         // except for OBJ/ADDR/INDEX,
8103         //     as the array type influences the array element's offset.
8104         //     Later in this method we change op->gtType to info.compRetNativeType.
8105         //     This is not correct when op is a GT_INDEX, as the starting offset
8106         //     for the array elements 'elemOffs' is different for an array of
8107         //     TYP_REF than for an array of TYP_STRUCT (which simply wraps a TYP_REF).
8108         //     Also refer to the GTF_INX_REFARR_LAYOUT flag.
8109         //
8110         if ((op1->gtOper == GT_ADDR) && (op1->gtOp.gtOp1->gtOper != GT_INDEX))
8111         {
8112             // Change '*(&X)' to 'X' and see if we can do better
8113             op = op1->gtOp.gtOp1;
8114             goto REDO_RETURN_NODE;
8115         }
8116         op->gtObj.gtClass = NO_CLASS_HANDLE;
8117         op->ChangeOperUnchecked(GT_IND);
8118         op->gtFlags |= GTF_IND_TGTANYWHERE;
8119     }
8120     else if (op->gtOper == GT_CALL)
8121     {
8122         if (op->AsCall()->TreatAsHasRetBufArg(this))
8123         {
8124             // This must be one of those 'special' helpers that don't
8125             // really have a return buffer, but instead use it as a way
8126             // to keep the trees cleaner with fewer address-taken temps.
8127             //
8128             // So now we have to materialize the return buffer as
8129             // an address-taken temp. Then we can return the temp.
8130             //
8131             // NOTE: this code assumes that since the call directly
8132             // feeds the return, then the call must be returning the
8133             // same structure/class/type.
8134             //
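            // As a sketch, the resulting shape is roughly:
            //     tmp = call(...);        // impAssignTempGen below stores the call result into the temp
            //     ... GT_LCL_FLD(tmp)     // the temp is then re-read as info.compRetNativeType and returned
            //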
8135             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer"));
8136
8137             // No need to spill anything as we're about to return.
8138             impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE);
8139
8140             // Don't create both a GT_ADDR & GT_OBJ just to undo all of that; instead,
8141             // jump directly to a GT_LCL_FLD.
8142             op = gtNewLclvNode(tmpNum, info.compRetNativeType);
8143             op->ChangeOper(GT_LCL_FLD);
8144         }
8145         else
8146         {
8147             assert(info.compRetNativeType == op->gtCall.gtReturnType);
8148
8149             // Don't change the gtType of the node just yet, it will get changed later.
8150             return op;
8151         }
8152     }
8153     else if (op->gtOper == GT_COMMA)
8154     {
8155         op->gtOp.gtOp2 = impFixupStructReturnType(op->gtOp.gtOp2, retClsHnd);
8156     }
8157
8158     op->gtType = info.compRetNativeType;
8159
8160     return op;
8161 }
8162
8163 /*****************************************************************************
8164    CEE_LEAVE may be jumping out of a protected block, viz., a catch or a
8165    finally-protected try. We find the finally blocks protecting the current
8166    offset (in order) by walking over the complete exception table and
8167    finding enclosing clauses. This assumes that the table is sorted.
8168    This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS.
8169
8170    If we are leaving a catch handler, we need to attach the
8171    CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks.
8172
8173    After this function, the BBJ_LEAVE block has been converted to a different type.
8174  */
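/*
   For example (an illustrative sketch): leaving two nested finally-protected 'try' regions

       try {
           try {
               leave L;             // CEE_LEAVE past both finally clauses
           } finally { F1(); }
       } finally { F2(); }
       L: ...

   is imported as the chain BBJ_CALLFINALLY (calling F1) -> BBJ_CALLFINALLY (calling F2) -> BBJ_ALWAYS
   (jumping to L), where the original BBJ_LEAVE block becomes the first BBJ_CALLFINALLY in the chain.
 */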
8175
8176 #if !FEATURE_EH_FUNCLETS
8177
8178 void Compiler::impImportLeave(BasicBlock* block)
8179 {
8180 #ifdef DEBUG
8181     if (verbose)
8182     {
8183         printf("\nBefore import CEE_LEAVE:\n");
8184         fgDispBasicBlocks();
8185         fgDispHandlerTab();
8186     }
8187 #endif // DEBUG
8188
8189     bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8190     unsigned    blkAddr         = block->bbCodeOffs;
8191     BasicBlock* leaveTarget     = block->bbJumpDest;
8192     unsigned    jmpAddr         = leaveTarget->bbCodeOffs;
8193
8194     // LEAVE clears the stack: spill any side effects and reset the evaluation stack depth to 0
8195
8196     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8197     verCurrentState.esStackDepth = 0;
8198
8199     assert(block->bbJumpKind == BBJ_LEAVE);
8200     assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary
8201
8202     BasicBlock* step         = DUMMY_INIT(NULL);
8203     unsigned    encFinallies = 0; // Number of enclosing finallies.
8204     GenTreePtr  endCatches   = NULL;
8205     GenTreePtr  endLFin      = NULL; // The statement tree to indicate the end of locally-invoked finally.
8206
8207     unsigned  XTnum;
8208     EHblkDsc* HBtab;
8209
8210     for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8211     {
8212         // Grab the handler offsets
8213
8214         IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8215         IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8216         IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8217         IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8218
8219         /* Is this a catch-handler we are CEE_LEAVEing out of?
8220          * If so, we need to call CORINFO_HELP_ENDCATCH.
8221          */
8222
8223         if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8224         {
8225             // Can't CEE_LEAVE out of a finally/fault handler
8226             if (HBtab->HasFinallyOrFaultHandler())
8227                 BADCODE("leave out of fault/finally block");
8228
8229             // Create the call to CORINFO_HELP_ENDCATCH
8230             GenTreePtr endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID);
8231
8232             // Make a list of all the currently pending endCatches
8233             if (endCatches)
8234                 endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch);
8235             else
8236                 endCatches = endCatch;
8237
8238 #ifdef DEBUG
8239             if (verbose)
8240             {
8241                 printf("impImportLeave - BB%02u jumping out of catch handler EH#%u, adding call to "
8242                        "CORINFO_HELP_ENDCATCH\n",
8243                        block->bbNum, XTnum);
8244             }
8245 #endif
8246         }
8247         else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8248                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8249         {
8250             /* This is a finally-protected try we are jumping out of */
8251
8252             /* If there are any pending endCatches, and we have already
8253                jumped out of a finally-protected try, then the endCatches
8254                have to be put in a block in an outer try for async
8255                exceptions to work correctly.
8256                Else, just append to the original block */
8257
8258             BasicBlock* callBlock;
8259
8260             assert(!encFinallies == !endLFin); // if we have finallies, we better have an endLFin tree, and vice-versa
8261
8262             if (encFinallies == 0)
8263             {
8264                 assert(step == DUMMY_INIT(NULL));
8265                 callBlock             = block;
8266                 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8267
8268                 if (endCatches)
8269                     impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8270
8271 #ifdef DEBUG
8272                 if (verbose)
8273                 {
8274                     printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY "
8275                            "block BB%02u [%08p]\n",
8276                            callBlock->bbNum, dspPtr(callBlock));
8277                 }
8278 #endif
8279             }
8280             else
8281             {
8282                 assert(step != DUMMY_INIT(NULL));
8283
8284                 /* Calling the finally block */
8285                 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step);
8286                 assert(step->bbJumpKind == BBJ_ALWAYS);
8287                 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8288                                               // finally in the chain)
8289                 step->bbJumpDest->bbRefs++;
8290
8291                 /* The new block will inherit this block's weight */
8292                 callBlock->setBBWeight(block->bbWeight);
8293                 callBlock->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8294
8295 #ifdef DEBUG
8296                 if (verbose)
8297                 {
8298                     printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block BB%02u "
8299                            "[%08p]\n",
8300                            callBlock->bbNum, dspPtr(callBlock));
8301                 }
8302 #endif
8303
8304                 GenTreePtr lastStmt;
8305
8306                 if (endCatches)
8307                 {
8308                     lastStmt         = gtNewStmt(endCatches);
8309                     endLFin->gtNext  = lastStmt;
8310                     lastStmt->gtPrev = endLFin;
8311                 }
8312                 else
8313                 {
8314                     lastStmt = endLFin;
8315                 }
8316
8317                 // note that this sets BBF_IMPORTED on the block
8318                 impEndTreeList(callBlock, endLFin, lastStmt);
8319             }
8320
8321             step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8322             /* The new block will inherit this block's weight */
8323             step->setBBWeight(block->bbWeight);
8324             step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8325
8326 #ifdef DEBUG
8327             if (verbose)
8328             {
8329                 printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block "
8330                        "BB%02u [%08p]\n",
8331                        step->bbNum, dspPtr(step));
8332             }
8333 #endif
8334
8335             unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel;
8336             assert(finallyNesting <= compHndBBtabCount);
8337
8338             callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8339             endLFin               = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting);
8340             endLFin               = gtNewStmt(endLFin);
8341             endCatches            = NULL;
8342
8343             encFinallies++;
8344
8345             invalidatePreds = true;
8346         }
8347     }
8348
8349     /* Append any remaining endCatches, if any */
8350
8351     assert(!encFinallies == !endLFin);
8352
8353     if (encFinallies == 0)
8354     {
8355         assert(step == DUMMY_INIT(NULL));
8356         block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8357
8358         if (endCatches)
8359             impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8360
8361 #ifdef DEBUG
8362         if (verbose)
8363         {
8364             printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS "
8365                    "block BB%02u [%08p]\n",
8366                    block->bbNum, dspPtr(block));
8367         }
8368 #endif
8369     }
8370     else
8371     {
8372         // If leaveTarget is the start of another try block, we want to make sure that
8373         // we do not insert finalStep into that try block. Hence, we find the enclosing
8374         // try block.
8375         unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget);
8376
8377         // Insert a new BB either in the try region indicated by tryIndex or
8378         // the handler region indicated by leaveTarget->bbHndIndex,
8379         // depending on which is the inner region.
8380         BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step);
8381         finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS;
8382         step->bbJumpDest = finalStep;
8383
8384         /* The new block will inherit this block's weight */
8385         finalStep->setBBWeight(block->bbWeight);
8386         finalStep->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8387
8388 #ifdef DEBUG
8389         if (verbose)
8390         {
8391             printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block BB%02u [%08p]\n",
8392                    encFinallies, finalStep->bbNum, dspPtr(finalStep));
8393         }
8394 #endif
8395
8396         GenTreePtr lastStmt;
8397
8398         if (endCatches)
8399         {
8400             lastStmt         = gtNewStmt(endCatches);
8401             endLFin->gtNext  = lastStmt;
8402             lastStmt->gtPrev = endLFin;
8403         }
8404         else
8405         {
8406             lastStmt = endLFin;
8407         }
8408
8409         impEndTreeList(finalStep, endLFin, lastStmt);
8410
8411         finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
8412
8413         // Queue up the jump target for importing
8414
8415         impImportBlockPending(leaveTarget);
8416
8417         invalidatePreds = true;
8418     }
8419
8420     if (invalidatePreds && fgComputePredsDone)
8421     {
8422         JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
8423         fgRemovePreds();
8424     }
8425
8426 #ifdef DEBUG
8427     fgVerifyHandlerTab();
8428
8429     if (verbose)
8430     {
8431         printf("\nAfter import CEE_LEAVE:\n");
8432         fgDispBasicBlocks();
8433         fgDispHandlerTab();
8434     }
8435 #endif // DEBUG
8436 }
8437
8438 #else // FEATURE_EH_FUNCLETS
8439
8440 void Compiler::impImportLeave(BasicBlock* block)
8441 {
8442 #ifdef DEBUG
8443     if (verbose)
8444     {
8445         printf("\nBefore import CEE_LEAVE in BB%02u (targeting BB%02u):\n", block->bbNum, block->bbJumpDest->bbNum);
8446         fgDispBasicBlocks();
8447         fgDispHandlerTab();
8448     }
8449 #endif // DEBUG
8450
8451     bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8452     unsigned    blkAddr         = block->bbCodeOffs;
8453     BasicBlock* leaveTarget     = block->bbJumpDest;
8454     unsigned    jmpAddr         = leaveTarget->bbCodeOffs;
8455
8456     // LEAVE clears the stack: spill any side effects and reset the evaluation stack depth to 0
8457
8458     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8459     verCurrentState.esStackDepth = 0;
8460
8461     assert(block->bbJumpKind == BBJ_LEAVE);
8462     assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary
8463
8464     BasicBlock* step = nullptr;
8465
8466     enum StepType
8467     {
8468         // No step type; step == NULL.
8469         ST_None,
8470
8471         // Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair?
8472         // That is, is step->bbJumpDest where a finally will return to?
8473         ST_FinallyReturn,
8474
8475         // The step block is a catch return.
8476         ST_Catch,
8477
8478         // The step block is in a "try", created as the target for a finally return or the target for a catch return.
8479         ST_Try
8480     };
8481     StepType stepType = ST_None;
8482
8483     unsigned  XTnum;
8484     EHblkDsc* HBtab;
8485
8486     for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8487     {
8488         // Grab the handler offsets
8489
8490         IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8491         IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8492         IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8493         IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8494
8495         /* Is this a catch-handler we are CEE_LEAVEing out of?
8496          */
8497
8498         if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8499         {
8500             // Can't CEE_LEAVE out of a finally/fault handler
8501             if (HBtab->HasFinallyOrFaultHandler())
8502             {
8503                 BADCODE("leave out of fault/finally block");
8504             }
8505
8506             /* We are jumping out of a catch */
8507
8508             if (step == nullptr)
8509             {
8510                 step             = block;
8511                 step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET
8512                 stepType         = ST_Catch;
8513
8514 #ifdef DEBUG
8515                 if (verbose)
8516                 {
8517                     printf("impImportLeave - jumping out of a catch (EH#%u), convert block BB%02u to BBJ_EHCATCHRET "
8518                            "block\n",
8519                            XTnum, step->bbNum);
8520                 }
8521 #endif
8522             }
8523             else
8524             {
8525                 BasicBlock* exitBlock;
8526
8527                 /* Create a new catch exit block in the catch region for the existing step block to jump to in this
8528                  * scope */
8529                 exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step);
8530
8531                 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
8532                 step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch
8533                                               // exit) returns to this block
8534                 step->bbJumpDest->bbRefs++;
8535
8536 #if defined(_TARGET_ARM_)
8537                 if (stepType == ST_FinallyReturn)
8538                 {
8539                     assert(step->bbJumpKind == BBJ_ALWAYS);
8540                     // Mark the target of a finally return
8541                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8542                 }
8543 #endif // defined(_TARGET_ARM_)
8544
8545                 /* The new block will inherit this block's weight */
8546                 exitBlock->setBBWeight(block->bbWeight);
8547                 exitBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8548
8549                 /* This exit block is the new step */
8550                 step     = exitBlock;
8551                 stepType = ST_Catch;
8552
8553                 invalidatePreds = true;
8554
8555 #ifdef DEBUG
8556                 if (verbose)
8557                 {
8558                     printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block BB%02u\n", XTnum,
8559                            exitBlock->bbNum);
8560                 }
8561 #endif
8562             }
8563         }
8564         else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8565                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8566         {
8567             /* We are jumping out of a finally-protected try */
8568
8569             BasicBlock* callBlock;
8570
8571             if (step == nullptr)
8572             {
8573 #if FEATURE_EH_CALLFINALLY_THUNKS
8574
8575                 // Put the call to the finally in the enclosing region.
8576                 unsigned callFinallyTryIndex =
8577                     (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
8578                 unsigned callFinallyHndIndex =
8579                     (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
8580                 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block);
8581
8582                 // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because
8583                 // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE,
8584                 // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the
8585                 // next block, and flow optimizations will remove it.
8586                 block->bbJumpKind = BBJ_ALWAYS;
8587                 block->bbJumpDest = callBlock;
8588                 block->bbJumpDest->bbRefs++;
8589
8590                 /* The new block will inherit this block's weight */
8591                 callBlock->setBBWeight(block->bbWeight);
8592                 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8593
8594 #ifdef DEBUG
8595                 if (verbose)
8596                 {
8597                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
8598                            "BBJ_ALWAYS, add BBJ_CALLFINALLY block BB%02u\n",
8599                            XTnum, block->bbNum, callBlock->bbNum);
8600                 }
8601 #endif
8602
8603 #else // !FEATURE_EH_CALLFINALLY_THUNKS
8604
8605                 callBlock             = block;
8606                 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8607
8608 #ifdef DEBUG
8609                 if (verbose)
8610                 {
8611                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
8612                            "BBJ_CALLFINALLY block\n",
8613                            XTnum, callBlock->bbNum);
8614                 }
8615 #endif
8616
8617 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
8618             }
8619             else
8620             {
8621                 // Calling the finally block. We already have a step block that is either the call-to-finally from a
8622                 // more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by
8623                 // a 'finally'), or the step block is the return from a catch.
8624                 //
8625                 // Due to ThreadAbortException, we can't have the catch return target the call-to-finally block
8626                 // directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will
8627                 // automatically re-raise the exception, using the return address of the catch (that is, the target
8628                 // block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will
8629                 // refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64,
8630                 // we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a
8631                 // finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a
8632                 // BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly
8633                 // within the 'try' region protected by the finally, since we generate code in such a way that execution
8634                 // never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on
8635                 // stack walks.)
8636
8637                 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
8638
8639 #if FEATURE_EH_CALLFINALLY_THUNKS
8640                 if (step->bbJumpKind == BBJ_EHCATCHRET)
8641                 {
8642                     // Need to create another step block in the 'try' region that will actually branch to the
8643                     // call-to-finally thunk.
8644                     BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
8645                     step->bbJumpDest  = step2;
8646                     step->bbJumpDest->bbRefs++;
8647                     step2->setBBWeight(block->bbWeight);
8648                     step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8649
8650 #ifdef DEBUG
8651                     if (verbose)
8652                     {
8653                         printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is "
8654                                "BBJ_EHCATCHRET (BB%02u), new BBJ_ALWAYS step-step block BB%02u\n",
8655                                XTnum, step->bbNum, step2->bbNum);
8656                     }
8657 #endif
8658
8659                     step = step2;
8660                     assert(stepType == ST_Catch); // Leave it as catch type for now.
8661                 }
8662 #endif // FEATURE_EH_CALLFINALLY_THUNKS
8663
8664 #if FEATURE_EH_CALLFINALLY_THUNKS
8665                 unsigned callFinallyTryIndex =
8666                     (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
8667                 unsigned callFinallyHndIndex =
8668                     (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
8669 #else  // !FEATURE_EH_CALLFINALLY_THUNKS
8670                 unsigned callFinallyTryIndex = XTnum + 1;
8671                 unsigned callFinallyHndIndex = 0; // don't care
8672 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
8673
8674                 callBlock        = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step);
8675                 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8676                                               // finally in the chain)
8677                 step->bbJumpDest->bbRefs++;
8678
8679 #if defined(_TARGET_ARM_)
8680                 if (stepType == ST_FinallyReturn)
8681                 {
8682                     assert(step->bbJumpKind == BBJ_ALWAYS);
8683                     // Mark the target of a finally return
8684                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8685                 }
8686 #endif // defined(_TARGET_ARM_)
8687
8688                 /* The new block will inherit this block's weight */
8689                 callBlock->setBBWeight(block->bbWeight);
8690                 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8691
8692 #ifdef DEBUG
8693                 if (verbose)
8694                 {
8695                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY block "
8696                            "BB%02u\n",
8697                            XTnum, callBlock->bbNum);
8698                 }
8699 #endif
8700             }
8701
8702             step     = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8703             stepType = ST_FinallyReturn;
8704
8705             /* The new block will inherit this block's weight */
8706             step->setBBWeight(block->bbWeight);
8707             step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8708
8709 #ifdef DEBUG
8710             if (verbose)
8711             {
8712                 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) "
8713                        "block BB%02u\n",
8714                        XTnum, step->bbNum);
8715             }
8716 #endif
8717
8718             callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8719
8720             invalidatePreds = true;
8721         }
8722         else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8723                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8724         {
8725             // We are jumping out of a catch-protected try.
8726             //
8727             // If we are returning from a call to a finally, then we must have a step block within a try
8728             // that is protected by a catch. This is so that, when unwinding from that finally (e.g., if code within the
8729             // finally raises an exception), the VM will find this step block, notice that it is in a protected region,
8730             // and invoke the appropriate catch.
8731             //
8732             // We also need to handle a special case with the handling of ThreadAbortException. If a try/catch
8733             // catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception),
8734             // and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM,
8735             // the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target
8736             // address of the catch return as the new exception address. That is, the re-raised exception appears to
8737             // occur at the catch return address. If this exception return address skips an enclosing try/catch that
8738             // catches ThreadAbortException, then the enclosing try/catch will not catch the exception, as it should.
8739             // For example:
8740             //
8741             // try {
8742             //    try {
8743             //       // something here raises ThreadAbortException
8744             //       LEAVE LABEL_1; // no need to stop at LABEL_2
8745             //    } catch (Exception) {
8746             //       // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so
8747             //       // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode.
8748             //       // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised
8749             //       // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only
8750             //       // need to do this transformation if the current EH block is a try/catch that catches
8751             //       // ThreadAbortException (or one of its parents), however we might not be able to find that
8752             //       // information, so currently we do it for all catch types.
8753             //       LEAVE LABEL_1; // Convert this to LEAVE LABEL2;
8754             //    }
8755             //    LABEL_2: LEAVE LABEL_1; // inserted by this step creation code
8756             // } catch (ThreadAbortException) {
8757             // }
8758             // LABEL_1:
8759             //
8760             // Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C#
8761             // compiler.
8762
8763             if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch))
8764             {
8765                 BasicBlock* catchStep;
8766
8767                 assert(step);
8768
8769                 if (stepType == ST_FinallyReturn)
8770                 {
8771                     assert(step->bbJumpKind == BBJ_ALWAYS);
8772                 }
8773                 else
8774                 {
8775                     assert(stepType == ST_Catch);
8776                     assert(step->bbJumpKind == BBJ_EHCATCHRET);
8777                 }
8778
8779                 /* Create a new exit block in the try region for the existing step block to jump to in this scope */
8780                 catchStep        = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
8781                 step->bbJumpDest = catchStep;
8782                 step->bbJumpDest->bbRefs++;
8783
8784 #if defined(_TARGET_ARM_)
8785                 if (stepType == ST_FinallyReturn)
8786                 {
8787                     // Mark the target of a finally return
8788                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8789                 }
8790 #endif // defined(_TARGET_ARM_)
8791
8792                 /* The new block will inherit this block's weight */
8793                 catchStep->setBBWeight(block->bbWeight);
8794                 catchStep->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8795
8796 #ifdef DEBUG
8797                 if (verbose)
8798                 {
8799                     if (stepType == ST_FinallyReturn)
8800                     {
8801                         printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new "
8802                                "BBJ_ALWAYS block BB%02u\n",
8803                                XTnum, catchStep->bbNum);
8804                     }
8805                     else
8806                     {
8807                         assert(stepType == ST_Catch);
8808                         printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new "
8809                                "BBJ_ALWAYS block BB%02u\n",
8810                                XTnum, catchStep->bbNum);
8811                     }
8812                 }
8813 #endif // DEBUG
8814
8815                 /* This block is the new step */
8816                 step     = catchStep;
8817                 stepType = ST_Try;
8818
8819                 invalidatePreds = true;
8820             }
8821         }
8822     }
8823
8824     if (step == nullptr)
8825     {
8826         block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8827
8828 #ifdef DEBUG
8829         if (verbose)
8830         {
8831             printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE "
8832                    "block BB%02u to BBJ_ALWAYS\n",
8833                    block->bbNum);
8834         }
8835 #endif
8836     }
8837     else
8838     {
8839         step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
8840
8841 #if defined(_TARGET_ARM_)
8842         if (stepType == ST_FinallyReturn)
8843         {
8844             assert(step->bbJumpKind == BBJ_ALWAYS);
8845             // Mark the target of a finally return
8846             step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8847         }
8848 #endif // defined(_TARGET_ARM_)
8849
8850 #ifdef DEBUG
8851         if (verbose)
8852         {
8853             printf("impImportLeave - final destination of step blocks set to BB%02u\n", leaveTarget->bbNum);
8854         }
8855 #endif
8856
8857         // Queue up the jump target for importing
8858
8859         impImportBlockPending(leaveTarget);
8860     }
8861
8862     if (invalidatePreds && fgComputePredsDone)
8863     {
8864         JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
8865         fgRemovePreds();
8866     }
8867
8868 #ifdef DEBUG
8869     fgVerifyHandlerTab();
8870
8871     if (verbose)
8872     {
8873         printf("\nAfter import CEE_LEAVE:\n");
8874         fgDispBasicBlocks();
8875         fgDispHandlerTab();
8876     }
8877 #endif // DEBUG
8878 }
8879
8880 #endif // FEATURE_EH_FUNCLETS
8881
8882 /*****************************************************************************/
8883 // This is called when reimporting a leave block. It resets the JumpKind,
8884 // JumpDest, and bbNext to the original values
8885
8886 void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr)
8887 {
8888 #if FEATURE_EH_FUNCLETS
8889     // With EH Funclets, while importing the leave opcode we create another block ending with BBJ_ALWAYS (call it B1),
8890     // and the block containing the leave (say B0) is marked as BBJ_CALLFINALLY.   If for some reason we reimport B0,
8891     // it is reset (in this routine) by marking it as ending with BBJ_LEAVE, and further down, when B0 is reimported, we
8892     // create another BBJ_ALWAYS (call it B2). In this process B1 gets orphaned, and any blocks for which B1 is the
8893     // only predecessor are also considered orphans and are candidates for deletion.
8894     //
8895     //  try  {
8896     //     ....
8897     //     try
8898     //     {
8899     //         ....
8900     //         leave OUTSIDE;  // B0 is the block containing this leave, following this would be B1
8901     //     } finally { }
8902     //  } finally { }
8903     //  OUTSIDE:
8904     //
8905     // In the above nested try-finally example, we create a step block (call it Bstep) which branches to a block
8906     // where a finally would branch to (and that block is marked as a finally target).  Block B1 branches to the step
8907     // block. Because of the re-import of B0, Bstep is also orphaned. Since Bstep is a finally target, it cannot be
8908     // removed.  To work around this we duplicate B0 (call it B0Dup) before resetting it. B0Dup is marked as
8909     // BBJ_CALLFINALLY and only serves to pair up with B1 (BBJ_ALWAYS), which got orphaned. During orphan block
8910     // deletion, B0Dup and B1 are then treated as a pair and handled correctly.
8911     if (block->bbJumpKind == BBJ_CALLFINALLY)
8912     {
8913         BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind);
8914         dupBlock->bbFlags    = block->bbFlags;
8915         dupBlock->bbJumpDest = block->bbJumpDest;
8916         dupBlock->copyEHRegion(block);
8917         dupBlock->bbCatchTyp = block->bbCatchTyp;
8918
8919         // Mark this block as
8920         //  a) not referenced by any other block to make sure that it gets deleted
8921         //  b) weight zero
8922         //  c) prevent from being imported
8923         //  d) as internal
8924         //  e) as rarely run
8925         dupBlock->bbRefs   = 0;
8926         dupBlock->bbWeight = 0;
8927         dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY;
8928
8929         // Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS
8930         // will be next to each other.
8931         fgInsertBBafter(block, dupBlock);
8932
8933 #ifdef DEBUG
8934         if (verbose)
8935         {
8936             printf("New Basic Block BB%02u duplicate of BB%02u created.\n", dupBlock->bbNum, block->bbNum);
8937         }
8938 #endif
8939     }
8940 #endif // FEATURE_EH_FUNCLETS
8941
8942     block->bbJumpKind = BBJ_LEAVE;
8943     fgInitBBLookup();
8944     block->bbJumpDest = fgLookupBB(jmpAddr);
8945
8946     // We will leave the BBJ_ALWAYS block we introduced. When it's reimported
8947     // the BBJ_ALWAYS block will be unreachable, and will be removed afterwards. The
8948     // reason we don't want to remove the block at this point is that if we call
8949     // fgInitBBLookup() again we will do it incorrectly, as the BBJ_ALWAYS block won't be
8950     // added and the linked list length will be different from fgBBcount.
8951 }
8952
8953 /*****************************************************************************/
8954 // Get the first non-prefix opcode. Used for verification of valid combinations
8955 // of prefixes and actual opcodes.
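// For example (an illustrative case): given the IL byte sequence for "volatile. unaligned. 1 ldind.i4",
// this skips over the prefixes (and their operands) and returns CEE_LDIND_I4.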
8956
8957 static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp)
8958 {
8959     while (codeAddr < codeEndp)
8960     {
8961         OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
8962         codeAddr += sizeof(__int8);
8963
8964         if (opcode == CEE_PREFIX1)
8965         {
8966             if (codeAddr >= codeEndp)
8967             {
8968                 break;
8969             }
8970             opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
8971             codeAddr += sizeof(__int8);
8972         }
8973
8974         switch (opcode)
8975         {
8976             case CEE_UNALIGNED:
8977             case CEE_VOLATILE:
8978             case CEE_TAILCALL:
8979             case CEE_CONSTRAINED:
8980             case CEE_READONLY:
8981                 break;
8982             default:
8983                 return opcode;
8984         }
8985
8986         codeAddr += opcodeSizes[opcode];
8987     }
8988
8989     return CEE_ILLEGAL;
8990 }
8991
8992 /*****************************************************************************/
8993 // Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes
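// For example (illustrative): "volatile. ldsfld" is accepted (ldsfld/stsfld are valid only with the
// volatile. prefix), while something like "unaligned. 1 add" is rejected with BADCODE below.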
8994
8995 static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix)
8996 {
8997     OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
8998
8999     if (!(
9000             // The opcodes of all ldind and stind instructions happen to be contiguous, except stind.i.
9001             ((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) ||
9002             (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) ||
9003             (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) ||
9004             // the volatile. prefix is also allowed with ldsfld and stsfld
9005             (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD)))))
9006     {
9007         BADCODE("Invalid opcode for unaligned. or volatile. prefix");
9008     }
9009 }
9010
9011 /*****************************************************************************/
9012
9013 #ifdef DEBUG
9014
9015 #undef RETURN // undef contracts RETURN macro
9016
9017 enum controlFlow_t
9018 {
9019     NEXT,
9020     CALL,
9021     RETURN,
9022     THROW,
9023     BRANCH,
9024     COND_BRANCH,
9025     BREAK,
9026     PHI,
9027     META,
9028 };
9029
9030 const static controlFlow_t controlFlow[] = {
9031 #define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow,
9032 #include "opcode.def"
9033 #undef OPDEF
9034 };
9035
9036 #endif // DEBUG
9037
9038 /*****************************************************************************
9039  *  Determine the result type of an arithmetic operation
9040  *  On 64-bit targets, inserts upcasts when native int is mixed with int32
9041  */
9042 var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTreePtr* pOp1, GenTreePtr* pOp2)
9043 {
9044     var_types  type = TYP_UNDEF;
9045     GenTreePtr op1 = *pOp1, op2 = *pOp2;
9046
9047     // Arithmetic operations are generally only allowed with
9048     // primitive types, but certain operations are allowed
9049     // with byrefs
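    //
    // As a restatement of the cases handled below (for reference):
    //     byref - byref          => TYP_I_IMPL (native int)
    //     [native] int - byref   => TYP_I_IMPL
    //     byref - [native] int   => TYP_BYREF
    //     byref + [native] int   => TYP_BYREF (and likewise [native] int + byref)
    //     everything else        => the usual numeric widening rules below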
9050
9051     if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9052     {
9053         if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9054         {
9055             // byref1-byref2 => gives a native int
9056             type = TYP_I_IMPL;
9057         }
9058         else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9059         {
9060             // [native] int - byref => gives a native int
9061
9062             //
9063             // The reason is that it is possible, in managed C++,
9064             // to have a tree like this:
9065             //
9066             //              -
9067             //             / \
9068             //            /   \
9069             //           /     \
9070             //          /       \
9071             // const(h) int     addr byref
9072             //
9073             // <BUGNUM> VSW 318822 </BUGNUM>
9074             //
9075             // So here we decide to make the resulting type to be a native int.
9076             CLANG_FORMAT_COMMENT_ANCHOR;
9077
9078 #ifdef _TARGET_64BIT_
9079             if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9080             {
9081                 // insert an explicit upcast
9082                 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9083             }
9084 #endif // _TARGET_64BIT_
9085
9086             type = TYP_I_IMPL;
9087         }
9088         else
9089         {
9090             // byref - [native] int => gives a byref
9091             assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet()));
9092
9093 #ifdef _TARGET_64BIT_
9094             if ((genActualType(op2->TypeGet()) != TYP_I_IMPL))
9095             {
9096                 // insert an explicit upcast
9097                 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9098             }
9099 #endif // _TARGET_64BIT_
9100
9101             type = TYP_BYREF;
9102         }
9103     }
9104     else if ((oper == GT_ADD) &&
9105              (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9106     {
9107         // byref + [native] int => gives a byref
9108         // (or)
9109         // [native] int + byref => gives a byref
9110
9111         // only one can be a byref : byref op byref not allowed
9112         assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF);
9113         assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet()));
9114
9115 #ifdef _TARGET_64BIT_
9116         if (genActualType(op2->TypeGet()) == TYP_BYREF)
9117         {
9118             if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9119             {
9120                 // insert an explicit upcast
9121                 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9122             }
9123         }
9124         else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9125         {
9126             // insert an explicit upcast
9127             op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9128         }
9129 #endif // _TARGET_64BIT_
9130
9131         type = TYP_BYREF;
9132     }
9133 #ifdef _TARGET_64BIT_
9134     else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL)
9135     {
9136         assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9137
9138         // int + long => gives long
9139         // long + int => gives long
9140         // we get this because in the IL the long isn't Int64, it's just IntPtr
9141
9142         if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9143         {
9144             // insert an explicit upcast
9145             op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9146         }
9147         else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9148         {
9149             // insert an explicit upcast
9150             op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9151         }
9152
9153         type = TYP_I_IMPL;
9154     }
9155 #else  // 32-bit TARGET
9156     else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG)
9157     {
9158         assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9159
9160         // int + long => gives long
9161         // long + int => gives long
9162
9163         type = TYP_LONG;
9164     }
9165 #endif // _TARGET_64BIT_
9166     else
9167     {
9168         // int + int => gives an int
9169         assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF);
9170
9171         assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
9172                varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
9173
9174         type = genActualType(op1->gtType);
9175
9176 #if FEATURE_X87_DOUBLES
9177
9178         // For x87, since we only have 1 size of registers, prefer double
9179         // For everybody else, be more precise
9180         if (type == TYP_FLOAT)
9181             type = TYP_DOUBLE;
9182
9183 #else // !FEATURE_X87_DOUBLES
9184
9185         // If both operands are TYP_FLOAT, then leave it as TYP_FLOAT.
9186         // Otherwise, turn floats into doubles
9187         if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT))
9188         {
9189             assert(genActualType(op2->gtType) == TYP_DOUBLE);
9190             type = TYP_DOUBLE;
9191         }
9192
9193 #endif // FEATURE_X87_DOUBLES
9194     }
9195
9196 #if FEATURE_X87_DOUBLES
9197     assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_LONG || type == TYP_INT);
9198 #else  // FEATURE_X87_DOUBLES
9199     assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT);
9200 #endif // FEATURE_X87_DOUBLES
9201
9202     return type;
9203 }
9204
9205 /*****************************************************************************
9206  * Casting Helper Function to service both CEE_CASTCLASS and CEE_ISINST
9207  *
9208  * typeRef contains the token, op1 contains the value being cast,
9209  * and op2 contains the code that creates the type handle corresponding to typeRef
9210  * isCastClass = true means CEE_CASTCLASS, false means CEE_ISINST
9211  */
9212 GenTreePtr Compiler::impCastClassOrIsInstToTree(GenTreePtr              op1,
9213                                                 GenTreePtr              op2,
9214                                                 CORINFO_RESOLVED_TOKEN* pResolvedToken,
9215                                                 bool                    isCastClass)
9216 {
9217     bool expandInline;
9218
9219     assert(op1->TypeGet() == TYP_REF);
9220
9221     CorInfoHelpFunc helper = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass);
9222
9223     if (isCastClass)
9224     {
9225         // We only want to expand inline the normal CHKCASTCLASS helper;
9226         expandInline = (helper == CORINFO_HELP_CHKCASTCLASS);
9227     }
9228     else
9229     {
9230         if (helper == CORINFO_HELP_ISINSTANCEOFCLASS)
9231         {
9232             // Get the class handle and class attributes for the type we are casting to
9233             //
9234             DWORD flags = info.compCompHnd->getClassAttribs(pResolvedToken->hClass);
9235
9236             //
9237             // If the class handle is marked as final we can also expand the IsInst check inline
9238             //
9239             expandInline = ((flags & CORINFO_FLG_FINAL) != 0);
9240
9241             //
9242             // But don't expand inline these two cases
9243             //
9244             if (flags & CORINFO_FLG_MARSHAL_BYREF)
9245             {
9246                 expandInline = false;
9247             }
9248             else if (flags & CORINFO_FLG_CONTEXTFUL)
9249             {
9250                 expandInline = false;
9251             }
9252         }
9253         else
9254         {
9255             //
9256             // We can't expand inline any other helpers
9257             //
9258             expandInline = false;
9259         }
9260     }
9261
9262     if (expandInline)
9263     {
9264         if (compCurBB->isRunRarely())
9265         {
9266             expandInline = false; // not worth the code expansion in a rarely run block
9267         }
9268
9269         if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals())
9270         {
9271             expandInline = false; // not worth creating an untracked local variable
9272         }
9273     }
9274
9275     if (!expandInline)
9276     {
9277         // If we CSE this class handle we prevent assertionProp from making SubType assertions
9278         // so instead we force the CSE logic to not consider CSE-ing this class handle.
9279         //
9280         op2->gtFlags |= GTF_DONT_CSE;
9281
9282         return gtNewHelperCallNode(helper, TYP_REF, 0, gtNewArgList(op2, op1));
9283     }
9284
9285     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2"));
9286
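    //
    // As a sketch, the inline expansion built below computes (pseudo-code, names illustrative):
    //
    //     result = (op1 == null) ? op1
    //            : (op1->methodTable != op2) ? (isCastClass ? CHKCASTCLASS_SPECIAL(op2, op1) : null)
    //                                        : op1;
    //
    // i.e. a null reference passes through unchanged, a method table match returns the object itself,
    // and only a mismatch falls back to the helper (castclass) or produces null (isinst).
    //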
9287     GenTreePtr temp;
9288     GenTreePtr condMT;
9289     //
9290     // expand the methodtable match:
9291     //
9292     //  condMT ==>   GT_NE
9293     //               /    \
9294     //           GT_IND   op2 (typically CNS_INT)
9295     //              |
9296     //           op1Copy
9297     //
9298
9299     // This can replace op1 with a GT_COMMA that evaluates op1 into a local
9300     //
9301     op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1"));
9302     //
9303     // op1 is now known to be a non-complex tree
9304     // thus we can use gtClone(op1) from now on
9305     //
9306
9307     GenTreePtr op2Var = op2;
9308     if (isCastClass)
9309     {
9310         op2Var                                                  = fgInsertCommaFormTemp(&op2);
9311         lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true;
9312     }
9313     temp = gtNewOperNode(GT_IND, TYP_I_IMPL, temp);
9314     temp->gtFlags |= GTF_EXCEPT;
9315     condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2);
9316
9317     GenTreePtr condNull;
9318     //
9319     // expand the null check:
9320     //
9321     //  condNull ==>   GT_EQ
9322     //                 /    \
9323     //             op1Copy CNS_INT
9324     //                      null
9325     //
9326     condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF));
9327
9328     //
9329     // expand the true and false trees for the condMT
9330     //
9331     GenTreePtr condFalse = gtClone(op1);
9332     GenTreePtr condTrue;
9333     if (isCastClass)
9334     {
9335         //
9336         // use the special helper that skips the cases checked by our inlined cast
9337         //
9338         helper = CORINFO_HELP_CHKCASTCLASS_SPECIAL;
9339
9340         condTrue = gtNewHelperCallNode(helper, TYP_REF, 0, gtNewArgList(op2Var, gtClone(op1)));
9341     }
9342     else
9343     {
9344         condTrue = gtNewIconNode(0, TYP_REF);
9345     }
9346
9347 #define USE_QMARK_TREES
9348
9349 #ifdef USE_QMARK_TREES
9350     GenTreePtr qmarkMT;
9351     //
9352     // Generate first QMARK - COLON tree
9353     //
9354     //  qmarkMT ==>   GT_QMARK
9355     //                 /     \
9356     //            condMT   GT_COLON
9357     //                      /     \
9358     //                condFalse  condTrue
9359     //
9360     temp    = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse);
9361     qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp);
9362     condMT->gtFlags |= GTF_RELOP_QMARK;
9363
9364     GenTreePtr qmarkNull;
9365     //
9366     // Generate second QMARK - COLON tree
9367     //
9368     //  qmarkNull ==>  GT_QMARK
9369     //                 /     \
9370     //           condNull  GT_COLON
9371     //                      /     \
9372     //                qmarkMT   op1Copy
9373     //
9374     temp      = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT);
9375     qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp);
9376     qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF;
9377     condNull->gtFlags |= GTF_RELOP_QMARK;
9378
9379     // Make QMark node a top level node by spilling it.
9380     unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2"));
9381     impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE);
9382     return gtNewLclvNode(tmp, TYP_REF);
9383 #endif
9384 }
9385
9386 #ifndef DEBUG
9387 #define assertImp(cond) ((void)0)
9388 #else
9389 #define assertImp(cond)                                                                                                \
9390     do                                                                                                                 \
9391     {                                                                                                                  \
9392         if (!(cond))                                                                                                   \
9393         {                                                                                                              \
9394             const int cchAssertImpBuf = 600;                                                                           \
9395             char*     assertImpBuf    = (char*)alloca(cchAssertImpBuf);                                                \
9396             _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1,                                            \
9397                         "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond,         \
9398                         impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL",                      \
9399                         op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth);                     \
9400             assertAbort(assertImpBuf, __FILE__, __LINE__);                                                             \
9401         }                                                                                                              \
9402     } while (0)
9403 #endif // DEBUG
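// Typical use in the importer below is e.g. assertImp(op1->gtType == TYP_REF): on
// failure the macro formats the failed condition together with the current opcode
// name, IL offset, the types of op1/op2, and the evaluation-stack depth, then calls
// assertAbort().  In non-DEBUG builds it expands to a no-op.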
9404
9405 #ifdef _PREFAST_
9406 #pragma warning(push)
9407 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
9408 #endif
9409 /*****************************************************************************
9410  *  Import the instr for the given basic block
9411  */
9412 void Compiler::impImportBlockCode(BasicBlock* block)
9413 {
9414 #define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind)
9415
9416 #ifdef DEBUG
9417
9418     if (verbose)
9419     {
9420         printf("\nImporting BB%02u (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName);
9421     }
9422 #endif
9423
9424     unsigned  nxtStmtIndex = impInitBlockLineInfo();
9425     IL_OFFSET nxtStmtOffs;
9426
9427     GenTreePtr                   arrayNodeFrom, arrayNodeTo, arrayNodeToIndex;
9428     bool                         expandInline;
9429     CorInfoHelpFunc              helper;
9430     CorInfoIsAccessAllowedResult accessAllowedResult;
9431     CORINFO_HELPER_DESC          calloutHelper;
9432     const BYTE*                  lastLoadToken = nullptr;
9433
9434     // reject cyclic constraints
9435     if (tiVerificationNeeded)
9436     {
9437         Verify(!info.hasCircularClassConstraints, "Method parent has circular class type parameter constraints.");
9438         Verify(!info.hasCircularMethodConstraints, "Method has circular method type parameter constraints.");
9439     }
9440
9441     /* Get the tree list started */
9442
9443     impBeginTreeList();
9444
9445     /* Walk the opcodes that comprise the basic block */
9446
9447     const BYTE* codeAddr = info.compCode + block->bbCodeOffs;
9448     const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd;
9449
9450     IL_OFFSET opcodeOffs    = block->bbCodeOffs;
9451     IL_OFFSET lastSpillOffs = opcodeOffs;
9452
9453     signed jmpDist;
9454
9455     /* remember the start of the delegate creation sequence (used for verification) */
9456     const BYTE* delegateCreateStart = nullptr;
9457
9458     int  prefixFlags = 0;
9459     bool explicitTailCall, constraintCall, readonlyCall;
9460
9461     bool     insertLdloc = false; // set by CEE_DUP and cleared by following store
9462     typeInfo tiRetVal;
9463
9464     unsigned numArgs = info.compArgsCount;
9465
9466     /* Now process all the opcodes in the block */
9467
9468     var_types callTyp    = TYP_COUNT;
9469     OPCODE    prevOpcode = CEE_ILLEGAL;
9470
9471     if (block->bbCatchTyp)
9472     {
9473         if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
9474         {
9475             impCurStmtOffsSet(block->bbCodeOffs);
9476         }
9477
9478         // We will spill the GT_CATCH_ARG and the input of the BB_QMARK block
9479         // to a temp. This is a trade-off for code simplicity
9480         impSpillSpecialSideEff();
9481     }
9482
9483     while (codeAddr < codeEndp)
9484     {
9485         bool                   usingReadyToRunHelper = false;
9486         CORINFO_RESOLVED_TOKEN resolvedToken;
9487         CORINFO_RESOLVED_TOKEN constrainedResolvedToken;
9488         CORINFO_CALL_INFO      callInfo;
9489         CORINFO_FIELD_INFO     fieldInfo;
9490
9491         tiRetVal = typeInfo(); // Default type info
9492
9493         //---------------------------------------------------------------------
9494
9495         /* We need to restrict the max tree depth as many of the Compiler
9496            functions are recursive. We do this by spilling the stack */
9497
9498         if (verCurrentState.esStackDepth)
9499         {
9500             /* Has it been a while since we last saw a non-empty stack (which
9501                guarantees that the tree depth isn't accumulating). */
9502
9503             if ((opcodeOffs - lastSpillOffs) > 200)
9504             {
9505                 impSpillStackEnsure();
9506                 lastSpillOffs = opcodeOffs;
9507             }
9508         }
9509         else
9510         {
9511             lastSpillOffs   = opcodeOffs;
9512             impBoxTempInUse = false; // nothing on the stack, box temp OK to use again
9513         }
9514
9515         /* Compute the current instr offset */
9516
9517         opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
9518
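        // Debug-info bookkeeping for this opcode: when IL offsets are reported (always
        // in DEBUG builds, otherwise only when opts.compDbgInfo is set), keep
        // impCurStmtOffs in step with the next statement-boundary offset, spilling the
        // stack first wherever an accurate IL-to-native mapping is required.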
9519 #ifndef DEBUG
9520         if (opts.compDbgInfo)
9521 #endif
9522         {
9523             if (!compIsForInlining())
9524             {
9525                 nxtStmtOffs =
9526                     (nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET;
9527
9528                 /* Have we reached the next stmt boundary ? */
9529
9530                 if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs)
9531                 {
9532                     assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]);
9533
9534                     if (verCurrentState.esStackDepth != 0 && opts.compDbgCode)
9535                     {
9536                         /* We need to provide accurate IP-mapping at this point.
9537                            So spill anything on the stack so that it will form
9538                            gtStmts with the correct stmt offset noted */
9539
9540                         impSpillStackEnsure(true);
9541                     }
9542
9543                     // Has impCurStmtOffs been reported in any tree?
9544
9545                     if (impCurStmtOffs != BAD_IL_OFFSET && opts.compDbgCode)
9546                     {
9547                         GenTreePtr placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
9548                         impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9549
9550                         assert(impCurStmtOffs == BAD_IL_OFFSET);
9551                     }
9552
9553                     if (impCurStmtOffs == BAD_IL_OFFSET)
9554                     {
9555                         /* Make sure that nxtStmtIndex is in sync with opcodeOffs.
9556                            If opcodeOffs has gone past nxtStmtIndex, catch up */
9557
9558                         while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount &&
9559                                info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs)
9560                         {
9561                             nxtStmtIndex++;
9562                         }
9563
9564                         /* Go to the new stmt */
9565
9566                         impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]);
9567
9568                         /* Update the stmt boundary index */
9569
9570                         nxtStmtIndex++;
9571                         assert(nxtStmtIndex <= info.compStmtOffsetsCount);
9572
9573                         /* Are there any more line# entries after this one? */
9574
9575                         if (nxtStmtIndex < info.compStmtOffsetsCount)
9576                         {
9577                             /* Remember where the next line# starts */
9578
9579                             nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex];
9580                         }
9581                         else
9582                         {
9583                             /* No more line# entries */
9584
9585                             nxtStmtOffs = BAD_IL_OFFSET;
9586                         }
9587                     }
9588                 }
9589                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) &&
9590                          (verCurrentState.esStackDepth == 0))
9591                 {
9592                     /* At stack-empty locations, we have already added the tree to
9593                        the stmt list with the last offset. We just need to update
9594                        impCurStmtOffs
9595                      */
9596
9597                     impCurStmtOffsSet(opcodeOffs);
9598                 }
9599                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) &&
9600                          impOpcodeIsCallSiteBoundary(prevOpcode))
9601                 {
9602                     /* Make sure we have a type cached */
9603                     assert(callTyp != TYP_COUNT);
9604
9605                     if (callTyp == TYP_VOID)
9606                     {
9607                         impCurStmtOffsSet(opcodeOffs);
9608                     }
9609                     else if (opts.compDbgCode)
9610                     {
9611                         impSpillStackEnsure(true);
9612                         impCurStmtOffsSet(opcodeOffs);
9613                     }
9614                 }
9615                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP))
9616                 {
9617                     if (opts.compDbgCode)
9618                     {
9619                         impSpillStackEnsure(true);
9620                     }
9621
9622                     impCurStmtOffsSet(opcodeOffs);
9623                 }
9624
9625                 assert(impCurStmtOffs == BAD_IL_OFFSET || nxtStmtOffs == BAD_IL_OFFSET ||
9626                        jitGetILoffs(impCurStmtOffs) <= nxtStmtOffs);
9627             }
9628         }
9629
9630         CORINFO_CLASS_HANDLE clsHnd       = DUMMY_INIT(NULL);
9631         CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL);
9632         CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL);
9633
9634         var_types       lclTyp, ovflType = TYP_UNKNOWN;
9635         GenTreePtr      op1           = DUMMY_INIT(NULL);
9636         GenTreePtr      op2           = DUMMY_INIT(NULL);
9637         GenTreeArgList* args          = nullptr; // What good do these "DUMMY_INIT"s do?
9638         GenTreePtr      newObjThisPtr = DUMMY_INIT(NULL);
9639         bool            uns           = DUMMY_INIT(false);
9640
9641         /* Get the next opcode and the size of its parameters */
9642
9643         OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
9644         codeAddr += sizeof(__int8);
9645
9646 #ifdef DEBUG
9647         impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
9648         JITDUMP("\n    [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs);
9649 #endif
9650
9651     DECODE_OPCODE:
9652
9653         // Return if any previous code has caused inline to fail.
9654         if (compDonotInline())
9655         {
9656             return;
9657         }
9658
9659         /* Get the size of additional parameters */
9660
9661         signed int sz = opcodeSizes[opcode];
9662
9663 #ifdef DEBUG
9664         clsHnd  = NO_CLASS_HANDLE;
9665         lclTyp  = TYP_COUNT;
9666         callTyp = TYP_COUNT;
9667
9668         impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
9669         impCurOpcName = opcodeNames[opcode];
9670
9671         if (verbose && (opcode != CEE_PREFIX1))
9672         {
9673             printf("%s", impCurOpcName);
9674         }
9675
9676         /* Use assertImp() to display the opcode */
9677
9678         op1 = op2 = nullptr;
9679 #endif
9680
9681         /* See what kind of an opcode we have, then */
9682
9683         unsigned mflags   = 0;
9684         unsigned clsFlags = 0;
9685
9686         switch (opcode)
9687         {
9688             unsigned  lclNum;
9689             var_types type;
9690
9691             GenTreePtr op3;
9692             genTreeOps oper;
9693             unsigned   size;
9694
9695             int val;
9696
9697             CORINFO_SIG_INFO     sig;
9698             unsigned             flags;
9699             IL_OFFSET            jmpAddr;
9700             bool                 ovfl, unordered, callNode;
9701             bool                 ldstruct;
9702             CORINFO_CLASS_HANDLE tokenType;
9703
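            // cval holds the immediate operand of the ldc.* opcodes handled below, in
            // whichever of the four representations the opcode uses.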
9704             union {
9705                 int     intVal;
9706                 float   fltVal;
9707                 __int64 lngVal;
9708                 double  dblVal;
9709             } cval;
9710
9711             case CEE_PREFIX1:
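                // 0xFE prefix byte: the real opcode is encoded in the following byte.
                // Bias it by 256 so two-byte opcodes get their own range in the OPCODE
                // enum, then re-enter the decode loop.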
9712                 opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
9713                 codeAddr += sizeof(__int8);
9714                 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
9715                 goto DECODE_OPCODE;
9716
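            // Shared tails for appending 'op1' as a statement: SPILL_APPEND first spills
            // stack entries the new statement could interfere with (CHECK_SPILL_ALL),
            // APPEND appends without spilling (CHECK_SPILL_NONE), and both then jump to
            // DONE_APPEND.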
9717             SPILL_APPEND:
9718
9719                 // We need to call impSpillLclRefs() for a struct type lclVar.
9720                 // This is done for non-block assignments in the handling of stloc.
9721                 if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtOp.gtOp1) &&
9722                     (op1->gtOp.gtOp1->gtOper == GT_LCL_VAR))
9723                 {
9724                     impSpillLclRefs(op1->gtOp.gtOp1->AsLclVarCommon()->gtLclNum);
9725                 }
9726
9727                 /* Append 'op1' to the list of statements */
9728                 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
9729                 goto DONE_APPEND;
9730
9731             APPEND:
9732
9733                 /* Append 'op1' to the list of statements */
9734
9735                 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9736                 goto DONE_APPEND;
9737
9738             DONE_APPEND:
9739
9740 #ifdef DEBUG
9741                 // Remember at which BC offset the tree was finished
9742                 impNoteLastILoffs();
9743 #endif
9744                 break;
9745
9746             case CEE_LDNULL:
9747                 impPushNullObjRefOnStack();
9748                 break;
9749
9750             case CEE_LDC_I4_M1:
9751             case CEE_LDC_I4_0:
9752             case CEE_LDC_I4_1:
9753             case CEE_LDC_I4_2:
9754             case CEE_LDC_I4_3:
9755             case CEE_LDC_I4_4:
9756             case CEE_LDC_I4_5:
9757             case CEE_LDC_I4_6:
9758             case CEE_LDC_I4_7:
9759             case CEE_LDC_I4_8:
9760                 cval.intVal = (opcode - CEE_LDC_I4_0);
9761                 assert(-1 <= cval.intVal && cval.intVal <= 8);
9762                 goto PUSH_I4CON;
9763
9764             case CEE_LDC_I4_S:
9765                 cval.intVal = getI1LittleEndian(codeAddr);
9766                 goto PUSH_I4CON;
9767             case CEE_LDC_I4:
9768                 cval.intVal = getI4LittleEndian(codeAddr);
9769                 goto PUSH_I4CON;
9770             PUSH_I4CON:
9771                 JITDUMP(" %d", cval.intVal);
9772                 impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT));
9773                 break;
9774
9775             case CEE_LDC_I8:
9776                 cval.lngVal = getI8LittleEndian(codeAddr);
9777                 JITDUMP(" 0x%016llx", cval.lngVal);
9778                 impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG));
9779                 break;
9780
9781             case CEE_LDC_R8:
9782                 cval.dblVal = getR8LittleEndian(codeAddr);
9783                 JITDUMP(" %#.17g", cval.dblVal);
9784                 impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE));
9785                 break;
9786
9787             case CEE_LDC_R4:
9788                 cval.dblVal = getR4LittleEndian(codeAddr);
9789                 JITDUMP(" %#.17g", cval.dblVal);
9790                 {
9791                     GenTreePtr cnsOp = gtNewDconNode(cval.dblVal);
9792 #if !FEATURE_X87_DOUBLES
9793                     // The x87 FP stack doesn't differentiate between float and double, so x87
9794                     // treats R4 as R8; every other target does differentiate, so retype the constant to TYP_FLOAT
9795                     cnsOp->gtType = TYP_FLOAT;
9796 #endif // FEATURE_X87_DOUBLES
9797                     impPushOnStack(cnsOp, typeInfo(TI_DOUBLE));
9798                 }
9799                 break;
9800
9801             case CEE_LDSTR:
9802
9803                 if (compIsForInlining())
9804                 {
9805                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_NO_CALLEE_LDSTR)
9806                     {
9807                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_LDSTR_RESTRICTION);
9808                         return;
9809                     }
9810                 }
9811
9812                 val = getU4LittleEndian(codeAddr);
9813                 JITDUMP(" %08X", val);
9814                 if (tiVerificationNeeded)
9815                 {
9816                     Verify(info.compCompHnd->isValidStringRef(info.compScopeHnd, val), "bad string");
9817                     tiRetVal = typeInfo(TI_REF, impGetStringClass());
9818                 }
9819                 impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal);
9820
9821                 break;
9822
9823             case CEE_LDARG:
9824                 lclNum = getU2LittleEndian(codeAddr);
9825                 JITDUMP(" %u", lclNum);
9826                 impLoadArg(lclNum, opcodeOffs + sz + 1);
9827                 break;
9828
9829             case CEE_LDARG_S:
9830                 lclNum = getU1LittleEndian(codeAddr);
9831                 JITDUMP(" %u", lclNum);
9832                 impLoadArg(lclNum, opcodeOffs + sz + 1);
9833                 break;
9834
9835             case CEE_LDARG_0:
9836             case CEE_LDARG_1:
9837             case CEE_LDARG_2:
9838             case CEE_LDARG_3:
9839                 lclNum = (opcode - CEE_LDARG_0);
9840                 assert(lclNum >= 0 && lclNum < 4);
9841                 impLoadArg(lclNum, opcodeOffs + sz + 1);
9842                 break;
9843
9844             case CEE_LDLOC:
9845                 lclNum = getU2LittleEndian(codeAddr);
9846                 JITDUMP(" %u", lclNum);
9847                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9848                 break;
9849
9850             case CEE_LDLOC_S:
9851                 lclNum = getU1LittleEndian(codeAddr);
9852                 JITDUMP(" %u", lclNum);
9853                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9854                 break;
9855
9856             case CEE_LDLOC_0:
9857             case CEE_LDLOC_1:
9858             case CEE_LDLOC_2:
9859             case CEE_LDLOC_3:
9860                 lclNum = (opcode - CEE_LDLOC_0);
9861                 assert(lclNum >= 0 && lclNum < 4);
9862                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9863                 break;
9864
9865             case CEE_STARG:
9866                 lclNum = getU2LittleEndian(codeAddr);
9867                 goto STARG;
9868
9869             case CEE_STARG_S:
9870                 lclNum = getU1LittleEndian(codeAddr);
9871             STARG:
9872                 JITDUMP(" %u", lclNum);
9873
9874                 if (tiVerificationNeeded)
9875                 {
9876                     Verify(lclNum < info.compILargsCount, "bad arg num");
9877                 }
9878
9879                 if (compIsForInlining())
9880                 {
9881                     op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
9882                     noway_assert(op1->gtOper == GT_LCL_VAR);
9883                     lclNum = op1->AsLclVar()->gtLclNum;
9884
9885                     goto VAR_ST_VALID;
9886                 }
9887
9888                 lclNum = compMapILargNum(lclNum); // account for possible hidden param
9889                 assertImp(lclNum < numArgs);
9890
9891                 if (lclNum == info.compThisArg)
9892                 {
9893                     lclNum = lvaArg0Var;
9894                 }
9895                 lvaTable[lclNum].lvArgWrite = 1;
9896
9897                 if (tiVerificationNeeded)
9898                 {
9899                     typeInfo& tiLclVar = lvaTable[lclNum].lvVerTypeInfo;
9900                     Verify(tiCompatibleWith(impStackTop().seTypeInfo, NormaliseForStack(tiLclVar), true),
9901                            "type mismatch");
9902
9903                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
9904                     {
9905                         Verify(!tiLclVar.IsThisPtr(), "storing to uninit this ptr");
9906                     }
9907                 }
9908
9909                 goto VAR_ST;
9910
9911             case CEE_STLOC:
9912                 lclNum = getU2LittleEndian(codeAddr);
9913                 JITDUMP(" %u", lclNum);
9914                 goto LOC_ST;
9915
9916             case CEE_STLOC_S:
9917                 lclNum = getU1LittleEndian(codeAddr);
9918                 JITDUMP(" %u", lclNum);
9919                 goto LOC_ST;
9920
9921             case CEE_STLOC_0:
9922             case CEE_STLOC_1:
9923             case CEE_STLOC_2:
9924             case CEE_STLOC_3:
9925                 lclNum = (opcode - CEE_STLOC_0);
9926                 assert(lclNum >= 0 && lclNum < 4);
9927
9928             LOC_ST:
9929                 if (tiVerificationNeeded)
9930                 {
9931                     Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
9932                     Verify(tiCompatibleWith(impStackTop().seTypeInfo,
9933                                             NormaliseForStack(lvaTable[lclNum + numArgs].lvVerTypeInfo), true),
9934                            "type mismatch");
9935                 }
9936
9937                 if (compIsForInlining())
9938                 {
9939                     lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
9940
9941                     /* Have we allocated a temp for this local? */
9942
9943                     lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp"));
9944
9945                     goto _PopValue;
9946                 }
9947
9948                 lclNum += numArgs;
9949
9950             VAR_ST:
9951
9952                 if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var)
9953                 {
9954                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
9955                     BADCODE("Bad IL");
9956                 }
9957
9958             VAR_ST_VALID:
9959
9960                 /* if it is a struct assignment, make certain we don't overflow the buffer */
9961                 assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd));
9962
9963                 if (lvaTable[lclNum].lvNormalizeOnLoad())
9964                 {
9965                     lclTyp = lvaGetRealType(lclNum);
9966                 }
9967                 else
9968                 {
9969                     lclTyp = lvaGetActualType(lclNum);
9970                 }
9971
9972             _PopValue:
9973                 /* Pop the value being assigned */
9974
9975                 {
9976                     StackEntry se = impPopStack(clsHnd);
9977                     op1           = se.val;
9978                     tiRetVal      = se.seTypeInfo;
9979                 }
9980
9981 #ifdef FEATURE_SIMD
9982                 if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet()))
9983                 {
9984                     assert(op1->TypeGet() == TYP_STRUCT);
9985                     op1->gtType = lclTyp;
9986                 }
9987 #endif // FEATURE_SIMD
9988
9989                 op1 = impImplicitIorI4Cast(op1, lclTyp);
9990
9991 #ifdef _TARGET_64BIT_
9992                 // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
9993                 if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT))
9994                 {
9995                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
9996                     op1 = gtNewCastNode(TYP_INT, op1, TYP_INT);
9997                 }
9998 #endif // _TARGET_64BIT_
9999
10000                 // We had better assign it a value of the correct type
10001                 assertImp(
10002                     genActualType(lclTyp) == genActualType(op1->gtType) ||
10003                     genActualType(lclTyp) == TYP_I_IMPL && op1->IsVarAddr() ||
10004                     (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) ||
10005                     (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) ||
10006                     (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) ||
10007                     ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF));
10008
10009                 /* If op1 is "&var" then its type is the transient "*" and it can
10010                    be used either as TYP_BYREF or TYP_I_IMPL */
10011
10012                 if (op1->IsVarAddr())
10013                 {
10014                     assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF);
10015
10016                     /* When "&var" is created, we assume it is a byref. If it is
10017                        being assigned to a TYP_I_IMPL var, change the type to
10018                        prevent unnecessary GC info */
10019
10020                     if (genActualType(lclTyp) == TYP_I_IMPL)
10021                     {
10022                         op1->gtType = TYP_I_IMPL;
10023                     }
10024                 }
10025
10026                 /* Filter out simple assignments to itself */
10027
10028                 if (op1->gtOper == GT_LCL_VAR && lclNum == op1->gtLclVarCommon.gtLclNum)
10029                 {
10030                     if (insertLdloc)
10031                     {
10032                         // This is a sequence of (ldloc, dup, stloc).  Can simplify
10033                         // to (ldloc, stloc).  Goto LDVAR to reconstruct the ldloc node.
10034                         CLANG_FORMAT_COMMENT_ANCHOR;
10035
10036 #ifdef DEBUG
10037                         if (tiVerificationNeeded)
10038                         {
10039                             assert(
10040                                 typeInfo::AreEquivalent(tiRetVal, NormaliseForStack(lvaTable[lclNum].lvVerTypeInfo)));
10041                         }
10042 #endif
10043
10044                         op1         = nullptr;
10045                         insertLdloc = false;
10046
10047                         impLoadVar(lclNum, opcodeOffs + sz + 1);
10048                         break;
10049                     }
10050                     else if (opts.compDbgCode)
10051                     {
10052                         op1 = gtNewNothingNode();
10053                         goto SPILL_APPEND;
10054                     }
10055                     else
10056                     {
10057                         break;
10058                     }
10059                 }
10060
10061                 /* Create the assignment node */
10062
10063                 op2 = gtNewLclvNode(lclNum, lclTyp, opcodeOffs + sz + 1);
10064
10065                 /* If the local is aliased, we need to spill calls and
10066                    indirections from the stack. */
10067
10068                 if ((lvaTable[lclNum].lvAddrExposed || lvaTable[lclNum].lvHasLdAddrOp) &&
10069                     verCurrentState.esStackDepth > 0)
10070                 {
10071                     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased"));
10072                 }
10073
10074                 /* Spill any refs to the local from the stack */
10075
10076                 impSpillLclRefs(lclNum);
10077
10078 #if !FEATURE_X87_DOUBLES
10079                 // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
10080                 // We insert a cast to the dest 'op2' type
10081                 //
10082                 if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
10083                     varTypeIsFloating(op2->gtType))
10084                 {
10085                     op1 = gtNewCastNode(op2->TypeGet(), op1, op2->TypeGet());
10086                 }
10087 #endif // !FEATURE_X87_DOUBLES
10088
10089                 if (varTypeIsStruct(lclTyp))
10090                 {
10091                     op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL);
10092                 }
10093                 else
10094                 {
10095                     // The code generator generates GC tracking information
10096                     // based on the RHS of the assignment.  Later the LHS (which
10097                     // is a BYREF) gets used and the emitter checks that that variable
10098                     // is being tracked.  It is not (since the RHS was an int and did
10099                     // not need tracking).  To keep this assert happy, we change the RHS
10100                     if (lclTyp == TYP_BYREF && !varTypeIsGC(op1->gtType))
10101                     {
10102                         op1->gtType = TYP_BYREF;
10103                     }
10104                     op1 = gtNewAssignNode(op2, op1);
10105                 }
10106
10107                 /* If insertLdloc is true, then we need to insert a ldloc following the
10108                    stloc.  This is done when converting a (dup, stloc) sequence into
10109                    a (stloc, ldloc) sequence. */
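                /* Roughly, in IL (using local 0 purely for illustration):

                       dup                      stloc.0
                       stloc.0        ==>       ldloc.0
                 */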
10110
10111                 if (insertLdloc)
10112                 {
10113                     // From SPILL_APPEND
10114                     impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
10115
10116 #ifdef DEBUG
10117                     // From DONE_APPEND
10118                     impNoteLastILoffs();
10119 #endif
10120                     op1         = nullptr;
10121                     insertLdloc = false;
10122
10123                     impLoadVar(lclNum, opcodeOffs + sz + 1, tiRetVal);
10124                     break;
10125                 }
10126
10127                 goto SPILL_APPEND;
10128
10129             case CEE_LDLOCA:
10130                 lclNum = getU2LittleEndian(codeAddr);
10131                 goto LDLOCA;
10132
10133             case CEE_LDLOCA_S:
10134                 lclNum = getU1LittleEndian(codeAddr);
10135             LDLOCA:
10136                 JITDUMP(" %u", lclNum);
10137                 if (tiVerificationNeeded)
10138                 {
10139                     Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
10140                     Verify(info.compInitMem, "initLocals not set");
10141                 }
10142
10143                 if (compIsForInlining())
10144                 {
10145                     // Get the local type
10146                     lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
10147
10148                     /* Have we allocated a temp for this local? */
10149
10150                     lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp"));
10151
10152                     op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum));
10153
10154                     goto _PUSH_ADRVAR;
10155                 }
10156
10157                 lclNum += numArgs;
10158                 assertImp(lclNum < info.compLocalsCount);
10159                 goto ADRVAR;
10160
10161             case CEE_LDARGA:
10162                 lclNum = getU2LittleEndian(codeAddr);
10163                 goto LDARGA;
10164
10165             case CEE_LDARGA_S:
10166                 lclNum = getU1LittleEndian(codeAddr);
10167             LDARGA:
10168                 JITDUMP(" %u", lclNum);
10169                 Verify(lclNum < info.compILargsCount, "bad arg num");
10170
10171                 if (compIsForInlining())
10172                 {
10173                     // In IL, LDARGA(_S) is used to load the byref managed pointer of a struct argument,
10174                     // followed by a ldfld to load the field.
10175
10176                     op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
10177                     if (op1->gtOper != GT_LCL_VAR)
10178                     {
10179                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR);
10180                         return;
10181                     }
10182
10183                     assert(op1->gtOper == GT_LCL_VAR);
10184
10185                     goto _PUSH_ADRVAR;
10186                 }
10187
10188                 lclNum = compMapILargNum(lclNum); // account for possible hidden param
10189                 assertImp(lclNum < numArgs);
10190
10191                 if (lclNum == info.compThisArg)
10192                 {
10193                     lclNum = lvaArg0Var;
10194                 }
10195
10196                 goto ADRVAR;
10197
10198             ADRVAR:
10199
10200                 op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum), opcodeOffs + sz + 1);
10201
10202             _PUSH_ADRVAR:
10203                 assert(op1->gtOper == GT_LCL_VAR);
10204
10205                 /* Note that this is supposed to create the transient type "*"
10206                    which may be used as a TYP_I_IMPL. However we catch places
10207                    where it is used as a TYP_I_IMPL and change the node if needed.
10208                    Thus we are pessimistic and may report byrefs in the GC info
10209                    where it was not absolutely needed, but it is safer this way.
10210                  */
10211                 op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10212
10213                 // &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does
10214                 assert((op1->gtFlags & GTF_GLOB_REF) == 0);
10215
10216                 tiRetVal = lvaTable[lclNum].lvVerTypeInfo;
10217                 if (tiVerificationNeeded)
10218                 {
10219                     // Don't allow taking address of uninit this ptr.
10220                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
10221                     {
10222                         Verify(!tiRetVal.IsThisPtr(), "address of uninit this ptr");
10223                     }
10224
10225                     if (!tiRetVal.IsByRef())
10226                     {
10227                         tiRetVal.MakeByRef();
10228                     }
10229                     else
10230                     {
10231                         Verify(false, "byref to byref");
10232                     }
10233                 }
10234
10235                 impPushOnStack(op1, tiRetVal);
10236                 break;
10237
10238             case CEE_ARGLIST:
10239
10240                 if (!info.compIsVarArgs)
10241                 {
10242                     BADCODE("arglist in non-vararg method");
10243                 }
10244
10245                 if (tiVerificationNeeded)
10246                 {
10247                     tiRetVal = typeInfo(TI_STRUCT, impGetRuntimeArgumentHandle());
10248                 }
10249                 assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG);
10250
10251                 /* The ARGLIST cookie is a hidden 'last' parameter; we have already
10252                    adjusted the arg count because this is like fetching the last param */
10253                 assertImp(0 < numArgs);
10254                 assert(lvaTable[lvaVarargsHandleArg].lvAddrExposed);
10255                 lclNum = lvaVarargsHandleArg;
10256                 op1    = gtNewLclvNode(lclNum, TYP_I_IMPL, opcodeOffs + sz + 1);
10257                 op1    = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10258                 impPushOnStack(op1, tiRetVal);
10259                 break;
10260
10261             case CEE_ENDFINALLY:
10262
10263                 if (compIsForInlining())
10264                 {
10265                     assert(!"Shouldn't have exception handlers in the inliner!");
10266                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY);
10267                     return;
10268                 }
10269
10270                 if (verCurrentState.esStackDepth > 0)
10271                 {
10272                     impEvalSideEffects();
10273                 }
10274
10275                 if (info.compXcptnsCount == 0)
10276                 {
10277                     BADCODE("endfinally outside finally");
10278                 }
10279
10280                 assert(verCurrentState.esStackDepth == 0);
10281
10282                 op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr);
10283                 goto APPEND;
10284
10285             case CEE_ENDFILTER:
10286
10287                 if (compIsForInlining())
10288                 {
10289                     assert(!"Shouldn't have exception handlers in the inliner!");
10290                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER);
10291                     return;
10292                 }
10293
10294                 block->bbSetRunRarely(); // filters are rare
10295
10296                 if (info.compXcptnsCount == 0)
10297                 {
10298                     BADCODE("endfilter outside filter");
10299                 }
10300
10301                 if (tiVerificationNeeded)
10302                 {
10303                     Verify(impStackTop().seTypeInfo.IsType(TI_INT), "bad endfilt arg");
10304                 }
10305
10306                 op1 = impPopStack().val;
10307                 assertImp(op1->gtType == TYP_INT);
10308                 if (!bbInFilterILRange(block))
10309                 {
10310                     BADCODE("EndFilter outside a filter handler");
10311                 }
10312
10313                 /* Mark current bb as end of filter */
10314
10315                 assert(compCurBB->bbFlags & BBF_DONT_REMOVE);
10316                 assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET);
10317
10318                 /* Mark catch handler as successor */
10319
10320                 op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1);
10321                 if (verCurrentState.esStackDepth != 0)
10322                 {
10323                     verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__)
10324                                                 DEBUGARG(__LINE__));
10325                 }
10326                 goto APPEND;
10327
10328             case CEE_RET:
10329                 prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it
10330             RET:
10331                 if (!impReturnInstruction(block, prefixFlags, opcode))
10332                 {
10333                     return; // abort
10334                 }
10335                 else
10336                 {
10337                     break;
10338                 }
10339
10340             case CEE_JMP:
10341
10342                 assert(!compIsForInlining());
10343
10344                 if (tiVerificationNeeded)
10345                 {
10346                     Verify(false, "Invalid opcode: CEE_JMP");
10347                 }
10348
10349                 if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex())
10350                 {
10351                     /* CEE_JMP does not make sense in some "protected" regions. */
10352
10353                     BADCODE("Jmp not allowed in protected region");
10354                 }
10355
10356                 if (verCurrentState.esStackDepth != 0)
10357                 {
10358                     BADCODE("Stack must be empty after CEE_JMPs");
10359                 }
10360
10361                 _impResolveToken(CORINFO_TOKENKIND_Method);
10362
10363                 JITDUMP(" %08X", resolvedToken.token);
10364
10365                 /* The signature of the target has to be identical to ours.
10366                    At least check that argCnt and returnType match */
10367
10368                 eeGetMethodSig(resolvedToken.hMethod, &sig);
10369                 if (sig.numArgs != info.compMethodInfo->args.numArgs ||
10370                     sig.retType != info.compMethodInfo->args.retType ||
10371                     sig.callConv != info.compMethodInfo->args.callConv)
10372                 {
10373                     BADCODE("Incompatible target for CEE_JMPs");
10374                 }
10375
10376 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARMARCH_)
10377
10378                 op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod);
10379
10380                 /* Mark the basic block as being a JUMP instead of RETURN */
10381
10382                 block->bbFlags |= BBF_HAS_JMP;
10383
10384                 /* Set this flag to make sure register arguments have a location assigned
10385                  * even if we don't use them inside the method */
10386
10387                 compJmpOpUsed = true;
10388
10389                 fgNoStructPromotion = true;
10390
10391                 goto APPEND;
10392
10393 #else // !_TARGET_XARCH_ && !_TARGET_ARMARCH_
10394
10395                 // Import this just like a series of LDARGs + tail. + call + ret
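                // Conceptually the jmp is rewritten as the sequence (sketch):
                //     ldarg.0 ... ldarg.N-1
                //     tail. call <target>
                //     ret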
10396
10397                 if (info.compIsVarArgs)
10398                 {
10399                     // For now we don't implement true tail calls, so this breaks varargs.
10400                     // So warn the user instead of generating bad code.
10401                     // This is a semi-temporary workaround for DevDiv 173860, until we can properly
10402                     // implement true tail calls.
10403                     IMPL_LIMITATION("varargs + CEE_JMP doesn't work yet");
10404                 }
10405
10406                 // First load up the arguments (0 - N)
10407                 for (unsigned argNum = 0; argNum < info.compILargsCount; argNum++)
10408                 {
10409                     impLoadArg(argNum, opcodeOffs + sz + 1);
10410                 }
10411
10412                 // Now generate the tail call
10413                 noway_assert(prefixFlags == 0);
10414                 prefixFlags = PREFIX_TAILCALL_EXPLICIT;
10415                 opcode      = CEE_CALL;
10416
10417                 eeGetCallInfo(&resolvedToken, NULL,
10418                               combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS), &callInfo);
10419
10420                 // All calls and delegates need a security callout.
10421                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
10422
10423                 callTyp = impImportCall(CEE_CALL, &resolvedToken, NULL, NULL, PREFIX_TAILCALL_EXPLICIT, &callInfo,
10424                                         opcodeOffs);
10425
10426                 // And finish with the ret
10427                 goto RET;
10428
10429 #endif // _TARGET_XARCH_ || _TARGET_ARMARCH_
10430
10431             case CEE_LDELEMA:
10432                 assertImp(sz == sizeof(unsigned));
10433
10434                 _impResolveToken(CORINFO_TOKENKIND_Class);
10435
10436                 JITDUMP(" %08X", resolvedToken.token);
10437
10438                 ldelemClsHnd = resolvedToken.hClass;
10439
10440                 if (tiVerificationNeeded)
10441                 {
10442                     typeInfo tiArray = impStackTop(1).seTypeInfo;
10443                     typeInfo tiIndex = impStackTop().seTypeInfo;
10444
10445                     // As per ECMA, the 'index' specified can be either int32 or native int.
10446                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10447
10448                     typeInfo arrayElemType = verMakeTypeInfo(ldelemClsHnd);
10449                     Verify(tiArray.IsNullObjRef() ||
10450                                typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElemType),
10451                            "bad array");
10452
10453                     tiRetVal = arrayElemType;
10454                     tiRetVal.MakeByRef();
10455                     if (prefixFlags & PREFIX_READONLY)
10456                     {
10457                         tiRetVal.SetIsReadonlyByRef();
10458                     }
10459
10460                     // an array interior pointer is always in the heap
10461                     tiRetVal.SetIsPermanentHomeByRef();
10462                 }
10463
10464                 // If it's a value class array we just do a simple address-of
10465                 if (eeIsValueClass(ldelemClsHnd))
10466                 {
10467                     CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd);
10468                     if (cit == CORINFO_TYPE_UNDEF)
10469                     {
10470                         lclTyp = TYP_STRUCT;
10471                     }
10472                     else
10473                     {
10474                         lclTyp = JITtype2varType(cit);
10475                     }
10476                     goto ARR_LD_POST_VERIFY;
10477                 }
10478
10479                 // Similarly, if it's a readonly access, we can do a simple address-of
10480                 // without doing a runtime type-check
10481                 if (prefixFlags & PREFIX_READONLY)
10482                 {
10483                     lclTyp = TYP_REF;
10484                     goto ARR_LD_POST_VERIFY;
10485                 }
10486
10487                 // Otherwise we need the full helper function with run-time type check
10488                 op1 = impTokenToHandle(&resolvedToken);
10489                 if (op1 == nullptr)
10490                 { // compDonotInline()
10491                     return;
10492                 }
10493
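                // Build the helper's argument list.  Each gtNewListNode call prepends,
                // so after these three lines the list reads (array, index, type handle).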
10494                 args = gtNewArgList(op1);                      // Type
10495                 args = gtNewListNode(impPopStack().val, args); // index
10496                 args = gtNewListNode(impPopStack().val, args); // array
10497                 op1  = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, GTF_EXCEPT, args);
10498
10499                 impPushOnStack(op1, tiRetVal);
10500                 break;
10501
10502             // ldelem for reference and value types
10503             case CEE_LDELEM:
10504                 assertImp(sz == sizeof(unsigned));
10505
10506                 _impResolveToken(CORINFO_TOKENKIND_Class);
10507
10508                 JITDUMP(" %08X", resolvedToken.token);
10509
10510                 ldelemClsHnd = resolvedToken.hClass;
10511
10512                 if (tiVerificationNeeded)
10513                 {
10514                     typeInfo tiArray = impStackTop(1).seTypeInfo;
10515                     typeInfo tiIndex = impStackTop().seTypeInfo;
10516
10517                     // As per ECMA, the 'index' specified can be either int32 or native int.
10518                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10519                     tiRetVal = verMakeTypeInfo(ldelemClsHnd);
10520
10521                     Verify(tiArray.IsNullObjRef() || tiCompatibleWith(verGetArrayElemType(tiArray), tiRetVal, false),
10522                            "type of array incompatible with type operand");
10523                     tiRetVal.NormaliseForStack();
10524                 }
10525
10526                 // If it's a reference type or generic variable type
10527                 // then just generate code as though it's a ldelem.ref instruction
10528                 if (!eeIsValueClass(ldelemClsHnd))
10529                 {
10530                     lclTyp = TYP_REF;
10531                     opcode = CEE_LDELEM_REF;
10532                 }
10533                 else
10534                 {
10535                     CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd);
10536                     lclTyp             = JITtype2varType(jitTyp);
10537                     tiRetVal           = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct
10538                     tiRetVal.NormaliseForStack();
10539                 }
10540                 goto ARR_LD_POST_VERIFY;
10541
10542             case CEE_LDELEM_I1:
10543                 lclTyp = TYP_BYTE;
10544                 goto ARR_LD;
10545             case CEE_LDELEM_I2:
10546                 lclTyp = TYP_SHORT;
10547                 goto ARR_LD;
10548             case CEE_LDELEM_I:
10549                 lclTyp = TYP_I_IMPL;
10550                 goto ARR_LD;
10551
10552             // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter
10553             // and treating it as TYP_INT avoids other asserts.
10554             case CEE_LDELEM_U4:
10555                 lclTyp = TYP_INT;
10556                 goto ARR_LD;
10557
10558             case CEE_LDELEM_I4:
10559                 lclTyp = TYP_INT;
10560                 goto ARR_LD;
10561             case CEE_LDELEM_I8:
10562                 lclTyp = TYP_LONG;
10563                 goto ARR_LD;
10564             case CEE_LDELEM_REF:
10565                 lclTyp = TYP_REF;
10566                 goto ARR_LD;
10567             case CEE_LDELEM_R4:
10568                 lclTyp = TYP_FLOAT;
10569                 goto ARR_LD;
10570             case CEE_LDELEM_R8:
10571                 lclTyp = TYP_DOUBLE;
10572                 goto ARR_LD;
10573             case CEE_LDELEM_U1:
10574                 lclTyp = TYP_UBYTE;
10575                 goto ARR_LD;
10576             case CEE_LDELEM_U2:
10577                 lclTyp = TYP_CHAR;
10578                 goto ARR_LD;
10579
10580             ARR_LD:
10581
10582                 if (tiVerificationNeeded)
10583                 {
10584                     typeInfo tiArray = impStackTop(1).seTypeInfo;
10585                     typeInfo tiIndex = impStackTop().seTypeInfo;
10586
10587                     // As per ECMA, the 'index' specified can be either int32 or native int.
10588                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10589                     if (tiArray.IsNullObjRef())
10590                     {
10591                         if (lclTyp == TYP_REF)
10592                         { // we will say a deref of a null array yields a null ref
10593                             tiRetVal = typeInfo(TI_NULL);
10594                         }
10595                         else
10596                         {
10597                             tiRetVal = typeInfo(lclTyp);
10598                         }
10599                     }
10600                     else
10601                     {
10602                         tiRetVal             = verGetArrayElemType(tiArray);
10603                         typeInfo arrayElemTi = typeInfo(lclTyp);
10604 #ifdef _TARGET_64BIT_
10605                         if (opcode == CEE_LDELEM_I)
10606                         {
10607                             arrayElemTi = typeInfo::nativeInt();
10608                         }
10609
10610                         if (lclTyp != TYP_REF && lclTyp != TYP_STRUCT)
10611                         {
10612                             Verify(typeInfo::AreEquivalent(tiRetVal, arrayElemTi), "bad array");
10613                         }
10614                         else
10615 #endif // _TARGET_64BIT_
10616                         {
10617                             Verify(tiRetVal.IsType(arrayElemTi.GetType()), "bad array");
10618                         }
10619                     }
10620                     tiRetVal.NormaliseForStack();
10621                 }
10622             ARR_LD_POST_VERIFY:
10623
10624                 /* Pull the index value and array address */
10625                 op2 = impPopStack().val;
10626                 op1 = impPopStack().val;
10627                 assertImp(op1->gtType == TYP_REF);
10628
10629                 /* Check for null pointer - in the inliner case we simply abort */
10630
10631                 if (compIsForInlining())
10632                 {
10633                     if (op1->gtOper == GT_CNS_INT)
10634                     {
10635                         compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM);
10636                         return;
10637                     }
10638                 }
10639
10640                 op1 = impCheckForNullPointer(op1);
10641
10642                 /* Mark the block as containing an index expression */
10643
10644                 if (op1->gtOper == GT_LCL_VAR)
10645                 {
10646                     if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD)
10647                     {
10648                         block->bbFlags |= BBF_HAS_IDX_LEN;
10649                         optMethodFlags |= OMF_HAS_ARRAYREF;
10650                     }
10651                 }
10652
10653                 /* Create the index node and push it on the stack */
10654
10655                 op1 = gtNewIndexRef(lclTyp, op1, op2);
10656
10657                 ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT);
10658
10659                 if ((opcode == CEE_LDELEMA) || ldstruct ||
10660                     (ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd)))
10661                 {
10662                     assert(ldelemClsHnd != DUMMY_INIT(NULL));
10663
10664                     // remember the element size
10665                     if (lclTyp == TYP_REF)
10666                     {
10667                         op1->gtIndex.gtIndElemSize = sizeof(void*);
10668                     }
10669                     else
10670                     {
10671                         // If ldelemClsHnd is precisely a primitive type, use that; otherwise, preserve the struct type.
10672                         if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF)
10673                         {
10674                             op1->gtIndex.gtStructElemClass = ldelemClsHnd;
10675                         }
10676                         assert(lclTyp != TYP_STRUCT || op1->gtIndex.gtStructElemClass != nullptr);
10677                         if (lclTyp == TYP_STRUCT)
10678                         {
10679                             size                       = info.compCompHnd->getClassSize(ldelemClsHnd);
10680                             op1->gtIndex.gtIndElemSize = size;
10681                             op1->gtType                = lclTyp;
10682                         }
10683                     }
10684
10685                     if ((opcode == CEE_LDELEMA) || ldstruct)
10686                     {
10687                         // wrap it in a &
10688                         lclTyp = TYP_BYREF;
10689
10690                         op1 = gtNewOperNode(GT_ADDR, lclTyp, op1);
10691                     }
10692                     else
10693                     {
10694                         assert(lclTyp != TYP_STRUCT);
10695                     }
10696                 }
10697
10698                 if (ldstruct)
10699                 {
10700                     // Create an OBJ for the result
10701                     op1 = gtNewObjNode(ldelemClsHnd, op1);
10702                     op1->gtFlags |= GTF_EXCEPT;
10703                 }
10704                 impPushOnStack(op1, tiRetVal);
10705                 break;
10706
10707             // stelem for reference and value types
10708             case CEE_STELEM:
10709
10710                 assertImp(sz == sizeof(unsigned));
10711
10712                 _impResolveToken(CORINFO_TOKENKIND_Class);
10713
10714                 JITDUMP(" %08X", resolvedToken.token);
10715
10716                 stelemClsHnd = resolvedToken.hClass;
10717
10718                 if (tiVerificationNeeded)
10719                 {
10720                     typeInfo tiArray = impStackTop(2).seTypeInfo;
10721                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
10722                     typeInfo tiValue = impStackTop().seTypeInfo;
10723
10724                     // As per ECMA, the 'index' specified can be either int32 or native int.
10725                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10726                     typeInfo arrayElem = verMakeTypeInfo(stelemClsHnd);
10727
10728                     Verify(tiArray.IsNullObjRef() || tiCompatibleWith(arrayElem, verGetArrayElemType(tiArray), false),
10729                            "type operand incompatible with array element type");
10730                     arrayElem.NormaliseForStack();
10731                     Verify(tiCompatibleWith(tiValue, arrayElem, true), "value incompatible with type operand");
10732                 }
10733
10734                 // If it's a reference type just behave as though it's a stelem.ref instruction
10735                 if (!eeIsValueClass(stelemClsHnd))
10736                 {
10737                     goto STELEM_REF_POST_VERIFY;
10738                 }
10739
10740                 // Otherwise extract the type
10741                 {
10742                     CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd);
10743                     lclTyp             = JITtype2varType(jitTyp);
10744                     goto ARR_ST_POST_VERIFY;
10745                 }
10746
10747             case CEE_STELEM_REF:
10748
10749                 if (tiVerificationNeeded)
10750                 {
10751                     typeInfo tiArray = impStackTop(2).seTypeInfo;
10752                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
10753                     typeInfo tiValue = impStackTop().seTypeInfo;
10754
10755                     // As per ECMA, the specified 'index' can be either int32 or native int.
10756                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10757                     Verify(tiValue.IsObjRef(), "bad value");
10758
10759                     // We only check that it is an object reference; the helper does additional checks
10760                     Verify(tiArray.IsNullObjRef() || verGetArrayElemType(tiArray).IsType(TI_REF), "bad array");
10761                 }
10762
10763                 arrayNodeTo      = impStackTop(2).val;
10764                 arrayNodeToIndex = impStackTop(1).val;
10765                 arrayNodeFrom    = impStackTop().val;
10766
10767                 //
10768                 // Note that it is not legal to optimize away CORINFO_HELP_ARRADDR_ST in a
10769                 // lot of cases because of covariance, i.e. foo[] can be cast to object[].
10770                 //
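                      // For illustration, the C#-level case that forces the run-time check:
                      //     object[] arr = new string[1];
                      //     arr[0] = new object();   // must throw ArrayTypeMismatchException
                      // so the helper has to verify the element type on the store.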
10771
10772                 // Check for assignment to the same array, i.e. arrLcl[i] = arrLcl[j]
10773                 // This does not need CORINFO_HELP_ARRADDR_ST
10774
10775                 if (arrayNodeFrom->OperGet() == GT_INDEX && arrayNodeFrom->gtOp.gtOp1->gtOper == GT_LCL_VAR &&
10776                     arrayNodeTo->gtOper == GT_LCL_VAR &&
10777                     arrayNodeTo->gtLclVarCommon.gtLclNum == arrayNodeFrom->gtOp.gtOp1->gtLclVarCommon.gtLclNum &&
10778                     !lvaTable[arrayNodeTo->gtLclVarCommon.gtLclNum].lvAddrExposed)
10779                 {
10780                     lclTyp = TYP_REF;
10781                     goto ARR_ST_POST_VERIFY;
10782                 }
10783
10784                 // Check for assignment of NULL. This does not need CORINFO_HELP_ARRADDR_ST
10785
10786                 if (arrayNodeFrom->OperGet() == GT_CNS_INT)
10787                 {
10788                     assert(arrayNodeFrom->gtType == TYP_REF && arrayNodeFrom->gtIntCon.gtIconVal == 0);
10789
10790                     lclTyp = TYP_REF;
10791                     goto ARR_ST_POST_VERIFY;
10792                 }
10793
10794             STELEM_REF_POST_VERIFY:
10795
10796                 /* Call a helper function to do the assignment */
10797                 op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, 0, impPopList(3, &flags, nullptr));
10798
10799                 goto SPILL_APPEND;
10800
10801             case CEE_STELEM_I1:
10802                 lclTyp = TYP_BYTE;
10803                 goto ARR_ST;
10804             case CEE_STELEM_I2:
10805                 lclTyp = TYP_SHORT;
10806                 goto ARR_ST;
10807             case CEE_STELEM_I:
10808                 lclTyp = TYP_I_IMPL;
10809                 goto ARR_ST;
10810             case CEE_STELEM_I4:
10811                 lclTyp = TYP_INT;
10812                 goto ARR_ST;
10813             case CEE_STELEM_I8:
10814                 lclTyp = TYP_LONG;
10815                 goto ARR_ST;
10816             case CEE_STELEM_R4:
10817                 lclTyp = TYP_FLOAT;
10818                 goto ARR_ST;
10819             case CEE_STELEM_R8:
10820                 lclTyp = TYP_DOUBLE;
10821                 goto ARR_ST;
10822
10823             ARR_ST:
10824
10825                 if (tiVerificationNeeded)
10826                 {
10827                     typeInfo tiArray = impStackTop(2).seTypeInfo;
10828                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
10829                     typeInfo tiValue = impStackTop().seTypeInfo;
10830
10831                     // As per ECMA, the specified 'index' can be either int32 or native int.
10832                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10833                     typeInfo arrayElem = typeInfo(lclTyp);
10834 #ifdef _TARGET_64BIT_
10835                     if (opcode == CEE_STELEM_I)
10836                     {
10837                         arrayElem = typeInfo::nativeInt();
10838                     }
10839 #endif // _TARGET_64BIT_
10840                     Verify(tiArray.IsNullObjRef() || typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElem),
10841                            "bad array");
10842
10843                     Verify(tiCompatibleWith(NormaliseForStack(tiValue), arrayElem.NormaliseForStack(), true),
10844                            "bad value");
10845                 }
10846
10847             ARR_ST_POST_VERIFY:
10848                 /* The strict order of evaluation is LHS-operands, RHS-operands,
10849                    range-check, and then assignment. However, codegen currently
10850                    does the range-check before evaluating the RHS-operands. So to
10851                    maintain strict ordering, we spill the stack. */
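                      // For example, in "a[i] = Foo()" an exception thrown by Foo() must be observed before
                      // any IndexOutOfRangeException from the range check, so a side-effecting value on the
                      // stack is spilled to a temp (and thus evaluated) first.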
10852
10853                 if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT)
10854                 {
10855                     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
10856                                                    "Strict ordering of exceptions for Array store"));
10857                 }
10858
10859                 /* Pull the new value from the stack */
10860                 op2 = impPopStack().val;
10861
10862                 /* Pull the index value */
10863                 op1 = impPopStack().val;
10864
10865                 /* Pull the array address */
10866                 op3 = impPopStack().val;
10867
10868                 assertImp(op3->gtType == TYP_REF);
10869                 if (op2->IsVarAddr())
10870                 {
10871                     op2->gtType = TYP_I_IMPL;
10872                 }
10873
10874                 op3 = impCheckForNullPointer(op3);
10875
10876                 // Mark the block as containing an index expression
10877
10878                 if (op3->gtOper == GT_LCL_VAR)
10879                 {
10880                     if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD)
10881                     {
10882                         block->bbFlags |= BBF_HAS_IDX_LEN;
10883                         optMethodFlags |= OMF_HAS_ARRAYREF;
10884                     }
10885                 }
10886
10887                 /* Create the index node */
10888
10889                 op1 = gtNewIndexRef(lclTyp, op3, op1);
10890
10891                 /* Create the assignment node and append it */
10892
10893                 if (lclTyp == TYP_STRUCT)
10894                 {
10895                     assert(stelemClsHnd != DUMMY_INIT(NULL));
10896
10897                     op1->gtIndex.gtStructElemClass = stelemClsHnd;
10898                     op1->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(stelemClsHnd);
10899                 }
10900                 if (varTypeIsStruct(op1))
10901                 {
10902                     op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL);
10903                 }
10904                 else
10905                 {
10906                     op2 = impImplicitR4orR8Cast(op2, op1->TypeGet());
10907                     op1 = gtNewAssignNode(op1, op2);
10908                 }
10909
10910                 /* Mark the expression as containing an assignment */
10911
10912                 op1->gtFlags |= GTF_ASG;
10913
10914                 goto SPILL_APPEND;
10915
10916             case CEE_ADD:
10917                 oper = GT_ADD;
10918                 goto MATH_OP2;
10919
10920             case CEE_ADD_OVF:
10921                 uns = false;
10922                 goto ADD_OVF;
10923             case CEE_ADD_OVF_UN:
10924                 uns = true;
10925                 goto ADD_OVF;
10926
10927             ADD_OVF:
10928                 ovfl     = true;
10929                 callNode = false;
10930                 oper     = GT_ADD;
10931                 goto MATH_OP2_FLAGS;
10932
10933             case CEE_SUB:
10934                 oper = GT_SUB;
10935                 goto MATH_OP2;
10936
10937             case CEE_SUB_OVF:
10938                 uns = false;
10939                 goto SUB_OVF;
10940             case CEE_SUB_OVF_UN:
10941                 uns = true;
10942                 goto SUB_OVF;
10943
10944             SUB_OVF:
10945                 ovfl     = true;
10946                 callNode = false;
10947                 oper     = GT_SUB;
10948                 goto MATH_OP2_FLAGS;
10949
10950             case CEE_MUL:
10951                 oper = GT_MUL;
10952                 goto MATH_MAYBE_CALL_NO_OVF;
10953
10954             case CEE_MUL_OVF:
10955                 uns = false;
10956                 goto MUL_OVF;
10957             case CEE_MUL_OVF_UN:
10958                 uns = true;
10959                 goto MUL_OVF;
10960
10961             MUL_OVF:
10962                 ovfl = true;
10963                 oper = GT_MUL;
10964                 goto MATH_MAYBE_CALL_OVF;
10965
10966             // Other binary math operations
10967
10968             case CEE_DIV:
10969                 oper = GT_DIV;
10970                 goto MATH_MAYBE_CALL_NO_OVF;
10971
10972             case CEE_DIV_UN:
10973                 oper = GT_UDIV;
10974                 goto MATH_MAYBE_CALL_NO_OVF;
10975
10976             case CEE_REM:
10977                 oper = GT_MOD;
10978                 goto MATH_MAYBE_CALL_NO_OVF;
10979
10980             case CEE_REM_UN:
10981                 oper = GT_UMOD;
10982                 goto MATH_MAYBE_CALL_NO_OVF;
10983
10984             MATH_MAYBE_CALL_NO_OVF:
10985                 ovfl = false;
10986             MATH_MAYBE_CALL_OVF:
10987                 // Morpher has some complex logic about when to turn different
10988                 // typed nodes on different platforms into helper calls. We
10989                 // need to either duplicate that logic here, or just
10990                 // pessimistically make all the nodes large enough to become
10991                 // call nodes.  Since call nodes aren't that much larger and
10992                 // these opcodes are infrequent enough I chose the latter.
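                      // (For instance, 64-bit division and modulus on 32-bit targets are typically expanded
                      // into JIT helper calls during morph.)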
10993                 callNode = true;
10994                 goto MATH_OP2_FLAGS;
10995
10996             case CEE_AND:
10997                 oper = GT_AND;
10998                 goto MATH_OP2;
10999             case CEE_OR:
11000                 oper = GT_OR;
11001                 goto MATH_OP2;
11002             case CEE_XOR:
11003                 oper = GT_XOR;
11004                 goto MATH_OP2;
11005
11006             MATH_OP2: // For default values of 'ovfl' and 'callNode'
11007
11008                 ovfl     = false;
11009                 callNode = false;
11010
11011             MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set
11012
11013                 /* Pull two values and push back the result */
11014
11015                 if (tiVerificationNeeded)
11016                 {
11017                     const typeInfo& tiOp1 = impStackTop(1).seTypeInfo;
11018                     const typeInfo& tiOp2 = impStackTop().seTypeInfo;
11019
11020                     Verify(tiCompatibleWith(tiOp1, tiOp2, true), "different arg type");
11021                     if (oper == GT_ADD || oper == GT_DIV || oper == GT_SUB || oper == GT_MUL || oper == GT_MOD)
11022                     {
11023                         Verify(tiOp1.IsNumberType(), "not number");
11024                     }
11025                     else
11026                     {
11027                         Verify(tiOp1.IsIntegerType(), "not integer");
11028                     }
11029
11030                     Verify(!ovfl || tiOp1.IsIntegerType(), "not integer");
11031
11032                     tiRetVal = tiOp1;
11033
11034 #ifdef _TARGET_64BIT_
11035                     if (tiOp2.IsNativeIntType())
11036                     {
11037                         tiRetVal = tiOp2;
11038                     }
11039 #endif // _TARGET_64BIT_
11040                 }
11041
11042                 op2 = impPopStack().val;
11043                 op1 = impPopStack().val;
11044
11045 #if !CPU_HAS_FP_SUPPORT
11046                 if (varTypeIsFloating(op1->gtType))
11047                 {
11048                     callNode = true;
11049                 }
11050 #endif
11051                 /* Can't do arithmetic with references */
11052                 assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF);
11053
11054                 // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change it if it's a true byref, only
11055                 // if it is an address into the stack)
11056                 impBashVarAddrsToI(op1, op2);
11057
11058                 type = impGetByRefResultType(oper, uns, &op1, &op2);
11059
11060                 assert(!ovfl || !varTypeIsFloating(op1->gtType));
11061
11062                 /* Special case: "int+0", "int-0", "int*1", "int/1" */
11063
11064                 if (op2->gtOper == GT_CNS_INT)
11065                 {
11066                     if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) ||
11067                         (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV)))
11068
11069                     {
11070                         impPushOnStack(op1, tiRetVal);
11071                         break;
11072                     }
11073                 }
11074
11075 #if !FEATURE_X87_DOUBLES
11076                 // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand
11077                 //
11078                 if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))
11079                 {
11080                     if (op1->TypeGet() != type)
11081                     {
11082                         // We insert a cast of op1 to 'type'
11083                         op1 = gtNewCastNode(type, op1, type);
11084                     }
11085                     if (op2->TypeGet() != type)
11086                     {
11087                         // We insert a cast of op2 to 'type'
11088                         op2 = gtNewCastNode(type, op2, type);
11089                     }
11090                 }
11091 #endif // !FEATURE_X87_DOUBLES
11092
11093 #if SMALL_TREE_NODES
11094                 if (callNode)
11095                 {
11096                     /* These operators can later be transformed into 'GT_CALL' */
11097
11098                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]);
11099 #ifndef _TARGET_ARM_
11100                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]);
11101                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]);
11102                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]);
11103                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]);
11104 #endif
11105                     // It's tempting to use LargeOpOpcode() here, but this logic is *not* saying
11106                     // that we'll need to transform into a general large node, but rather specifically
11107                     // to a call: by doing it this way, things keep working if there are multiple sizes,
11108                     // and a CALL is no longer the largest.
11109                     // That said, as of now it *is* a large node, so we'll do this with an assert rather
11110                     // than an "if".
11111                     assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE);
11112                     op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true));
11113                 }
11114                 else
11115 #endif // SMALL_TREE_NODES
11116                 {
11117                     op1 = gtNewOperNode(oper, type, op1, op2);
11118                 }
11119
11120                 /* Special case: integer/long division may throw an exception */
11121
11122                 if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow())
11123                 {
11124                     op1->gtFlags |= GTF_EXCEPT;
11125                 }
11126
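                      // For the checked opcodes (add.ovf, sub.ovf, mul.ovf and their .un forms), flag the
                      // node so an overflow check is generated; GTF_UNSIGNED selects the unsigned form of
                      // that check.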
11127                 if (ovfl)
11128                 {
11129                     assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL);
11130                     if (ovflType != TYP_UNKNOWN)
11131                     {
11132                         op1->gtType = ovflType;
11133                     }
11134                     op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW);
11135                     if (uns)
11136                     {
11137                         op1->gtFlags |= GTF_UNSIGNED;
11138                     }
11139                 }
11140
11141                 impPushOnStack(op1, tiRetVal);
11142                 break;
11143
11144             case CEE_SHL:
11145                 oper = GT_LSH;
11146                 goto CEE_SH_OP2;
11147
11148             case CEE_SHR:
11149                 oper = GT_RSH;
11150                 goto CEE_SH_OP2;
11151             case CEE_SHR_UN:
11152                 oper = GT_RSZ;
11153                 goto CEE_SH_OP2;
11154
11155             CEE_SH_OP2:
11156                 if (tiVerificationNeeded)
11157                 {
11158                     const typeInfo& tiVal   = impStackTop(1).seTypeInfo;
11159                     const typeInfo& tiShift = impStackTop(0).seTypeInfo;
11160                     Verify(tiVal.IsIntegerType() && tiShift.IsType(TI_INT), "Bad shift args");
11161                     tiRetVal = tiVal;
11162                 }
11163                 op2 = impPopStack().val;
11164                 op1 = impPopStack().val; // operand to be shifted
11165                 impBashVarAddrsToI(op1, op2);
11166
11167                 type = genActualType(op1->TypeGet());
11168                 op1  = gtNewOperNode(oper, type, op1, op2);
11169
11170                 impPushOnStack(op1, tiRetVal);
11171                 break;
11172
11173             case CEE_NOT:
11174                 if (tiVerificationNeeded)
11175                 {
11176                     tiRetVal = impStackTop().seTypeInfo;
11177                     Verify(tiRetVal.IsIntegerType(), "bad int value");
11178                 }
11179
11180                 op1 = impPopStack().val;
11181                 impBashVarAddrsToI(op1, nullptr);
11182                 type = genActualType(op1->TypeGet());
11183                 impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal);
11184                 break;
11185
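                  // ckfinite: per ECMA-335, throws ArithmeticException if the value is NaN or infinite;
                  // modeled as a GT_CKFINITE node that may raise an exception.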
11186             case CEE_CKFINITE:
11187                 if (tiVerificationNeeded)
11188                 {
11189                     tiRetVal = impStackTop().seTypeInfo;
11190                     Verify(tiRetVal.IsType(TI_DOUBLE), "bad R value");
11191                 }
11192                 op1  = impPopStack().val;
11193                 type = op1->TypeGet();
11194                 op1  = gtNewOperNode(GT_CKFINITE, type, op1);
11195                 op1->gtFlags |= GTF_EXCEPT;
11196
11197                 impPushOnStack(op1, tiRetVal);
11198                 break;
11199
11200             case CEE_LEAVE:
11201
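                      // The leave target is the IL offset of the next instruction plus the signed jump
                      // distance read from the instruction stream.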
11202                 val     = getI4LittleEndian(codeAddr); // jump distance
11203                 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val);
11204                 goto LEAVE;
11205
11206             case CEE_LEAVE_S:
11207                 val     = getI1LittleEndian(codeAddr); // jump distance
11208                 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val);
11209
11210             LEAVE:
11211
11212                 if (compIsForInlining())
11213                 {
11214                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE);
11215                     return;
11216                 }
11217
11218                 JITDUMP(" %04X", jmpAddr);
11219                 if (block->bbJumpKind != BBJ_LEAVE)
11220                 {
11221                     impResetLeaveBlock(block, jmpAddr);
11222                 }
11223
11224                 assert(jmpAddr == block->bbJumpDest->bbCodeOffs);
11225                 impImportLeave(block);
11226                 impNoteBranchOffs();
11227
11228                 break;
11229
11230             case CEE_BR:
11231             case CEE_BR_S:
11232                 jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
11233
11234                 if (compIsForInlining() && jmpDist == 0)
11235                 {
11236                     break; /* NOP */
11237                 }
11238
11239                 impNoteBranchOffs();
11240                 break;
11241
11242             case CEE_BRTRUE:
11243             case CEE_BRTRUE_S:
11244             case CEE_BRFALSE:
11245             case CEE_BRFALSE_S:
11246
11247                 /* Pop the comparand (now there's a neat term) from the stack */
11248                 if (tiVerificationNeeded)
11249                 {
11250                     typeInfo& tiVal = impStackTop().seTypeInfo;
11251                     Verify(tiVal.IsObjRef() || tiVal.IsByRef() || tiVal.IsIntegerType() || tiVal.IsMethod(),
11252                            "bad value");
11253                 }
11254
11255                 op1  = impPopStack().val;
11256                 type = op1->TypeGet();
11257
11258                 // brfalse and brtrue are only allowed on I4, refs, and byrefs.
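                      // If the branch target is simply the next block, the conditional branch is redundant;
                      // keep only the side effects of the condition (if any) and fall through.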
11259                 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11260                 {
11261                     block->bbJumpKind = BBJ_NONE;
11262
11263                     if (op1->gtFlags & GTF_GLOB_EFFECT)
11264                     {
11265                         op1 = gtUnusedValNode(op1);
11266                         goto SPILL_APPEND;
11267                     }
11268                     else
11269                     {
11270                         break;
11271                     }
11272                 }
11273
11274                 if (op1->OperIsCompare())
11275                 {
11276                     if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S)
11277                     {
11278                         // Flip the sense of the compare
11279
11280                         op1 = gtReverseCond(op1);
11281                     }
11282                 }
11283                 else
11284                 {
11285                     /* We'll compare against an equally-sized integer 0 */
11286                     /* For small types, we always compare against int   */
11287                     op2 = gtNewZeroConNode(genActualType(op1->gtType));
11288
11289                     /* Create the comparison operator and try to fold it */
11290
11291                     oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? GT_NE : GT_EQ;
11292                     op1  = gtNewOperNode(oper, TYP_INT, op1, op2);
11293                 }
11294
11295             // fall through
11296
11297             COND_JUMP:
11298
11299                 /* Fold comparison if we can */
11300
11301                 op1 = gtFoldExpr(op1);
11302
11303                 /* Try to fold the really simple cases like 'iconst *, ifne/ifeq'*/
11304                 /* Don't make any blocks unreachable in import only mode */
11305
11306                 if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly())
11307                 {
11308                     /* gtFoldExpr() should prevent this as we don't want to make any blocks
11309                        unreachable under compDbgCode */
11310                     assert(!opts.compDbgCode);
11311
11312                     BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->gtIntCon.gtIconVal ? BBJ_ALWAYS : BBJ_NONE);
11313                     assertImp((block->bbJumpKind == BBJ_COND)            // normal case
11314                               || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the
11315                                                                          // block for the second time
11316
11317                     block->bbJumpKind = foldedJumpKind;
11318 #ifdef DEBUG
11319                     if (verbose)
11320                     {
11321                         if (op1->gtIntCon.gtIconVal)
11322                         {
11323                             printf("\nThe conditional jump becomes an unconditional jump to BB%02u\n",
11324                                    block->bbJumpDest->bbNum);
11325                         }
11326                         else
11327                         {
11328                             printf("\nThe block falls through into the next BB%02u\n", block->bbNext->bbNum);
11329                         }
11330                     }
11331 #endif
11332                     break;
11333                 }
11334
11335                 op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1);
11336
11337                 /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt'
11338                    in impImportBlock(block). For correct line numbers, spill stack. */
11339
11340                 if (opts.compDbgCode && impCurStmtOffs != BAD_IL_OFFSET)
11341                 {
11342                     impSpillStackEnsure(true);
11343                 }
11344
11345                 goto SPILL_APPEND;
11346
11347             case CEE_CEQ:
11348                 oper = GT_EQ;
11349                 uns  = false;
11350                 goto CMP_2_OPs;
11351             case CEE_CGT_UN:
11352                 oper = GT_GT;
11353                 uns  = true;
11354                 goto CMP_2_OPs;
11355             case CEE_CGT:
11356                 oper = GT_GT;
11357                 uns  = false;
11358                 goto CMP_2_OPs;
11359             case CEE_CLT_UN:
11360                 oper = GT_LT;
11361                 uns  = true;
11362                 goto CMP_2_OPs;
11363             case CEE_CLT:
11364                 oper = GT_LT;
11365                 uns  = false;
11366                 goto CMP_2_OPs;
11367
11368             CMP_2_OPs:
11369                 if (tiVerificationNeeded)
11370                 {
11371                     verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
11372                     tiRetVal = typeInfo(TI_INT);
11373                 }
11374
11375                 op2 = impPopStack().val;
11376                 op1 = impPopStack().val;
11377
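                      // On 64-bit targets a compare may mix a native-int-sized operand with a 32-bit int;
                      // widen the 32-bit operand to TYP_I_IMPL (unsigned widening for the _UN compares).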
11378 #ifdef _TARGET_64BIT_
11379                 if (varTypeIsI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_INT))
11380                 {
11381                     op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11382                 }
11383                 else if (varTypeIsI(op2->TypeGet()) && (genActualType(op1->TypeGet()) == TYP_INT))
11384                 {
11385                     op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11386                 }
11387 #endif // _TARGET_64BIT_
11388
11389                 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
11390                           varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
11391                           varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
11392
11393                 /* Create the comparison node */
11394
11395                 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
11396
11397                 /* TODO: setting both flags when only one is appropriate */
11398                 if (opcode == CEE_CGT_UN || opcode == CEE_CLT_UN)
11399                 {
11400                     op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED;
11401                 }
11402
11403                 impPushOnStack(op1, tiRetVal);
11404                 break;
11405
11406             case CEE_BEQ_S:
11407             case CEE_BEQ:
11408                 oper = GT_EQ;
11409                 goto CMP_2_OPs_AND_BR;
11410
11411             case CEE_BGE_S:
11412             case CEE_BGE:
11413                 oper = GT_GE;
11414                 goto CMP_2_OPs_AND_BR;
11415
11416             case CEE_BGE_UN_S:
11417             case CEE_BGE_UN:
11418                 oper = GT_GE;
11419                 goto CMP_2_OPs_AND_BR_UN;
11420
11421             case CEE_BGT_S:
11422             case CEE_BGT:
11423                 oper = GT_GT;
11424                 goto CMP_2_OPs_AND_BR;
11425
11426             case CEE_BGT_UN_S:
11427             case CEE_BGT_UN:
11428                 oper = GT_GT;
11429                 goto CMP_2_OPs_AND_BR_UN;
11430
11431             case CEE_BLE_S:
11432             case CEE_BLE:
11433                 oper = GT_LE;
11434                 goto CMP_2_OPs_AND_BR;
11435
11436             case CEE_BLE_UN_S:
11437             case CEE_BLE_UN:
11438                 oper = GT_LE;
11439                 goto CMP_2_OPs_AND_BR_UN;
11440
11441             case CEE_BLT_S:
11442             case CEE_BLT:
11443                 oper = GT_LT;
11444                 goto CMP_2_OPs_AND_BR;
11445
11446             case CEE_BLT_UN_S:
11447             case CEE_BLT_UN:
11448                 oper = GT_LT;
11449                 goto CMP_2_OPs_AND_BR_UN;
11450
11451             case CEE_BNE_UN_S:
11452             case CEE_BNE_UN:
11453                 oper = GT_NE;
11454                 goto CMP_2_OPs_AND_BR_UN;
11455
11456             CMP_2_OPs_AND_BR_UN:
11457                 uns       = true;
11458                 unordered = true;
11459                 goto CMP_2_OPs_AND_BR_ALL;
11460             CMP_2_OPs_AND_BR:
11461                 uns       = false;
11462                 unordered = false;
11463                 goto CMP_2_OPs_AND_BR_ALL;
11464             CMP_2_OPs_AND_BR_ALL:
11465
11466                 if (tiVerificationNeeded)
11467                 {
11468                     verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
11469                 }
11470
11471                 /* Pull two values */
11472                 op2 = impPopStack().val;
11473                 op1 = impPopStack().val;
11474
11475 #ifdef _TARGET_64BIT_
11476                 if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT))
11477                 {
11478                     op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11479                 }
11480                 else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT))
11481                 {
11482                     op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11483                 }
11484 #endif // _TARGET_64BIT_
11485
11486                 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
11487                           varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
11488                           varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
11489
11490                 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11491                 {
11492                     block->bbJumpKind = BBJ_NONE;
11493
11494                     if (op1->gtFlags & GTF_GLOB_EFFECT)
11495                     {
11496                         impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11497                                                        "Branch to next Optimization, op1 side effect"));
11498                         impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
11499                     }
11500                     if (op2->gtFlags & GTF_GLOB_EFFECT)
11501                     {
11502                         impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11503                                                        "Branch to next Optimization, op2 side effect"));
11504                         impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
11505                     }
11506
11507 #ifdef DEBUG
11508                     if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT)
11509                     {
11510                         impNoteLastILoffs();
11511                     }
11512 #endif
11513                     break;
11514                 }
11515 #if !FEATURE_X87_DOUBLES
11516                 // We can generate a compare of different-sized floating point op1 and op2
11517                 // We insert a cast
11518                 //
11519                 if (varTypeIsFloating(op1->TypeGet()))
11520                 {
11521                     if (op1->TypeGet() != op2->TypeGet())
11522                     {
11523                         assert(varTypeIsFloating(op2->TypeGet()));
11524
11525                         // say op1=double, op2=float. To avoid loss of precision
11526                         // while comparing, op2 is converted to double and double
11527                         // comparison is done.
11528                         if (op1->TypeGet() == TYP_DOUBLE)
11529                         {
11530                             // We insert a cast of op2 to TYP_DOUBLE
11531                             op2 = gtNewCastNode(TYP_DOUBLE, op2, TYP_DOUBLE);
11532                         }
11533                         else if (op2->TypeGet() == TYP_DOUBLE)
11534                         {
11535                             // We insert a cast of op1 to TYP_DOUBLE
11536                             op1 = gtNewCastNode(TYP_DOUBLE, op1, TYP_DOUBLE);
11537                         }
11538                     }
11539                 }
11540 #endif // !FEATURE_X87_DOUBLES
11541
11542                 /* Create and append the operator */
11543
11544                 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
11545
11546                 if (uns)
11547                 {
11548                     op1->gtFlags |= GTF_UNSIGNED;
11549                 }
11550
11551                 if (unordered)
11552                 {
11553                     op1->gtFlags |= GTF_RELOP_NAN_UN;
11554                 }
11555
11556                 goto COND_JUMP;
11557
11558             case CEE_SWITCH:
11559                 assert(!compIsForInlining());
11560
11561                 if (tiVerificationNeeded)
11562                 {
11563                     Verify(impStackTop().seTypeInfo.IsType(TI_INT), "Bad switch val");
11564                 }
11565                 /* Pop the switch value off the stack */
11566                 op1 = impPopStack().val;
11567                 assertImp(genActualTypeIsIntOrI(op1->TypeGet()));
11568
11569 #ifdef _TARGET_64BIT_
11570                 // Widen 'op1' on 64-bit targets
11571                 if (op1->TypeGet() != TYP_I_IMPL)
11572                 {
11573                     if (op1->OperGet() == GT_CNS_INT)
11574                     {
11575                         op1->gtType = TYP_I_IMPL;
11576                     }
11577                     else
11578                     {
11579                         op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
11580                     }
11581                 }
11582 #endif // _TARGET_64BIT_
11583                 assert(genActualType(op1->TypeGet()) == TYP_I_IMPL);
11584
11585                 /* We can create a switch node */
11586
11587                 op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1);
11588
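                      // The switch operand is a 4-byte case count followed by that many 4-byte relative
                      // jump offsets; advance codeAddr past the entire jump table.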
11589                 val = (int)getU4LittleEndian(codeAddr);
11590                 codeAddr += 4 + val * 4; // skip over the switch-table
11591
11592                 goto SPILL_APPEND;
11593
11594             /************************** Casting OPCODES ***************************/
11595
11596             case CEE_CONV_OVF_I1:
11597                 lclTyp = TYP_BYTE;
11598                 goto CONV_OVF;
11599             case CEE_CONV_OVF_I2:
11600                 lclTyp = TYP_SHORT;
11601                 goto CONV_OVF;
11602             case CEE_CONV_OVF_I:
11603                 lclTyp = TYP_I_IMPL;
11604                 goto CONV_OVF;
11605             case CEE_CONV_OVF_I4:
11606                 lclTyp = TYP_INT;
11607                 goto CONV_OVF;
11608             case CEE_CONV_OVF_I8:
11609                 lclTyp = TYP_LONG;
11610                 goto CONV_OVF;
11611
11612             case CEE_CONV_OVF_U1:
11613                 lclTyp = TYP_UBYTE;
11614                 goto CONV_OVF;
11615             case CEE_CONV_OVF_U2:
11616                 lclTyp = TYP_CHAR;
11617                 goto CONV_OVF;
11618             case CEE_CONV_OVF_U:
11619                 lclTyp = TYP_U_IMPL;
11620                 goto CONV_OVF;
11621             case CEE_CONV_OVF_U4:
11622                 lclTyp = TYP_UINT;
11623                 goto CONV_OVF;
11624             case CEE_CONV_OVF_U8:
11625                 lclTyp = TYP_ULONG;
11626                 goto CONV_OVF;
11627
11628             case CEE_CONV_OVF_I1_UN:
11629                 lclTyp = TYP_BYTE;
11630                 goto CONV_OVF_UN;
11631             case CEE_CONV_OVF_I2_UN:
11632                 lclTyp = TYP_SHORT;
11633                 goto CONV_OVF_UN;
11634             case CEE_CONV_OVF_I_UN:
11635                 lclTyp = TYP_I_IMPL;
11636                 goto CONV_OVF_UN;
11637             case CEE_CONV_OVF_I4_UN:
11638                 lclTyp = TYP_INT;
11639                 goto CONV_OVF_UN;
11640             case CEE_CONV_OVF_I8_UN:
11641                 lclTyp = TYP_LONG;
11642                 goto CONV_OVF_UN;
11643
11644             case CEE_CONV_OVF_U1_UN:
11645                 lclTyp = TYP_UBYTE;
11646                 goto CONV_OVF_UN;
11647             case CEE_CONV_OVF_U2_UN:
11648                 lclTyp = TYP_CHAR;
11649                 goto CONV_OVF_UN;
11650             case CEE_CONV_OVF_U_UN:
11651                 lclTyp = TYP_U_IMPL;
11652                 goto CONV_OVF_UN;
11653             case CEE_CONV_OVF_U4_UN:
11654                 lclTyp = TYP_UINT;
11655                 goto CONV_OVF_UN;
11656             case CEE_CONV_OVF_U8_UN:
11657                 lclTyp = TYP_ULONG;
11658                 goto CONV_OVF_UN;
11659
11660             CONV_OVF_UN:
11661                 uns = true;
11662                 goto CONV_OVF_COMMON;
11663             CONV_OVF:
11664                 uns = false;
11665                 goto CONV_OVF_COMMON;
11666
11667             CONV_OVF_COMMON:
11668                 ovfl = true;
11669                 goto _CONV;
11670
11671             case CEE_CONV_I1:
11672                 lclTyp = TYP_BYTE;
11673                 goto CONV;
11674             case CEE_CONV_I2:
11675                 lclTyp = TYP_SHORT;
11676                 goto CONV;
11677             case CEE_CONV_I:
11678                 lclTyp = TYP_I_IMPL;
11679                 goto CONV;
11680             case CEE_CONV_I4:
11681                 lclTyp = TYP_INT;
11682                 goto CONV;
11683             case CEE_CONV_I8:
11684                 lclTyp = TYP_LONG;
11685                 goto CONV;
11686
11687             case CEE_CONV_U1:
11688                 lclTyp = TYP_UBYTE;
11689                 goto CONV;
11690             case CEE_CONV_U2:
11691                 lclTyp = TYP_CHAR;
11692                 goto CONV;
11693 #if (REGSIZE_BYTES == 8)
11694             case CEE_CONV_U:
11695                 lclTyp = TYP_U_IMPL;
11696                 goto CONV_UN;
11697 #else
11698             case CEE_CONV_U:
11699                 lclTyp = TYP_U_IMPL;
11700                 goto CONV;
11701 #endif
11702             case CEE_CONV_U4:
11703                 lclTyp = TYP_UINT;
11704                 goto CONV;
11705             case CEE_CONV_U8:
11706                 lclTyp = TYP_ULONG;
11707                 goto CONV_UN;
11708
11709             case CEE_CONV_R4:
11710                 lclTyp = TYP_FLOAT;
11711                 goto CONV;
11712             case CEE_CONV_R8:
11713                 lclTyp = TYP_DOUBLE;
11714                 goto CONV;
11715
11716             case CEE_CONV_R_UN:
11717                 lclTyp = TYP_DOUBLE;
11718                 goto CONV_UN;
11719
11720             CONV_UN:
11721                 uns  = true;
11722                 ovfl = false;
11723                 goto _CONV;
11724
11725             CONV:
11726                 uns  = false;
11727                 ovfl = false;
11728                 goto _CONV;
11729
11730             _CONV:
11731                 // just check that we have a number on the stack
11732                 if (tiVerificationNeeded)
11733                 {
11734                     const typeInfo& tiVal = impStackTop().seTypeInfo;
11735                     Verify(tiVal.IsNumberType(), "bad arg");
11736
11737 #ifdef _TARGET_64BIT_
11738                     bool isNative = false;
11739
11740                     switch (opcode)
11741                     {
11742                         case CEE_CONV_OVF_I:
11743                         case CEE_CONV_OVF_I_UN:
11744                         case CEE_CONV_I:
11745                         case CEE_CONV_OVF_U:
11746                         case CEE_CONV_OVF_U_UN:
11747                         case CEE_CONV_U:
11748                             isNative = true;
11749                         default:
11750                             // leave 'isNative' = false;
11751                             break;
11752                     }
11753                     if (isNative)
11754                     {
11755                         tiRetVal = typeInfo::nativeInt();
11756                     }
11757                     else
11758 #endif // _TARGET_64BIT_
11759                     {
11760                         tiRetVal = typeInfo(lclTyp).NormaliseForStack();
11761                     }
11762                 }
11763
11764                 // Only conversions from FLOAT or DOUBLE to an integer type,
11765                 // and conversions from ULONG (or LONG on ARM) to DOUBLE, are morphed to calls
11766
11767                 if (varTypeIsFloating(lclTyp))
11768                 {
11769                     callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl
11770 #ifdef _TARGET_64BIT_
11771                                // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK?
11772                                // TYP_BYREF could be used as TYP_I_IMPL which is long.
11773                                // TODO-CQ: remove this when we lower casts long/ulong --> float/double
11774                                // and generate SSE2 code instead of going through helper calls.
11775                                || (impStackTop().val->TypeGet() == TYP_BYREF)
11776 #endif
11777                         ;
11778                 }
11779                 else
11780                 {
11781                     callNode = varTypeIsFloating(impStackTop().val->TypeGet());
11782                 }
11783
11784                 // At this point uns, ovfl, and callNode are all set
11785
11786                 op1 = impPopStack().val;
11787                 impBashVarAddrsToI(op1);
11788
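                      // e.g. "conv.i1" of "x & 0x7F" needs no cast at all, and for "x & 0xFF" the AND can
                      // be dropped since the narrowing cast truncates anyway.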
11789                 if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND)
11790                 {
11791                     op2 = op1->gtOp.gtOp2;
11792
11793                     if (op2->gtOper == GT_CNS_INT)
11794                     {
11795                         ssize_t ival = op2->gtIntCon.gtIconVal;
11796                         ssize_t mask, umask;
11797
11798                         switch (lclTyp)
11799                         {
11800                             case TYP_BYTE:
11801                             case TYP_UBYTE:
11802                                 mask  = 0x00FF;
11803                                 umask = 0x007F;
11804                                 break;
11805                             case TYP_CHAR:
11806                             case TYP_SHORT:
11807                                 mask  = 0xFFFF;
11808                                 umask = 0x7FFF;
11809                                 break;
11810
11811                             default:
11812                                 assert(!"unexpected type");
11813                                 return;
11814                         }
11815
11816                         if (((ival & umask) == ival) || ((ival & mask) == ival && uns))
11817                         {
11818                             /* Toss the cast, it's a waste of time */
11819
11820                             impPushOnStack(op1, tiRetVal);
11821                             break;
11822                         }
11823                         else if (ival == mask)
11824                         {
11825                             /* Toss the masking, it's a waste of time, since
11826                                we sign-extend from the small value anyway */
11827
11828                             op1 = op1->gtOp.gtOp1;
11829                         }
11830                     }
11831                 }
11832
11833                 /*  The 'op2' sub-operand of a cast is the 'real' type number,
11834                     since the result of a cast to one of the 'small' integer
11835                     types is an integer.
11836                  */
11837
11838                 type = genActualType(lclTyp);
11839
11840 #if SMALL_TREE_NODES
11841                 if (callNode)
11842                 {
11843                     op1 = gtNewCastNodeL(type, op1, lclTyp);
11844                 }
11845                 else
11846 #endif // SMALL_TREE_NODES
11847                 {
11848                     op1 = gtNewCastNode(type, op1, lclTyp);
11849                 }
11850
11851                 if (ovfl)
11852                 {
11853                     op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT);
11854                 }
11855                 if (uns)
11856                 {
11857                     op1->gtFlags |= GTF_UNSIGNED;
11858                 }
11859                 impPushOnStack(op1, tiRetVal);
11860                 break;
11861
11862             case CEE_NEG:
11863                 if (tiVerificationNeeded)
11864                 {
11865                     tiRetVal = impStackTop().seTypeInfo;
11866                     Verify(tiRetVal.IsNumberType(), "Bad arg");
11867                 }
11868
11869                 op1 = impPopStack().val;
11870                 impBashVarAddrsToI(op1, nullptr);
11871                 impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal);
11872                 break;
11873
11874             case CEE_POP:
11875                 if (tiVerificationNeeded)
11876                 {
11877                     impStackTop(0);
11878                 }
11879
11880                 /* Pull the top value from the stack */
11881
11882                 op1 = impPopStack(clsHnd).val;
11883
11884                 /* Get hold of the type of the value being popped */
11885
11886                 lclTyp = genActualType(op1->gtType);
11887
11888                 /* Does the value have any side effects? */
11889
11890                 if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode)
11891                 {
11892                     // Since we are throwing away the value, just normalize
11893                     // it to its address.  This is more efficient.
11894
11895                     if (varTypeIsStruct(op1))
11896                     {
11897 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
11898                         // Non-calls, such as obj or ret_expr, have to go through this.
11899                         // Calls with large struct return value have to go through this.
11900                         // Helper calls with small struct return value also have to go
11901                         // through this since they do not follow the Unix calling convention.
11902                         if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd) ||
11903                             op1->AsCall()->gtCallType == CT_HELPER)
11904 #endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
11905                         {
11906                             op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false);
11907                         }
11908                     }
11909
11910                     // If op1 is non-overflow cast, throw it away since it is useless.
11911                     // Another reason for throwing away the useless cast is in the context of
11912                     // implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)).
11913                     // The cast gets added as part of importing GT_CALL, which gets in the way
11914                     // of fgMorphCall() on the forms of tail call nodes that we assert.
11915                     if ((op1->gtOper == GT_CAST) && !op1->gtOverflow())
11916                     {
11917                         op1 = op1->gtOp.gtOp1;
11918                     }
11919
11920                     // If 'op1' is an expression, create an assignment node.
11921                     // Helps analyses (like CSE) to work fine.
11922
11923                     if (op1->gtOper != GT_CALL)
11924                     {
11925                         op1 = gtUnusedValNode(op1);
11926                     }
11927
11928                     /* Append the value to the tree list */
11929                     goto SPILL_APPEND;
11930                 }
11931
11932                 /* No side effects - just throw the <BEEP> thing away */
11933                 break;
11934
11935             case CEE_DUP:
11936
11937                 if (tiVerificationNeeded)
11938                 {
11939                     // Dup could start the beginning of a delegate creation sequence; remember that
11940                     delegateCreateStart = codeAddr - 1;
11941                     impStackTop(0);
11942                 }
11943
11944                 // Convert a (dup, stloc) sequence into a (stloc, ldloc) sequence in the following cases:
11945                 // - If this is non-debug code - so that CSE will recognize the two as equal.
11946                 //   This helps eliminate a redundant bounds check in cases such as:
11947                 //       ariba[i+3] += some_value;
11948                 // - If the top of the stack is a non-leaf that may be expensive to clone.
11949
11950                 if (codeAddr < codeEndp)
11951                 {
11952                     OPCODE nextOpcode = (OPCODE)getU1LittleEndian(codeAddr);
11953                     if (impIsAnySTLOC(nextOpcode))
11954                     {
11955                         if (!opts.compDbgCode)
11956                         {
11957                             insertLdloc = true;
11958                             break;
11959                         }
11960                         GenTree* stackTop = impStackTop().val;
11961                         if (!stackTop->IsIntegralConst(0) && !stackTop->IsFPZero() && !stackTop->IsLocal())
11962                         {
11963                             insertLdloc = true;
11964                             break;
11965                         }
11966                     }
11967                 }
11968
11969                 /* Pull the top value from the stack */
11970                 op1 = impPopStack(tiRetVal);
11971
11972                 /* Clone the value */
11973                 op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL,
11974                                    nullptr DEBUGARG("DUP instruction"));
11975
11976                 /* Either the tree started with no global effects, or impCloneExpr
11977                    evaluated the tree to a temp and returned two copies of that
11978                    temp. Either way, neither op1 nor op2 should have side effects.
11979                 */
11980                 assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT));
11981
11982                 /* Push the tree/temp back on the stack */
11983                 impPushOnStack(op1, tiRetVal);
11984
11985                 /* Push the copy on the stack */
11986                 impPushOnStack(op2, tiRetVal);
11987
11988                 break;
11989
11990             case CEE_STIND_I1:
11991                 lclTyp = TYP_BYTE;
11992                 goto STIND;
11993             case CEE_STIND_I2:
11994                 lclTyp = TYP_SHORT;
11995                 goto STIND;
11996             case CEE_STIND_I4:
11997                 lclTyp = TYP_INT;
11998                 goto STIND;
11999             case CEE_STIND_I8:
12000                 lclTyp = TYP_LONG;
12001                 goto STIND;
12002             case CEE_STIND_I:
12003                 lclTyp = TYP_I_IMPL;
12004                 goto STIND;
12005             case CEE_STIND_REF:
12006                 lclTyp = TYP_REF;
12007                 goto STIND;
12008             case CEE_STIND_R4:
12009                 lclTyp = TYP_FLOAT;
12010                 goto STIND;
12011             case CEE_STIND_R8:
12012                 lclTyp = TYP_DOUBLE;
12013                 goto STIND;
12014             STIND:
12015
12016                 if (tiVerificationNeeded)
12017                 {
12018                     typeInfo instrType(lclTyp);
12019 #ifdef _TARGET_64BIT_
12020                     if (opcode == CEE_STIND_I)
12021                     {
12022                         instrType = typeInfo::nativeInt();
12023                     }
12024 #endif // _TARGET_64BIT_
12025                     verVerifySTIND(impStackTop(1).seTypeInfo, impStackTop(0).seTypeInfo, instrType);
12026                 }
12027                 else
12028                 {
12029                     compUnsafeCastUsed = true; // Have to go conservative
12030                 }
12031
12032             STIND_POST_VERIFY:
12033
12034                 op2 = impPopStack().val; // value to store
12035                 op1 = impPopStack().val; // address to store to
12036
12037                 // you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF
12038                 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12039
12040                 impBashVarAddrsToI(op1, op2);
12041
12042                 op2 = impImplicitR4orR8Cast(op2, lclTyp);
12043
12044 #ifdef _TARGET_64BIT_
12045                 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
12046                 if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
12047                 {
12048                     op2->gtType = TYP_I_IMPL;
12049                 }
12050                 else
12051                 {
12052                     // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
12053                     //
12054                     if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
12055                     {
12056                         assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12057                         op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
12058                     }
12059                     // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12060                     //
12061                     if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
12062                     {
12063                         assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12064                         op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
12065                     }
12066                 }
12067 #endif // _TARGET_64BIT_
12068
12069                 if (opcode == CEE_STIND_REF)
12070                 {
12071                     // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF
12072                     assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType));
12073                     lclTyp = genActualType(op2->TypeGet());
12074                 }
12075
12076 // Check target type.
12077 #ifdef DEBUG
12078                 if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF)
12079                 {
12080                     if (op2->gtType == TYP_BYREF)
12081                     {
12082                         assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL);
12083                     }
12084                     else if (lclTyp == TYP_BYREF)
12085                     {
12086                         assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType));
12087                     }
12088                 }
12089                 else
12090                 {
12091                     assertImp(genActualType(op2->gtType) == genActualType(lclTyp) ||
12092                               ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) ||
12093                               (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp)));
12094                 }
12095 #endif
12096
12097                 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12098
12099                 // stind could point anywhere, for example at a boxed class static int
12100                 op1->gtFlags |= GTF_IND_TGTANYWHERE;
12101
12102                 if (prefixFlags & PREFIX_VOLATILE)
12103                 {
12104                     assert(op1->OperGet() == GT_IND);
12105                     op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
12106                     op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12107                     op1->gtFlags |= GTF_IND_VOLATILE;
12108                 }
12109
12110                 if (prefixFlags & PREFIX_UNALIGNED)
12111                 {
12112                     assert(op1->OperGet() == GT_IND);
12113                     op1->gtFlags |= GTF_IND_UNALIGNED;
12114                 }
12115
12116                 op1 = gtNewAssignNode(op1, op2);
12117                 op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
12118
12119                 // Spill side-effects AND global-data-accesses
12120                 if (verCurrentState.esStackDepth > 0)
12121                 {
12122                     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND"));
12123                 }
12124
12125                 goto APPEND;
12126
12127             case CEE_LDIND_I1:
12128                 lclTyp = TYP_BYTE;
12129                 goto LDIND;
12130             case CEE_LDIND_I2:
12131                 lclTyp = TYP_SHORT;
12132                 goto LDIND;
12133             case CEE_LDIND_U4:
12134             case CEE_LDIND_I4:
12135                 lclTyp = TYP_INT;
12136                 goto LDIND;
12137             case CEE_LDIND_I8:
12138                 lclTyp = TYP_LONG;
12139                 goto LDIND;
12140             case CEE_LDIND_REF:
12141                 lclTyp = TYP_REF;
12142                 goto LDIND;
12143             case CEE_LDIND_I:
12144                 lclTyp = TYP_I_IMPL;
12145                 goto LDIND;
12146             case CEE_LDIND_R4:
12147                 lclTyp = TYP_FLOAT;
12148                 goto LDIND;
12149             case CEE_LDIND_R8:
12150                 lclTyp = TYP_DOUBLE;
12151                 goto LDIND;
12152             case CEE_LDIND_U1:
12153                 lclTyp = TYP_UBYTE;
12154                 goto LDIND;
12155             case CEE_LDIND_U2:
12156                 lclTyp = TYP_CHAR;
12157                 goto LDIND;
12158             LDIND:
12159
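                      // ldind.*: pop an address from the stack and push the value loaded through it;
                      // e.g. "ldloca.s V; ldind.i4" pushes the current value of the int local V.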
12160                 if (tiVerificationNeeded)
12161                 {
12162                     typeInfo lclTiType(lclTyp);
12163 #ifdef _TARGET_64BIT_
12164                     if (opcode == CEE_LDIND_I)
12165                     {
12166                         lclTiType = typeInfo::nativeInt();
12167                     }
12168 #endif // _TARGET_64BIT_
12169                     tiRetVal = verVerifyLDIND(impStackTop().seTypeInfo, lclTiType);
12170                     tiRetVal.NormaliseForStack();
12171                 }
12172                 else
12173                 {
12174                     compUnsafeCastUsed = true; // Have to go conservative
12175                 }
12176
12177             LDIND_POST_VERIFY:
12178
12179                 op1 = impPopStack().val; // address to load from
12180                 impBashVarAddrsToI(op1);
12181
12182 #ifdef _TARGET_64BIT_
12183                 // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12184                 //
12185                 if (genActualType(op1->gtType) == TYP_INT)
12186                 {
12187                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12188                     op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
12189                 }
12190 #endif
12191
12192                 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12193
12194                 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12195
12196                 // ldind could point anywhere, for example at a boxed class static int
12197                 op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
12198
12199                 if (prefixFlags & PREFIX_VOLATILE)
12200                 {
12201                     assert(op1->OperGet() == GT_IND);
12202                     op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
12203                     op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12204                     op1->gtFlags |= GTF_IND_VOLATILE;
12205                 }
12206
12207                 if (prefixFlags & PREFIX_UNALIGNED)
12208                 {
12209                     assert(op1->OperGet() == GT_IND);
12210                     op1->gtFlags |= GTF_IND_UNALIGNED;
12211                 }
12212
12213                 impPushOnStack(op1, tiRetVal);
12214
12215                 break;
12216
12217             case CEE_UNALIGNED:
12218
12219                 assert(sz == 1);
12220                 val = getU1LittleEndian(codeAddr);
12221                 ++codeAddr;
12222                 JITDUMP(" %u", val);
12223                 if ((val != 1) && (val != 2) && (val != 4))
12224                 {
12225                     BADCODE("Alignment unaligned. must be 1, 2, or 4");
12226                 }
12227
12228                 Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. prefixes");
12229                 prefixFlags |= PREFIX_UNALIGNED;
12230
12231                 impValidateMemoryAccessOpcode(codeAddr, codeEndp, false);
12232
12233             PREFIX:
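                      // Common tail for prefix opcodes: fetch the opcode being prefixed and re-enter the
                      // decoder with the accumulated prefixFlags still in effect.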
12234                 opcode = (OPCODE)getU1LittleEndian(codeAddr);
12235                 codeAddr += sizeof(__int8);
12236                 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
12237                 goto DECODE_OPCODE;
12238
12239             case CEE_VOLATILE:
12240
12241                 Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes");
12242                 prefixFlags |= PREFIX_VOLATILE;
12243
12244                 impValidateMemoryAccessOpcode(codeAddr, codeEndp, true);
12245
12246                 assert(sz == 0);
12247                 goto PREFIX;
12248
12249             case CEE_LDFTN:
12250             {
12251                 // Need to do a lookup here so that we perform an access check
12252                 // and do a NOWAY if protections are violated
12253                 _impResolveToken(CORINFO_TOKENKIND_Method);
12254
12255                 JITDUMP(" %08X", resolvedToken.token);
12256
12257                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12258                               addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN)),
12259                               &callInfo);
12260
12261                 // This check really only applies to intrinsic Array.Address methods
12262                 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12263                 {
12264                     NO_WAY("Currently do not support LDFTN of Parameterized functions");
12265                 }
12266
12267                 // Do this before DO_LDFTN since CEE_LDVIRTFTN does it on its own.
12268                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12269
12270                 if (tiVerificationNeeded)
12271                 {
12272                     // LDFTN could mark the beginning of a delegate creation sequence; remember where it starts
12273                     delegateCreateStart = codeAddr - 2;
12274
12275                     // check any constraints on the callee's class and type parameters
12276                     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12277                                    "method has unsatisfied class constraints");
12278                     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12279                                                                                 resolvedToken.hMethod),
12280                                    "method has unsatisfied method constraints");
12281
12282                     mflags = callInfo.verMethodFlags;
12283                     Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDFTN on a constructor");
12284                 }
12285
12286             DO_LDFTN:
12287                 op1 = impMethodPointer(&resolvedToken, &callInfo);
12288                 if (compDonotInline())
12289                 {
12290                     return;
12291                 }
12292
12293                 impPushOnStack(op1, typeInfo(resolvedToken.hMethod));
12294
12295                 break;
12296             }
12297
12298             case CEE_LDVIRTFTN:
12299             {
12300                 /* Get the method token */
12301
12302                 _impResolveToken(CORINFO_TOKENKIND_Method);
12303
12304                 JITDUMP(" %08X", resolvedToken.token);
12305
12306                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */,
12307                               addVerifyFlag(combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN),
12308                                                     CORINFO_CALLINFO_CALLVIRT)),
12309                               &callInfo);
12310
12311                 // This check really only applies to intrinsic Array.Address methods
12312                 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12313                 {
12314                     NO_WAY("Currently do not support LDFTN of Parameterized functions");
12315                 }
12316
12317                 mflags = callInfo.methodFlags;
12318
12319                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12320
12321                 if (compIsForInlining())
12322                 {
12323                     if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12324                     {
12325                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL);
12326                         return;
12327                     }
12328                 }
12329
12330                 CORINFO_SIG_INFO& ftnSig = callInfo.sig;
12331
12332                 if (tiVerificationNeeded)
12333                 {
12334
12335                     Verify(ftnSig.hasThis(), "ldvirtftn on a static method");
12336                     Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDVIRTFTN on a constructor");
12337
12338                     // JIT32 verifier rejects verifiable ldvirtftn pattern
12339                     typeInfo declType =
12340                         verMakeTypeInfo(resolvedToken.hClass, true); // Change TI_STRUCT to TI_REF when necessary
12341
12342                     typeInfo arg = impStackTop().seTypeInfo;
12343                     Verify((arg.IsType(TI_REF) || arg.IsType(TI_NULL)) && tiCompatibleWith(arg, declType, true),
12344                            "bad ldvirtftn");
12345
12346                     CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
12347                     if (!(arg.IsType(TI_NULL) || (mflags & CORINFO_FLG_STATIC)))
12348                     {
12349                         instanceClassHnd = arg.GetClassHandleForObjRef();
12350                     }
12351
12352                     // check any constraints on the method's class and type parameters
12353                     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12354                                    "method has unsatisfied class constraints");
12355                     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12356                                                                                 resolvedToken.hMethod),
12357                                    "method has unsatisfied method constraints");
12358
12359                     if (mflags & CORINFO_FLG_PROTECTED)
12360                     {
12361                         Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
12362                                "Accessing protected method through wrong type.");
12363                     }
12364                 }
12365
12366                 /* Get the object-ref */
12367                 op1 = impPopStack().val;
12368                 assertImp(op1->gtType == TYP_REF);
12369
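                      // If the target can be resolved directly (ReadyToRun non-LDVIRTFTN lookup, or a
                      // final/static/non-virtual method), the object reference is only needed for its side
                      // effects: spill those and fall back to the plain LDFTN path below.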
12370                 if (opts.IsReadyToRun())
12371                 {
12372                     if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN)
12373                     {
12374                         if (op1->gtFlags & GTF_SIDE_EFFECT)
12375                         {
12376                             op1 = gtUnusedValNode(op1);
12377                             impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12378                         }
12379                         goto DO_LDFTN;
12380                     }
12381                 }
12382                 else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12383                 {
12384                     if (op1->gtFlags & GTF_SIDE_EFFECT)
12385                     {
12386                         op1 = gtUnusedValNode(op1);
12387                         impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12388                     }
12389                     goto DO_LDFTN;
12390                 }
12391
12392                 GenTreePtr fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo);
12393                 if (compDonotInline())
12394                 {
12395                     return;
12396                 }
12397
12398                 impPushOnStack(fptr, typeInfo(resolvedToken.hMethod));
12399
12400                 break;
12401             }
12402
12403             case CEE_CONSTRAINED:
12404
12405                 assertImp(sz == sizeof(unsigned));
12406                 impResolveToken(codeAddr, &constrainedResolvedToken, CORINFO_TOKENKIND_Constrained);
12407                 codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually
12408                 JITDUMP(" (%08X) ", constrainedResolvedToken.token);
12409
12410                 Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes");
12411                 prefixFlags |= PREFIX_CONSTRAINED;
12412
12413                 {
12414                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12415                     if (actualOpcode != CEE_CALLVIRT)
12416                     {
12417                         BADCODE("constrained. has to be followed by callvirt");
12418                     }
12419                 }
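                      // For example:
                      //     constrained. !!T
                      //     callvirt   instance string [mscorlib]System.Object::ToString()
                      // where the constrained. prefix applies to the immediately following callvirt.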
12420
12421                 goto PREFIX;
12422
12423             case CEE_READONLY:
12424                 JITDUMP(" readonly.");
12425
12426                 Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes");
12427                 prefixFlags |= PREFIX_READONLY;
12428
12429                 {
12430                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12431                     if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode))
12432                     {
12433                         BADCODE("readonly. has to be followed by ldelema or call");
12434                     }
12435                 }
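                      // For example, "readonly. ldelema !!T" produces a controlled-mutability managed
                      // pointer, which allows the element-type check on the ldelema to be skipped since the
                      // resulting pointer cannot be used to mutate the element.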
12436
12437                 assert(sz == 0);
12438                 goto PREFIX;
12439
12440             case CEE_TAILCALL:
12441                 JITDUMP(" tail.");
12442
12443                 Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes");
12444                 prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
12445
12446                 {
12447                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12448                     if (!impOpcodeIsCallOpcode(actualOpcode))
12449                     {
12450                         BADCODE("tailcall. has to be followed by call, callvirt or calli");
12451                     }
12452                 }
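                      // For example, the canonical explicit tail call pattern is:
                      //     tail. call void SomeClass::SomeMethod()
                      //     ret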
12453                 assert(sz == 0);
12454                 goto PREFIX;
12455
12456             case CEE_NEWOBJ:
12457
12458                 /* Since we will implicitly insert newObjThisPtr at the start of the
12459                    argument list, spill any GTF_ORDER_SIDEEFF */
12460                 impSpillSpecialSideEff();
12461
12462                 /* NEWOBJ does not respond to TAIL */
12463                 prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT;
12464
12465                 /* NEWOBJ does not respond to CONSTRAINED */
12466                 prefixFlags &= ~PREFIX_CONSTRAINED;
12467
12468 #if COR_JIT_EE_VERSION > 460
12469                 _impResolveToken(CORINFO_TOKENKIND_NewObj);
12470 #else
12471                 _impResolveToken(CORINFO_TOKENKIND_Method);
12472 #endif
12473
12474                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12475                               addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM)),
12476                               &callInfo);
12477
12478                 if (compIsForInlining())
12479                 {
12480                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
12481                     {
12482                         // Check to see if this call violates the boundary.
12483                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
12484                         return;
12485                     }
12486                 }
12487
12488                 mflags = callInfo.methodFlags;
12489
12490                 if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0)
12491                 {
12492                     BADCODE("newobj on static or abstract method");
12493                 }
12494
12495                 // Insert the security callout before any actual code is generated
12496                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12497
12498                 // There are three different cases for new; in the first two the
12499                 // object size is variable (it depends on the arguments):
12500                 //      1) Object is an array (arrays are treated specially by the EE)
12501                 //      2) Object is some other variable-sized object (e.g. String)
12502                 //      3) Class size can be determined beforehand (normal case)
12503                 // In the first case, we need to call a NEWOBJ helper (multinewarray);
12504                 // in the second case we call the constructor with a '0' this pointer;
12505                 // in the third case we allocate the memory, then call the constructor.
12506
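                      // For example, "newobj instance void System.String::.ctor(char, int32)" takes the
                      // variable-size path (case 2), while "newobj instance void C::.ctor()" on an ordinary
                      // class C takes the fixed-size path (case 3).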
12507                 clsFlags = callInfo.classFlags;
12508                 if (clsFlags & CORINFO_FLG_ARRAY)
12509                 {
12510                     if (tiVerificationNeeded)
12511                     {
12512                         CORINFO_CLASS_HANDLE elemTypeHnd;
12513                         INDEBUG(CorInfoType corType =)
12514                         info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
12515                         assert(!(elemTypeHnd == nullptr && corType == CORINFO_TYPE_VALUECLASS));
12516                         Verify(elemTypeHnd == nullptr ||
12517                                    !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
12518                                "newarr of byref-like objects");
12519                         verVerifyCall(opcode, &resolvedToken, nullptr, ((prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0),
12520                                       ((prefixFlags & PREFIX_READONLY) != 0), delegateCreateStart, codeAddr - 1,
12521                                       &callInfo DEBUGARG(info.compFullName));
12522                     }
12523                     // Arrays need to call the NEWOBJ helper.
12524                     assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE);
12525
12526                     impImportNewObjArray(&resolvedToken, &callInfo);
12527                     if (compDonotInline())
12528                     {
12529                         return;
12530                     }
12531
12532                     callTyp = TYP_REF;
12533                     break;
12534                 }
12535                 // At present this can only be String
12536                 else if (clsFlags & CORINFO_FLG_VAROBJSIZE)
12537                 {
12538                     if (IsTargetAbi(CORINFO_CORERT_ABI))
12539                     {
12540                         // The dummy argument does not exist in CoreRT
12541                         newObjThisPtr = nullptr;
12542                     }
12543                     else
12544                     {
12545                         // This is the case for variable-sized objects that are not
12546                         // arrays.  In this case, call the constructor with a null 'this'
12547                         // pointer
12548                         newObjThisPtr = gtNewIconNode(0, TYP_REF);
12549                     }
12550
12551                     /* Remember that this basic block contains 'new' of an object */
12552                     block->bbFlags |= BBF_HAS_NEWOBJ;
12553                     optMethodFlags |= OMF_HAS_NEWOBJ;
12554                 }
12555                 else
12556                 {
12557                     // This is the normal case where the size of the object is
12558                     // fixed.  Allocate the memory and call the constructor.
12559
12560                     // Note: We cannot add a peep to avoid use of temp here
12561                     // because we don't have enough interference info to detect when
12562                     // sources and destinations interfere, for example: s = new S(ref);
12563
12564                     // TODO: Find the correct place to introduce a general
12565                     // reverse copy prop for struct return values from newobj or
12566                     // any function returning structs.
12567
12568                     /* get a temporary for the new object */
12569                     lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp"));
12570
12571                     // In the value class case we only need clsHnd for size calcs.
12572                     //
12573                     // The lookup of the code pointer will be handled by CALL in this case
12574                     if (clsFlags & CORINFO_FLG_VALUECLASS)
12575                     {
12576                         if (compIsForInlining())
12577                         {
12578                             // If value class has GC fields, inform the inliner. It may choose to
12579                             // bail out on the inline.
12580                             DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
12581                             if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
12582                             {
12583                                 compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
12584                                 if (compInlineResult->IsFailure())
12585                                 {
12586                                     return;
12587                                 }
12588
12589                                 // Do further notification in the case where the call site is rare;
12590                                 // some policies do not track the relative hotness of call sites for
12591                                 // "always" inline cases.
12592                                 if (impInlineInfo->iciBlock->isRunRarely())
12593                                 {
12594                                     compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
12595                                     if (compInlineResult->IsFailure())
12596                                     {
12597                                         return;
12598                                     }
12599                                 }
12600                             }
12601                         }
12602
12603                         CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
12604                         unsigned    size   = info.compCompHnd->getClassSize(resolvedToken.hClass);
12605
12606                         if (impIsPrimitive(jitTyp))
12607                         {
12608                             lvaTable[lclNum].lvType = JITtype2varType(jitTyp);
12609                         }
12610                         else
12611                         {
12612                             // The local variable itself is the allocated space.
12613                             // Here we need the unsafe value class check, since the address of the struct is taken
12614                             // for further use and is potentially exploitable.
12615                             lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */);
12616                         }
12617
12618                         // Append a tree to zero-out the temp
12619                         newObjThisPtr = gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet());
12620
12621                         newObjThisPtr = gtNewBlkOpNode(newObjThisPtr,    // Dest
12622                                                        gtNewIconNode(0), // Value
12623                                                        size,             // Size
12624                                                        false,            // isVolatile
12625                                                        false);           // not copyBlock
12626                         impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
12627
12628                         // Obtain the address of the temp
12629                         newObjThisPtr =
12630                             gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet()));
12631                     }
12632                     else
12633                     {
12634 #ifdef FEATURE_READYTORUN_COMPILER
12635                         if (opts.IsReadyToRun())
12636                         {
12637                             op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
12638                             usingReadyToRunHelper = (op1 != nullptr);
12639                         }
12640
12641                         if (!usingReadyToRunHelper)
12642 #endif
12643                         {
12644                             op1 = impParentClassTokenToHandle(&resolvedToken, nullptr, TRUE);
12645                             if (op1 == nullptr)
12646                             { // compDonotInline()
12647                                 return;
12648                             }
12649
12650                             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
12651                             // and the newfast call with a single call to a dynamic R2R cell that will:
12652                             //      1) Load the context
12653                             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate
12654                             //      stub
12655                             //      3) Allocate and return the new object
12656                             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
12657
12658                             op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd),
12659                                                     resolvedToken.hClass, TYP_REF, op1);
12660                         }
12661
12662                         // Remember that this basic block contains 'new' of an object
12663                         block->bbFlags |= BBF_HAS_NEWOBJ;
12664                         optMethodFlags |= OMF_HAS_NEWOBJ;
12665
12666                         // Append the assignment to the temp/local. Don't need to spill
12667                         // at all as we are just calling an EE-Jit helper which can only
12668                         // cause an (async) OutOfMemoryException.
12669
12670                         // We assign the newly allocated object (by a GT_ALLOCOBJ node)
12671                         // to a temp. Note that the pattern "temp = allocObj" is required
12672                         // by the ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes
12673                         // without exhaustive walk over all expressions.
12674
12675                         impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE);
12676
12677                         newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF);
12678                     }
12679                 }
12680                 goto CALL;
12681
12682             case CEE_CALLI:
12683
12684                 /* CALLI does not respond to CONSTRAINED */
12685                 prefixFlags &= ~PREFIX_CONSTRAINED;
12686
12687                 if (compIsForInlining())
12688                 {
12689                     // CALLI doesn't have a method handle, so assume the worst.
12690                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
12691                     {
12692                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_CALLI);
12693                         return;
12694                     }
12695                 }
12696
12697             // fall through
12698
12699             case CEE_CALLVIRT:
12700             case CEE_CALL:
12701
12702                 // We can't call getCallInfo on the token from a CALLI, but we need it in
12703                 // many other places.  We unfortunately embed that knowledge here.
12704                 if (opcode != CEE_CALLI)
12705                 {
12706                     _impResolveToken(CORINFO_TOKENKIND_Method);
12707
12708                     eeGetCallInfo(&resolvedToken,
12709                                   (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr,
12710                                   // this is how impImportCall invokes getCallInfo
12711                                   addVerifyFlag(
12712                                       combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS),
12713                                               (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT
12714                                                                        : CORINFO_CALLINFO_NONE)),
12715                                   &callInfo);
12716                 }
12717                 else
12718                 {
12719                     // Suppress uninitialized use warning.
12720                     memset(&resolvedToken, 0, sizeof(resolvedToken));
12721                     memset(&callInfo, 0, sizeof(callInfo));
12722
12723                     resolvedToken.token = getU4LittleEndian(codeAddr);
12724                 }
12725
12726             CALL: // memberRef should be set.
12727                 // newObjThisPtr should be set for CEE_NEWOBJ
12728
12729                 JITDUMP(" %08X", resolvedToken.token);
12730                 constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0;
12731
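                      // Declared and assigned separately (like isRecursive below) to avoid goto flow warnings.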
12732                 bool newBBcreatedForTailcallStress;
12733
12734                 newBBcreatedForTailcallStress = false;
12735
12736                 if (compIsForInlining())
12737                 {
12738                     if (compDonotInline())
12739                     {
12740                         return;
12741                     }
12742                     // We rule out inlinees with explicit tail calls in fgMakeBasicBlocks.
12743                     assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0);
12744                 }
12745                 else
12746                 {
12747                     if (compTailCallStress())
12748                     {
12749                         // Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()?
12750                         // Tail call stress only recognizes call+ret patterns and forces them to be
12751                         // explicit tail prefixed calls.  Also fgMakeBasicBlocks() under tail call stress
12752                         // doesn't import the 'ret' opcode following the call into the basic block containing
12753                         // the call; instead, it imports it into a new basic block.  Note that fgMakeBasicBlocks()
12754                         // is already checking that there is an opcode following call and hence it is
12755                         // safe here to read next opcode without bounds check.
12756                         newBBcreatedForTailcallStress =
12757                             impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL, (not a CEE_NEWOBJ). So, don't
12758                                                              // make it jump to RET.
12759                             (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET
12760
12761                         if (newBBcreatedForTailcallStress &&
12762                             !(prefixFlags & PREFIX_TAILCALL_EXPLICIT) && // User hasn't set "tail." prefix yet.
12763                             verCheckTailCallConstraint(opcode, &resolvedToken,
12764                                                        constraintCall ? &constrainedResolvedToken : nullptr,
12765                                                        true) // Is it legal to do a tailcall?
12766                             )
12767                         {
12768                             // Stress the tailcall.
12769                             JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)");
12770                             prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
12771                         }
12772                     }
12773                 }
12774
12775                 // This is split up to avoid goto flow warnings.
12776                 bool isRecursive;
12777                 isRecursive = !compIsForInlining() && (callInfo.hMethod == info.compMethodHnd);
12778
12779                 // Note that when running under tail call stress, a call will be marked as explicit tail prefixed
12780                 // hence will not be considered for implicit tail calling.
12781                 if (impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive))
12782                 {
12783                     if (compIsForInlining())
12784                     {
12785 #if FEATURE_TAILCALL_OPT_SHARED_RETURN
12786                         // Are we inlining at an implicit tail call site? If so then we can flag
12787                         // implicit tail call sites in the inline body. These call sites
12788                         // often end up in non BBJ_RETURN blocks, so only flag them when
12789                         // we're able to handle shared returns.
12790                         if (impInlineInfo->iciCall->IsImplicitTailCall())
12791                         {
12792                             JITDUMP(" (Inline Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
12793                             prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
12794                         }
12795 #endif // FEATURE_TAILCALL_OPT_SHARED_RETURN
12796                     }
12797                     else
12798                     {
12799                         JITDUMP(" (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
12800                         prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
12801                     }
12802                 }
12803
12804                 // Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call).
12805                 explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
12806                 readonlyCall     = (prefixFlags & PREFIX_READONLY) != 0;
12807
12808                 if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ)
12809                 {
12810                     // All calls and delegates need a security callout.
12811                     // For delegates, this is the call to the delegate constructor, not the access check on the
12812                     // LD(virt)FTN.
12813                     impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12814
12815 #if 0 // DevDiv 410397 - This breaks too many obfuscated apps to do this in an in-place release
12816      
12817                 // DevDiv 291703 - we need to check for accessibility between the caller of InitializeArray
12818                 // and the field it is reading, thus it is now unverifiable to not immediately precede with
12819                 // ldtoken <field token>, and we now check accessibility
12820                 if ((callInfo.methodFlags & CORINFO_FLG_INTRINSIC) &&
12821                     (info.compCompHnd->getIntrinsicID(callInfo.hMethod) == CORINFO_INTRINSIC_InitializeArray))
12822                 {
12823                     if (prevOpcode != CEE_LDTOKEN)
12824                     {
12825                         Verify(prevOpcode == CEE_LDTOKEN, "Need ldtoken for InitializeArray");
12826                     }
12827                     else
12828                     {
12829                         assert(lastLoadToken != NULL);
12830                         // Now that we know we have a token, verify that it is accessible for loading
12831                         CORINFO_RESOLVED_TOKEN resolvedLoadField;
12832                         impResolveToken(lastLoadToken, &resolvedLoadField, CORINFO_TOKENKIND_Field);
12833                         eeGetFieldInfo(&resolvedLoadField, CORINFO_ACCESS_INIT_ARRAY, &fieldInfo);
12834                         impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
12835                     }
12836                 }
12837
12838 #endif // DevDiv 410397
12839                 }
12840
12841                 if (tiVerificationNeeded)
12842                 {
12843                     verVerifyCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
12844                                   explicitTailCall, readonlyCall, delegateCreateStart, codeAddr - 1,
12845                                   &callInfo DEBUGARG(info.compFullName));
12846                 }
12847
12848                 // Insert delegate callout here.
12849                 if (opcode == CEE_NEWOBJ && (mflags & CORINFO_FLG_CONSTRUCTOR) && (clsFlags & CORINFO_FLG_DELEGATE))
12850                 {
12851 #ifdef DEBUG
12852                     // We should do this only if verification is enabled
12853                     // If verification is disabled, delegateCreateStart will not be initialized correctly
12854                     if (tiVerificationNeeded)
12855                     {
12856                         mdMemberRef delegateMethodRef = mdMemberRefNil;
12857                         // We should get here only for well formed delegate creation.
12858                         assert(verCheckDelegateCreation(delegateCreateStart, codeAddr - 1, delegateMethodRef));
12859                     }
12860 #endif
12861
12862 #ifdef FEATURE_CORECLR
12863                     // In coreclr the delegate transparency rule needs to be enforced even if verification is disabled
12864                     typeInfo              tiActualFtn          = impStackTop(0).seTypeInfo;
12865                     CORINFO_METHOD_HANDLE delegateMethodHandle = tiActualFtn.GetMethod2();
12866
12867                     impInsertCalloutForDelegate(info.compMethodHnd, delegateMethodHandle, resolvedToken.hClass);
12868 #endif // FEATURE_CORECLR
12869                 }
12870
12871                 callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
12872                                         newObjThisPtr, prefixFlags, &callInfo, opcodeOffs);
12873                 if (compDonotInline())
12874                 {
12875                     return;
12876                 }
12877
12878                 // If newBBcreatedForTailcallStress is true, we have created a new BB after the "call"
12879                 // instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless.
12880                 if (explicitTailCall || newBBcreatedForTailcallStress)
12881                 {
12882                     assert(!compIsForInlining());
12883                     goto RET;
12884                 }
12885
12886                 break;
12887
12888             case CEE_LDFLD:
12889             case CEE_LDSFLD:
12890             case CEE_LDFLDA:
12891             case CEE_LDSFLDA:
12892             {
12893
12894                 BOOL isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA);
12895                 BOOL isLoadStatic  = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA);
12896
12897                 /* Get the CP_Fieldref index */
12898                 assertImp(sz == sizeof(unsigned));
12899
12900                 _impResolveToken(CORINFO_TOKENKIND_Field);
12901
12902                 JITDUMP(" %08X", resolvedToken.token);
12903
12904                 int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET;
12905
12906                 GenTreePtr           obj     = nullptr;
12907                 typeInfo*            tiObj   = nullptr;
12908                 CORINFO_CLASS_HANDLE objType = nullptr; // used for fields
12909
12910                 if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA)
12911                 {
12912                     tiObj = &impStackTop().seTypeInfo;
12913                     obj   = impPopStack(objType).val;
12914
12915                     if (impIsThis(obj))
12916                     {
12917                         aflags |= CORINFO_ACCESS_THIS;
12918
12919                         // An optimization for Contextful classes:
12920                         // we unwrap the proxy when we have a 'this reference'
12921
12922                         if (info.compUnwrapContextful)
12923                         {
12924                             aflags |= CORINFO_ACCESS_UNWRAP;
12925                         }
12926                     }
12927                 }
12928
12929                 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
12930
12931                 // Figure out the type of the member.  We always call canAccessField, so you always need this
12932                 // handle
12933                 CorInfoType ciType = fieldInfo.fieldType;
12934                 clsHnd             = fieldInfo.structType;
12935
12936                 lclTyp = JITtype2varType(ciType);
12937
12938 #ifdef _TARGET_AMD64_
12939                 noway_assert(varTypeIsIntegralOrI(lclTyp) || varTypeIsFloating(lclTyp) || lclTyp == TYP_STRUCT);
12940 #endif // _TARGET_AMD64_
12941
12942                 if (compIsForInlining())
12943                 {
12944                     switch (fieldInfo.fieldAccessor)
12945                     {
12946                         case CORINFO_FIELD_INSTANCE_HELPER:
12947                         case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
12948                         case CORINFO_FIELD_STATIC_ADDR_HELPER:
12949                         case CORINFO_FIELD_STATIC_TLS:
12950
12951                             compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER);
12952                             return;
12953
12954                         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
12955 #if COR_JIT_EE_VERSION > 460
12956                         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
12957 #endif
12958                             /* We may be able to inline the field accessors in specific instantiations of generic
12959                              * methods */
12960                             compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER);
12961                             return;
12962
12963                         default:
12964                             break;
12965                     }
12966
12967                     if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT &&
12968                         clsHnd)
12969                     {
12970                         if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) &&
12971                             !(info.compFlags & CORINFO_FLG_FORCEINLINE))
12972                         {
12973                             // Loading a static valuetype field usually will cause a JitHelper to be called
12974                             // for the static base. This will bloat the code.
12975                             compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS);
12976
12977                             if (compInlineResult->IsFailure())
12978                             {
12979                                 return;
12980                             }
12981                         }
12982                     }
12983                 }
12984
12985                 tiRetVal = verMakeTypeInfo(ciType, clsHnd);
12986                 if (isLoadAddress)
12987                 {
12988                     tiRetVal.MakeByRef();
12989                 }
12990                 else
12991                 {
12992                     tiRetVal.NormaliseForStack();
12993                 }
12994
12995                 // Perform this check always to ensure that we get field access exceptions even with
12996                 // SkipVerification.
12997                 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
12998
12999                 if (tiVerificationNeeded)
13000                 {
13001                     // You can also pass the unboxed struct to  LDFLD
13002                     BOOL bAllowPlainValueTypeAsThis = FALSE;
13003                     if (opcode == CEE_LDFLD && impIsValueType(tiObj))
13004                     {
13005                         bAllowPlainValueTypeAsThis = TRUE;
13006                     }
13007
13008                     verVerifyField(&resolvedToken, fieldInfo, tiObj, isLoadAddress, bAllowPlainValueTypeAsThis);
13009
13010                     // If we're doing this on a heap object or from a 'safe' byref
13011                     // then the result is a safe byref too
13012                     if (isLoadAddress) // load address
13013                     {
13014                         if (fieldInfo.fieldFlags &
13015                             CORINFO_FLG_FIELD_STATIC) // statics marked as safe will have permanent home
13016                         {
13017                             if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_SAFESTATIC_BYREF_RETURN)
13018                             {
13019                                 tiRetVal.SetIsPermanentHomeByRef();
13020                             }
13021                         }
13022                         else if (tiObj->IsObjRef() || tiObj->IsPermanentHomeByRef())
13023                         {
13024                             // ldflda of byref is safe if done on a gc object or on  a
13025                             // safe byref
13026                             tiRetVal.SetIsPermanentHomeByRef();
13027                         }
13028                     }
13029                 }
13030                 else
13031                 {
13032                     // tiVerificationNeeded is false.
13033                     // Raise InvalidProgramException if static load accesses non-static field
13034                     if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
13035                     {
13036                         BADCODE("static access on an instance field");
13037                     }
13038                 }
13039
13040                 // We are using ldfld/a on a static field. We allow it, but we need to preserve any side effects of obj.
13041                 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
13042                 {
13043                     if (obj->gtFlags & GTF_SIDE_EFFECT)
13044                     {
13045                         obj = gtUnusedValNode(obj);
13046                         impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13047                     }
13048                     obj = nullptr;
13049                 }
13050
13051                 /* Preserve 'small' int types */
13052                 if (lclTyp > TYP_INT)
13053                 {
13054                     lclTyp = genActualType(lclTyp);
13055                 }
13056
13057                 bool usesHelper = false;
13058
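                      // Dispatch on how the EE says this field must be accessed: helper-based accessors
                      // become helper calls, the others become GT_FIELD / static-address trees.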
13059                 switch (fieldInfo.fieldAccessor)
13060                 {
13061                     case CORINFO_FIELD_INSTANCE:
13062 #ifdef FEATURE_READYTORUN_COMPILER
13063                     case CORINFO_FIELD_INSTANCE_WITH_BASE:
13064 #endif
13065                     {
13066                         bool nullcheckNeeded = false;
13067
13068                         obj = impCheckForNullPointer(obj);
13069
13070                         if (isLoadAddress && (obj->gtType == TYP_BYREF) && fgAddrCouldBeNull(obj))
13071                         {
13072                             nullcheckNeeded = true;
13073                         }
13074
13075                         // If the object is a struct, what we really want is
13076                         // for the field to operate on the address of the struct.
13077                         if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj))
13078                         {
13079                             assert(opcode == CEE_LDFLD && objType != nullptr);
13080
13081                             obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true);
13082                         }
13083
13084                         /* Create the data member node */
13085                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset, nullcheckNeeded);
13086
13087 #ifdef FEATURE_READYTORUN_COMPILER
13088                         if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
13089                         {
13090                             op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
13091                         }
13092 #endif
13093
13094                         op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
13095
13096                         if (fgAddrCouldBeNull(obj))
13097                         {
13098                             op1->gtFlags |= GTF_EXCEPT;
13099                         }
13100
13101                         // If gtFldObj is a BYREF then our target is a value class and
13102                         // it could point anywhere, for example at a boxed class static int
13103                         if (obj->gtType == TYP_BYREF)
13104                         {
13105                             op1->gtFlags |= GTF_IND_TGTANYWHERE;
13106                         }
13107
13108                         DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13109                         if (StructHasOverlappingFields(typeFlags))
13110                         {
13111                             op1->gtField.gtFldMayOverlap = true;
13112                         }
13113
13114                         // wrap it in an address-of operator if necessary
13115                         if (isLoadAddress)
13116                         {
13117                             op1 = gtNewOperNode(GT_ADDR,
13118                                                 (var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1);
13119                         }
13120                         else
13121                         {
13122                             if (compIsForInlining() &&
13123                                 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, obj,
13124                                                                                    impInlineInfo->inlArgInfo))
13125                             {
13126                                 impInlineInfo->thisDereferencedFirst = true;
13127                             }
13128                         }
13129                     }
13130                     break;
13131
13132                     case CORINFO_FIELD_STATIC_TLS:
13133 #ifdef _TARGET_X86_
13134                         // Legacy TLS access is implemented as intrinsic on x86 only
13135
13136                         /* Create the data member node */
13137                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
13138                         op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13139
13140                         if (isLoadAddress)
13141                         {
13142                             op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1);
13143                         }
13144                         break;
13145 #else
13146                         fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13147
13148                         __fallthrough;
13149 #endif
13150
13151                     case CORINFO_FIELD_STATIC_ADDR_HELPER:
13152                     case CORINFO_FIELD_INSTANCE_HELPER:
13153                     case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13154                         op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13155                                                clsHnd, nullptr);
13156                         usesHelper = true;
13157                         break;
13158
13159                     case CORINFO_FIELD_STATIC_ADDRESS:
13160                         // Replace static read-only fields with constant if possible
13161                         if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) &&
13162                             !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) &&
13163                             (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp)))
13164                         {
13165                             CorInfoInitClassResult initClassResult =
13166                                 info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd,
13167                                                             impTokenLookupContextHandle);
13168
13169                             if (initClassResult & CORINFO_INITCLASS_INITIALIZED)
13170                             {
13171                                 void** pFldAddr = nullptr;
13172                                 void*  fldAddr =
13173                                     info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr);
13174
13175                                 // We should always be able to access this static's address directly
13176                                 assert(pFldAddr == nullptr);
13177
13178                                 op1 = impImportStaticReadOnlyField(fldAddr, lclTyp);
13179                                 goto FIELD_DONE;
13180                             }
13181                         }
13182
13183                         __fallthrough;
13184
13185                     case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13186                     case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13187                     case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13188 #if COR_JIT_EE_VERSION > 460
13189                     case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13190 #endif
13191                         op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13192                                                          lclTyp);
13193                         break;
13194
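                    // Reads of a field the EE reports as intrinsically zero (IntPtr.Zero is the typical
                    // example) fold to a constant 0 of the field's type.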
13195                     case CORINFO_FIELD_INTRINSIC_ZERO:
13196                     {
13197                         assert(aflags & CORINFO_ACCESS_GET);
13198                         op1 = gtNewIconNode(0, lclTyp);
13199                         goto FIELD_DONE;
13200                     }
13201                     break;
13202
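                    // Reads of the intrinsic empty-string field (String.Empty) become a string literal node
                    // whose value is obtained from the EE.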
13203                     case CORINFO_FIELD_INTRINSIC_EMPTY_STRING:
13204                     {
13205                         assert(aflags & CORINFO_ACCESS_GET);
13206
13207                         LPVOID         pValue;
13208                         InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue);
13209                         op1                = gtNewStringLiteralNode(iat, pValue);
13210                         goto FIELD_DONE;
13211                     }
13212                     break;
13213
13214                     default:
13215                         assert(!"Unexpected fieldAccessor");
13216                 }
13217
13218                 if (!isLoadAddress)
13219                 {
13220
13221                     if (prefixFlags & PREFIX_VOLATILE)
13222                     {
13223                         op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
13224                         op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13225
13226                         if (!usesHelper)
13227                         {
13228                             assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13229                                    (op1->OperGet() == GT_OBJ));
13230                             op1->gtFlags |= GTF_IND_VOLATILE;
13231                         }
13232                     }
13233
13234                     if (prefixFlags & PREFIX_UNALIGNED)
13235                     {
13236                         if (!usesHelper)
13237                         {
13238                             assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13239                                    (op1->OperGet() == GT_OBJ));
13240                             op1->gtFlags |= GTF_IND_UNALIGNED;
13241                         }
13242                     }
13243                 }
13244
13245                 /* Check if the class needs explicit initialization */
13246
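                // If so, the class-initialization helper is prepended via a GT_COMMA so the static
                // constructor runs before the field access.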
13247                 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13248                 {
13249                     GenTreePtr helperNode = impInitClass(&resolvedToken);
13250                     if (compDonotInline())
13251                     {
13252                         return;
13253                     }
13254                     if (helperNode != nullptr)
13255                     {
13256                         op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13257                     }
13258                 }
13259
13260             FIELD_DONE:
13261                 impPushOnStack(op1, tiRetVal);
13262             }
13263             break;
13264
13265             case CEE_STFLD:
13266             case CEE_STSFLD:
13267             {
13268
13269                 BOOL isStoreStatic = (opcode == CEE_STSFLD);
13270
13271                 CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type)
13272
13273                 /* Get the CP_Fieldref index */
13274
13275                 assertImp(sz == sizeof(unsigned));
13276
13277                 _impResolveToken(CORINFO_TOKENKIND_Field);
13278
13279                 JITDUMP(" %08X", resolvedToken.token);
13280
13281                 int        aflags = CORINFO_ACCESS_SET;
13282                 GenTreePtr obj    = nullptr;
13283                 typeInfo*  tiObj  = nullptr;
13284                 typeInfo   tiVal;
13285
13286                 /* Pull the value from the stack */
13287                 op2    = impPopStack(tiVal);
13288                 clsHnd = tiVal.GetClassHandle();
13289
13290                 if (opcode == CEE_STFLD)
13291                 {
13292                     tiObj = &impStackTop().seTypeInfo;
13293                     obj   = impPopStack().val;
13294
13295                     if (impIsThis(obj))
13296                     {
13297                         aflags |= CORINFO_ACCESS_THIS;
13298
13299                         // An optimization for Contextful classes:
13300                         // we unwrap the proxy when we have a 'this reference'
13301
13302                         if (info.compUnwrapContextful)
13303                         {
13304                             aflags |= CORINFO_ACCESS_UNWRAP;
13305                         }
13306                     }
13307                 }
13308
13309                 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
13310
13311                 // Figure out the type of the member.  We always call canAccessField, so you always need this
13312                 // handle
13313                 CorInfoType ciType = fieldInfo.fieldType;
13314                 fieldClsHnd        = fieldInfo.structType;
13315
13316                 lclTyp = JITtype2varType(ciType);
13317
13318                 if (compIsForInlining())
13319                 {
13320                     /* Is this a 'special' (COM) field, a TLS ref static field, a field stored in the GC heap, or
13321                      * a per-inst static? */
13322
13323                     switch (fieldInfo.fieldAccessor)
13324                     {
13325                         case CORINFO_FIELD_INSTANCE_HELPER:
13326                         case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13327                         case CORINFO_FIELD_STATIC_ADDR_HELPER:
13328                         case CORINFO_FIELD_STATIC_TLS:
13329
13330                             compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER);
13331                             return;
13332
13333                         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13334 #if COR_JIT_EE_VERSION > 460
13335                         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13336 #endif
13337
13338                             /* We may be able to inline the field accessors in specific instantiations of generic
13339                              * methods */
13340                             compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER);
13341                             return;
13342
13343                         default:
13344                             break;
13345                     }
13346                 }
13347
13348                 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13349
13350                 if (tiVerificationNeeded)
13351                 {
13352                     verVerifyField(&resolvedToken, fieldInfo, tiObj, TRUE);
13353                     typeInfo fieldType = verMakeTypeInfo(ciType, fieldClsHnd);
13354                     Verify(tiCompatibleWith(tiVal, fieldType.NormaliseForStack(), true), "type mismatch");
13355                 }
13356                 else
13357                 {
13358                     // tiVerificationNeeded is false.
13359                     // Raise InvalidProgramException if static store accesses non-static field
13360                     if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
13361                     {
13362                         BADCODE("static access on an instance field");
13363                     }
13364                 }
13365
13366                 // We are using stfld on a static field.
13367                 // We allow it, but need to eval any side-effects for obj
13368                 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
13369                 {
13370                     if (obj->gtFlags & GTF_SIDE_EFFECT)
13371                     {
13372                         obj = gtUnusedValNode(obj);
13373                         impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13374                     }
13375                     obj = nullptr;
13376                 }
13377
13378                 /* Preserve 'small' int types */
13379                 if (lclTyp > TYP_INT)
13380                 {
13381                     lclTyp = genActualType(lclTyp);
13382                 }
13383
13384                 switch (fieldInfo.fieldAccessor)
13385                 {
13386                     case CORINFO_FIELD_INSTANCE:
13387 #ifdef FEATURE_READYTORUN_COMPILER
13388                     case CORINFO_FIELD_INSTANCE_WITH_BASE:
13389 #endif
13390                     {
13391                         obj = impCheckForNullPointer(obj);
13392
13393                         /* Create the data member node */
13394                         op1             = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
13395                         DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13396                         if (StructHasOverlappingFields(typeFlags))
13397                         {
13398                             op1->gtField.gtFldMayOverlap = true;
13399                         }
13400
13401 #ifdef FEATURE_READYTORUN_COMPILER
13402                         if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
13403                         {
13404                             op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
13405                         }
13406 #endif
13407
13408                         op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
13409
13410                         if (fgAddrCouldBeNull(obj))
13411                         {
13412                             op1->gtFlags |= GTF_EXCEPT;
13413                         }
13414
13415                         // If gtFldObj is a BYREF then our target is a value class and
13416                         // it could point anywhere, for example a boxed class static int
13417                         if (obj->gtType == TYP_BYREF)
13418                         {
13419                             op1->gtFlags |= GTF_IND_TGTANYWHERE;
13420                         }
13421
13422                         if (compIsForInlining() &&
13423                             impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, obj, impInlineInfo->inlArgInfo))
13424                         {
13425                             impInlineInfo->thisDereferencedFirst = true;
13426                         }
13427                     }
13428                     break;
13429
13430                     case CORINFO_FIELD_STATIC_TLS:
13431 #ifdef _TARGET_X86_
13432                         // Legacy TLS access is implemented as intrinsic on x86 only
13433
13434                         /* Create the data member node */
13435                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
13436                         op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13437
13438                         break;
13439 #else
13440                         fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13441
13442                         __fallthrough;
13443 #endif
13444
13445                     case CORINFO_FIELD_STATIC_ADDR_HELPER:
13446                     case CORINFO_FIELD_INSTANCE_HELPER:
13447                     case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13448                         op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13449                                                clsHnd, op2);
13450                         goto SPILL_APPEND;
13451
13452                     case CORINFO_FIELD_STATIC_ADDRESS:
13453                     case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13454                     case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13455                     case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13456 #if COR_JIT_EE_VERSION > 460
13457                     case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13458 #endif
13459                         op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13460                                                          lclTyp);
13461                         break;
13462
13463                     default:
13464                         assert(!"Unexpected fieldAccessor");
13465                 }
13466
13467                 // Create the member assignment, unless we have a struct.
13468                 // TODO-1stClassStructs: This could be limited to TYP_STRUCT, to avoid extra copies.
13469                 bool deferStructAssign = varTypeIsStruct(lclTyp);
13470
13471                 if (!deferStructAssign)
13472                 {
13473                     if (prefixFlags & PREFIX_VOLATILE)
13474                     {
13475                         assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
13476                         op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
13477                         op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13478                         op1->gtFlags |= GTF_IND_VOLATILE;
13479                     }
13480                     if (prefixFlags & PREFIX_UNALIGNED)
13481                     {
13482                         assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
13483                         op1->gtFlags |= GTF_IND_UNALIGNED;
13484                     }
13485
13486                     /* V4.0 allows assignment of i4 constant values to i8 type vars when the IL verifier is bypassed
13487                        (full-trust apps). The reason this works is that the JIT stores an i4 constant in the GenTree
13488                        union during importation and reads from the union as if it were a long during code generation.
13489                        Though this can potentially read garbage, one can get lucky to have this working correctly.
13490
13491                        This code pattern is generated by the Dev10 MC++ compiler when storing to fields compiled with
13492                        the /O2 switch (the default when compiling retail configs in Dev10), and a customer app has
13493                        taken a dependency on it. To be backward compatible, we explicitly add an upward cast here so
13494                        that it always works correctly.
13495
13496                        Note that this is limited to x86 alone, as there is no back compat to be addressed for the Arm
13497                        JIT for V4.0.
13498                     */
13504                     CLANG_FORMAT_COMMENT_ANCHOR;
13505
13506 #ifdef _TARGET_X86_
13507                     if (op1->TypeGet() != op2->TypeGet() && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) &&
13508                         varTypeIsLong(op1->TypeGet()))
13509                     {
13510                         op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
13511                     }
13512 #endif
13513
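                    // On 64-bit targets, reconcile int/native-int mismatches between the value and the field:
                    // an i4 constant stored to a native int field is widened to TYP_I_IMPL by retyping the
                    // constant, otherwise an explicit cast is inserted in whichever direction is needed.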
13514 #ifdef _TARGET_64BIT_
13515                     // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
13516                     if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
13517                     {
13518                         op2->gtType = TYP_I_IMPL;
13519                     }
13520                     else
13521                     {
13522                         // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
13523                         //
13524                         if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
13525                         {
13526                             op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
13527                         }
13528                         // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
13529                         //
13530                         if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
13531                         {
13532                             op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
13533                         }
13534                     }
13535 #endif
13536
13537 #if !FEATURE_X87_DOUBLES
13538                     // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
13539                     // We insert a cast to the dest 'op1' type
13540                     //
13541                     if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
13542                         varTypeIsFloating(op2->gtType))
13543                     {
13544                         op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
13545                     }
13546 #endif // !FEATURE_X87_DOUBLES
13547
13548                     op1 = gtNewAssignNode(op1, op2);
13549
13550                     /* Mark the expression as containing an assignment */
13551
13552                     op1->gtFlags |= GTF_ASG;
13553                 }
13554
13555                 /* Check if the class needs explicit initialization */
13556
13557                 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13558                 {
13559                     GenTreePtr helperNode = impInitClass(&resolvedToken);
13560                     if (compDonotInline())
13561                     {
13562                         return;
13563                     }
13564                     if (helperNode != nullptr)
13565                     {
13566                         op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13567                     }
13568                 }
13569
13570                 /* stfld can interfere with value classes (consider the sequence
13571                    ldloc, ldloca, ..., stfld, stloc).  We will be conservative and
13572                    spill all value class references from the stack. */
13573
13574                 if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL)))
13575                 {
13576                     assert(tiObj);
13577
13578                     if (impIsValueType(tiObj))
13579                     {
13580                         impSpillEvalStack();
13581                     }
13582                     else
13583                     {
13584                         impSpillValueClasses();
13585                     }
13586                 }
13587
13588                 /* Spill any refs to the same member from the stack */
13589
13590                 impSpillLclRefs((ssize_t)resolvedToken.hField);
13591
13592                 /* stsfld also interferes with indirect accesses (for aliased
13593                    statics) and calls. But don't need to spill other statics
13594                    as we have explicitly spilled this particular static field. */
13595
13596                 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD"));
13597
13598                 if (deferStructAssign)
13599                 {
13600                     op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL);
13601                 }
13602             }
13603                 goto APPEND;
13604
13605             case CEE_NEWARR:
13606             {
13607
13608                 /* Get the class type index operand */
13609
13610                 _impResolveToken(CORINFO_TOKENKIND_Newarr);
13611
13612                 JITDUMP(" %08X", resolvedToken.token);
13613
13614                 if (!opts.IsReadyToRun())
13615                 {
13616                     // Need to restore array classes before creating array objects on the heap
13617                     op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
13618                     if (op1 == nullptr)
13619                     { // compDonotInline()
13620                         return;
13621                     }
13622                 }
13623
13624                 if (tiVerificationNeeded)
13625                 {
13626                     // As per ECMA, the 'numElems' operand can be either int32 or native int.
13627                     Verify(impStackTop().seTypeInfo.IsIntOrNativeIntType(), "bad bound");
13628
13629                     CORINFO_CLASS_HANDLE elemTypeHnd;
13630                     info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
13631                     Verify(elemTypeHnd == nullptr ||
13632                                !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
13633                            "array of byref-like type");
13634                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13635                 }
13636
13637                 accessAllowedResult =
13638                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13639                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13640
13641                 /* Form the arglist: array class handle, size */
13642                 op2 = impPopStack().val;
13643                 assertImp(genActualTypeIsIntOrI(op2->gtType));
13644
13645 #ifdef FEATURE_READYTORUN_COMPILER
13646                 if (opts.IsReadyToRun())
13647                 {
13648                     op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF,
13649                                                     gtNewArgList(op2));
13650                     usingReadyToRunHelper = (op1 != nullptr);
13651
13652                     if (!usingReadyToRunHelper)
13653                     {
13654                         // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13655                         // and the newarr call with a single call to a dynamic R2R cell that will:
13656                         //      1) Load the context
13657                         //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
13658                         //      3) Allocate the new array
13659                         // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13660
13661                         // Need to restore array classes before creating array objects on the heap
13662                         op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
13663                         if (op1 == nullptr)
13664                         { // compDonotInline()
13665                             return;
13666                         }
13667                     }
13668                 }
13669
13670                 if (!usingReadyToRunHelper)
13671 #endif
13672                 {
13673                     args = gtNewArgList(op1, op2);
13674
13675                     /* Create a call to 'new' */
13676
13677                     // Note that this only works for shared generic code because the same helper is used for all
13678                     // reference array types
13679                     op1 =
13680                         gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, 0, args);
13681                 }
13682
13683                 op1->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass;
13684
13685                 /* Remember that this basic block contains a 'new' of a single-dimensional (SD) array */
13686
13687                 block->bbFlags |= BBF_HAS_NEWARRAY;
13688                 optMethodFlags |= OMF_HAS_NEWARRAY;
13689
13690                 /* Push the result of the call on the stack */
13691
13692                 impPushOnStack(op1, tiRetVal);
13693
13694                 callTyp = TYP_REF;
13695             }
13696             break;
13697
13698             case CEE_LOCALLOC:
13699                 assert(!compIsForInlining());
13700
13701                 if (tiVerificationNeeded)
13702                 {
13703                     Verify(false, "bad opcode");
13704                 }
13705
13706                 // We don't allow locallocs inside handlers
13707                 if (block->hasHndIndex())
13708                 {
13709                     BADCODE("Localloc can't be inside handler");
13710                 }
13711
13712                 /* The FP register may not be back to the original value at the end
13713                    of the method, even if the frame size is 0, as localloc may
13714                    have modified it. So we will HAVE to reset it */
13715
13716                 compLocallocUsed = true;
13717                 setNeedsGSSecurityCookie();
13718
13719                 // Get the size to allocate
13720
13721                 op2 = impPopStack().val;
13722                 assertImp(genActualTypeIsIntOrI(op2->gtType));
13723
13724                 if (verCurrentState.esStackDepth != 0)
13725                 {
13726                     BADCODE("Localloc can only be used when the stack is empty");
13727                 }
13728
13729                 op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2);
13730
13731                 // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd.
13732
13733                 op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE);
13734
13735                 impPushOnStack(op1, tiRetVal);
13736                 break;
13737
13738             case CEE_ISINST:
13739
13740                 /* Get the type token */
13741                 assertImp(sz == sizeof(unsigned));
13742
13743                 _impResolveToken(CORINFO_TOKENKIND_Casting);
13744
13745                 JITDUMP(" %08X", resolvedToken.token);
13746
13747                 if (!opts.IsReadyToRun())
13748                 {
13749                     op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
13750                     if (op2 == nullptr)
13751                     { // compDonotInline()
13752                         return;
13753                     }
13754                 }
13755
13756                 if (tiVerificationNeeded)
13757                 {
13758                     Verify(impStackTop().seTypeInfo.IsObjRef(), "obj reference needed");
13759                     // Even if this is a value class, we know it is boxed.
13760                     tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
13761                 }
13762                 accessAllowedResult =
13763                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13764                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13765
13766                 op1 = impPopStack().val;
13767
13768 #ifdef FEATURE_READYTORUN_COMPILER
13769                 if (opts.IsReadyToRun())
13770                 {
13771                     GenTreePtr opLookup =
13772                         impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF,
13773                                                   gtNewArgList(op1));
13774                     usingReadyToRunHelper = (opLookup != nullptr);
13775                     op1                   = (usingReadyToRunHelper ? opLookup : op1);
13776
13777                     if (!usingReadyToRunHelper)
13778                     {
13779                         // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13780                         // and the isinstanceof_any call with a single call to a dynamic R2R cell that will:
13781                         //      1) Load the context
13782                         //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
13783                         //      3) Perform the 'is instance' check on the input object
13784                         // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13785
13786                         op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
13787                         if (op2 == nullptr)
13788                         { // compDonotInline()
13789                             return;
13790                         }
13791                     }
13792                 }
13793
13794                 if (!usingReadyToRunHelper)
13795 #endif
13796                 {
13797                     op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false);
13798                 }
13799                 if (compDonotInline())
13800                 {
13801                     return;
13802                 }
13803
13804                 impPushOnStack(op1, tiRetVal);
13805
13806                 break;
13807
13808             case CEE_REFANYVAL:
13809
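                // refanyval extracts a byref to the value stored in a TypedReference (refany); the work is
                // done by the GETREFANY helper call built below, which takes the class handle and the refany.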
13810                 // get the class handle and make an ICON node out of it
13811
13812                 _impResolveToken(CORINFO_TOKENKIND_Class);
13813
13814                 JITDUMP(" %08X", resolvedToken.token);
13815
13816                 op2 = impTokenToHandle(&resolvedToken);
13817                 if (op2 == nullptr)
13818                 { // compDonotInline()
13819                     return;
13820                 }
13821
13822                 if (tiVerificationNeeded)
13823                 {
13824                     Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
13825                            "need refany");
13826                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass).MakeByRef();
13827                 }
13828
13829                 op1 = impPopStack().val;
13830                 // make certain it is normalized;
13831                 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
13832
13833                 // Call helper GETREFANY(classHandle, op1);
13834                 args = gtNewArgList(op2, op1);
13835                 op1  = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, 0, args);
13836
13837                 impPushOnStack(op1, tiRetVal);
13838                 break;
13839
13840             case CEE_REFANYTYPE:
13841
13842                 if (tiVerificationNeeded)
13843                 {
13844                     Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
13845                            "need refany");
13846                 }
13847
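                // refanytype fetches the type handle out of a TypedReference and converts it to a
                // RuntimeTypeHandle via the MAYBENULL helper below.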
13848                 op1 = impPopStack().val;
13849
13850                 // make certain it is normalized;
13851                 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
13852
13853                 if (op1->gtOper == GT_OBJ)
13854                 {
13855                     // Get the address of the refany
13856                     op1 = op1->gtOp.gtOp1;
13857
13858                     // Fetch the type from the correct slot
13859                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
13860                                         gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL));
13861                     op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1);
13862                 }
13863                 else
13864                 {
13865                     assertImp(op1->gtOper == GT_MKREFANY);
13866
13867                     // The pointer may have side-effects
13868                     if (op1->gtOp.gtOp1->gtFlags & GTF_SIDE_EFFECT)
13869                     {
13870                         impAppendTree(op1->gtOp.gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13871 #ifdef DEBUG
13872                         impNoteLastILoffs();
13873 #endif
13874                     }
13875
13876                     // We already have the class handle
13877                     op1 = op1->gtOp.gtOp2;
13878                 }
13879
13880                 // convert native TypeHandle to RuntimeTypeHandle
13881                 {
13882                     GenTreeArgList* helperArgs = gtNewArgList(op1);
13883
13884                     op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL, TYP_STRUCT, GTF_EXCEPT,
13885                                               helperArgs);
13886
13887                     // The handle struct is returned in register
13888                     // The handle struct is returned in a register
13889
13890                     tiRetVal = typeInfo(TI_STRUCT, impGetTypeHandleClass());
13891                 }
13892
13893                 impPushOnStack(op1, tiRetVal);
13894                 break;
13895
13896             case CEE_LDTOKEN:
13897             {
13898                 /* Get the Class index */
13899                 assertImp(sz == sizeof(unsigned));
13900                 lastLoadToken = codeAddr;
13901                 _impResolveToken(CORINFO_TOKENKIND_Ldtoken);
13902
13903                 tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken);
13904
13905                 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
13906                 if (op1 == nullptr)
13907                 { // compDonotInline()
13908                     return;
13909                 }
13910
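                // Pick the conversion helper based on the kind of token: the type-handle helper by default,
                // or the method/field helper when the token resolved to a method or a field.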
13911                 helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE;
13912                 assert(resolvedToken.hClass != nullptr);
13913
13914                 if (resolvedToken.hMethod != nullptr)
13915                 {
13916                     helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD;
13917                 }
13918                 else if (resolvedToken.hField != nullptr)
13919                 {
13920                     helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD;
13921                 }
13922
13923                 GenTreeArgList* helperArgs = gtNewArgList(op1);
13924
13925                 op1 = gtNewHelperCallNode(helper, TYP_STRUCT, GTF_EXCEPT, helperArgs);
13926
13927                 // The handle struct is returned in register
13928                 // The handle struct is returned in a register
13929
13930                 tiRetVal = verMakeTypeInfo(tokenType);
13931                 impPushOnStack(op1, tiRetVal);
13932             }
13933             break;
13934
13935             case CEE_UNBOX:
13936             case CEE_UNBOX_ANY:
13937             {
13938                 /* Get the Class index */
13939                 assertImp(sz == sizeof(unsigned));
13940
13941                 _impResolveToken(CORINFO_TOKENKIND_Class);
13942
13943                 JITDUMP(" %08X", resolvedToken.token);
13944
13945                 BOOL runtimeLookup;
13946                 op2 = impTokenToHandle(&resolvedToken, &runtimeLookup);
13947                 if (op2 == nullptr)
13948                 { // compDonotInline()
13949                     return;
13950                 }
13951
13952                 // Run this always so we can get access exceptions even with SkipVerification.
13953                 accessAllowedResult =
13954                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13955                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13956
13957                 if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass))
13958                 {
13959                     if (tiVerificationNeeded)
13960                     {
13961                         typeInfo tiUnbox = impStackTop().seTypeInfo;
13962                         Verify(tiUnbox.IsObjRef(), "bad unbox.any arg");
13963                         tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13964                         tiRetVal.NormaliseForStack();
13965                     }
13966                     op1 = impPopStack().val;
13967                     goto CASTCLASS;
13968                 }
13969
13970                 /* Pop the object and create the unbox helper call */
13971                 /* You might think that for UNBOX_ANY we need to push a different */
13972                 /* (non-byref) type, but here we're making the tiRetVal that is used */
13973                 /* for the intermediate pointer which we then transfer onto the OBJ */
13974                 /* instruction.  OBJ then creates the appropriate tiRetVal. */
13975                 if (tiVerificationNeeded)
13976                 {
13977                     typeInfo tiUnbox = impStackTop().seTypeInfo;
13978                     Verify(tiUnbox.IsObjRef(), "Bad unbox arg");
13979
13980                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13981                     Verify(tiRetVal.IsValueClass(), "not value class");
13982                     tiRetVal.MakeByRef();
13983
13984                     // We always come from an objref, so this is safe byref
13985                     tiRetVal.SetIsPermanentHomeByRef();
13986                     tiRetVal.SetIsReadonlyByRef();
13987                 }
13988
13989                 op1 = impPopStack().val;
13990                 assertImp(op1->gtType == TYP_REF);
13991
13992                 helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass);
13993                 assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE);
13994
13995                 // We only want to expand inline the normal UNBOX helper;
13996                 expandInline = (helper == CORINFO_HELP_UNBOX);
13997
13998                 if (expandInline)
13999                 {
14000                     if (compCurBB->isRunRarely())
14001                     {
14002                         expandInline = false; // not worth the code expansion
14003                     }
14004                 }
14005
14006                 if (expandInline)
14007                 {
14008                     // we are doing normal unboxing
14009                     // inline the common case of the unbox helper
14010                     // UNBOX(exp) morphs into
14011                     // clone = pop(exp);
14012                     // ((*clone == typeToken) ? nop : helper(clone, typeToken));
14013                     // push(clone + sizeof(void*))
14014                     //
14015                     GenTreePtr cloneOperand;
14016                     op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
14017                                        nullptr DEBUGARG("inline UNBOX clone1"));
14018                     op1 = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
14019
14020                     GenTreePtr condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2);
14021
14022                     op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
14023                                        nullptr DEBUGARG("inline UNBOX clone2"));
14024                     op2 = impTokenToHandle(&resolvedToken);
14025                     if (op2 == nullptr)
14026                     { // compDonotInline()
14027                         return;
14028                     }
14029                     args = gtNewArgList(op2, op1);
14030                     op1  = gtNewHelperCallNode(helper, TYP_VOID, 0, args);
14031
14032                     op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1);
14033                     op1 = gtNewQmarkNode(TYP_VOID, condBox, op1);
14034                     condBox->gtFlags |= GTF_RELOP_QMARK;
14035
14036                     // QMARK nodes cannot reside on the evaluation stack. Because there
14037                     // may be other trees on the evaluation stack that side-effect the
14038                     // sources of the UNBOX operation we must spill the stack.
14039
14040                     impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
14041
14042                     // Create the address-expression to reference past the object header
14043                     // to the beginning of the value-type. Today this means adjusting
14044                     // past the base of the object's vtable field, which is pointer sized.
14045
14046                     op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
14047                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2);
14048                 }
14049                 else
14050                 {
14051                     unsigned callFlags = (helper == CORINFO_HELP_UNBOX) ? 0 : GTF_EXCEPT;
14052
14053                     // Don't optimize, just call the helper and be done with it
14054                     args = gtNewArgList(op2, op1);
14055                     op1  = gtNewHelperCallNode(helper,
14056                                               (var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT),
14057                                               callFlags, args);
14058                 }
14059
14060                 assert(helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF || // Unbox helper returns a byref.
14061                        helper == CORINFO_HELP_UNBOX_NULLABLE &&
14062                            varTypeIsStruct(op1) // UnboxNullable helper returns a struct.
14063                        );
14064
14065                 /*
14066                   ----------------------------------------------------------------------
14067                   | \ helper  |                         |                              |
14068                   |   \       |                         |                              |
14069                   |     \     | CORINFO_HELP_UNBOX      | CORINFO_HELP_UNBOX_NULLABLE  |
14070                   |       \   | (which returns a BYREF) | (which returns a STRUCT)     |
14071                   | opcode  \ |                         |                              |
14072                   |---------------------------------------------------------------------
14073                   | UNBOX     | push the BYREF          | spill the STRUCT to a local, |
14074                   |           |                         | push the BYREF to this local |
14075                   |---------------------------------------------------------------------
14076                   | UNBOX_ANY | push a GT_OBJ of        | push the STRUCT              |
14077                   |           | the BYREF               | For Linux when the           |
14078                   |           |                         |  struct is returned in two   |
14079                   |           |                         |  registers create a temp     |
14080                   |           |                         |  which address is passed to  |
14081                   |           |                         |  the unbox_nullable helper.  |
14082                   |---------------------------------------------------------------------
14083                 */
14084
14085                 if (opcode == CEE_UNBOX)
14086                 {
14087                     if (helper == CORINFO_HELP_UNBOX_NULLABLE)
14088                     {
14089                         // Unbox nullable helper returns a struct type.
14090                         // We need to spill it to a temp so that we can take the address of it.
14091                         // Here we need an unsafe value cls check, since the address of the struct is taken to be
14092                         // used further along and could potentially be exploitable.
14093
14094                         unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable"));
14095                         lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14096
14097                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14098                         op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14099                         assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14100
14101                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14102                         op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14103                         op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14104                     }
14105
14106                     assert(op1->gtType == TYP_BYREF);
14107                     assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14108                 }
14109                 else
14110                 {
14111                     assert(opcode == CEE_UNBOX_ANY);
14112
14113                     if (helper == CORINFO_HELP_UNBOX)
14114                     {
14115                         // Normal unbox helper returns a TYP_BYREF.
14116                         impPushOnStack(op1, tiRetVal);
14117                         oper = GT_OBJ;
14118                         goto OBJ;
14119                     }
14120
14121                     assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!");
14122
14123 #if FEATURE_MULTIREG_RET
14124
14125                     if (varTypeIsStruct(op1) && IsMultiRegReturnedType(resolvedToken.hClass))
14126                     {
14127                         // Unbox nullable helper returns a TYP_STRUCT.
14128                         // For the multi-reg case we need to spill it to a temp so that
14129                         // we can pass the address to the unbox_nullable jit helper.
14130
14131                         unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable"));
14132                         lvaTable[tmp].lvIsMultiRegArg = true;
14133                         lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14134
14135                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14136                         op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14137                         assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14138
14139                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14140                         op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14141                         op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14142
14143                         // In this case the return value of the unbox helper is TYP_BYREF.
14144                         // Make sure the right type is placed on the operand type stack.
14145                         impPushOnStack(op1, tiRetVal);
14146
14147                         // Load the struct.
14148                         oper = GT_OBJ;
14149
14150                         assert(op1->gtType == TYP_BYREF);
14151                         assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14152
14153                         goto OBJ;
14154                     }
14155                     else
14156
14157 #endif // !FEATURE_MULTIREG_RET
14158
14159                     {
14160                         // If the struct is not register-passable, we have it materialized in the RetBuf.
14161                         assert(op1->gtType == TYP_STRUCT);
14162                         tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14163                         assert(tiRetVal.IsValueClass());
14164                     }
14165                 }
14166
14167                 impPushOnStack(op1, tiRetVal);
14168             }
14169             break;
14170
14171             case CEE_BOX:
14172             {
14173                 /* Get the Class index */
14174                 assertImp(sz == sizeof(unsigned));
14175
14176                 _impResolveToken(CORINFO_TOKENKIND_Box);
14177
14178                 JITDUMP(" %08X", resolvedToken.token);
14179
14180                 if (tiVerificationNeeded)
14181                 {
14182                     typeInfo tiActual = impStackTop().seTypeInfo;
14183                     typeInfo tiBox    = verMakeTypeInfo(resolvedToken.hClass);
14184
14185                     Verify(verIsBoxable(tiBox), "boxable type expected");
14186
14187                     // check the class constraints of the boxed type in case we are boxing an uninitialized value
14188                     Verify(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
14189                            "boxed type has unsatisfied class constraints");
14190
14191                     Verify(tiCompatibleWith(tiActual, tiBox.NormaliseForStack(), true), "type mismatch");
14192
14193                     // Observation: the following code introduces a boxed value class on the stack, but,
14194                     // according to the ECMA spec, one would simply expect: tiRetVal =
14195                     // typeInfo(TI_REF,impGetObjectClass());
14196
14197                     // Push the result back on the stack:
14198                     // even if clsHnd is a value class we want the TI_REF;
14199                     // we call back to the EE to find out what type we should push (for Nullable<T> we push T)
14200                     tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(resolvedToken.hClass));
14201                 }
14202
14203                 accessAllowedResult =
14204                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14205                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14206
14207                 // Note BOX can be used on things that are not value classes, in which
14208                 // case we get a NOP.  However the verifier's view of the type on the
14209                 // stack changes (in generic code a 'T' becomes a 'boxed T')
14210                 if (!eeIsValueClass(resolvedToken.hClass))
14211                 {
14212                     verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal;
14213                     break;
14214                 }
14215
14216                 // Look ahead for unbox.any
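                // A 'box T' immediately followed by 'unbox.any T' on the same (non-shared-instantiation)
                // type is a no-op, so consume the unbox.any here and leave the value on the stack unchanged.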
14217                 if (codeAddr + (sz + 1 + sizeof(mdToken)) <= codeEndp && codeAddr[sz] == CEE_UNBOX_ANY)
14218                 {
14219                     DWORD classAttribs = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
14220                     if (!(classAttribs & CORINFO_FLG_SHAREDINST))
14221                     {
14222                         CORINFO_RESOLVED_TOKEN unboxResolvedToken;
14223
14224                         impResolveToken(codeAddr + (sz + 1), &unboxResolvedToken, CORINFO_TOKENKIND_Class);
14225
14226                         if (unboxResolvedToken.hClass == resolvedToken.hClass)
14227                         {
14228                             // Skip the next unbox.any instruction
14229                             sz += sizeof(mdToken) + 1;
14230                             break;
14231                         }
14232                     }
14233                 }
14234
14235                 impImportAndPushBox(&resolvedToken);
14236                 if (compDonotInline())
14237                 {
14238                     return;
14239                 }
14240             }
14241             break;
14242
14243             case CEE_SIZEOF:
14244
14245                 /* Get the Class index */
14246                 assertImp(sz == sizeof(unsigned));
14247
14248                 _impResolveToken(CORINFO_TOKENKIND_Class);
14249
14250                 JITDUMP(" %08X", resolvedToken.token);
14251
14252                 if (tiVerificationNeeded)
14253                 {
14254                     tiRetVal = typeInfo(TI_INT);
14255                 }
14256
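                // sizeof folds to a compile-time constant: the EE-reported size of the type, pushed as an int.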
14257                 op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass));
14258                 impPushOnStack(op1, tiRetVal);
14259                 break;
14260
14261             case CEE_CASTCLASS:
14262
14263                 /* Get the Class index */
14264
14265                 assertImp(sz == sizeof(unsigned));
14266
14267                 _impResolveToken(CORINFO_TOKENKIND_Casting);
14268
14269                 JITDUMP(" %08X", resolvedToken.token);
14270
14271                 if (!opts.IsReadyToRun())
14272                 {
14273                     op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14274                     if (op2 == nullptr)
14275                     { // compDonotInline()
14276                         return;
14277                     }
14278                 }
14279
14280                 if (tiVerificationNeeded)
14281                 {
14282                     Verify(impStackTop().seTypeInfo.IsObjRef(), "object ref expected");
14283                     // box it
14284                     tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
14285                 }
14286
14287                 accessAllowedResult =
14288                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14289                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14290
14291                 op1 = impPopStack().val;
14292
14293             /* Pop the address and create the 'checked cast' helper call */
14294
14295             // At this point we expect typeRef to contain the token, op1 to contain the value being cast,
14296             // and op2 to contain code that creates the type handle corresponding to typeRef
14297             CASTCLASS:
14298
14299 #ifdef FEATURE_READYTORUN_COMPILER
14300                 if (opts.IsReadyToRun())
14301                 {
14302                     GenTreePtr opLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST,
14303                                                                     TYP_REF, gtNewArgList(op1));
14304                     usingReadyToRunHelper = (opLookup != nullptr);
14305                     op1                   = (usingReadyToRunHelper ? opLookup : op1);
14306
14307                     if (!usingReadyToRunHelper)
14308                     {
14309                         // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14310                         // and the chkcastany call with a single call to a dynamic R2R cell that will:
14311                         //      1) Load the context
14312                         //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
14313                         //      3) Check the object on the stack for the type-cast
14314                         // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14315
14316                         op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14317                         if (op2 == nullptr)
14318                         { // compDonotInline()
14319                             return;
14320                         }
14321                     }
14322                 }
14323
14324                 if (!usingReadyToRunHelper)
14325 #endif
14326                 {
14327                     op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true);
14328                 }
14329                 if (compDonotInline())
14330                 {
14331                     return;
14332                 }
14333
14334                 /* Push the result back on the stack */
14335                 impPushOnStack(op1, tiRetVal);
14336                 break;
14337
14338             case CEE_THROW:
14339
14340                 if (compIsForInlining())
14341                 {
14342                     // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
14343                     // TODO: Will this be too strict, given that we will inline many basic blocks?
14344                     // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
14345
14346                     /* Do we have just the exception on the stack ?*/
14347
14348                     if (verCurrentState.esStackDepth != 1)
14349                     {
14350                         /* if not, just don't inline the method */
14351
14352                         compInlineResult->NoteFatal(InlineObservation::CALLEE_THROW_WITH_INVALID_STACK);
14353                         return;
14354                     }
14355                 }
14356
14357                 if (tiVerificationNeeded)
14358                 {
14359                     tiRetVal = impStackTop().seTypeInfo;
14360                     Verify(tiRetVal.IsObjRef(), "object ref expected");
14361                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
14362                     {
14363                         Verify(!tiRetVal.IsThisPtr(), "throw uninitialized this");
14364                     }
14365                 }
14366
14367                 block->bbSetRunRarely(); // any block with a throw is rare
14368                 /* Pop the exception object and create the 'throw' helper call */
14369
14370                 op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, GTF_EXCEPT, gtNewArgList(impPopStack().val));
14371
14372             EVAL_APPEND:
14373                 if (verCurrentState.esStackDepth > 0)
14374                 {
14375                     impEvalSideEffects();
14376                 }
14377
14378                 assert(verCurrentState.esStackDepth == 0);
14379
14380                 goto APPEND;
14381
14382             case CEE_RETHROW:
14383
14384                 assert(!compIsForInlining());
14385
14386                 if (info.compXcptnsCount == 0)
14387                 {
14388                     BADCODE("rethrow outside catch");
14389                 }
14390
14391                 if (tiVerificationNeeded)
14392                 {
14393                     Verify(block->hasHndIndex(), "rethrow outside catch");
14394                     if (block->hasHndIndex())
14395                     {
14396                         EHblkDsc* HBtab = ehGetDsc(block->getHndIndex());
14397                         Verify(!HBtab->HasFinallyOrFaultHandler(), "rethrow in finally or fault");
14398                         if (HBtab->HasFilter())
14399                         {
14400                             // we better be in the handler clause part, not the filter part
14401                             Verify(jitIsBetween(compCurBB->bbCodeOffs, HBtab->ebdHndBegOffs(), HBtab->ebdHndEndOffs()),
14402                                    "rethrow in filter");
14403                         }
14404                     }
14405                 }
14406
14407                 /* Create the 'rethrow' helper call */
14408
14409                 op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID, GTF_EXCEPT);
14410
14411                 goto EVAL_APPEND;
14412
14413             case CEE_INITOBJ:
14414
14415                 assertImp(sz == sizeof(unsigned));
14416
14417                 _impResolveToken(CORINFO_TOKENKIND_Class);
14418
14419                 JITDUMP(" %08X", resolvedToken.token);
14420
14421                 if (tiVerificationNeeded)
14422                 {
14423                     typeInfo tiTo    = impStackTop().seTypeInfo;
14424                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14425
14426                     Verify(tiTo.IsByRef(), "byref expected");
14427                     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
14428
14429                     Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
14430                            "type operand incompatible with type of address");
14431                 }
14432
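                      // Import of initobj: pop the destination address, wrap it in a block node of the
                      // class's size, and assign a zero constant over it (an ordinary zero-fill block op).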
14433                 size = info.compCompHnd->getClassSize(resolvedToken.hClass); // Size
14434                 op2  = gtNewIconNode(0);                                     // Value
14435                 op1  = impPopStack().val;                                    // Dest
14436                 op1  = gtNewBlockVal(op1, size);
14437                 op1  = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
14438                 goto SPILL_APPEND;
14439
14440             case CEE_INITBLK:
14441
14442                 if (tiVerificationNeeded)
14443                 {
14444                     Verify(false, "bad opcode");
14445                 }
14446
14447                 op3 = impPopStack().val; // Size
14448                 op2 = impPopStack().val; // Value
14449                 op1 = impPopStack().val; // Dest
14450
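                      // A constant size lets us build a fixed-size GT_BLK node here; otherwise we use a
                      // GT_DYN_BLK whose size operand is evaluated at run time.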
14451                 if (op3->IsCnsIntOrI())
14452                 {
14453                     size = (unsigned)op3->AsIntConCommon()->IconValue();
14454                     op1  = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
14455                 }
14456                 else
14457                 {
14458                     op1  = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
14459                     size = 0;
14460                 }
14461                 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
14462
14463                 goto SPILL_APPEND;
14464
14465             case CEE_CPBLK:
14466
14467                 if (tiVerificationNeeded)
14468                 {
14469                     Verify(false, "bad opcode");
14470                 }
14471                 op3 = impPopStack().val; // Size
14472                 op2 = impPopStack().val; // Src
14473                 op1 = impPopStack().val; // Dest
14474
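                      // As for initblk: a constant size yields a fixed-size GT_BLK, a variable size yields a
                      // GT_DYN_BLK. The source is then turned into a struct-typed location (by stripping a
                      // GT_ADDR, or by adding a GT_IND) before the copy-block assignment is built.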
14475                 if (op3->IsCnsIntOrI())
14476                 {
14477                     size = (unsigned)op3->AsIntConCommon()->IconValue();
14478                     op1  = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
14479                 }
14480                 else
14481                 {
14482                     op1  = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
14483                     size = 0;
14484                 }
14485                 if (op2->OperGet() == GT_ADDR)
14486                 {
14487                     op2 = op2->gtOp.gtOp1;
14488                 }
14489                 else
14490                 {
14491                     op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2);
14492                 }
14493
14494                 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, true);
14495                 goto SPILL_APPEND;
14496
14497             case CEE_CPOBJ:
14498
14499                 assertImp(sz == sizeof(unsigned));
14500
14501                 _impResolveToken(CORINFO_TOKENKIND_Class);
14502
14503                 JITDUMP(" %08X", resolvedToken.token);
14504
14505                 if (tiVerificationNeeded)
14506                 {
14507                     typeInfo tiFrom  = impStackTop().seTypeInfo;
14508                     typeInfo tiTo    = impStackTop(1).seTypeInfo;
14509                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14510
14511                     Verify(tiFrom.IsByRef(), "expected byref source");
14512                     Verify(tiTo.IsByRef(), "expected byref destination");
14513
14514                     Verify(tiCompatibleWith(tiFrom.DereferenceByRef(), tiInstr, false),
14515                            "type of source address incompatible with type operand");
14516                     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
14517                     Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
14518                            "type operand incompatible with type of destination address");
14519                 }
14520
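                      // For reference types cpobj degenerates into a load through the source address followed
                      // by a stind.ref to the destination, so we push the loaded ref and fall into the STIND path.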
14521                 if (!eeIsValueClass(resolvedToken.hClass))
14522                 {
14523                     op1 = impPopStack().val; // address to load from
14524
14525                     impBashVarAddrsToI(op1);
14526
14527                     assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
14528
14529                     op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
14530                     op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
14531
14532                     impPushOnStackNoType(op1);
14533                     opcode = CEE_STIND_REF;
14534                     lclTyp = TYP_REF;
14535                     goto STIND_POST_VERIFY;
14536                 }
14537
14538                 op2 = impPopStack().val; // Src
14539                 op1 = impPopStack().val; // Dest
14540                 op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0));
14541                 goto SPILL_APPEND;
14542
14543             case CEE_STOBJ:
14544             {
14545                 assertImp(sz == sizeof(unsigned));
14546
14547                 _impResolveToken(CORINFO_TOKENKIND_Class);
14548
14549                 JITDUMP(" %08X", resolvedToken.token);
14550
14551                 if (eeIsValueClass(resolvedToken.hClass))
14552                 {
14553                     lclTyp = TYP_STRUCT;
14554                 }
14555                 else
14556                 {
14557                     lclTyp = TYP_REF;
14558                 }
14559
14560                 if (tiVerificationNeeded)
14561                 {
14562
14563                     typeInfo tiPtr = impStackTop(1).seTypeInfo;
14564
14565                     // Make sure we have a good looking byref
14566                     Verify(tiPtr.IsByRef(), "pointer not byref");
14567                     Verify(!tiPtr.IsReadonlyByRef(), "write to readonly byref");
14568                     if (!tiPtr.IsByRef() || tiPtr.IsReadonlyByRef())
14569                     {
14570                         compUnsafeCastUsed = true;
14571                     }
14572
14573                     typeInfo ptrVal = DereferenceByRef(tiPtr);
14574                     typeInfo argVal = verMakeTypeInfo(resolvedToken.hClass);
14575
14576                     if (!tiCompatibleWith(impStackTop(0).seTypeInfo, NormaliseForStack(argVal), true))
14577                     {
14578                         Verify(false, "type of value incompatible with type operand");
14579                         compUnsafeCastUsed = true;
14580                     }
14581
14582                     if (!tiCompatibleWith(argVal, ptrVal, false))
14583                     {
14584                         Verify(false, "type operand incompatible with type of address");
14585                         compUnsafeCastUsed = true;
14586                     }
14587                 }
14588                 else
14589                 {
14590                     compUnsafeCastUsed = true;
14591                 }
14592
14593                 if (lclTyp == TYP_REF)
14594                 {
14595                     opcode = CEE_STIND_REF;
14596                     goto STIND_POST_VERIFY;
14597                 }
14598
14599                 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
14600                 if (impIsPrimitive(jitTyp))
14601                 {
14602                     lclTyp = JITtype2varType(jitTyp);
14603                     goto STIND_POST_VERIFY;
14604                 }
14605
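                      // Otherwise this is a genuine struct store: pop the value and the destination address
                      // and build a struct assignment through the pointer.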
14606                 op2 = impPopStack().val; // Value
14607                 op1 = impPopStack().val; // Ptr
14608
14609                 assertImp(varTypeIsStruct(op2));
14610
14611                 op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14612                 goto SPILL_APPEND;
14613             }
14614
14615             case CEE_MKREFANY:
14616
14617                 assert(!compIsForInlining());
14618
14619                 // Being lazy here. Refanys are tricky in terms of gc tracking.
14620                 // Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany.
14621
14622                 JITDUMP("disabling struct promotion because of mkrefany\n");
14623                 fgNoStructPromotion = true;
14624
14625                 oper = GT_MKREFANY;
14626                 assertImp(sz == sizeof(unsigned));
14627
14628                 _impResolveToken(CORINFO_TOKENKIND_Class);
14629
14630                 JITDUMP(" %08X", resolvedToken.token);
14631
14632                 op2 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
14633                 if (op2 == nullptr)
14634                 { // compDonotInline()
14635                     return;
14636                 }
14637
14638                 if (tiVerificationNeeded)
14639                 {
14640                     typeInfo tiPtr   = impStackTop().seTypeInfo;
14641                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14642
14643                     Verify(!verIsByRefLike(tiInstr), "mkrefany of byref-like class");
14644                     Verify(!tiPtr.IsReadonlyByRef(), "readonly byref used with mkrefany");
14645                     Verify(typeInfo::AreEquivalent(tiPtr.DereferenceByRef(), tiInstr), "type mismatch");
14646                 }
14647
14648                 accessAllowedResult =
14649                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14650                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14651
14652                 op1 = impPopStack().val;
14653
14654                 // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec.
14655                 // But JIT32 allowed it, so we continue to allow it.
14656                 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT);
14657
14658                 // MKREFANY returns a struct.  op2 is the class token.
14659                 op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2);
14660
14661                 impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass()));
14662                 break;
14663
14664             case CEE_LDOBJ:
14665             {
14666                 oper = GT_OBJ;
14667                 assertImp(sz == sizeof(unsigned));
14668
14669                 _impResolveToken(CORINFO_TOKENKIND_Class);
14670
14671                 JITDUMP(" %08X", resolvedToken.token);
14672
14673             OBJ:
14674
14675                 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14676
14677                 if (tiVerificationNeeded)
14678                 {
14679                     typeInfo tiPtr = impStackTop().seTypeInfo;
14680
14681                     // Make sure we have a byref
14682                     if (!tiPtr.IsByRef())
14683                     {
14684                         Verify(false, "pointer not byref");
14685                         compUnsafeCastUsed = true;
14686                     }
14687                     typeInfo tiPtrVal = DereferenceByRef(tiPtr);
14688
14689                     if (!tiCompatibleWith(tiPtrVal, tiRetVal, false))
14690                     {
14691                         Verify(false, "type of address incompatible with type operand");
14692                         compUnsafeCastUsed = true;
14693                     }
14694                     tiRetVal.NormaliseForStack();
14695                 }
14696                 else
14697                 {
14698                     compUnsafeCastUsed = true;
14699                 }
14700
14701                 if (eeIsValueClass(resolvedToken.hClass))
14702                 {
14703                     lclTyp = TYP_STRUCT;
14704                 }
14705                 else
14706                 {
14707                     lclTyp = TYP_REF;
14708                     opcode = CEE_LDIND_REF;
14709                     goto LDIND_POST_VERIFY;
14710                 }
14711
14712                 op1 = impPopStack().val;
14713
14714                 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL);
14715
14716                 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
14717                 if (impIsPrimitive(jitTyp))
14718                 {
14719                     op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1);
14720
14721                     // Could point anywhere, e.g., a boxed class static int
14722                     op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF;
14723                     assertImp(varTypeIsArithmetic(op1->gtType));
14724                 }
14725                 else
14726                 {
14727                     // OBJ returns a struct
14728                     // and takes an inline argument which is the class token of the loaded obj
14729                     op1 = gtNewObjNode(resolvedToken.hClass, op1);
14730                 }
14731                 op1->gtFlags |= GTF_EXCEPT;
14732
14733                 impPushOnStack(op1, tiRetVal);
14734                 break;
14735             }
14736
14737             case CEE_LDLEN:
14738                 if (tiVerificationNeeded)
14739                 {
14740                     typeInfo tiArray = impStackTop().seTypeInfo;
14741                     Verify(verIsSDArray(tiArray), "bad array");
14742                     tiRetVal = typeInfo(TI_INT);
14743                 }
14744
14745                 op1 = impPopStack().val;
14746                 if (!opts.MinOpts() && !opts.compDbgCode)
14747                 {
14748                     /* Use GT_ARR_LENGTH operator so rng check opts see this */
14749                     GenTreeArrLen* arrLen =
14750                         new (this, GT_ARR_LENGTH) GenTreeArrLen(TYP_INT, op1, offsetof(CORINFO_Array, length));
14751
14752                     /* Mark the block as containing a length expression */
14753
14754                     if (op1->gtOper == GT_LCL_VAR)
14755                     {
14756                         block->bbFlags |= BBF_HAS_IDX_LEN;
14757                     }
14758
14759                     op1 = arrLen;
14760                 }
14761                 else
14762                 {
14763                     /* Create the expression "*(array_addr + ArrLenOffs)" */
14764                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
14765                                         gtNewIconNode(offsetof(CORINFO_Array, length), TYP_I_IMPL));
14766                     op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
14767                     op1->gtFlags |= GTF_IND_ARR_LEN;
14768                 }
14769
14770                 /* An indirection will cause a GPF if the address is null */
14771                 op1->gtFlags |= GTF_EXCEPT;
14772
14773                 /* Push the result back on the stack */
14774                 impPushOnStack(op1, tiRetVal);
14775                 break;
14776
14777             case CEE_BREAK:
14778                 op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID);
14779                 goto SPILL_APPEND;
14780
14781             case CEE_NOP:
14782                 if (opts.compDbgCode)
14783                 {
14784                     op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
14785                     goto SPILL_APPEND;
14786                 }
14787                 break;
14788
14789             /******************************** NYI *******************************/
14790
14791             case 0xCC:
14792                 OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n");
14793
14794             case CEE_ILLEGAL:
14795             case CEE_MACRO_END:
14796
14797             default:
14798                 BADCODE3("unknown opcode", ": %02X", (int)opcode);
14799         }
14800
14801         codeAddr += sz;
14802         prevOpcode = opcode;
14803
14804         prefixFlags = 0;
14805         assert(!insertLdloc || opcode == CEE_DUP);
14806     }
14807
14808     assert(!insertLdloc);
14809
14810     return;
14811 #undef _impResolveToken
14812 }
14813 #ifdef _PREFAST_
14814 #pragma warning(pop)
14815 #endif
14816
14817 // Push a local/argument tree on the operand stack
14818 void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal)
14819 {
14820     tiRetVal.NormaliseForStack();
14821
14822     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr())
14823     {
14824         tiRetVal.SetUninitialisedObjRef();
14825     }
14826
14827     impPushOnStack(op, tiRetVal);
14828 }
14829
14830 // Load a local/argument on the operand stack
14831 // lclNum is an index into lvaTable *NOT* the arg/lcl index in the IL
14832 void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, typeInfo tiRetVal)
14833 {
14834     var_types lclTyp;
14835
14836     if (lvaTable[lclNum].lvNormalizeOnLoad())
14837     {
14838         lclTyp = lvaGetRealType(lclNum);
14839     }
14840     else
14841     {
14842         lclTyp = lvaGetActualType(lclNum);
14843     }
14844
14845     impPushVar(gtNewLclvNode(lclNum, lclTyp, offset), tiRetVal);
14846 }
14847
14848 // Load an argument on the operand stack
14849 // Shared by the various CEE_LDARG opcodes
14850 // ilArgNum is the argument index as specified in IL.
14851 // It will be mapped to the correct lvaTable index
14852 void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset)
14853 {
14854     Verify(ilArgNum < info.compILargsCount, "bad arg num");
14855
14856     if (compIsForInlining())
14857     {
14858         if (ilArgNum >= info.compArgsCount)
14859         {
14860             compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER);
14861             return;
14862         }
14863
14864         impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo),
14865                    impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo);
14866     }
14867     else
14868     {
14869         if (ilArgNum >= info.compArgsCount)
14870         {
14871             BADCODE("Bad IL");
14872         }
14873
14874         unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param
14875
14876         if (lclNum == info.compThisArg)
14877         {
14878             lclNum = lvaArg0Var;
14879         }
14880
14881         impLoadVar(lclNum, offset);
14882     }
14883 }
14884
14885 // Load a local on the operand stack
14886 // Shared by the various CEE_LDLOC opcodes
14887 // ilLclNum is the local index as specified in IL.
14888 // It will be mapped to the correct lvaTable index
14889 void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset)
14890 {
14891     if (tiVerificationNeeded)
14892     {
14893         Verify(ilLclNum < info.compMethodInfo->locals.numArgs, "bad loc num");
14894         Verify(info.compInitMem, "initLocals not set");
14895     }
14896
14897     if (compIsForInlining())
14898     {
14899         if (ilLclNum >= info.compMethodInfo->locals.numArgs)
14900         {
14901             compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER);
14902             return;
14903         }
14904
14905         // Get the local type
14906         var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo;
14907
14908         typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo;
14909
14910         /* Have we allocated a temp for this local? */
14911
14912         unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp"));
14913
14914         // All vars of inlined methods should be !lvNormalizeOnLoad()
14915
14916         assert(!lvaTable[lclNum].lvNormalizeOnLoad());
14917         lclTyp = genActualType(lclTyp);
14918
14919         impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal);
14920     }
14921     else
14922     {
14923         if (ilLclNum >= info.compMethodInfo->locals.numArgs)
14924         {
14925             BADCODE("Bad IL");
14926         }
14927
14928         unsigned lclNum = info.compArgsCount + ilLclNum;
14929
14930         impLoadVar(lclNum, offset);
14931     }
14932 }
14933
14934 #ifdef _TARGET_ARM_
14935 /**************************************************************************************
14936  *
14937  *  When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the
14938  *  dst struct, because struct promotion will turn it into a float/double variable while
14939  *  the rhs will be an int/long variable. We don't generate code for assigning an int into
14940  *  a float, but nothing else prevents such a tree from being created. The tree would
14941  *  look like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int))
14942  *
14943  *  tmpNum - the lcl dst variable num that is a struct.
14944  *  src    - the src tree assigned to the dest; it is a struct, or an int in the varargs-call case.
14945  *  hClass - the type handle for the struct variable.
14946  *
14947  *  TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play,
14948  *        however, we could do a codegen of transferring from int to float registers
14949  *        (transfer, not a cast.)
14950  *
14951  */
14952 void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTreePtr src, CORINFO_CLASS_HANDLE hClass)
14953 {
14954     if (src->gtOper == GT_CALL && src->gtCall.IsVarargs() && IsHfa(hClass))
14955     {
14956         int       hfaSlots = GetHfaCount(hClass);
14957         var_types hfaType  = GetHfaType(hClass);
14958
14959         // If the callee is varargs we morph its return type to "int" at import time, irrespective of its
14960         // original type (struct/float), because the ABI specifies that such calls return in integer registers.
14961         // We don't want struct promotion to replace an expression like this:
14962         //   lclFld_int = callvar_int() into lclFld_float = callvar_int();
14963         // This means an int is getting assigned to a float without a cast. Prevent the promotion.
14964         if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) ||
14965             (hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES))
14966         {
14967             // Make sure this struct type stays as struct so we can receive the call in a struct.
14968             lvaTable[tmpNum].lvIsMultiRegRet = true;
14969         }
14970     }
14971 }
14972 #endif // _TARGET_ARM_
14973
14974 #if FEATURE_MULTIREG_RET
14975 GenTreePtr Compiler::impAssignMultiRegTypeToVar(GenTreePtr op, CORINFO_CLASS_HANDLE hClass)
14976 {
14977     unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return."));
14978     impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_NONE);
14979     GenTreePtr ret = gtNewLclvNode(tmpNum, op->gtType);
14980
14981     // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
14982     ret->gtFlags |= GTF_DONT_CSE;
14983
14984     assert(IsMultiRegReturnedType(hClass));
14985
14986     // Mark the var so that fields are not promoted and stay together.
14987     lvaTable[tmpNum].lvIsMultiRegRet = true;
14988
14989     return ret;
14990 }
14991 #endif // FEATURE_MULTIREG_RET
14992
14993 // Do the import for a return instruction.
14994 // Returns false if inlining was aborted.
14995 // opcode can be ret, or call in the case of a tail.call
14996 bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE& opcode)
14997 {
14998     if (tiVerificationNeeded)
14999     {
15000         verVerifyThisPtrInitialised();
15001
15002         unsigned expectedStack = 0;
15003         if (info.compRetType != TYP_VOID)
15004         {
15005             typeInfo tiVal = impStackTop().seTypeInfo;
15006             typeInfo tiDeclared =
15007                 verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
15008
15009             Verify(!verIsByRefLike(tiDeclared) || verIsSafeToReturnByRef(tiVal), "byref return");
15010
15011             Verify(tiCompatibleWith(tiVal, tiDeclared.NormaliseForStack(), true), "type mismatch");
15012             expectedStack = 1;
15013         }
15014         Verify(verCurrentState.esStackDepth == expectedStack, "stack non-empty on return");
15015     }
15016
15017     GenTree*             op2       = nullptr;
15018     GenTree*             op1       = nullptr;
15019     CORINFO_CLASS_HANDLE retClsHnd = nullptr;
15020
15021     if (info.compRetType != TYP_VOID)
15022     {
15023         StackEntry se = impPopStack(retClsHnd);
15024         op2           = se.val;
15025
15026         if (!compIsForInlining())
15027         {
15028             impBashVarAddrsToI(op2);
15029             op2 = impImplicitIorI4Cast(op2, info.compRetType);
15030             op2 = impImplicitR4orR8Cast(op2, info.compRetType);
15031             assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) ||
15032                       ((op2->TypeGet() == TYP_I_IMPL) && (info.compRetType == TYP_BYREF)) ||
15033                       ((op2->TypeGet() == TYP_BYREF) && (info.compRetType == TYP_I_IMPL)) ||
15034                       (varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) ||
15035                       (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType)));
15036
15037 #ifdef DEBUG
15038             if (opts.compGcChecks && info.compRetType == TYP_REF)
15039             {
15040                 // DDB 3483  : JIT Stress: early termination of GC ref's life time in exception code path
15041                 // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with
15042                 // one-return BB.
15043
15044                 assert(op2->gtType == TYP_REF);
15045
15046                 // confirm that the argument is a GC pointer (for debugging (GC stress))
15047                 GenTreeArgList* args = gtNewArgList(op2);
15048                 op2                  = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, 0, args);
15049
15050                 if (verbose)
15051                 {
15052                     printf("\ncompGcChecks tree:\n");
15053                     gtDispTree(op2);
15054                 }
15055             }
15056 #endif
15057         }
15058         else
15059         {
15060             // inlinee's stack should be empty now.
15061             assert(verCurrentState.esStackDepth == 0);
15062
15063 #ifdef DEBUG
15064             if (verbose)
15065             {
15066                 printf("\n\n    Inlinee Return expression (before normalization)  =>\n");
15067                 gtDispTree(op2);
15068             }
15069 #endif
15070
15071             // Make sure the type matches the original call.
15072
15073             var_types returnType       = genActualType(op2->gtType);
15074             var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType;
15075             if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT))
15076             {
15077                 originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass);
15078             }
15079
15080             if (returnType != originalCallType)
15081             {
15082                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH);
15083                 return false;
15084             }
15085
15086             // Below, we are going to set impInlineInfo->retExpr to the tree with the return
15087             // expression. At this point, retExpr could already be set if there are multiple
15088             // return blocks (meaning lvaInlineeReturnSpillTemp != BAD_VAR_NUM) and one of
15089             // the other blocks already set it. If there is only a single return block,
15090             // retExpr shouldn't be set. However, this is not true if we reimport a block
15091             // with a return. In that case, retExpr will be set, then the block will be
15092             // reimported, but retExpr won't get cleared as part of setting the block to
15093             // be reimported. The reimported retExpr value should be the same, so even if
15094             // we don't unconditionally overwrite it, it shouldn't matter.
15095             if (info.compRetNativeType != TYP_STRUCT)
15096             {
15097                 // compRetNativeType is not TYP_STRUCT.
15098                 // This implies it could be either a scalar type or SIMD vector type or
15099                 // a struct type that can be normalized to a scalar type.
15100
15101                 if (varTypeIsStruct(info.compRetType))
15102                 {
15103                     noway_assert(info.compRetBuffArg == BAD_VAR_NUM);
15104                     // adjust the type away from struct to integral
15105                     // and no normalizing
15106                     op2 = impFixupStructReturnType(op2, retClsHnd);
15107                 }
15108                 else
15109                 {
15110                     // Do we have to normalize?
15111                     var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType);
15112                     if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) &&
15113                         fgCastNeeded(op2, fncRealRetType))
15114                     {
15115                         // Small-typed return values are normalized by the callee
15116                         op2 = gtNewCastNode(TYP_INT, op2, fncRealRetType);
15117                     }
15118                 }
15119
15120                 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15121                 {
15122                     assert(info.compRetNativeType != TYP_VOID &&
15123                            (fgMoreThanOneReturnBlock() || impInlineInfo->hasPinnedLocals));
15124
15125                     // This is a bit of a workaround...
15126                     // If we are inlining a call that returns a struct, where the actual "native" return type is
15127                     // not a struct (for example, the struct is composed of exactly one int, and the native
15128                     // return type is thus an int), and the inlinee has multiple return blocks (thus,
15129                     // lvaInlineeReturnSpillTemp is != BAD_VAR_NUM, and is the index of a local var that is set
15130                     // to the *native* return type), and at least one of the return blocks is the result of
15131                     // a call, then we have a problem. The situation is like this (from a failed test case):
15132                     //
15133                     // inliner:
15134                     //      // Note: valuetype plinq_devtests.LazyTests/LIX is a struct with only a single int
15135                     //      call !!0 [mscorlib]System.Threading.LazyInitializer::EnsureInitialized<valuetype
15136                     //      plinq_devtests.LazyTests/LIX>(!!0&, bool&, object&, class [mscorlib]System.Func`1<!!0>)
15137                     //
15138                     // inlinee:
15139                     //      ...
15140                     //      ldobj      !!T                 // this gets bashed to a GT_LCL_FLD, type TYP_INT
15141                     //      ret
15142                     //      ...
15143                     //      call       !!0 System.Threading.LazyInitializer::EnsureInitializedCore<!!0>(!!0&, bool&,
15144                     //      object&, class System.Func`1<!!0>)
15145                     //      ret
15146                     //
15147                     // In the code above, when we call impFixupStructReturnType(), we will change the op2 return type
15148                     // of the inlinee return node, but we don't do that for GT_CALL nodes, which we delay until
15149                     // morphing when we call fgFixupStructReturn(). We do this, apparently, to handle nested
15150                     // inlining properly by leaving the correct type on the GT_CALL node through importing.
15151                     //
15152                     // To fix this, for this case, we temporarily change the GT_CALL node type to the
15153                     // native return type, which is what it will be set to eventually. We generate the
15154                     // assignment to the return temp, using the correct type, and then restore the GT_CALL
15155                     // node type. During morphing, the GT_CALL will get the correct, final, native return type.
15156
15157                     bool restoreType = false;
15158                     if ((op2->OperGet() == GT_CALL) && (info.compRetType == TYP_STRUCT))
15159                     {
15160                         noway_assert(op2->TypeGet() == TYP_STRUCT);
15161                         op2->gtType = info.compRetNativeType;
15162                         restoreType = true;
15163                     }
15164
15165                     impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15166                                      (unsigned)CHECK_SPILL_ALL);
15167
15168                     GenTreePtr tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, op2->TypeGet());
15169
15170                     if (restoreType)
15171                     {
15172                         op2->gtType = TYP_STRUCT; // restore it to what it was
15173                     }
15174
15175                     op2 = tmpOp2;
15176
15177 #ifdef DEBUG
15178                     if (impInlineInfo->retExpr)
15179                     {
15180                         // Some other block(s) have seen the CEE_RET first.
15181                         // Better they spilled to the same temp.
15182                         assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR);
15183                         assert(impInlineInfo->retExpr->gtLclVarCommon.gtLclNum == op2->gtLclVarCommon.gtLclNum);
15184                     }
15185 #endif
15186                 }
15187
15188 #ifdef DEBUG
15189                 if (verbose)
15190                 {
15191                     printf("\n\n    Inlinee Return expression (after normalization) =>\n");
15192                     gtDispTree(op2);
15193                 }
15194 #endif
15195
15196                 // Report the return expression
15197                 impInlineInfo->retExpr = op2;
15198             }
15199             else
15200             {
15201                 // compRetNativeType is TYP_STRUCT.
15202                 // This implies the struct is returned via a RetBuf arg or as a multi-reg struct return.
15203
15204                 GenTreePtr iciCall = impInlineInfo->iciCall;
15205                 assert(iciCall->gtOper == GT_CALL);
15206
15207                 // Assign the inlinee return into a spill temp.
15208                 // spill temp only exists if there are multiple return points
15209                 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15210                 {
15211                     // in this case we have to insert multiple struct copies to the temp
15212                     // and the retexpr is just the temp.
15213                     assert(info.compRetNativeType != TYP_VOID);
15214                     assert(fgMoreThanOneReturnBlock() || impInlineInfo->hasPinnedLocals);
15215
15216                     impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15217                                      (unsigned)CHECK_SPILL_ALL);
15218                 }
15219
15220 #if defined(_TARGET_ARM_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15221 #if defined(_TARGET_ARM_)
15222                 // TODO-ARM64-NYI: HFA
15223                 // TODO-AMD64-Unix and TODO-ARM: once the ARM64 functionality is implemented, the
15224                 // next ifdefs could be refactored into a single method with the ifdef inside.
15225                 if (IsHfa(retClsHnd))
15226                 {
15227 // Same as !IsHfa but just don't bother with impAssignStructPtr.
15228 #else  // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15229                 ReturnTypeDesc retTypeDesc;
15230                 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15231                 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
15232
15233                 if (retRegCount != 0)
15234                 {
15235                     // If single eightbyte, the return type would have been normalized and there won't be a temp var.
15236                     // This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes -
15237                     // max allowed.)
15238                     assert(retRegCount == MAX_RET_REG_COUNT);
15239                     // Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr.
15240                     CLANG_FORMAT_COMMENT_ANCHOR;
15241 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15242
15243                     if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15244                     {
15245                         if (!impInlineInfo->retExpr)
15246                         {
15247 #if defined(_TARGET_ARM_)
15248                             impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType);
15249 #else  // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15250                             // The inlinee compiler has figured out the type of the temp already. Use it here.
15251                             impInlineInfo->retExpr =
15252                                 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
15253 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15254                         }
15255                     }
15256                     else
15257                     {
15258                         impInlineInfo->retExpr = op2;
15259                     }
15260                 }
15261                 else
15262 #elif defined(_TARGET_ARM64_)
15263                 ReturnTypeDesc retTypeDesc;
15264                 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15265                 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
15266
15267                 if (retRegCount != 0)
15268                 {
15269                     assert(!iciCall->AsCall()->HasRetBufArg());
15270                     assert(retRegCount >= 2);
15271                     if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15272                     {
15273                         if (!impInlineInfo->retExpr)
15274                         {
15275                             // The inlinee compiler has figured out the type of the temp already. Use it here.
15276                             impInlineInfo->retExpr =
15277                                 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
15278                         }
15279                     }
15280                     else
15281                     {
15282                         impInlineInfo->retExpr = op2;
15283                     }
15284                 }
15285                 else
15286 #endif // defined(_TARGET_ARM64_)
15287                 {
15288                     assert(iciCall->AsCall()->HasRetBufArg());
15289                     GenTreePtr dest = gtCloneExpr(iciCall->gtCall.gtCallArgs->gtOp.gtOp1);
15290                     // spill temp only exists if there are multiple return points
15291                     if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15292                     {
15293                         // If this is the first return we have seen, set the retExpr
15294                         if (!impInlineInfo->retExpr)
15295                         {
15296                             impInlineInfo->retExpr =
15297                                 impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType),
15298                                                    retClsHnd, (unsigned)CHECK_SPILL_ALL);
15299                         }
15300                     }
15301                     else
15302                     {
15303                         impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
15304                     }
15305                 }
15306             }
15307         }
15308     }
15309
15310     if (compIsForInlining())
15311     {
15312         return true;
15313     }
15314
15315     if (info.compRetType == TYP_VOID)
15316     {
15317         // return void
15318         op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
15319     }
15320     else if (info.compRetBuffArg != BAD_VAR_NUM)
15321     {
15322         // Assign value to return buff (first param)
15323         GenTreePtr retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF, impCurStmtOffs);
15324
15325         op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
15326         impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
15327
15328         // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX).
15329         CLANG_FORMAT_COMMENT_ANCHOR;
15330
15331 #if defined(_TARGET_AMD64_)
15332
15333         // The x64 (System V and Win64) calling convention requires the implicit
15334         // return buffer to be returned explicitly (in RAX).
15335         // Change the return type to be BYREF.
15336         op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
15337 #else  // !defined(_TARGET_AMD64_)
15338         // On non-AMD64 targets the profiler hook requires returning the implicit RetBuf explicitly (in RAX).
15339         // In that case the return value of the function is changed to BYREF.
15340         // If the profiler hook is not needed, the return type of the function is TYP_VOID.
15341         if (compIsProfilerHookNeeded())
15342         {
15343             op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
15344         }
15345         else
15346         {
15347             // return void
15348             op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
15349         }
15350 #endif // !defined(_TARGET_AMD64_)
15351     }
15352     else if (varTypeIsStruct(info.compRetType))
15353     {
15354 #if !FEATURE_MULTIREG_RET
15355         // For both ARM architectures the HFA native types are maintained as structs.
15356         // Also, on System V AMD64 multireg struct returns are left as structs.
15357         noway_assert(info.compRetNativeType != TYP_STRUCT);
15358 #endif
15359         op2 = impFixupStructReturnType(op2, retClsHnd);
15360         // return op2
15361         op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetNativeType), op2);
15362     }
15363     else
15364     {
15365         // return op2
15366         op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2);
15367     }
15368
15369     // We must have imported a tailcall and jumped to RET
15370     if (prefixFlags & PREFIX_TAILCALL)
15371     {
15372 #ifndef _TARGET_AMD64_
15373         // Jit64 compat:
15374         // This cannot be asserted on Amd64 since we permit the following IL pattern:
15375         //      tail.call
15376         //      pop
15377         //      ret
15378         assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode));
15379 #endif
15380
15381         opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES
15382
15383         // impImportCall() would have already appended TYP_VOID calls
15384         if (info.compRetType == TYP_VOID)
15385         {
15386             return true;
15387         }
15388     }
15389
15390     impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
15391 #ifdef DEBUG
15392     // Remember at which BC offset the tree was finished
15393     impNoteLastILoffs();
15394 #endif
15395     return true;
15396 }
15397
15398 /*****************************************************************************
15399  *  Mark the block as unimported.
15400  *  Note that the caller is responsible for calling impImportBlockPending(),
15401  *  with the appropriate stack-state
15402  */
15403
15404 inline void Compiler::impReimportMarkBlock(BasicBlock* block)
15405 {
15406 #ifdef DEBUG
15407     if (verbose && (block->bbFlags & BBF_IMPORTED))
15408     {
15409         printf("\nBB%02u will be reimported\n", block->bbNum);
15410     }
15411 #endif
15412
15413     block->bbFlags &= ~BBF_IMPORTED;
15414 }
15415
15416 /*****************************************************************************
15417  *  Mark the successors of the given block as unimported.
15418  *  Note that the caller is responsible for calling impImportBlockPending()
15419  *  for all the successors, with the appropriate stack-state.
15420  */
15421
15422 void Compiler::impReimportMarkSuccessors(BasicBlock* block)
15423 {
15424     for (unsigned i = 0; i < block->NumSucc(); i++)
15425     {
15426         impReimportMarkBlock(block->GetSucc(i));
15427     }
15428 }
15429
15430 /*****************************************************************************
15431  *
15432  *  Filter wrapper to handle only the passed-in exception code
15433  *  (any other exception continues the search).
15434  */
15435
15436 LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
15437 {
15438     if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION)
15439     {
15440         return EXCEPTION_EXECUTE_HANDLER;
15441     }
15442
15443     return EXCEPTION_CONTINUE_SEARCH;
15444 }
15445
15446 void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
15447 {
15448     assert(block->hasTryIndex());
15449     assert(!compIsForInlining());
15450
15451     unsigned  tryIndex = block->getTryIndex();
15452     EHblkDsc* HBtab    = ehGetDsc(tryIndex);
15453
15454     if (isTryStart)
15455     {
15456         assert(block->bbFlags & BBF_TRY_BEG);
15457
15458         // The Stack must be empty
15459         //
15460         if (block->bbStkDepth != 0)
15461         {
15462             BADCODE("Evaluation stack must be empty on entry into a try block");
15463         }
15464     }
15465
15466     // Save the stack contents, we'll need to restore it later
15467     //
15468     SavedStack blockState;
15469     impSaveStackState(&blockState, false);
15470
15471     while (HBtab != nullptr)
15472     {
15473         if (isTryStart)
15474         {
15475             // Are we verifying that an instance constructor properly initializes its 'this' pointer once?
15476             //  We do not allow the 'this' pointer to be uninitialized when entering most kinds of try regions
15477             //
15478             if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
15479             {
15480                 // We trigger an invalid program exception here unless we have a try/fault region.
15481                 //
15482                 if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter())
15483                 {
15484                     BADCODE(
15485                         "The 'this' pointer of an instance constructor is not initialized upon entry to a try region");
15486                 }
15487                 else
15488                 {
15489                     // Allow a try/fault region to proceed.
15490                     assert(HBtab->HasFaultHandler());
15491                 }
15492             }
15493
15494             /* Recursively process the handler block */
15495             BasicBlock* hndBegBB = HBtab->ebdHndBeg;
15496
15497             //  Construct the proper verification stack state
15498             //   either empty or one that contains just
15499             //   the Exception Object that we are dealing with
15500             //
15501             verCurrentState.esStackDepth = 0;
15502
15503             if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp))
15504             {
15505                 CORINFO_CLASS_HANDLE clsHnd;
15506
15507                 if (HBtab->HasFilter())
15508                 {
15509                     clsHnd = impGetObjectClass();
15510                 }
15511                 else
15512                 {
15513                     CORINFO_RESOLVED_TOKEN resolvedToken;
15514
15515                     resolvedToken.tokenContext = impTokenLookupContextHandle;
15516                     resolvedToken.tokenScope   = info.compScopeHnd;
15517                     resolvedToken.token        = HBtab->ebdTyp;
15518                     resolvedToken.tokenType    = CORINFO_TOKENKIND_Class;
15519                     info.compCompHnd->resolveToken(&resolvedToken);
15520
15521                     clsHnd = resolvedToken.hClass;
15522                 }
15523
15524                 // push the catch arg on the stack, spilling to a temp if necessary
15525                 // Note: can update HBtab->ebdHndBeg!
15526                 hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd);
15527             }
15528
15529             // Queue up the handler for importing
15530             //
15531             impImportBlockPending(hndBegBB);
15532
15533             if (HBtab->HasFilter())
15534             {
15535                 /* @VERIFICATION : Ideally the end of filter state should get
15536                    propagated to the catch handler; this is an incompleteness,
15537                    but is not a security/compliance issue, since the only
15538                    interesting state is the 'thisInit' state.
15539                    */
15540
15541                 verCurrentState.esStackDepth = 0;
15542
15543                 BasicBlock* filterBB = HBtab->ebdFilter;
15544
15545                 // push the catch arg on the stack, spilling to a temp if necessary
15546                 // Note: can update HBtab->ebdFilter!
15547                 filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass());
15548
15549                 impImportBlockPending(filterBB);
15550             }
15551         }
15552         else if (verTrackObjCtorInitState && HBtab->HasFaultHandler())
15553         {
15554             /* Recursively process the handler block */
15555
15556             verCurrentState.esStackDepth = 0;
15557
15558             // Queue up the fault handler for importing
15559             //
15560             impImportBlockPending(HBtab->ebdHndBeg);
15561         }
15562
15563         // Now process our enclosing try index (if any)
15564         //
15565         tryIndex = HBtab->ebdEnclosingTryIndex;
15566         if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
15567         {
15568             HBtab = nullptr;
15569         }
15570         else
15571         {
15572             HBtab = ehGetDsc(tryIndex);
15573         }
15574     }
15575
15576     // Restore the stack contents
15577     impRestoreStackState(&blockState);
15578 }
15579
15580 //***************************************************************
15581 // Import the instructions for the given basic block.  Perform
15582 // verification, throwing an exception on failure.  Push any successor blocks that are enabled for the first
15583 // time, or whose verification pre-state is changed.
15584
15585 #ifdef _PREFAST_
15586 #pragma warning(push)
15587 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
15588 #endif
15589 void Compiler::impImportBlock(BasicBlock* block)
15590 {
15591     // BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to
15592     // handle them specially. In particular, there is no IL to import for them, but we do need
15593     // to mark them as imported and put their successors on the pending import list.
15594     if (block->bbFlags & BBF_INTERNAL)
15595     {
15596         JITDUMP("Marking BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", block->bbNum);
15597         block->bbFlags |= BBF_IMPORTED;
15598
15599         for (unsigned i = 0; i < block->NumSucc(); i++)
15600         {
15601             impImportBlockPending(block->GetSucc(i));
15602         }
15603
15604         return;
15605     }
15606
15607     bool markImport;
15608
15609     assert(block);
15610
15611     /* Make the block globally available */
15612
15613     compCurBB = block;
15614
15615 #ifdef DEBUG
15616     /* Initialize the debug variables */
15617     impCurOpcName = "unknown";
15618     impCurOpcOffs = block->bbCodeOffs;
15619 #endif
15620
15621     /* Set the current stack state to the merged result */
15622     verResetCurrentState(block, &verCurrentState);
15623
15624     /* Now walk the code and import the IL into GenTrees */
15625
15626     struct FilterVerificationExceptionsParam
15627     {
15628         Compiler*   pThis;
15629         BasicBlock* block;
15630     };
15631     FilterVerificationExceptionsParam param;
15632
15633     param.pThis = this;
15634     param.block = block;
15635
15636     PAL_TRY(FilterVerificationExceptionsParam*, pParam, &param)
15637     {
15638         /* @VERIFICATION : For now, the only state propagation from try
15639            to its handler is "thisInit" state (stack is empty at start of try).
15640            In general, for state that we track in verification, we need to
15641            model the possibility that an exception might happen at any IL
15642            instruction, so we really need to merge all states that obtain
15643            between IL instructions in a try block into the start states of
15644            all handlers.
15645
15646            However we do not allow the 'this' pointer to be uninitialized when
15647            entering most kinds of try regions (only try/fault are allowed to have
15648            an uninitialized this pointer on entry to the try)
15649
15650            Fortunately, the stack is thrown away when an exception
15651            leads to a handler, so we don't have to worry about that.
15652            We DO, however, have to worry about the "thisInit" state.
15653            But only for the try/fault case.
15654
15655            The only allowed transition is from TIS_Uninit to TIS_Init.
15656
15657            So, for a try/fault region, for the fault handler block
15658            we will merge the start state of the try begin
15659            and the post-state of each block that is part of this try region
15660         */
15661
15662         // merge the start state of the try begin
15663         //
15664         if (pParam->block->bbFlags & BBF_TRY_BEG)
15665         {
15666             pParam->pThis->impVerifyEHBlock(pParam->block, true);
15667         }
15668
15669         pParam->pThis->impImportBlockCode(pParam->block);
15670
15671         // As discussed above:
15672         // merge the post-state of each block that is part of this try region
15673         //
15674         if (pParam->block->hasTryIndex())
15675         {
15676             pParam->pThis->impVerifyEHBlock(pParam->block, false);
15677         }
15678     }
15679     PAL_EXCEPT_FILTER(FilterVerificationExceptions)
15680     {
15681         verHandleVerificationFailure(block DEBUGARG(false));
15682     }
15683     PAL_ENDTRY
15684
15685     if (compDonotInline())
15686     {
15687         return;
15688     }
15689
15690     assert(!compDonotInline());
15691
15692     markImport = false;
15693
15694 SPILLSTACK:
15695
15696     unsigned    baseTmp             = NO_BASE_TMP; // input temps assigned to successor blocks
15697     bool        reimportSpillClique = false;
15698     BasicBlock* tgtBlock            = nullptr;
15699
15700     /* If the stack is non-empty, we might have to spill its contents */
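    // Illustrative example (a sketch, not taken from any particular method): a conditional
    // expression such as "x = cond ? a : b" compiles to IL in which each arm of the branch leaves a
    // value on the evaluation stack and both arms fall into the same join block. If this block ends
    // with such a value still on the stack, it must be spilled to a temp that the join block (and
    // every other member of its spill clique) reads back on entry.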
15701
15702     if (verCurrentState.esStackDepth != 0)
15703     {
15704         impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something
15705                                   // on the stack, its lifetime is hard to determine; simply
15706                                   // don't reuse such temps.
15707
15708         GenTreePtr addStmt = nullptr;
15709
15710         /* Do the successors of 'block' have any other predecessors?
15711            We do not want to do some of the optimizations related to multiRef
15712            if we can reimport blocks */
15713
15714         unsigned multRef = impCanReimport ? unsigned(~0) : 0;
15715
15716         switch (block->bbJumpKind)
15717         {
15718             case BBJ_COND:
15719
15720                 /* Temporarily remove the 'jtrue' from the end of the tree list */
15721
15722                 assert(impTreeLast);
15723                 assert(impTreeLast->gtOper == GT_STMT);
15724                 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_JTRUE);
15725
15726                 addStmt     = impTreeLast;
15727                 impTreeLast = impTreeLast->gtPrev;
15728
15729                 /* Note if the next block has more than one predecessor */
15730
15731                 multRef |= block->bbNext->bbRefs;
15732
15733                 /* Does the next block have temps assigned? */
15734
15735                 baseTmp  = block->bbNext->bbStkTempsIn;
15736                 tgtBlock = block->bbNext;
15737
15738                 if (baseTmp != NO_BASE_TMP)
15739                 {
15740                     break;
15741                 }
15742
15743                 /* Try the target of the jump then */
15744
15745                 multRef |= block->bbJumpDest->bbRefs;
15746                 baseTmp  = block->bbJumpDest->bbStkTempsIn;
15747                 tgtBlock = block->bbJumpDest;
15748                 break;
15749
15750             case BBJ_ALWAYS:
15751                 multRef |= block->bbJumpDest->bbRefs;
15752                 baseTmp  = block->bbJumpDest->bbStkTempsIn;
15753                 tgtBlock = block->bbJumpDest;
15754                 break;
15755
15756             case BBJ_NONE:
15757                 multRef |= block->bbNext->bbRefs;
15758                 baseTmp  = block->bbNext->bbStkTempsIn;
15759                 tgtBlock = block->bbNext;
15760                 break;
15761
15762             case BBJ_SWITCH:
15763
15764                 BasicBlock** jmpTab;
15765                 unsigned     jmpCnt;
15766
15767                 /* Temporarily remove the GT_SWITCH from the end of the tree list */
15768
15769                 assert(impTreeLast);
15770                 assert(impTreeLast->gtOper == GT_STMT);
15771                 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_SWITCH);
15772
15773                 addStmt     = impTreeLast;
15774                 impTreeLast = impTreeLast->gtPrev;
15775
15776                 jmpCnt = block->bbJumpSwt->bbsCount;
15777                 jmpTab = block->bbJumpSwt->bbsDstTab;
15778
15779                 do
15780                 {
15781                     tgtBlock = (*jmpTab);
15782
15783                     multRef |= tgtBlock->bbRefs;
15784
15785                     // Thanks to spill cliques, we should have assigned all or none
15786                     assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn));
15787                     baseTmp = tgtBlock->bbStkTempsIn;
15788                     if (multRef > 1)
15789                     {
15790                         break;
15791                     }
15792                 } while (++jmpTab, --jmpCnt);
15793
15794                 break;
15795
15796             case BBJ_CALLFINALLY:
15797             case BBJ_EHCATCHRET:
15798             case BBJ_RETURN:
15799             case BBJ_EHFINALLYRET:
15800             case BBJ_EHFILTERRET:
15801             case BBJ_THROW:
15802                 NO_WAY("can't have 'unreached' end of BB with non-empty stack");
15803                 break;
15804
15805             default:
15806                 noway_assert(!"Unexpected bbJumpKind");
15807                 break;
15808         }
15809
15810         assert(multRef >= 1);
15811
15812         /* Do we have a base temp number? */
15813
15814         bool newTemps = (baseTmp == NO_BASE_TMP);
15815
15816         if (newTemps)
15817         {
15818             /* Grab enough temps for the whole stack */
15819             baseTmp = impGetSpillTmpBase(block);
15820         }
15821
15822         /* Spill all stack entries into temps */
15823         unsigned level, tempNum;
15824
15825         JITDUMP("\nSpilling stack entries into temps\n");
15826         for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++)
15827         {
15828             GenTreePtr tree = verCurrentState.esStack[level].val;
15829
15830             /* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from
15831                the other. This should merge to a byref in unverifiable code.
15832                However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the
15833                successor would be imported assuming there was a TYP_I_IMPL on
15834                the stack. Thus the value would not get GC-tracked. Hence,
15835                change the temp to TYP_BYREF and reimport the successors.
15836                Note: We should only allow this in unverifiable code.
15837             */
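            // Illustrative IL shape for the scenario above (a sketch, not from a real method): one
            // arm of a branch pushes "ldc.i4.0" (a plain integer zero), the other pushes "ldloca"
            // (a byref), and both arms flow to the same join block. Whichever arm is imported first
            // decides the initial type of the shared spill temp; the check below retypes a
            // TYP_I_IMPL temp to TYP_BYREF and reimports the successors so the value stays GC-tracked.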
15838             if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL && !verNeedsVerification())
15839             {
15840                 lvaTable[tempNum].lvType = TYP_BYREF;
15841                 impReimportMarkSuccessors(block);
15842                 markImport = true;
15843             }
15844
15845 #ifdef _TARGET_64BIT_
15846             if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT)
15847             {
15848                 if (tiVerificationNeeded && tgtBlock->bbEntryState != nullptr &&
15849                     (tgtBlock->bbFlags & BBF_FAILED_VERIFICATION) == 0)
15850                 {
15851                     // Merge the current state into the entry state of block;
15852                     // the call to verMergeEntryStates must have changed
15853                     // the entry state of the block by merging the int local var
15854                     // and the native-int stack entry.
15855                     bool changed = false;
15856                     if (verMergeEntryStates(tgtBlock, &changed))
15857                     {
15858                         impRetypeEntryStateTemps(tgtBlock);
15859                         impReimportBlockPending(tgtBlock);
15860                         assert(changed);
15861                     }
15862                     else
15863                     {
15864                         tgtBlock->bbFlags |= BBF_FAILED_VERIFICATION;
15865                         break;
15866                     }
15867                 }
15868
15869                 // Some other block in the spill clique set this to "int", but now we have "native int".
15870                 // Change the type and go back to re-import any blocks that used the wrong type.
15871                 lvaTable[tempNum].lvType = TYP_I_IMPL;
15872                 reimportSpillClique      = true;
15873             }
15874             else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL)
15875             {
15876                 // Spill clique has decided this should be "native int", but this block only pushes an "int".
15877                 // Insert a sign-extension to "native int" so we match the clique.
15878                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
15879             }
15880
15881             // Consider the case where one branch left a 'byref' on the stack and the other leaves
15882             // an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same
15883             // size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64
15884             // behavior instead of asserting and then generating bad code (where we save/restore the
15885             // low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been
15886             // imported already, we need to change the type of the local and reimport the spill clique.
15887             // If the 'byref' side has imported, we insert a cast from int to 'native int' to match
15888             // the 'byref' size.
15889             if (!tiVerificationNeeded)
15890             {
15891                 if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT)
15892                 {
15893                     // Some other block in the spill clique set this to "int", but now we have "byref".
15894                     // Change the type and go back to re-import any blocks that used the wrong type.
15895                     lvaTable[tempNum].lvType = TYP_BYREF;
15896                     reimportSpillClique      = true;
15897                 }
15898                 else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF)
15899                 {
15900                     // Spill clique has decided this should be "byref", but this block only pushes an "int".
15901                     // Insert a sign-extension to "native int" so we match the clique size.
15902                     verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
15903                 }
15904             }
15905 #endif // _TARGET_64BIT_
15906
15907 #if FEATURE_X87_DOUBLES
15908             // X87 stack doesn't differentiate between float/double
15909             // so promoting is no big deal.
15910             // For everybody else, keep it as float until we have a collision and then promote,
15911             // just like for x64's TYP_INT<->TYP_I_IMPL.
15912
15913             if (multRef > 1 && tree->gtType == TYP_FLOAT)
15914             {
15915                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
15916             }
15917
15918 #else // !FEATURE_X87_DOUBLES
15919
15920             if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT)
15921             {
15922                 // Some other block in the spill clique set this to "float", but now we have "double".
15923                 // Change the type and go back to re-import any blocks that used the wrong type.
15924                 lvaTable[tempNum].lvType = TYP_DOUBLE;
15925                 reimportSpillClique      = true;
15926             }
15927             else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE)
15928             {
15929                 // Spill clique has decided this should be "double", but this block only pushes a "float".
15930                 // Insert a cast to "double" so we match the clique.
15931                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
15932             }
15933
15934 #endif // FEATURE_X87_DOUBLES
15935
15936             /* If addStmt has a reference to tempNum (can only happen if we
15937                are spilling to the temps already used by a previous block),
15938                we need to spill addStmt */
15939
15940             if (addStmt && !newTemps && gtHasRef(addStmt->gtStmt.gtStmtExpr, tempNum, false))
15941             {
15942                 GenTreePtr addTree = addStmt->gtStmt.gtStmtExpr;
15943
15944                 if (addTree->gtOper == GT_JTRUE)
15945                 {
15946                     GenTreePtr relOp = addTree->gtOp.gtOp1;
15947                     assert(relOp->OperIsCompare());
15948
15949                     var_types type = genActualType(relOp->gtOp.gtOp1->TypeGet());
15950
15951                     if (gtHasRef(relOp->gtOp.gtOp1, tempNum, false))
15952                     {
15953                         unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1"));
15954                         impAssignTempGen(temp, relOp->gtOp.gtOp1, level);
15955                         type              = genActualType(lvaTable[temp].TypeGet());
15956                         relOp->gtOp.gtOp1 = gtNewLclvNode(temp, type);
15957                     }
15958
15959                     if (gtHasRef(relOp->gtOp.gtOp2, tempNum, false))
15960                     {
15961                         unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2"));
15962                         impAssignTempGen(temp, relOp->gtOp.gtOp2, level);
15963                         type              = genActualType(lvaTable[temp].TypeGet());
15964                         relOp->gtOp.gtOp2 = gtNewLclvNode(temp, type);
15965                     }
15966                 }
15967                 else
15968                 {
15969                     assert(addTree->gtOper == GT_SWITCH && genActualType(addTree->gtOp.gtOp1->gtType) == TYP_I_IMPL);
15970
15971                     unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH"));
15972                     impAssignTempGen(temp, addTree->gtOp.gtOp1, level);
15973                     addTree->gtOp.gtOp1 = gtNewLclvNode(temp, TYP_I_IMPL);
15974                 }
15975             }
15976
15977             /* Spill the stack entry, and replace with the temp */
15978
15979             if (!impSpillStackEntry(level, tempNum
15980 #ifdef DEBUG
15981                                     ,
15982                                     true, "Spill Stack Entry"
15983 #endif
15984                                     ))
15985             {
15986                 if (markImport)
15987                 {
15988                     BADCODE("bad stack state");
15989                 }
15990
15991                 // Oops. Something went wrong when spilling. Bad code.
15992                 verHandleVerificationFailure(block DEBUGARG(true));
15993
15994                 goto SPILLSTACK;
15995             }
15996         }
15997
15998         /* Put back the 'jtrue'/'switch' if we removed it earlier */
15999
16000         if (addStmt)
16001         {
16002             impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE);
16003         }
16004     }
16005
16006     // Some of the append/spill logic works on compCurBB
16007
16008     assert(compCurBB == block);
16009
16010     /* Save the tree list in the block */
16011     impEndTreeList(block);
16012
16013     // impEndTreeList sets BBF_IMPORTED on the block
16014     // We do *NOT* want to set it later than this because
16015     // impReimportSpillClique might clear it if this block is both a
16016     // predecessor and successor in the current spill clique
16017     assert(block->bbFlags & BBF_IMPORTED);
16018
16019     // If we had a int/native int, or float/double collision, we need to re-import
16020     if (reimportSpillClique)
16021     {
16022         // This will re-import all the successors of block (as well as each of their predecessors)
16023         impReimportSpillClique(block);
16024
16025         // For blocks that haven't been imported yet, we still need to mark them as pending import.
16026         for (unsigned i = 0; i < block->NumSucc(); i++)
16027         {
16028             BasicBlock* succ = block->GetSucc(i);
16029             if ((succ->bbFlags & BBF_IMPORTED) == 0)
16030             {
16031                 impImportBlockPending(succ);
16032             }
16033         }
16034     }
16035     else // the normal case
16036     {
16037         // otherwise just import the successors of block
16038
16039         /* Does this block jump to any other blocks? */
16040         for (unsigned i = 0; i < block->NumSucc(); i++)
16041         {
16042             impImportBlockPending(block->GetSucc(i));
16043         }
16044     }
16045 }
16046 #ifdef _PREFAST_
16047 #pragma warning(pop)
16048 #endif
16049
16050 /*****************************************************************************/
16051 //
16052 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16053 // necessary (and ensures that it is a member of the set of BBs on the list, by setting its byte in
16054 // impPendingBlockMembers).  Merges the current verification state into the verification state of "block"
16055 // (its "pre-state").
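//
// A typical flow (sketch): impImportBlock calls this for each successor of the block it just
// imported, and the main loop in impImport keeps popping entries off impPendingList until the
// list is empty.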
16056
16057 void Compiler::impImportBlockPending(BasicBlock* block)
16058 {
16059 #ifdef DEBUG
16060     if (verbose)
16061     {
16062         printf("\nimpImportBlockPending for BB%02u\n", block->bbNum);
16063     }
16064 #endif
16065
16066     // We will add a block to the pending set if it has not already been imported (or needs to be re-imported),
16067     // or if it has, but merging in a predecessor's post-state changes the block's pre-state.
16068     // (When we're doing verification, we always attempt the merge to detect verification errors.)
16069
16070     // If the block has not been imported, add to pending set.
16071     bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0);
16072
16073     // Initialize bbEntryState just the first time we try to add this block to the pending list
16074     // Just because bbEntryState is NULL doesn't mean the pre-state wasn't previously set.
16075     // We use NULL to indicate the 'common' state to avoid memory allocation
16076     if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) &&
16077         (impGetPendingBlockMember(block) == 0))
16078     {
16079         verInitBBEntryState(block, &verCurrentState);
16080         assert(block->bbStkDepth == 0);
16081         block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth);
16082         assert(addToPending);
16083         assert(impGetPendingBlockMember(block) == 0);
16084     }
16085     else
16086     {
16087         // The stack should have the same height on entry to the block from all its predecessors.
16088         if (block->bbStkDepth != verCurrentState.esStackDepth)
16089         {
16090 #ifdef DEBUG
16091             char buffer[400];
16092             sprintf_s(buffer, sizeof(buffer),
16093                       "Block at offset %4.4x to %4.4x in %s entered with different stack depths.\n"
16094                       "Previous depth was %d, current depth is %d",
16095                       block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth,
16096                       verCurrentState.esStackDepth);
16097             buffer[400 - 1] = 0;
16098             NO_WAY(buffer);
16099 #else
16100             NO_WAY("Block entered with different stack depths");
16101 #endif
16102         }
16103
16104         // Additionally, if we need to verify, merge the verification state.
16105         if (tiVerificationNeeded)
16106         {
16107             // Merge the current state into the entry state of block; if this does not change the entry state
16108             // by merging, do not add the block to the pending-list.
16109             bool changed = false;
16110             if (!verMergeEntryStates(block, &changed))
16111             {
16112                 block->bbFlags |= BBF_FAILED_VERIFICATION;
16113                 addToPending = true; // We will pop it off, and check the flag set above.
16114             }
16115             else if (changed)
16116             {
16117                 addToPending = true;
16118
16119                 JITDUMP("Adding BB%02u to pending set due to new merge result\n", block->bbNum);
16120             }
16121         }
16122
16123         if (!addToPending)
16124         {
16125             return;
16126         }
16127
16128         if (block->bbStkDepth > 0)
16129         {
16130             // We need to fix the types of any spill temps that might have changed:
16131             //   int->native int, float->double, int->byref, etc.
16132             impRetypeEntryStateTemps(block);
16133         }
16134
16135         // OK, we must add to the pending list, if it's not already in it.
16136         if (impGetPendingBlockMember(block) != 0)
16137         {
16138             return;
16139         }
16140     }
16141
16142     // Get an entry to add to the pending list
16143
16144     PendingDsc* dsc;
16145
16146     if (impPendingFree)
16147     {
16148         // We can reuse one of the freed up dscs.
16149         dsc            = impPendingFree;
16150         impPendingFree = dsc->pdNext;
16151     }
16152     else
16153     {
16154         // We have to create a new dsc
16155         dsc = new (this, CMK_Unknown) PendingDsc;
16156     }
16157
16158     dsc->pdBB                 = block;
16159     dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth;
16160     dsc->pdThisPtrInit        = verCurrentState.thisInitialized;
16161
16162     // Save the stack trees for later
16163
16164     if (verCurrentState.esStackDepth)
16165     {
16166         impSaveStackState(&dsc->pdSavedStack, false);
16167     }
16168
16169     // Add the entry to the pending list
16170
16171     dsc->pdNext    = impPendingList;
16172     impPendingList = dsc;
16173     impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16174
16175     // Various assertions require us to now consider the block as not imported (at least for
16176     // the final time...)
16177     block->bbFlags &= ~BBF_IMPORTED;
16178
16179 #ifdef DEBUG
16180     if (verbose && 0)
16181     {
16182         printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16183     }
16184 #endif
16185 }
16186
16187 /*****************************************************************************/
16188 //
16189 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16190 // necessary (and ensures that it is a member of the set of BBs on the list, by setting its byte in
16191 // impPendingBlockMembers).  Does *NOT* change the existing "pre-state" of the block.
16192
16193 void Compiler::impReimportBlockPending(BasicBlock* block)
16194 {
16195     JITDUMP("\nimpReimportBlockPending for BB%02u", block->bbNum);
16196
16197     assert(block->bbFlags & BBF_IMPORTED);
16198
16199     // OK, we must add to the pending list, if it's not already in it.
16200     if (impGetPendingBlockMember(block) != 0)
16201     {
16202         return;
16203     }
16204
16205     // Get an entry to add to the pending list
16206
16207     PendingDsc* dsc;
16208
16209     if (impPendingFree)
16210     {
16211         // We can reuse one of the freed up dscs.
16212         dsc            = impPendingFree;
16213         impPendingFree = dsc->pdNext;
16214     }
16215     else
16216     {
16217         // We have to create a new dsc
16218         dsc = new (this, CMK_ImpStack) PendingDsc;
16219     }
16220
16221     dsc->pdBB = block;
16222
16223     if (block->bbEntryState)
16224     {
16225         dsc->pdThisPtrInit        = block->bbEntryState->thisInitialized;
16226         dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth;
16227         dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack;
16228     }
16229     else
16230     {
16231         dsc->pdThisPtrInit        = TIS_Bottom;
16232         dsc->pdSavedStack.ssDepth = 0;
16233         dsc->pdSavedStack.ssTrees = nullptr;
16234     }
16235
16236     // Add the entry to the pending list
16237
16238     dsc->pdNext    = impPendingList;
16239     impPendingList = dsc;
16240     impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16241
16242     // Various assertions require us to now consider the block as not imported (at least for
16243     // the final time...)
16244     block->bbFlags &= ~BBF_IMPORTED;
16245
16246 #ifdef DEBUG
16247     if (verbose && 0)
16248     {
16249         printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16250     }
16251 #endif
16252 }
16253
16254 void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp)
16255 {
16256     if (comp->impBlockListNodeFreeList == nullptr)
16257     {
16258         return (BlockListNode*)comp->compGetMem(sizeof(BlockListNode), CMK_BasicBlock);
16259     }
16260     else
16261     {
16262         BlockListNode* res             = comp->impBlockListNodeFreeList;
16263         comp->impBlockListNodeFreeList = res->m_next;
16264         return res;
16265     }
16266 }
16267
16268 void Compiler::FreeBlockListNode(Compiler::BlockListNode* node)
16269 {
16270     node->m_next             = impBlockListNodeFreeList;
16271     impBlockListNodeFreeList = node;
16272 }
16273
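// Walk the spill clique that "block" belongs to as a predecessor, invoking "callback" once for each
// member discovered in either direction. The idea (a sketch): alternately take the successors of
// every known predecessor member and the (cheap) predecessors of every known successor member,
// repeating until neither worklist grows; the membership bytes set via impSpillCliqueSetMember
// prevent revisiting blocks.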
16274 void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback)
16275 {
16276     bool toDo = true;
16277
16278     noway_assert(!fgComputePredsDone);
16279     if (!fgCheapPredsValid)
16280     {
16281         fgComputeCheapPreds();
16282     }
16283
16284     BlockListNode* succCliqueToDo = nullptr;
16285     BlockListNode* predCliqueToDo = new (this) BlockListNode(block);
16286     while (toDo)
16287     {
16288         toDo = false;
16289         // Look at the successors of every member of the predecessor to-do list.
16290         while (predCliqueToDo != nullptr)
16291         {
16292             BlockListNode* node = predCliqueToDo;
16293             predCliqueToDo      = node->m_next;
16294             BasicBlock* blk     = node->m_blk;
16295             FreeBlockListNode(node);
16296
16297             for (unsigned succNum = 0; succNum < blk->NumSucc(); succNum++)
16298             {
16299                 BasicBlock* succ = blk->GetSucc(succNum);
16300                 // If it's not already in the clique, add it, and also add it
16301                 // as a member of the successor "toDo" set.
16302                 if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0)
16303                 {
16304                     callback->Visit(SpillCliqueSucc, succ);
16305                     impSpillCliqueSetMember(SpillCliqueSucc, succ, 1);
16306                     succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo);
16307                     toDo           = true;
16308                 }
16309             }
16310         }
16311         // Look at the predecessors of every member of the successor to-do list.
16312         while (succCliqueToDo != nullptr)
16313         {
16314             BlockListNode* node = succCliqueToDo;
16315             succCliqueToDo      = node->m_next;
16316             BasicBlock* blk     = node->m_blk;
16317             FreeBlockListNode(node);
16318
16319             for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next)
16320             {
16321                 BasicBlock* predBlock = pred->block;
16322                 // If it's not already in the clique, add it, and also add it
16323                 // as a member of the predecessor "toDo" set.
16324                 if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0)
16325                 {
16326                     callback->Visit(SpillCliquePred, predBlock);
16327                     impSpillCliqueSetMember(SpillCliquePred, predBlock, 1);
16328                     predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo);
16329                     toDo           = true;
16330                 }
16331             }
16332         }
16333     }
16334
16335     // If this fails, it means we didn't walk the spill clique properly and somehow managed to
16336     // miss walking back to include the predecessor we started from.
16337     // The most likely cause is missing or out-of-date bbPreds.
16338     assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0);
16339 }
16340
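// Callback used from impGetSpillTmpBase: record the chosen base temp on every clique member, as the
// incoming base (bbStkTempsIn) for successor members and the outgoing base (bbStkTempsOut) for
// predecessor members.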
16341 void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
16342 {
16343     if (predOrSucc == SpillCliqueSucc)
16344     {
16345         assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor.
16346         blk->bbStkTempsIn = m_baseTmp;
16347     }
16348     else
16349     {
16350         assert(predOrSucc == SpillCliquePred);
16351         assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor.
16352         blk->bbStkTempsOut = m_baseTmp;
16353     }
16354 }
16355
16356 void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
16357 {
16358     // For Preds we could be a little smarter and just find the existing store
16359     // and re-type it/add a cast, but that is complicated and hopefully very rare, so
16360     // just re-import the whole block (just like we do for successors)
16361
16362     if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0))
16363     {
16364         // If we haven't imported this block and we're not going to (because it isn't on
16365         // the pending list) then just ignore it for now.
16366
16367         // This block has either never been imported (EntryState == NULL) or it failed
16368         // verification. Neither state requires us to force it to be imported now.
16369         assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION));
16370         return;
16371     }
16372
16373     // For successors we have a valid verCurrentState, so just mark them for reimport
16374     // the 'normal' way
16375     // Unlike predecessors, we *DO* need to reimport the current block because the
16376     // initial import had the wrong entry state types.
16377     // Similarly, blocks that are currently on the pending list still need to call
16378     // impImportBlockPending to fixup their entry state.
16379     if (predOrSucc == SpillCliqueSucc)
16380     {
16381         m_pComp->impReimportMarkBlock(blk);
16382
16383         // Set the current stack state to that of the blk->bbEntryState
16384         m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState);
16385         assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry());
16386
16387         m_pComp->impImportBlockPending(blk);
16388     }
16389     else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0))
16390     {
16391         // As described above, we are only visiting predecessors so they can
16392         // add the appropriate casts; since we have already done that for the current
16393         // block, it does not need to be reimported.
16394         // Nor do we need to reimport blocks that are still pending, but not yet
16395         // imported.
16396         //
16397         // For predecessors, we have no state to seed the EntryState, so we just have
16398         // to assume the existing one is correct.
16399         // If the block is also a successor, it will get the EntryState properly
16400         // updated when it is visited as a successor in the above "if" block.
16401         assert(predOrSucc == SpillCliquePred);
16402         m_pComp->impReimportBlockPending(blk);
16403     }
16404 }
16405
16406 // Re-type the incoming lclVar nodes to match the varDsc.
16407 void Compiler::impRetypeEntryStateTemps(BasicBlock* blk)
16408 {
16409     if (blk->bbEntryState != nullptr)
16410     {
16411         EntryState* es = blk->bbEntryState;
16412         for (unsigned level = 0; level < es->esStackDepth; level++)
16413         {
16414             GenTreePtr tree = es->esStack[level].val;
16415             if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD))
16416             {
16417                 unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
16418                 noway_assert(lclNum < lvaCount);
16419                 LclVarDsc* varDsc              = lvaTable + lclNum;
16420                 es->esStack[level].val->gtType = varDsc->TypeGet();
16421             }
16422         }
16423     }
16424 }
16425
16426 unsigned Compiler::impGetSpillTmpBase(BasicBlock* block)
16427 {
16428     if (block->bbStkTempsOut != NO_BASE_TMP)
16429     {
16430         return block->bbStkTempsOut;
16431     }
16432
16433 #ifdef DEBUG
16434     if (verbose)
16435     {
16436         printf("\n*************** In impGetSpillTmpBase(BB%02u)\n", block->bbNum);
16437     }
16438 #endif // DEBUG
16439
16440     // Otherwise, choose one, and propagate to all members of the spill clique.
16441     // Grab enough temps for the whole stack.
16442     unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries"));
16443     SetSpillTempsBase callback(baseTmp);
16444
16445     // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor
16446     // to one spill clique, and similarly can only be the successor to one spill clique.
16447     impWalkSpillCliqueFromPred(block, &callback);
16448
16449     return baseTmp;
16450 }
16451
16452 void Compiler::impReimportSpillClique(BasicBlock* block)
16453 {
16454 #ifdef DEBUG
16455     if (verbose)
16456     {
16457         printf("\n*************** In impReimportSpillClique(BB%02u)\n", block->bbNum);
16458     }
16459 #endif // DEBUG
16460
16461     // If we get here, it is because this block is already part of a spill clique
16462     // and one predecessor had an outgoing live stack slot of type int, and this
16463     // block has an outgoing live stack slot of type native int.
16464     // We need to reset these before traversal because they have already been set
16465     // by the previous walk to determine all the members of the spill clique.
16466     impInlineRoot()->impSpillCliquePredMembers.Reset();
16467     impInlineRoot()->impSpillCliqueSuccMembers.Reset();
16468
16469     ReimportSpillClique callback(this);
16470
16471     impWalkSpillCliqueFromPred(block, &callback);
16472 }
16473
16474 // Set the pre-state of "block" (which should not have a pre-state allocated) to
16475 // a copy of "srcState", cloning tree pointers as required.
16476 void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState)
16477 {
16478     if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom)
16479     {
16480         block->bbEntryState = nullptr;
16481         return;
16482     }
16483
16484     block->bbEntryState = (EntryState*)compGetMemA(sizeof(EntryState));
16485
16486     // block->bbEntryState.esRefcount = 1;
16487
16488     block->bbEntryState->esStackDepth    = srcState->esStackDepth;
16489     block->bbEntryState->thisInitialized = TIS_Bottom;
16490
16491     if (srcState->esStackDepth > 0)
16492     {
16493         block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]);
16494         unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry);
16495
16496         memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize);
16497         for (unsigned level = 0; level < srcState->esStackDepth; level++)
16498         {
16499             GenTreePtr tree                         = srcState->esStack[level].val;
16500             block->bbEntryState->esStack[level].val = gtCloneExpr(tree);
16501         }
16502     }
16503
16504     if (verTrackObjCtorInitState)
16505     {
16506         verSetThisInit(block, srcState->thisInitialized);
16507     }
16508
16509     return;
16510 }
16511
16512 void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis)
16513 {
16514     assert(tis != TIS_Bottom); // Precondition.
16515     if (block->bbEntryState == nullptr)
16516     {
16517         block->bbEntryState = new (this, CMK_Unknown) EntryState();
16518     }
16519
16520     block->bbEntryState->thisInitialized = tis;
16521 }
16522
16523 /*
16524  * Resets the current state to the state at the start of the basic block
16525  */
16526 void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState)
16527 {
16528
16529     if (block->bbEntryState == nullptr)
16530     {
16531         destState->esStackDepth    = 0;
16532         destState->thisInitialized = TIS_Bottom;
16533         return;
16534     }
16535
16536     destState->esStackDepth = block->bbEntryState->esStackDepth;
16537
16538     if (destState->esStackDepth > 0)
16539     {
16540         unsigned stackSize = destState->esStackDepth * sizeof(StackEntry);
16541
16542         memcpy(destState->esStack, block->bbStackOnEntry(), stackSize);
16543     }
16544
16545     destState->thisInitialized = block->bbThisOnEntry();
16546
16547     return;
16548 }
16549
16550 ThisInitState BasicBlock::bbThisOnEntry()
16551 {
16552     return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom;
16553 }
16554
16555 unsigned BasicBlock::bbStackDepthOnEntry()
16556 {
16557     return (bbEntryState ? bbEntryState->esStackDepth : 0);
16558 }
16559
16560 void BasicBlock::bbSetStack(void* stackBuffer)
16561 {
16562     assert(bbEntryState);
16563     assert(stackBuffer);
16564     bbEntryState->esStack = (StackEntry*)stackBuffer;
16565 }
16566
16567 StackEntry* BasicBlock::bbStackOnEntry()
16568 {
16569     assert(bbEntryState);
16570     return bbEntryState->esStack;
16571 }
16572
16573 void Compiler::verInitCurrentState()
16574 {
16575     verTrackObjCtorInitState        = FALSE;
16576     verCurrentState.thisInitialized = TIS_Bottom;
16577
16578     if (tiVerificationNeeded)
16579     {
16580         // Track this ptr initialization
16581         if (!info.compIsStatic && (info.compFlags & CORINFO_FLG_CONSTRUCTOR) && lvaTable[0].lvVerTypeInfo.IsObjRef())
16582         {
16583             verTrackObjCtorInitState        = TRUE;
16584             verCurrentState.thisInitialized = TIS_Uninit;
16585         }
16586     }
16587
16588     // initialize stack info
16589
16590     verCurrentState.esStackDepth = 0;
16591     assert(verCurrentState.esStack != nullptr);
16592
16593     // copy current state to entry state of first BB
16594     verInitBBEntryState(fgFirstBB, &verCurrentState);
16595 }
16596
16597 Compiler* Compiler::impInlineRoot()
16598 {
16599     if (impInlineInfo == nullptr)
16600     {
16601         return this;
16602     }
16603     else
16604     {
16605         return impInlineInfo->InlineRoot;
16606     }
16607 }
16608
16609 BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk)
16610 {
16611     if (predOrSucc == SpillCliquePred)
16612     {
16613         return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd());
16614     }
16615     else
16616     {
16617         assert(predOrSucc == SpillCliqueSucc);
16618         return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd());
16619     }
16620 }
16621
16622 void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val)
16623 {
16624     if (predOrSucc == SpillCliquePred)
16625     {
16626         impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val);
16627     }
16628     else
16629     {
16630         assert(predOrSucc == SpillCliqueSucc);
16631         impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val);
16632     }
16633 }
16634
16635 /*****************************************************************************
16636  *
16637  *  Convert the instrs ("import") into our internal format (trees). The
16638  *  basic flowgraph has already been constructed and is passed in.
16639  */
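//
// Rough shape of the algorithm (a sketch): allocate the evaluation stack, seed the pending list with
// the first non-internal block, then repeatedly pop a pending block, restore its entry stack state,
// and import it; impImportBlock pushes any successors whose pre-state changed back onto the list.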
16640
16641 void Compiler::impImport(BasicBlock* method)
16642 {
16643 #ifdef DEBUG
16644     if (verbose)
16645     {
16646         printf("*************** In impImport() for %s\n", info.compFullName);
16647     }
16648 #endif
16649
16650     /* Allocate the stack contents */
16651
16652     if (info.compMaxStack <= sizeof(impSmallStack) / sizeof(impSmallStack[0]))
16653     {
16654         /* Use local variable, don't waste time allocating on the heap */
16655
16656         impStkSize              = sizeof(impSmallStack) / sizeof(impSmallStack[0]);
16657         verCurrentState.esStack = impSmallStack;
16658     }
16659     else
16660     {
16661         impStkSize              = info.compMaxStack;
16662         verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
16663     }
16664
16665     // initialize the entry state at start of method
16666     verInitCurrentState();
16667
16668     // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase).
16669     Compiler* inlineRoot = impInlineRoot();
16670     if (this == inlineRoot) // These are only used on the root of the inlining tree.
16671     {
16672         // We have initialized these previously, but to size 0.  Make them larger.
16673         impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2);
16674         impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2);
16675         impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2);
16676     }
16677     inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2);
16678     inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2);
16679     inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2);
16680     impBlockListNodeFreeList = nullptr;
16681
16682 #ifdef DEBUG
16683     impLastILoffsStmt   = nullptr;
16684     impNestedStackSpill = false;
16685 #endif
16686     impBoxTemp = BAD_VAR_NUM;
16687
16688     impPendingList = impPendingFree = nullptr;
16689
16690     /* Add the entry-point to the worker-list */
16691
16692     // Skip leading internal blocks. There can be one as a leading scratch BB, and more
16693     // from EH normalization.
16694     // NOTE: It might be possible to always just put fgFirstBB on the pending list, and let everything else just fall
16695     // out.
16696     for (; method->bbFlags & BBF_INTERNAL; method = method->bbNext)
16697     {
16698         // Treat these as imported.
16699         assert(method->bbJumpKind == BBJ_NONE); // We assume all the leading ones are fallthrough.
16700         JITDUMP("Marking leading BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", method->bbNum);
16701         method->bbFlags |= BBF_IMPORTED;
16702     }
16703
16704     impImportBlockPending(method);
16705
16706     /* Import blocks in the worker-list until there are no more */
16707
16708     while (impPendingList)
16709     {
16710         /* Remove the entry at the front of the list */
16711
16712         PendingDsc* dsc = impPendingList;
16713         impPendingList  = impPendingList->pdNext;
16714         impSetPendingBlockMember(dsc->pdBB, 0);
16715
16716         /* Restore the stack state */
16717
16718         verCurrentState.thisInitialized = dsc->pdThisPtrInit;
16719         verCurrentState.esStackDepth    = dsc->pdSavedStack.ssDepth;
16720         if (verCurrentState.esStackDepth)
16721         {
16722             impRestoreStackState(&dsc->pdSavedStack);
16723         }
16724
16725         /* Add the entry to the free list for reuse */
16726
16727         dsc->pdNext    = impPendingFree;
16728         impPendingFree = dsc;
16729
16730         /* Now import the block */
16731
16732         if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION)
16733         {
16734
16735 #ifdef _TARGET_64BIT_
16736             // On AMD64, during verification we have to match JIT64 behavior since the VM is very tightly
16737             // coupled with the JIT64 IL Verification logic.  Look inside verHandleVerificationFailure
16738             // method for further explanation on why we raise this exception instead of making the jitted
16739             // code throw the verification exception during execution.
16740             if (tiVerificationNeeded && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
16741             {
16742                 BADCODE("Basic block marked as not verifiable");
16743             }
16744             else
16745 #endif // _TARGET_64BIT_
16746             {
16747                 verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true));
16748                 impEndTreeList(dsc->pdBB);
16749             }
16750         }
16751         else
16752         {
16753             impImportBlock(dsc->pdBB);
16754
16755             if (compDonotInline())
16756             {
16757                 return;
16758             }
16759             if (compIsForImportOnly() && !tiVerificationNeeded)
16760             {
16761                 return;
16762             }
16763         }
16764     }
16765
16766 #ifdef DEBUG
16767     if (verbose && info.compXcptnsCount)
16768     {
16769         printf("\nAfter impImport() added block for try,catch,finally");
16770         fgDispBasicBlocks();
16771         printf("\n");
16772     }
16773
16774     // Used in impImportBlockPending() for STRESS_CHK_REIMPORT
16775     for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
16776     {
16777         block->bbFlags &= ~BBF_VISITED;
16778     }
16779 #endif
16780
16781     assert(!compIsForInlining() || !tiVerificationNeeded);
16782 }
16783
16784 // Checks if a typeinfo (usually stored in the type stack) is a struct.
16785 // The invariant here is that if it's not a ref or a method and has a class handle,
16786 // it's a valuetype.
16787 bool Compiler::impIsValueType(typeInfo* pTypeInfo)
16788 {
16789     if (pTypeInfo && pTypeInfo->IsValueClassWithClsHnd())
16790     {
16791         return true;
16792     }
16793     else
16794     {
16795         return false;
16796     }
16797 }
16798
16799 /*****************************************************************************
16800  *  Check to see if the tree is the address of a local or
16801  *  the address of a field in a local.
16802  *
16803  *  *lclVarTreeOut will contain the GT_LCL_VAR tree when it returns TRUE.
16804
16805  */
16806
16807 BOOL Compiler::impIsAddressInLocal(GenTreePtr tree, GenTreePtr* lclVarTreeOut)
16808 {
16809     if (tree->gtOper != GT_ADDR)
16810     {
16811         return FALSE;
16812     }
16813
16814     GenTreePtr op = tree->gtOp.gtOp1;
16815     while (op->gtOper == GT_FIELD)
16816     {
16817         op = op->gtField.gtFldObj;
16818         if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL.
16819         {
16820             op = op->gtOp.gtOp1;
16821         }
16822         else
16823         {
16824             return false;
16825         }
16826     }
16827
16828     if (op->gtOper == GT_LCL_VAR)
16829     {
16830         *lclVarTreeOut = op;
16831         return TRUE;
16832     }
16833     else
16834     {
16835         return FALSE;
16836     }
16837 }
16838
16839 //------------------------------------------------------------------------
16840 // impMakeDiscretionaryInlineObservations: make observations that help
16841 // determine the profitability of a discretionary inline
16842 //
16843 // Arguments:
16844 //    pInlineInfo -- InlineInfo for the inline, or null for the prejit root
16845 //    inlineResult -- InlineResult accumulating information about this inline
16846 //
16847 // Notes:
16848 //    If inlining or prejitting the root, this method also makes
16849 //    various observations about the method that factor into inline
16850 //    decisions. It sets `compNativeSizeEstimate` as a side effect.
16851
16852 void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult)
16853 {
16854     assert(pInlineInfo != nullptr && compIsForInlining() || // Perform the actual inlining.
16855            pInlineInfo == nullptr && !compIsForInlining()   // Calculate the static inlining hint for ngen.
16856            );
16857
16858     // If we're really inlining, we should just have one result in play.
16859     assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult));
16860
16861     // If this is a "forceinline" method, the JIT probably shouldn't have gone
16862     // to the trouble of estimating the native code size. Even if it did, it
16863     // shouldn't be relying on the result of this method.
16864     assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE);
16865
16866     // Note if the caller contains NEWOBJ or NEWARR.
16867     Compiler* rootCompiler = impInlineRoot();
16868
16869     if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0)
16870     {
16871         inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY);
16872     }
16873
16874     if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0)
16875     {
16876         inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ);
16877     }
16878
16879     bool calleeIsStatic  = (info.compFlags & CORINFO_FLG_STATIC) != 0;
16880     bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0;
16881
16882     if (isSpecialMethod)
16883     {
16884         if (calleeIsStatic)
16885         {
16886             inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR);
16887         }
16888         else
16889         {
16890             inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR);
16891         }
16892     }
16893     else if (!calleeIsStatic)
16894     {
16895         // Callee is an instance method.
16896         //
16897         // Check if the callee has the same 'this' as the root.
16898         if (pInlineInfo != nullptr)
16899         {
16900             GenTreePtr thisArg = pInlineInfo->iciCall->gtCall.gtCallObjp;
16901             assert(thisArg);
16902             bool isSameThis = impIsThis(thisArg);
16903             inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis);
16904         }
16905     }
16906
16907     // Note if the callee's class is a promotable struct
16908     if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0)
16909     {
16910         lvaStructPromotionInfo structPromotionInfo;
16911         lvaCanPromoteStructType(info.compClassHnd, &structPromotionInfo, false);
16912         if (structPromotionInfo.canPromote)
16913         {
16914             inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE);
16915         }
16916     }
16917
16918 #ifdef FEATURE_SIMD
16919
16920     // Note if this method has SIMD args or a SIMD return value
16921     if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn)
16922     {
16923         inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD);
16924     }
16925
16926 #endif // FEATURE_SIMD
16927
16928     // Roughly classify callsite frequency.
16929     InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED;
16930
16931     // If this is a prejit root, or a maximally hot block...
16932     if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->bbWeight >= BB_MAX_WEIGHT))
16933     {
16934         frequency = InlineCallsiteFrequency::HOT;
16935     }
16936     // No training data.  Look for loop-like things.
16937     // We consider a recursive call loop-like.  Do not give the inlining boost to the method itself.
16938     // However, give it to things nearby.
16939     else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) &&
16940              (pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle))
16941     {
16942         frequency = InlineCallsiteFrequency::LOOP;
16943     }
16944     else if ((pInlineInfo->iciBlock->bbFlags & BBF_PROF_WEIGHT) && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT))
16945     {
16946         frequency = InlineCallsiteFrequency::WARM;
16947     }
16948     // Now modify the multiplier based on where we're called from.
16949     else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR))
16950     {
16951         frequency = InlineCallsiteFrequency::RARE;
16952     }
16953     else
16954     {
16955         frequency = InlineCallsiteFrequency::BORING;
16956     }
16957
16958     // Also capture the block weight of the call site.  In the prejit
16959     // root case, assume there's some hot call site for this method.
16960     unsigned weight = 0;
16961
16962     if (pInlineInfo != nullptr)
16963     {
16964         weight = pInlineInfo->iciBlock->bbWeight;
16965     }
16966     else
16967     {
16968         weight = BB_MAX_WEIGHT;
16969     }
16970
16971     inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency));
16972     inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, static_cast<int>(weight));
16973 }
16974
16975 /*****************************************************************************
16976  This method makes a STATIC inlining decision based on the IL code.
16977  It should not make any inlining decision based on the context.
16978  If forceInline is true, then the inlining decision should not depend on
16979  performance heuristics (code size, etc.).
16980  */
16981
16982 void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
16983                               CORINFO_METHOD_INFO*  methInfo,
16984                               bool                  forceInline,
16985                               InlineResult*         inlineResult)
16986 {
16987     unsigned codeSize = methInfo->ILCodeSize;
16988
16989     // We shouldn't have made up our minds yet...
16990     assert(!inlineResult->IsDecided());
16991
16992     if (methInfo->EHcount)
16993     {
16994         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH);
16995         return;
16996     }
16997
16998     if ((methInfo->ILCode == nullptr) || (codeSize == 0))
16999     {
17000         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY);
17001         return;
17002     }
17003
17004     // For now we don't inline varargs (import code can't handle it)
17005
17006     if (methInfo->args.isVarArg())
17007     {
17008         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
17009         return;
17010     }
17011
17012     // Reject if it has too many locals.
17013     // This is currently an implementation limit due to fixed-size arrays in the
17014     // inline info, rather than a performance heuristic.
17015
17016     inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs);
17017
17018     if (methInfo->locals.numArgs > MAX_INL_LCLS)
17019     {
17020         inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS);
17021         return;
17022     }
17023
17024     // Make sure there aren't too many arguments.
17025     // This is currently an implementation limit due to fixed-size arrays in the
17026     // inline info, rather than a performance heuristic.
17027
17028     inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs);
17029
17030     if (methInfo->args.numArgs > MAX_INL_ARGS)
17031     {
17032         inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS);
17033         return;
17034     }
17035
17036     // Note force inline state
17037
17038     inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline);
17039
17040     // Note IL code size
17041
17042     inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize);
17043
17044     if (inlineResult->IsFailure())
17045     {
17046         return;
17047     }
17048
17049     // Make sure maxstack is not too big
17050
17051     inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack);
17052
17053     if (inlineResult->IsFailure())
17054     {
17055         return;
17056     }
17057 }
17058
17059 /*****************************************************************************
 Check whether the given call can be inlined: fetch the callee's method info,
 run the static IL checks, give the VM the final say, and on success build the
 InlineCandidateInfo that is returned through ppInlineCandidateInfo.
17060  */
17061
17062 void Compiler::impCheckCanInline(GenTreePtr             call,
17063                                  CORINFO_METHOD_HANDLE  fncHandle,
17064                                  unsigned               methAttr,
17065                                  CORINFO_CONTEXT_HANDLE exactContextHnd,
17066                                  InlineCandidateInfo**  ppInlineCandidateInfo,
17067                                  InlineResult*          inlineResult)
17068 {
17069     // Either the EE or the JIT might throw exceptions below.
17070     // If that happens, just don't inline the method.
17071
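    // Bundle up the state needed by the non-capturing callback passed to eeRunWithErrorTrap below.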
17072     struct Param
17073     {
17074         Compiler*              pThis;
17075         GenTreePtr             call;
17076         CORINFO_METHOD_HANDLE  fncHandle;
17077         unsigned               methAttr;
17078         CORINFO_CONTEXT_HANDLE exactContextHnd;
17079         InlineResult*          result;
17080         InlineCandidateInfo**  ppInlineCandidateInfo;
17081     } param = {nullptr};
17082
17083     param.pThis                 = this;
17084     param.call                  = call;
17085     param.fncHandle             = fncHandle;
17086     param.methAttr              = methAttr;
17087     param.exactContextHnd       = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle);
17088     param.result                = inlineResult;
17089     param.ppInlineCandidateInfo = ppInlineCandidateInfo;
17090
17091     bool success = eeRunWithErrorTrap<Param>(
17092         [](Param* pParam) {
17093             DWORD                  dwRestrictions = 0;
17094             CorInfoInitClassResult initClassResult;
17095
17096 #ifdef DEBUG
17097             const char* methodName;
17098             const char* className;
17099             methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className);
17100
17101             if (JitConfig.JitNoInline())
17102             {
17103                 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE);
17104                 goto _exit;
17105             }
17106 #endif
17107
17108             /* Try to get the method info (IL code and signature) for the callee */
17109
17110             CORINFO_METHOD_INFO methInfo;
17111             if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo))
17112             {
17113                 pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO);
17114                 goto _exit;
17115             }
17116
17117             bool forceInline;
17118             forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE);
17119
17120             pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result);
17121
17122             if (pParam->result->IsFailure())
17123             {
17124                 assert(pParam->result->IsNever());
17125                 goto _exit;
17126             }
17127
17128             // Speculatively check if initClass() can be done.
17129             // If it can be done, we will try to inline the method. If inlining
17130             // succeeds, then we will do the non-speculative initClass() and commit it.
17131             // If this speculative call to initClass() fails, there is no point
17132             // trying to inline this method.
17133             initClassResult =
17134                 pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */,
17135                                                            pParam->exactContextHnd /* context */,
17136                                                            TRUE /* speculative */);
17137
17138             if (initClassResult & CORINFO_INITCLASS_DONT_INLINE)
17139             {
17140                 pParam->result->NoteFatal(InlineObservation::CALLSITE_CLASS_INIT_FAILURE_SPEC);
17141                 goto _exit;
17142             }
17143
17144             // Give the EE the final say in whether to inline or not.
17145             // This should be last since, for verifiable code, this can be expensive.
17146
17147             /* VM Inline check also ensures that the method is verifiable if needed */
17148             CorInfoInline vmResult;
17149             vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle,
17150                                                                   &dwRestrictions);
17151
17152             if (vmResult == INLINE_FAIL)
17153             {
17154                 pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE);
17155             }
17156             else if (vmResult == INLINE_NEVER)
17157             {
17158                 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE);
17159             }
17160
17161             if (pParam->result->IsFailure())
17162             {
17163                 // Make sure not to report this one.  It was already reported by the VM.
17164                 pParam->result->SetReported();
17165                 goto _exit;
17166             }
17167
17168             // check for unsupported inlining restrictions
17169             assert((dwRestrictions & ~(INLINE_RESPECT_BOUNDARY | INLINE_NO_CALLEE_LDSTR | INLINE_SAME_THIS)) == 0);
17170
17171             if (dwRestrictions & INLINE_SAME_THIS)
17172             {
17173                 GenTreePtr thisArg = pParam->call->gtCall.gtCallObjp;
17174                 assert(thisArg);
17175
17176                 if (!pParam->pThis->impIsThis(thisArg))
17177                 {
17178                     pParam->result->NoteFatal(InlineObservation::CALLSITE_REQUIRES_SAME_THIS);
17179                     goto _exit;
17180                 }
17181             }
17182
17183             /* Get the method properties */
17184
17185             CORINFO_CLASS_HANDLE clsHandle;
17186             clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle);
17187             unsigned clsAttr;
17188             clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle);
17189
17190             /* Get the return type */
17191
17192             var_types fncRetType;
17193             fncRetType = pParam->call->TypeGet();
17194
17195 #ifdef DEBUG
17196             var_types fncRealRetType;
17197             fncRealRetType = JITtype2varType(methInfo.args.retType);
17198
17199             assert((genActualType(fncRealRetType) == genActualType(fncRetType)) ||
17200                    // <BUGNUM> VSW 288602 </BUGNUM>
17201                    // In the case of IJW, we allow assigning a native pointer to a BYREF.
17202                    (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) ||
17203                    (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT)));
17204 #endif
17205
17206             //
17207             // Allocate an InlineCandidateInfo structure
17208             //
17209             InlineCandidateInfo* pInfo;
17210             pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo;
17211
17212             pInfo->dwRestrictions  = dwRestrictions;
17213             pInfo->methInfo        = methInfo;
17214             pInfo->methAttr        = pParam->methAttr;
17215             pInfo->clsHandle       = clsHandle;
17216             pInfo->clsAttr         = clsAttr;
17217             pInfo->fncRetType      = fncRetType;
17218             pInfo->exactContextHnd = pParam->exactContextHnd;
17219             pInfo->ilCallerHandle  = pParam->pThis->info.compMethodHnd;
17220             pInfo->initClassResult = initClassResult;
17221
17222             *(pParam->ppInlineCandidateInfo) = pInfo;
17223
17224         _exit:;
17225         },
17226         &param);
17227     if (!success)
17228     {
17229         param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR);
17230     }
17231 }
17232
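//------------------------------------------------------------------------
// impInlineRecordArgInfo: record information about a single argument to an
// inline candidate call: whether it is constant, a caller local, has side
// effects or global references, or is a byref to a struct local. Fatal
// inline observations (e.g. a GT_MKREFANY argument or a constant null
// 'this') are noted on the InlineResult.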
17233 void Compiler::impInlineRecordArgInfo(InlineInfo*   pInlineInfo,
17234                                       GenTreePtr    curArgVal,
17235                                       unsigned      argNum,
17236                                       InlineResult* inlineResult)
17237 {
17238     InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum];
17239
17240     if (curArgVal->gtOper == GT_MKREFANY)
17241     {
17242         inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY);
17243         return;
17244     }
17245
17246     inlCurArgInfo->argNode = curArgVal;
17247
17248     GenTreePtr lclVarTree;
17249     if (impIsAddressInLocal(curArgVal, &lclVarTree) && varTypeIsStruct(lclVarTree))
17250     {
17251         inlCurArgInfo->argIsByRefToStructLocal = true;
17252 #ifdef FEATURE_SIMD
17253         if (lvaTable[lclVarTree->AsLclVarCommon()->gtLclNum].lvSIMDType)
17254         {
17255             pInlineInfo->hasSIMDTypeArgLocalOrReturn = true;
17256         }
17257 #endif // FEATURE_SIMD
17258     }
17259
17260     if (curArgVal->gtFlags & GTF_ALL_EFFECT)
17261     {
17262         inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0;
17263         inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0;
17264     }
17265
17266     if (curArgVal->gtOper == GT_LCL_VAR)
17267     {
17268         inlCurArgInfo->argIsLclVar = true;
17269
17270         /* Remember the "original" argument number */
17271         curArgVal->gtLclVar.gtLclILoffs = argNum;
17272     }
17273
17274     if ((curArgVal->OperKind() & GTK_CONST) ||
17275         ((curArgVal->gtOper == GT_ADDR) && (curArgVal->gtOp.gtOp1->gtOper == GT_LCL_VAR)))
17276     {
17277         inlCurArgInfo->argIsInvariant = true;
17278         if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->gtIntCon.gtIconVal == 0))
17279         {
17280             /* Abort, but do not mark as not inlinable */
17281             inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS);
17282             return;
17283         }
17284     }
17285
17286     if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal))
17287     {
17288         inlCurArgInfo->argHasLdargaOp = true;
17289     }
17290
17291 #ifdef DEBUG
17292     if (verbose)
17293     {
17294         if (inlCurArgInfo->argIsThis)
17295         {
17296             printf("thisArg:");
17297         }
17298         else
17299         {
17300             printf("\nArgument #%u:", argNum);
17301         }
17302         if (inlCurArgInfo->argIsLclVar)
17303         {
17304             printf(" is a local var");
17305         }
17306         if (inlCurArgInfo->argIsInvariant)
17307         {
17308             printf(" is a constant");
17309         }
17310         if (inlCurArgInfo->argHasGlobRef)
17311         {
17312             printf(" has global refs");
17313         }
17314         if (inlCurArgInfo->argHasSideEff)
17315         {
17316             printf(" has side effects");
17317         }
17318         if (inlCurArgInfo->argHasLdargaOp)
17319         {
17320             printf(" has ldarga effect");
17321         }
17322         if (inlCurArgInfo->argHasStargOp)
17323         {
17324             printf(" has starg effect");
17325         }
17326         if (inlCurArgInfo->argIsByRefToStructLocal)
17327         {
17328             printf(" is byref to a struct local");
17329         }
17330
17331         printf("\n");
17332         gtDispTree(curArgVal);
17333         printf("\n");
17334     }
17335 #endif
17336 }
17337
17338 /*****************************************************************************
17339  *  Initialize the inline candidate's argument and local variable tables
 *  (inlArgInfo and lclVarInfo) from the call site trees and the inlinee's
 *  signature, noting inline observations that may abort the inline.
17340  */
17341
17342 void Compiler::impInlineInitVars(InlineInfo* pInlineInfo)
17343 {
17344     assert(!compIsForInlining());
17345
17346     GenTreePtr           call         = pInlineInfo->iciCall;
17347     CORINFO_METHOD_INFO* methInfo     = &pInlineInfo->inlineCandidateInfo->methInfo;
17348     unsigned             clsAttr      = pInlineInfo->inlineCandidateInfo->clsAttr;
17349     InlArgInfo*          inlArgInfo   = pInlineInfo->inlArgInfo;
17350     InlLclVarInfo*       lclVarInfo   = pInlineInfo->lclVarInfo;
17351     InlineResult*        inlineResult = pInlineInfo->inlineResult;
17352
17353     const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo);
17354
17355     /* Init the argument struct */
17356
17357     memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0]));
17358
17359     /* Get hold of the 'this' pointer and the argument list proper */
17360
17361     GenTreePtr thisArg = call->gtCall.gtCallObjp;
17362     GenTreePtr argList = call->gtCall.gtCallArgs;
17363     unsigned   argCnt  = 0; // Count of the arguments
17364
17365     assert((methInfo->args.hasThis()) == (thisArg != nullptr));
17366
17367     if (thisArg)
17368     {
17369         inlArgInfo[0].argIsThis = true;
17370
17371         impInlineRecordArgInfo(pInlineInfo, thisArg, argCnt, inlineResult);
17372
17373         if (inlineResult->IsFailure())
17374         {
17375             return;
17376         }
17377
17378         /* Increment the argument count */
17379         argCnt++;
17380     }
17381
17382     /* Record some information about each of the arguments */
17383     bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0;
17384
17385 #if USER_ARGS_COME_LAST
17386     unsigned typeCtxtArg = thisArg ? 1 : 0;
17387 #else  // USER_ARGS_COME_LAST
17388     unsigned typeCtxtArg = methInfo->args.totalILArgs();
17389 #endif // USER_ARGS_COME_LAST
17390
17391     for (GenTreePtr argTmp = argList; argTmp; argTmp = argTmp->gtOp.gtOp2)
17392     {
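        // When the call has a return buffer, the first entry in the argument list is
        // the return buffer pointer rather than an IL argument of the inlinee; skip it.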
17393         if (argTmp == argList && hasRetBuffArg)
17394         {
17395             continue;
17396         }
17397
17398         // Ignore the type context argument
17399         if (hasTypeCtxtArg && (argCnt == typeCtxtArg))
17400         {
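            // Set the index to a value that will never match again so we only skip it once.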
17401             typeCtxtArg = 0xFFFFFFFF;
17402             continue;
17403         }
17404
17405         assert(argTmp->gtOper == GT_LIST);
17406         GenTreePtr argVal = argTmp->gtOp.gtOp1;
17407
17408         impInlineRecordArgInfo(pInlineInfo, argVal, argCnt, inlineResult);
17409
17410         if (inlineResult->IsFailure())
17411         {
17412             return;
17413         }
17414
17415         /* Increment the argument count */
17416         argCnt++;
17417     }
17418
17419     /* Make sure we got the arg number right */
17420     assert(argCnt == methInfo->args.totalILArgs());
17421
17422 #ifdef FEATURE_SIMD
17423     bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn;
17424 #endif // FEATURE_SIMD
17425
17426     /* We have typeless opcodes, get type information from the signature */
17427
17428     if (thisArg)
17429     {
17430         var_types sigType;
17431
17432         if (clsAttr & CORINFO_FLG_VALUECLASS)
17433         {
17434             sigType = TYP_BYREF;
17435         }
17436         else
17437         {
17438             sigType = TYP_REF;
17439         }
17440
17441         lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle);
17442         lclVarInfo[0].lclHasLdlocaOp = false;
17443
17444 #ifdef FEATURE_SIMD
17445         // We always want to check isSIMDClass, since we want to set foundSIMDType (to increase
17446         // the inlining multiplier) for anything in that assembly.
17447         // But we only need to normalize it if it is a TYP_STRUCT
17448         // (which we need to do even if we have already set foundSIMDType).
17449         if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
17450         {
17451             if (sigType == TYP_STRUCT)
17452             {
17453                 sigType = impNormStructType(lclVarInfo[0].lclVerTypeInfo.GetClassHandle());
17454             }
17455             foundSIMDType = true;
17456         }
17457 #endif // FEATURE_SIMD
17458         lclVarInfo[0].lclTypeInfo = sigType;
17459
17460         assert(varTypeIsGC(thisArg->gtType) ||   // "this" is managed
17461                (thisArg->gtType == TYP_I_IMPL && // "this" is unmanaged but the method's class doesn't care
17462                 (clsAttr & CORINFO_FLG_VALUECLASS)));
17463
17464         if (genActualType(thisArg->gtType) != genActualType(sigType))
17465         {
17466             if (sigType == TYP_REF)
17467             {
17468                 /* The argument cannot be bashed into a ref (see bug 750871) */
17469                 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF);
17470                 return;
17471             }
17472
17473             /* This can only happen with byrefs <-> ints/shorts */
17474
17475             assert(genActualType(sigType) == TYP_I_IMPL || sigType == TYP_BYREF);
17476             assert(genActualType(thisArg->gtType) == TYP_I_IMPL || thisArg->gtType == TYP_BYREF);
17477
17478             if (sigType == TYP_BYREF)
17479             {
17480                 lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17481             }
17482             else if (thisArg->gtType == TYP_BYREF)
17483             {
17484                 assert(sigType == TYP_I_IMPL);
17485
17486                 /* If possible change the BYREF to an int */
17487                 if (thisArg->IsVarAddr())
17488                 {
17489                     thisArg->gtType              = TYP_I_IMPL;
17490                     lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17491                 }
17492                 else
17493                 {
17494                     /* Arguments 'int <- byref' cannot be bashed */
17495                     inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
17496                     return;
17497                 }
17498             }
17499         }
17500     }
17501
17502     /* Init the types of the arguments and make sure the types
17503      * from the trees match the types in the signature */
17504
17505     CORINFO_ARG_LIST_HANDLE argLst;
17506     argLst = methInfo->args.args;
17507
17508     unsigned i;
17509     for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst))
17510     {
17511         var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args);
17512
17513         lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst);
17514
17515 #ifdef FEATURE_SIMD
17516         if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
17517         {
17518             // If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've
17519             // found a SIMD type, even if this may not be a type we recognize (the assumption is that
17520             // it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier).
17521             foundSIMDType = true;
17522             if (sigType == TYP_STRUCT)
17523             {
17524                 var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle());
17525                 sigType              = structType;
17526             }
17527         }
17528 #endif // FEATURE_SIMD
17529
17530         lclVarInfo[i].lclTypeInfo    = sigType;
17531         lclVarInfo[i].lclHasLdlocaOp = false;
17532
17533         /* Does the tree type match the signature type? */
17534
17535         GenTreePtr inlArgNode = inlArgInfo[i].argNode;
17536
17537         if (sigType != inlArgNode->gtType)
17538         {
17539             /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints,
17540                but in bad IL cases with caller-callee signature mismatches we can see other types.
17541                Intentionally reject cases with mismatches so that the jit stays robust when
17542                encountering bad IL. */
17543
17544             bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) ||
17545                                         (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) ||
17546                                         (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType));
17547
17548             if (!isPlausibleTypeMatch)
17549             {
17550                 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE);
17551                 return;
17552             }
17553
17554             /* Is it a narrowing or widening cast?
17555              * Widening casts are ok since the value computed is already
17556              * normalized to an int (on the IL stack) */
17557
17558             if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType))
17559             {
17560                 if (sigType == TYP_BYREF)
17561                 {
17562                     lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17563                 }
17564                 else if (inlArgNode->gtType == TYP_BYREF)
17565                 {
17566                     assert(varTypeIsIntOrI(sigType));
17567
17568                     /* If possible bash the BYREF to an int */
17569                     if (inlArgNode->IsVarAddr())
17570                     {
17571                         inlArgNode->gtType           = TYP_I_IMPL;
17572                         lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17573                     }
17574                     else
17575                     {
17576                         /* Arguments 'int <- byref' cannot be changed */
17577                         inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
17578                         return;
17579                     }
17580                 }
17581                 else if (genTypeSize(sigType) < EA_PTRSIZE)
17582                 {
17583                     /* Narrowing cast */
17584
17585                     if (inlArgNode->gtOper == GT_LCL_VAR &&
17586                         !lvaTable[inlArgNode->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad() &&
17587                         sigType == lvaGetRealType(inlArgNode->gtLclVarCommon.gtLclNum))
17588                     {
17589                         /* We don't need to insert a cast here as the variable
17590                            was assigned a normalized value of the right type */
17591
17592                         continue;
17593                     }
17594
17595                     inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(TYP_INT, inlArgNode, sigType);
17596
17597                     inlArgInfo[i].argIsLclVar = false;
17598
17599                     /* Try to fold the node in case we have constant arguments */
17600
17601                     if (inlArgInfo[i].argIsInvariant)
17602                     {
17603                         inlArgNode            = gtFoldExprConst(inlArgNode);
17604                         inlArgInfo[i].argNode = inlArgNode;
17605                         assert(inlArgNode->OperIsConst());
17606                     }
17607                 }
17608 #ifdef _TARGET_64BIT_
17609                 else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType))
17610                 {
17611                     // This should only happen for int -> native int widening
17612                     inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(genActualType(sigType), inlArgNode, sigType);
17613
17614                     inlArgInfo[i].argIsLclVar = false;
17615
17616                     /* Try to fold the node in case we have constant arguments */
17617
17618                     if (inlArgInfo[i].argIsInvariant)
17619                     {
17620                         inlArgNode            = gtFoldExprConst(inlArgNode);
17621                         inlArgInfo[i].argNode = inlArgNode;
17622                         assert(inlArgNode->OperIsConst());
17623                     }
17624                 }
17625 #endif // _TARGET_64BIT_
17626             }
17627         }
17628     }
17629
17630     /* Init the types of the local variables */
17631
17632     CORINFO_ARG_LIST_HANDLE localsSig;
17633     localsSig = methInfo->locals.args;
17634
17635     for (i = 0; i < methInfo->locals.numArgs; i++)
17636     {
17637         bool      isPinned;
17638         var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned);
17639
17640         lclVarInfo[i + argCnt].lclHasLdlocaOp = false;
17641         lclVarInfo[i + argCnt].lclIsPinned    = isPinned;
17642         lclVarInfo[i + argCnt].lclTypeInfo    = type;
17643
17644         if (isPinned)
17645         {
17646             // Pinned locals may cause inlines to fail.
17647             inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS);
17648             if (inlineResult->IsFailure())
17649             {
17650                 return;
17651             }
17652         }
17653
17654         lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig);
17655
17656         // If this local is a struct type with GC fields, inform the inliner. It may choose to bail
17657         // out on the inline.
17658         if (type == TYP_STRUCT)
17659         {
17660             CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle();
17661             DWORD                typeFlags = info.compCompHnd->getClassAttribs(lclHandle);
17662             if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
17663             {
17664                 inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
17665                 if (inlineResult->IsFailure())
17666                 {
17667                     return;
17668                 }
17669
17670                 // Do further notification in the case where the call site is rare; some policies do
17671                 // not track the relative hotness of call sites for "always" inline cases.
17672                 if (pInlineInfo->iciBlock->isRunRarely())
17673                 {
17674                     inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
17675                     if (inlineResult->IsFailure())
17676                     {
17677
17678                         return;
17679                     }
17680                 }
17681             }
17682         }
17683
17684         localsSig = info.compCompHnd->getArgNext(localsSig);
17685
17686 #ifdef FEATURE_SIMD
17687         if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo)))
17688         {
17689             foundSIMDType = true;
17690             if (featureSIMD && type == TYP_STRUCT)
17691             {
17692                 var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle());
17693                 lclVarInfo[i + argCnt].lclTypeInfo = structType;
17694             }
17695         }
17696 #endif // FEATURE_SIMD
17697     }
17698
17699 #ifdef FEATURE_SIMD
17700     if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDClass(call->AsCall()->gtRetClsHnd))
17701     {
17702         foundSIMDType = true;
17703     }
17704     pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType;
17705 #endif // FEATURE_SIMD
17706 }
17707
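//------------------------------------------------------------------------
// impInlineFetchLocal: map an inlinee local to an inliner temp, allocating and
// initializing the temp (type, ldloca/pinned flags, struct handle) on first use.
// Subsequent fetches of the same inlinee local return the same temp number.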
17708 unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason))
17709 {
17710     assert(compIsForInlining());
17711
17712     unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum];
17713
17714     if (tmpNum == BAD_VAR_NUM)
17715     {
17716         var_types lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
17717
17718         // The lifetime of this local might span multiple BBs.
17719         // So it is a long lifetime local.
17720         impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason));
17721
17722         lvaTable[tmpNum].lvType = lclTyp;
17723         if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclHasLdlocaOp)
17724         {
17725             lvaTable[tmpNum].lvHasLdAddrOp = 1;
17726         }
17727
17728         if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclIsPinned)
17729         {
17730             lvaTable[tmpNum].lvPinned = 1;
17731
17732             if (!impInlineInfo->hasPinnedLocals)
17733             {
17734                 // If the inlinee returns a value, use a spill temp
17735                 // for the return value to ensure that even in the case
17736                 // where the return expression refers to one of the
17737                 // pinned locals, we can unpin the local right after
17738                 // the inlined method body.
17739                 if ((info.compRetNativeType != TYP_VOID) && (lvaInlineeReturnSpillTemp == BAD_VAR_NUM))
17740                 {
17741                     lvaInlineeReturnSpillTemp =
17742                         lvaGrabTemp(false DEBUGARG("Inline candidate pinned local return spill temp"));
17743                     lvaTable[lvaInlineeReturnSpillTemp].lvType = info.compRetNativeType;
17744                 }
17745             }
17746
17747             impInlineInfo->hasPinnedLocals = true;
17748         }
17749
17750         if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo.IsStruct())
17751         {
17752             if (varTypeIsStruct(lclTyp))
17753             {
17754                 lvaSetStruct(tmpNum,
17755                              impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo.GetClassHandle(),
17756                              true /* unsafe value cls check */);
17757             }
17758             else
17759             {
17760                 // This is a wrapped primitive.  Make sure the verstate knows that
17761                 lvaTable[tmpNum].lvVerTypeInfo =
17762                     impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo;
17763             }
17764         }
17765     }
17766
17767     return tmpNum;
17768 }
17769
17770 // Returns the GenTree (usually a GT_LCL_VAR) representing the given argument of the inlined method.
17771 // Only use this method for the arguments of the inlinee method.
17772 // !!! Do not use it for the locals of the inlinee method. !!!!
17773
17774 GenTreePtr Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo)
17775 {
17776     /* Get the argument type */
17777     var_types lclTyp = lclVarInfo[lclNum].lclTypeInfo;
17778
17779     GenTreePtr op1 = nullptr;
17780
17781     // constant or address of local
17782     if (inlArgInfo[lclNum].argIsInvariant && !inlArgInfo[lclNum].argHasLdargaOp && !inlArgInfo[lclNum].argHasStargOp)
17783     {
17784         /* Clone the constant. Note that we cannot directly use argNode
17785         in the trees even if inlArgInfo[lclNum].argIsUsed==false as this
17786         would introduce aliasing between inlArgInfo[].argNode and
17787         impInlineExpr. Then gtFoldExpr() could change it, causing further
17788         references to the argument working off of the bashed copy. */
17789
17790         op1 = gtCloneExpr(inlArgInfo[lclNum].argNode);
17791         PREFIX_ASSUME(op1 != nullptr);
17792         inlArgInfo[lclNum].argTmpNum = (unsigned)-1; // illegal temp
17793     }
17794     else if (inlArgInfo[lclNum].argIsLclVar && !inlArgInfo[lclNum].argHasLdargaOp && !inlArgInfo[lclNum].argHasStargOp)
17795     {
17796         /* Argument is a local variable (of the caller)
17797          * Can we re-use the passed argument node? */
17798
17799         op1                          = inlArgInfo[lclNum].argNode;
17800         inlArgInfo[lclNum].argTmpNum = op1->gtLclVarCommon.gtLclNum;
17801
17802         if (inlArgInfo[lclNum].argIsUsed)
17803         {
17804             assert(op1->gtOper == GT_LCL_VAR);
17805             assert(lclNum == op1->gtLclVar.gtLclILoffs);
17806
17807             if (!lvaTable[op1->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad())
17808             {
17809                 lclTyp = genActualType(lclTyp);
17810             }
17811
17812             /* Create a new lcl var node - remember the argument lclNum */
17813             op1 = gtNewLclvNode(op1->gtLclVarCommon.gtLclNum, lclTyp, op1->gtLclVar.gtLclILoffs);
17814         }
17815     }
17816     else if (inlArgInfo[lclNum].argIsByRefToStructLocal && !inlArgInfo[lclNum].argHasStargOp)
17817     {
17818         /* Argument is a by-ref address to a struct, a normed struct, or its field.
17819            In these cases, don't spill the byref to a local; simply clone the tree and use it.
17820            This way we will increase the chance for this byref to be optimized away by
17821            a subsequent "dereference" operation.
17822
17823            From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree
17824            (in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal.
17825            For example, if the caller is:
17826                 ldloca.s   V_1  // V_1 is a local struct
17827                 call       void Test.ILPart::RunLdargaOnPointerArg(int32*)
17828            and the callee being inlined has:
17829                 .method public static void  RunLdargaOnPointerArg(int32* ptrToInts) cil managed
17830                     ldarga.s   ptrToInts
17831                     call       void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**)
17832            then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll
17833            soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR.
17834         */
17835         assert(inlArgInfo[lclNum].argNode->TypeGet() == TYP_BYREF ||
17836                inlArgInfo[lclNum].argNode->TypeGet() == TYP_I_IMPL);
17837         op1 = gtCloneExpr(inlArgInfo[lclNum].argNode);
17838     }
17839     else
17840     {
17841         /* Argument is a complex expression - it must be evaluated into a temp */
17842
17843         if (inlArgInfo[lclNum].argHasTmp)
17844         {
17845             assert(inlArgInfo[lclNum].argIsUsed);
17846             assert(inlArgInfo[lclNum].argTmpNum < lvaCount);
17847
17848             /* Create a new lcl var node - remember the argument lclNum */
17849             op1 = gtNewLclvNode(inlArgInfo[lclNum].argTmpNum, genActualType(lclTyp));
17850
17851             /* This is the second or later use of this argument,
17852             so we have to use the temp (instead of the actual arg) */
17853             inlArgInfo[lclNum].argBashTmpNode = nullptr;
17854         }
17855         else
17856         {
17857             /* First time use */
17858             assert(inlArgInfo[lclNum].argIsUsed == false);
17859
17860             /* Reserve a temp for the expression.
17861             * Use a large size node as we may change it later */
17862
17863             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg"));
17864
17865             lvaTable[tmpNum].lvType = lclTyp;
17866             assert(lvaTable[tmpNum].lvAddrExposed == 0);
17867             if (inlArgInfo[lclNum].argHasLdargaOp)
17868             {
17869                 lvaTable[tmpNum].lvHasLdAddrOp = 1;
17870             }
17871
17872             if (lclVarInfo[lclNum].lclVerTypeInfo.IsStruct())
17873             {
17874                 if (varTypeIsStruct(lclTyp))
17875                 {
17876                     lvaSetStruct(tmpNum, impInlineInfo->lclVarInfo[lclNum].lclVerTypeInfo.GetClassHandle(),
17877                                  true /* unsafe value cls check */);
17878                 }
17879                 else
17880                 {
17881                     // This is a wrapped primitive.  Make sure the verstate knows that
17882                     lvaTable[tmpNum].lvVerTypeInfo = impInlineInfo->lclVarInfo[lclNum].lclVerTypeInfo;
17883                 }
17884             }
17885
17886             inlArgInfo[lclNum].argHasTmp = true;
17887             inlArgInfo[lclNum].argTmpNum = tmpNum;
17888
17889             // If we require strict exception order, then arguments must
17890             // be evaluated in sequence before the body of the inlined method.
17891             // So we need to evaluate them to a temp.
17892             // Also, if arguments have global references, we need to
17893             // evaluate them to a temp before the inlined body as the
17894             // inlined body may be modifying the global ref.
17895             // TODO-1stClassStructs: We currently do not reuse an existing lclVar
17896             // if it is a struct, because it requires some additional handling.
17897
17898             if (!varTypeIsStruct(lclTyp) && (!inlArgInfo[lclNum].argHasSideEff) && (!inlArgInfo[lclNum].argHasGlobRef))
17899             {
17900                 /* Get a *LARGE* LCL_VAR node */
17901                 op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp), lclNum);
17902
17903                 /* Record op1 as the very first use of this argument.
17904                 If there are no further uses of the arg, we may be
17905                 able to use the actual arg node instead of the temp.
17906                 If we do see any further uses, we will clear this. */
17907                 inlArgInfo[lclNum].argBashTmpNode = op1;
17908             }
17909             else
17910             {
17911                 /* Get a small LCL_VAR node */
17912                 op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp));
17913                 /* No bashing of this argument */
17914                 inlArgInfo[lclNum].argBashTmpNode = nullptr;
17915             }
17916         }
17917     }
17918
17919     /* Mark the argument as used */
17920
17921     inlArgInfo[lclNum].argIsUsed = true;
17922
17923     return op1;
17924 }
17925
17926 /******************************************************************************
17927  Is this the original "this" argument to the call being inlined?
17928
17929  Note that we do not inline methods with "starg 0", and so we do not need to
17930  worry about it.
17931 */
17932
17933 BOOL Compiler::impInlineIsThis(GenTreePtr tree, InlArgInfo* inlArgInfo)
17934 {
17935     assert(compIsForInlining());
17936     return (tree->gtOper == GT_LCL_VAR && tree->gtLclVarCommon.gtLclNum == inlArgInfo[0].argTmpNum);
17937 }
17938
17939 //-----------------------------------------------------------------------------
17940 // This function checks if a dereference in the inlinee can guarantee that
17941 // the "this" is non-NULL.
17942 // If we haven't hit a branch or a side effect, and we are dereferencing
17943 // from 'this' to access a field or to make a GTF_CALL_NULLCHECK call,
17944 // then we can avoid a separate null pointer check.
17945 //
17946 // "additionalTreesToBeEvaluatedBefore"
17947 // is the set of pending trees that have not yet been added to the statement list,
17948 // and which have been removed from verCurrentState.esStack[]
17949
17950 BOOL Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTreePtr  additionalTreesToBeEvaluatedBefore,
17951                                                                   GenTreePtr  variableBeingDereferenced,
17952                                                                   InlArgInfo* inlArgInfo)
17953 {
17954     assert(compIsForInlining());
17955     assert(opts.OptEnabled(CLFLG_INLINING));
17956
17957     BasicBlock* block = compCurBB;
17958
17959     GenTreePtr stmt;
17960     GenTreePtr expr;
17961
17962     if (block != fgFirstBB)
17963     {
17964         return FALSE;
17965     }
17966
17967     if (!impInlineIsThis(variableBeingDereferenced, inlArgInfo))
17968     {
17969         return FALSE;
17970     }
17971
17972     if (additionalTreesToBeEvaluatedBefore &&
17973         GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTreesToBeEvaluatedBefore->gtFlags))
17974     {
17975         return FALSE;
17976     }
17977
17978     for (stmt = impTreeList->gtNext; stmt; stmt = stmt->gtNext)
17979     {
17980         expr = stmt->gtStmt.gtStmtExpr;
17981
17982         if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags))
17983         {
17984             return FALSE;
17985         }
17986     }
17987
17988     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
17989     {
17990         unsigned stackTreeFlags = verCurrentState.esStack[level].val->gtFlags;
17991         if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags))
17992         {
17993             return FALSE;
17994         }
17995     }
17996
17997     return TRUE;
17998 }
17999
18000 /******************************************************************************/
18001 // Check the inlining eligibility of this GT_CALL node.
18002 // Mark GTF_CALL_INLINE_CANDIDATE on the GT_CALL node
18003
18004 // Todo: find a way to record the failure reasons in the IR (or
18005 // otherwise build tree context) so that when we do the inlining pass we
18006 // can capture these reasons.
18007
18008 void Compiler::impMarkInlineCandidate(GenTreePtr             callNode,
18009                                       CORINFO_CONTEXT_HANDLE exactContextHnd,
18010                                       CORINFO_CALL_INFO*     callInfo)
18011 {
18012     // Let the strategy know there's another call
18013     impInlineRoot()->m_inlineStrategy->NoteCall();
18014
18015     if (!opts.OptEnabled(CLFLG_INLINING))
18016     {
18017         /* XXX Mon 8/18/2008
18018          * This assert is misleading.  The caller does not ensure that we have CLFLG_INLINING set before
18019          * calling impMarkInlineCandidate.  However, if this assert trips it means that we're an inlinee and
18020          * CLFLG_MINOPT is set.  That doesn't make a lot of sense.  If you hit this assert, work back and
18021          * figure out why we did not set MAXOPT for this compile.
18022          */
18023         assert(!compIsForInlining());
18024         return;
18025     }
18026
18027     if (compIsForImportOnly())
18028     {
18029         // Don't bother creating the inline candidate during verification.
18030         // Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification
18031         // that leads to the creation of multiple instances of Compiler.
18032         return;
18033     }
18034
18035     GenTreeCall* call = callNode->AsCall();
18036     InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate");
18037
18038     // Don't inline if not optimizing root method
18039     if (opts.compDbgCode)
18040     {
18041         inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN);
18042         return;
18043     }
18044
18045     // Don't inline if inlining into root method is disabled.
18046     if (InlineStrategy::IsNoInline(info.compCompHnd, info.compMethodHnd))
18047     {
18048         inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE);
18049         return;
18050     }
18051
18052     // Inlining candidate determination needs to honor only the IL tail prefix.
18053     // Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive).
18054     if (call->IsTailPrefixedCall())
18055     {
18056         inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX);
18057         return;
18058     }
18059
18060     // Tail recursion elimination takes precedence over inlining.
18061     // TODO: We may want to do some of the additional checks from fgMorphCall
18062     // here to reduce the chance we don't inline a call that won't be optimized
18063     // as a fast tail call or turned into a loop.
18064     if (gtIsRecursiveCall(call) && call->IsImplicitTailCall())
18065     {
18066         inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL);
18067         return;
18068     }
18069
18070     if ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT)
18071     {
18072         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT);
18073         return;
18074     }
18075
18076     /* Ignore helper calls */
18077
18078     if (call->gtCallType == CT_HELPER)
18079     {
18080         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER);
18081         return;
18082     }
18083
18084     /* Ignore indirect calls */
18085     if (call->gtCallType == CT_INDIRECT)
18086     {
18087         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED);
18088         return;
18089     }
18090
18091     /* I removed the check for BBJ_THROW.  BBJ_THROW is usually marked as rarely run.  This more or less
18092      * restricts the inliner to non-expanding inlines.  I removed the check to allow for non-expanding
18093      * inlining in throw blocks.  I should consider the same thing for catch and filter regions. */
18094
18095     CORINFO_METHOD_HANDLE fncHandle = call->gtCallMethHnd;
18096     unsigned              methAttr;
18097
18098     // Reuse method flags from the original callInfo if possible
18099     if (fncHandle == callInfo->hMethod)
18100     {
18101         methAttr = callInfo->methodFlags;
18102     }
18103     else
18104     {
18105         methAttr = info.compCompHnd->getMethodAttribs(fncHandle);
18106     }
18107
18108 #ifdef DEBUG
18109     if (compStressCompile(STRESS_FORCE_INLINE, 0))
18110     {
18111         methAttr |= CORINFO_FLG_FORCEINLINE;
18112     }
18113 #endif
18114
18115     // Check for COMPlus_AggressiveInlining
18116     if (compDoAggressiveInlining)
18117     {
18118         methAttr |= CORINFO_FLG_FORCEINLINE;
18119     }
18120
18121     if (!(methAttr & CORINFO_FLG_FORCEINLINE))
18122     {
18123         /* Don't bother inlining call sites that are in catch handler or filter regions */
18124         if (bbInCatchHandlerILRange(compCurBB))
18125         {
18126 #ifdef DEBUG
18127             if (verbose)
18128             {
18129                 printf("\nWill not inline blocks that are in the catch handler region\n");
18130             }
18131
18132 #endif
18133
18134             inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH);
18135             return;
18136         }
18137
18138         if (bbInFilterILRange(compCurBB))
18139         {
18140 #ifdef DEBUG
18141             if (verbose)
18142             {
18143                 printf("\nWill not inline blocks that are in the filter region\n");
18144             }
18145 #endif
18146
18147             inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER);
18148             return;
18149         }
18150     }
18151
18152     /* If the caller's stack frame is marked, then we can't do any inlining. Period. */
18153
18154     if (opts.compNeedSecurityCheck)
18155     {
18156         inlineResult.NoteFatal(InlineObservation::CALLER_NEEDS_SECURITY_CHECK);
18157         return;
18158     }
18159
18160     /* Check if we tried to inline this method before */
18161
18162     if (methAttr & CORINFO_FLG_DONT_INLINE)
18163     {
18164         inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE);
18165         return;
18166     }
18167
18168     /* Cannot inline synchronized methods */
18169
18170     if (methAttr & CORINFO_FLG_SYNCH)
18171     {
18172         inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED);
18173         return;
18174     }
18175
18176     /* Do not inline if callee needs security checks (since they would then mark the wrong frame) */
18177
18178     if (methAttr & CORINFO_FLG_SECURITYCHECK)
18179     {
18180         inlineResult.NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
18181         return;
18182     }
18183
18184     InlineCandidateInfo* inlineCandidateInfo = nullptr;
18185     impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult);
18186
18187     if (inlineResult.IsFailure())
18188     {
18189         return;
18190     }
18191
18192     // The old value should be NULL
18193     assert(call->gtInlineCandidateInfo == nullptr);
18194
18195     call->gtInlineCandidateInfo = inlineCandidateInfo;
18196
18197     // Mark the call node as inline candidate.
18198     call->gtFlags |= GTF_CALL_INLINE_CANDIDATE;
18199
18200     // Let the strategy know there's another candidate.
18201     impInlineRoot()->m_inlineStrategy->NoteCandidate();
18202
18203     // Since we're not actually inlining yet, and this call site is
18204     // still just an inline candidate, there's nothing to report.
18205     inlineResult.SetReported();
18206 }
18207
18208 /******************************************************************************/
18209 // Returns true if the given intrinsic will be implemented by target-specific
18210 // instructions
18211
18212 bool Compiler::IsTargetIntrinsic(CorInfoIntrinsics intrinsicId)
18213 {
18214 #if defined(_TARGET_AMD64_) || (defined(_TARGET_X86_) && !defined(LEGACY_BACKEND))
18215     switch (intrinsicId)
18216     {
18217         // Amd64 only has SSE2 instructions to directly compute sqrt/abs.
18218         //
18219         // TODO: Because the x86 backend only targets SSE for floating-point code,
18220         //       it does not treat Sine, Cosine, or Round as intrinsics (JIT32
18221         //       implemented those intrinsics as x87 instructions). If this poses
18222         //       a CQ problem, it may be necessary to change the implementation of
18223         //       the helper calls to decrease call overhead or switch back to the
18224         //       x87 instructions. This is tracked by #7097.
18225         case CORINFO_INTRINSIC_Sqrt:
18226         case CORINFO_INTRINSIC_Abs:
18227             return true;
18228
18229         default:
18230             return false;
18231     }
18232 #elif defined(_TARGET_ARM64_)
18233     switch (intrinsicId)
18234     {
18235         case CORINFO_INTRINSIC_Sqrt:
18236         case CORINFO_INTRINSIC_Abs:
18237         case CORINFO_INTRINSIC_Round:
18238             return true;
18239
18240         default:
18241             return false;
18242     }
18243 #elif defined(_TARGET_ARM_)
18244     switch (intrinsicId)
18245     {
18246         case CORINFO_INTRINSIC_Sqrt:
18247         case CORINFO_INTRINSIC_Abs:
18248         case CORINFO_INTRINSIC_Round:
18249             return true;
18250
18251         default:
18252             return false;
18253     }
18254 #elif defined(_TARGET_X86_)
18255     switch (intrinsicId)
18256     {
18257         case CORINFO_INTRINSIC_Sin:
18258         case CORINFO_INTRINSIC_Cos:
18259         case CORINFO_INTRINSIC_Sqrt:
18260         case CORINFO_INTRINSIC_Abs:
18261         case CORINFO_INTRINSIC_Round:
18262             return true;
18263
18264         default:
18265             return false;
18266     }
18267 #else
18268     // TODO: This portion of logic is not implemented for other architectures.
18269     // The reason for returning true is that, on all other architectures, the only
18270     // intrinsics enabled are target intrinsics.
18271     return true;
18272 #endif //_TARGET_AMD64_
18273 }
18274
18275 /******************************************************************************/
18276 // Returns true if the given intrinsic will be implemented by calling System.Math
18277 // methods.
18278
18279 bool Compiler::IsIntrinsicImplementedByUserCall(CorInfoIntrinsics intrinsicId)
18280 {
18281     // Currently, if a math intrinsic is not implemented by target-specific
18282     // instructions, it will be implemented by a System.Math call. In the
18283     // future, if we turn to implementing some of them with helper calls,
18284     // this predicate will need to be revisited.
18285     return !IsTargetIntrinsic(intrinsicId);
18286 }
18287
18288 bool Compiler::IsMathIntrinsic(CorInfoIntrinsics intrinsicId)
18289 {
18290     switch (intrinsicId)
18291     {
18292         case CORINFO_INTRINSIC_Sin:
18293         case CORINFO_INTRINSIC_Sqrt:
18294         case CORINFO_INTRINSIC_Abs:
18295         case CORINFO_INTRINSIC_Cos:
18296         case CORINFO_INTRINSIC_Round:
18297         case CORINFO_INTRINSIC_Cosh:
18298         case CORINFO_INTRINSIC_Sinh:
18299         case CORINFO_INTRINSIC_Tan:
18300         case CORINFO_INTRINSIC_Tanh:
18301         case CORINFO_INTRINSIC_Asin:
18302         case CORINFO_INTRINSIC_Acos:
18303         case CORINFO_INTRINSIC_Atan:
18304         case CORINFO_INTRINSIC_Atan2:
18305         case CORINFO_INTRINSIC_Log10:
18306         case CORINFO_INTRINSIC_Pow:
18307         case CORINFO_INTRINSIC_Exp:
18308         case CORINFO_INTRINSIC_Ceiling:
18309         case CORINFO_INTRINSIC_Floor:
18310             return true;
18311         default:
18312             return false;
18313     }
18314 }
18315
18316 bool Compiler::IsMathIntrinsic(GenTreePtr tree)
18317 {
18318     return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->gtIntrinsic.gtIntrinsicId);
18319 }
18320 /*****************************************************************************/