1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
4
5 /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
6 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
7 XX                                                                           XX
8 XX                           Importer                                        XX
9 XX                                                                           XX
10 XX   Imports the given method and converts it to semantic trees              XX
11 XX                                                                           XX
12 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
13 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
14 */
15
16 #include "jitpch.h"
17 #ifdef _MSC_VER
18 #pragma hdrstop
19 #endif
20
21 #include "corexcep.h"
22
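// Verification helper macros. Each checks 'cond' and, when it is false, reports
// unverifiable code via verRaiseVerifyExceptionIfNeeded; the "OrReturn" variants
// also bail out of the calling function (VerifyOrReturnSpeculative simply returns
// false, without reporting, when 'speculative' is true). The do { ... } while (0)
// wrapper makes each macro expand to a single statement so it composes safely
// with if/else. A typical use, taken from impResolveToken below:
//
//     Verify(eeTryResolveToken(pResolvedToken), "Token resolution failed");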
23 #define Verify(cond, msg)                                                                                              \
24     do                                                                                                                 \
25     {                                                                                                                  \
26         if (!(cond))                                                                                                   \
27         {                                                                                                              \
28             verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
29         }                                                                                                              \
30     } while (0)
31
32 #define VerifyOrReturn(cond, msg)                                                                                      \
33     do                                                                                                                 \
34     {                                                                                                                  \
35         if (!(cond))                                                                                                   \
36         {                                                                                                              \
37             verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
38             return;                                                                                                    \
39         }                                                                                                              \
40     } while (0)
41
42 #define VerifyOrReturnSpeculative(cond, msg, speculative)                                                              \
43     do                                                                                                                 \
44     {                                                                                                                  \
45         if (speculative)                                                                                               \
46         {                                                                                                              \
47             if (!(cond))                                                                                               \
48             {                                                                                                          \
49                 return false;                                                                                          \
50             }                                                                                                          \
51         }                                                                                                              \
52         else                                                                                                           \
53         {                                                                                                              \
54             if (!(cond))                                                                                               \
55             {                                                                                                          \
56                 verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                   \
57                 return false;                                                                                          \
58             }                                                                                                          \
59         }                                                                                                              \
60     } while (0)
61
62 /*****************************************************************************/
63
64 void Compiler::impInit()
65 {
66
67 #ifdef DEBUG
68     impTreeList        = nullptr;
69     impTreeLast        = nullptr;
70     impInlinedCodeSize = 0;
71 #endif
72 }
73
74 /*****************************************************************************
75  *
76  *  Pushes the given tree on the stack.
77  */
78
79 void Compiler::impPushOnStack(GenTreePtr tree, typeInfo ti)
80 {
81     /* Check for overflow. If inlining, we may be using a bigger stack */
82
83     if ((verCurrentState.esStackDepth >= info.compMaxStack) &&
84         (verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0)))
85     {
86         BADCODE("stack overflow");
87     }
88
89 #ifdef DEBUG
90     // If we are pushing a struct, make certain we know the precise type!
91     if (tree->TypeGet() == TYP_STRUCT)
92     {
93         assert(ti.IsType(TI_STRUCT));
94         CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle();
95         assert(clsHnd != NO_CLASS_HANDLE);
96     }
97
98     if (tiVerificationNeeded && !ti.IsDead())
99     {
100         assert(typeInfo::AreEquivalent(NormaliseForStack(ti), ti)); // types are normalized
101
102         // The ti type is consistent with the tree type.
103         //
104
105         // On 64-bit systems, nodes whose "proper" type is "native int" get labeled TYP_LONG.
106         // In the verification type system, we always transform "native int" to "TI_INT".
107         // Ideally, we would keep track of which nodes labeled "TYP_LONG" are really "native int", but
108         // attempts to do that have proved too difficult.  Instead, we'll assume that in checks like this,
109         // when there's a mismatch, it's because of this reason -- the typeInfo::AreEquivalentModuloNativeInt
110         // method used in the last disjunct allows exactly this mismatch.
111         assert(ti.IsDead() || ti.IsByRef() && (tree->TypeGet() == TYP_I_IMPL || tree->TypeGet() == TYP_BYREF) ||
112                ti.IsUnboxedGenericTypeVar() && tree->TypeGet() == TYP_REF ||
113                ti.IsObjRef() && tree->TypeGet() == TYP_REF || ti.IsMethod() && tree->TypeGet() == TYP_I_IMPL ||
114                ti.IsType(TI_STRUCT) && tree->TypeGet() != TYP_REF ||
115                typeInfo::AreEquivalentModuloNativeInt(NormaliseForStack(ti),
116                                                       NormaliseForStack(typeInfo(tree->TypeGet()))));
117
118         // If it is a struct type, make certain we normalized the primitive types
119         assert(!ti.IsType(TI_STRUCT) ||
120                info.compCompHnd->getTypeForPrimitiveValueClass(ti.GetClassHandle()) == CORINFO_TYPE_UNDEF);
121     }
122
123 #if VERBOSE_VERIFY
124     if (VERBOSE && tiVerificationNeeded)
125     {
126         printf("\n");
127         printf(TI_DUMP_PADDING);
128         printf("About to push to stack: ");
129         ti.Dump();
130     }
131 #endif // VERBOSE_VERIFY
132
133 #endif // DEBUG
134
135     verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti;
136     verCurrentState.esStack[verCurrentState.esStackDepth++].val      = tree;
137
138     if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
139     {
140         compLongUsed = true;
141     }
142     else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
143     {
144         compFloatingPointUsed = true;
145     }
146 }
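
// For example, impPushNullObjRefOnStack below pushes a zero TYP_REF constant
// through this routine with typeInfo(TI_NULL).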
147
148 /******************************************************************************/
149 // Used in the inliner, where we can assume type-safe code. Please don't use in the importer!!
150 inline void Compiler::impPushOnStackNoType(GenTreePtr tree)
151 {
152     assert(verCurrentState.esStackDepth < impStkSize);
153     INDEBUG(verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = typeInfo());
154     verCurrentState.esStack[verCurrentState.esStackDepth++].val              = tree;
155
156     if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
157     {
158         compLongUsed = true;
159     }
160     else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
161     {
162         compFloatingPointUsed = true;
163     }
164 }
165
166 inline void Compiler::impPushNullObjRefOnStack()
167 {
168     impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL));
169 }
170
171 // This method gets called when we run into unverifiable code
172 // (and we are verifying the method)
173
174 inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file)
175                                                           DEBUGARG(unsigned line))
176 {
177     // Remember that the code is not verifiable
178     // Note that the method may yet pass canSkipMethodVerification(),
179     // and so the presence of unverifiable code may not be an issue.
180     tiIsVerifiableCode = FALSE;
181
182 #ifdef DEBUG
183     const char* tail = strrchr(file, '\\');
184     if (tail)
185     {
186         file = tail + 1;
187     }
188
189     if (JitConfig.JitBreakOnUnsafeCode())
190     {
191         assert(!"Unsafe code detected");
192     }
193 #endif
194
195     JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
196             msg, info.compFullName, impCurOpcName, impCurOpcOffs));
197
198     if (verNeedsVerification() || compIsForImportOnly())
199     {
200         JITLOG((LL_ERROR, "Verification failure:  %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
201                 msg, info.compFullName, impCurOpcName, impCurOpcOffs));
202         verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line));
203     }
204 }
205
206 inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file)
207                                                                     DEBUGARG(unsigned line))
208 {
209     JITLOG((LL_ERROR, "Verification failure:  %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
210             msg, info.compFullName, impCurOpcName, impCurOpcOffs));
211
212 #ifdef DEBUG
213     //    BreakIfDebuggerPresent();
214     if (getBreakOnBadCode())
215     {
216         assert(!"Typechecking error");
217     }
218 #endif
219
220     RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr);
221     UNREACHABLE();
222 }
223
224 // Helper function that tells us whether the IL instruction at the given address
225 // consumes an address at the top of the stack. We use it to avoid setting
226 // lvAddrTaken on locals whose address does not actually escape.
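//
// For example (hypothetical IL; 'SomeStruct::i' is a placeholder field), given
//
//          ldloca.0
//          ldfld int32 SomeStruct::i
//
// the ldfld consumes the address produced by ldloca directly, and because the
// field is not a small type this returns true, letting the caller skip marking
// the local as address-taken.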
227 bool Compiler::impILConsumesAddr(const BYTE* codeAddr, CORINFO_METHOD_HANDLE fncHandle, CORINFO_MODULE_HANDLE scpHandle)
228 {
229     assert(!compIsForInlining());
230
231     OPCODE opcode;
232
233     opcode = (OPCODE)getU1LittleEndian(codeAddr);
234
235     switch (opcode)
236     {
237         // case CEE_LDFLDA: We're taking this one out because, if you have a sequence
238         // like
239         //
240         //          ldloca.0
241         //          ldflda whatever
242         //
243         // of a primitive-like struct, you end up after morphing with the address of a
244         // local that's not marked as address-taken, which is wrong. Also, ldflda is usually
245         // used for structs that contain other structs, which isn't a case we handle very
246         // well right now for other reasons.
247
248         case CEE_LDFLD:
249         {
250             // We won't collapse small fields. This is probably not the right place to have this
251             // check, but we're only using the function for this purpose, and it is easy to factor
252             // out if we need to do so.
253
254             CORINFO_RESOLVED_TOKEN resolvedToken;
255             impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field);
256
257             CORINFO_CLASS_HANDLE clsHnd;
258             var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField, &clsHnd));
259
260             // Preserve 'small' int types
261             if (lclTyp > TYP_INT)
262             {
263                 lclTyp = genActualType(lclTyp);
264             }
265
266             if (varTypeIsSmall(lclTyp))
267             {
268                 return false;
269             }
270
271             return true;
272         }
273         default:
274             break;
275     }
276
277     return false;
278 }
279
280 void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind)
281 {
282     pResolvedToken->tokenContext = impTokenLookupContextHandle;
283     pResolvedToken->tokenScope   = info.compScopeHnd;
284     pResolvedToken->token        = getU4LittleEndian(addr);
285     pResolvedToken->tokenType    = kind;
286
287     if (!tiVerificationNeeded)
288     {
289         info.compCompHnd->resolveToken(pResolvedToken);
290     }
291     else
292     {
293         Verify(eeTryResolveToken(pResolvedToken), "Token resolution failed");
294     }
295 }
296
297 /*****************************************************************************
298  *
299  *  Pop one tree from the stack.
300  */
301
302 StackEntry Compiler::impPopStack()
303 {
304     if (verCurrentState.esStackDepth == 0)
305     {
306         BADCODE("stack underflow");
307     }
308
309 #ifdef DEBUG
310 #if VERBOSE_VERIFY
311     if (VERBOSE && tiVerificationNeeded)
312     {
313         JITDUMP("\n");
314         printf(TI_DUMP_PADDING);
315         printf("About to pop from the stack: ");
316         const typeInfo& ti = verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo;
317         ti.Dump();
318     }
319 #endif // VERBOSE_VERIFY
320 #endif // DEBUG
321
322     return verCurrentState.esStack[--verCurrentState.esStackDepth];
323 }
324
325 StackEntry Compiler::impPopStack(CORINFO_CLASS_HANDLE& structType)
326 {
327     StackEntry ret = impPopStack();
328     structType     = verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo.GetClassHandle();
329     return (ret);
330 }
331
332 GenTreePtr Compiler::impPopStack(typeInfo& ti)
333 {
334     StackEntry ret = impPopStack();
335     ti             = ret.seTypeInfo;
336     return (ret.val);
337 }
338
339 /*****************************************************************************
340  *
341  *  Peek at the n'th (0-based) tree from the top of the stack.
342  */
343
344 StackEntry& Compiler::impStackTop(unsigned n)
345 {
346     if (verCurrentState.esStackDepth <= n)
347     {
348         BADCODE("stack underflow");
349     }
350
351     return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1];
352 }
353 /*****************************************************************************
354  *  Some trees are spilled in a special way, and need special handling when they
355  *  are unspilled or copied. The function below enumerates the operators that are
356  *  possible after such a spill.
357  */
358
359 #ifdef DEBUG // only used in asserts
360 static bool impValidSpilledStackEntry(GenTreePtr tree)
361 {
362     if (tree->gtOper == GT_LCL_VAR)
363     {
364         return true;
365     }
366
367     if (tree->OperIsConst())
368     {
369         return true;
370     }
371
372     return false;
373 }
374 #endif
375
376 /*****************************************************************************
377  *
378  *  The following logic is used to save/restore stack contents.
379  *  If 'copy' is true, then we make a copy of the trees on the stack. These
380  *  have to all be cloneable/spilled values.
381  */
382
383 void Compiler::impSaveStackState(SavedStack* savePtr, bool copy)
384 {
385     savePtr->ssDepth = verCurrentState.esStackDepth;
386
387     if (verCurrentState.esStackDepth)
388     {
389         savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth];
390         size_t saveSize  = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees);
391
392         if (copy)
393         {
394             StackEntry* table = savePtr->ssTrees;
395
396             /* Make a fresh copy of all the stack entries */
397
398             for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++)
399             {
400                 table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo;
401                 GenTreePtr tree   = verCurrentState.esStack[level].val;
402
403                 assert(impValidSpilledStackEntry(tree));
404
405                 switch (tree->gtOper)
406                 {
407                     case GT_CNS_INT:
408                     case GT_CNS_LNG:
409                     case GT_CNS_DBL:
410                     case GT_CNS_STR:
411                     case GT_LCL_VAR:
412                         table->val = gtCloneExpr(tree);
413                         break;
414
415                     default:
416                         assert(!"Bad oper - Not covered by impValidSpilledStackEntry()");
417                         break;
418                 }
419             }
420         }
421         else
422         {
423             memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize);
424         }
425     }
426 }
427
428 void Compiler::impRestoreStackState(SavedStack* savePtr)
429 {
430     verCurrentState.esStackDepth = savePtr->ssDepth;
431
432     if (verCurrentState.esStackDepth)
433     {
434         memcpy(verCurrentState.esStack, savePtr->ssTrees,
435                verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack));
436     }
437 }
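
// A minimal sketch of how the pair above might be used (illustrative only):
//
//     SavedStack savedStack;
//     impSaveStackState(&savedStack, true); // clone the current stack entries
//     // ... code that temporarily disturbs verCurrentState.esStack ...
//     impRestoreStackState(&savedStack);    // put the saved entries back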
438
439 /*****************************************************************************
440  *
441  *  Get the tree list started for a new basic block.
442  */
443 inline void Compiler::impBeginTreeList()
444 {
445     assert(impTreeList == nullptr && impTreeLast == nullptr);
446
447     impTreeList = impTreeLast = new (this, GT_BEG_STMTS) GenTree(GT_BEG_STMTS, TYP_VOID);
448 }
449
450 /*****************************************************************************
451  *
452  *  Store the given start and end statements in the given basic block. This is
453  *  mostly called by impEndTreeList(BasicBlock *block). It is called
454  *  directly only for handling CEE_LEAVEs out of finally-protected try regions.
455  */
456
457 inline void Compiler::impEndTreeList(BasicBlock* block, GenTreePtr firstStmt, GenTreePtr lastStmt)
458 {
459     assert(firstStmt->gtOper == GT_STMT);
460     assert(lastStmt->gtOper == GT_STMT);
461
462     /* Make the list circular, so that we can easily walk it backwards */
463
464     firstStmt->gtPrev = lastStmt;
465
466     /* Store the tree list in the basic block */
467
468     block->bbTreeList = firstStmt;
469
470     /* The block should not already be marked as imported */
471     assert((block->bbFlags & BBF_IMPORTED) == 0);
472
473     block->bbFlags |= BBF_IMPORTED;
474 }
475
476 /*****************************************************************************
477  *
478  *  Store the current tree list in the given basic block.
479  */
480
481 inline void Compiler::impEndTreeList(BasicBlock* block)
482 {
483     assert(impTreeList->gtOper == GT_BEG_STMTS);
484
485     GenTreePtr firstTree = impTreeList->gtNext;
486
487     if (!firstTree)
488     {
489         /* The block should not already be marked as imported */
490         assert((block->bbFlags & BBF_IMPORTED) == 0);
491
492         // Empty block. Just mark it as imported
493         block->bbFlags |= BBF_IMPORTED;
494     }
495     else
496     {
497         // Ignore the GT_BEG_STMTS
498         assert(firstTree->gtPrev == impTreeList);
499
500         impEndTreeList(block, firstTree, impTreeLast);
501     }
502
503 #ifdef DEBUG
504     if (impLastILoffsStmt != nullptr)
505     {
506         impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
507         impLastILoffsStmt                          = nullptr;
508     }
509
510     impTreeList = impTreeLast = nullptr;
511 #endif
512 }
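
// impBeginTreeList and the impEndTreeList overloads above bracket the import of a
// single basic block: the GT_BEG_STMTS sentinel created in impBeginTreeList is
// discarded here, and the accumulated statements (if any) are stored in the block
// with BBF_IMPORTED set.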
513
514 /*****************************************************************************
515  *
516  *  Check that storing the given tree doesn't mess up the semantic order. Note
517  *  that this has only limited value as we can only check [0..chkLevel).
518  */
519
520 inline void Compiler::impAppendStmtCheck(GenTreePtr stmt, unsigned chkLevel)
521 {
522 #ifndef DEBUG
523     return;
524 #else
525     assert(stmt->gtOper == GT_STMT);
526
527     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
528     {
529         chkLevel = verCurrentState.esStackDepth;
530     }
531
532     if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE)
533     {
534         return;
535     }
536
537     GenTreePtr tree = stmt->gtStmt.gtStmtExpr;
538
539     // Calls can only be appended if there are no trees with GTF_GLOB_EFFECT on the stack
540
541     if (tree->gtFlags & GTF_CALL)
542     {
543         for (unsigned level = 0; level < chkLevel; level++)
544         {
545             assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0);
546         }
547     }
548
549     if (tree->gtOper == GT_ASG)
550     {
551         // For an assignment to a local variable, all references of that
552         // variable have to be spilled. If it is aliased, all calls and
553         // indirect accesses have to be spilled
554
555         if (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR)
556         {
557             unsigned lclNum = tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
558             for (unsigned level = 0; level < chkLevel; level++)
559             {
560                 assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum, false));
561                 assert(!lvaTable[lclNum].lvAddrExposed ||
562                        (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0);
563             }
564         }
565
566         // If the access may be to global memory, all side effects have to be spilled.
567
568         else if (tree->gtOp.gtOp1->gtFlags & GTF_GLOB_REF)
569         {
570             for (unsigned level = 0; level < chkLevel; level++)
571             {
572                 assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0);
573             }
574         }
575     }
576 #endif
577 }
578
579 /*****************************************************************************
580  *
581  *  Append the given GT_STMT node to the current block's tree list.
582  *  [0..chkLevel) is the portion of the stack which we will check for
583  *    interference with stmt and spill if needed.
584  */
585
586 inline void Compiler::impAppendStmt(GenTreePtr stmt, unsigned chkLevel)
587 {
588     assert(stmt->gtOper == GT_STMT);
589     noway_assert(impTreeLast != nullptr);
590
591     /* If the statement being appended has any side-effects, check the stack
592        to see if anything needs to be spilled to preserve correct ordering. */
593
594     GenTreePtr expr  = stmt->gtStmt.gtStmtExpr;
595     unsigned   flags = expr->gtFlags & GTF_GLOB_EFFECT;
596
597     // Assignments to (unaliased) locals don't count as side-effects, as
598     // we handle them specially using impSpillLclRefs(). Temp locals should
599     // be fine too.
600
601     if ((expr->gtOper == GT_ASG) && (expr->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
602         !(expr->gtOp.gtOp1->gtFlags & GTF_GLOB_REF) && !gtHasLocalsWithAddrOp(expr->gtOp.gtOp2))
603     {
604         unsigned op2Flags = expr->gtOp.gtOp2->gtFlags & GTF_GLOB_EFFECT;
605         assert(flags == (op2Flags | GTF_ASG));
606         flags = op2Flags;
607     }
608
609     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
610     {
611         chkLevel = verCurrentState.esStackDepth;
612     }
613
614     if (chkLevel && chkLevel != (unsigned)CHECK_SPILL_NONE)
615     {
616         assert(chkLevel <= verCurrentState.esStackDepth);
617
618         if (flags)
619         {
620             // If there is a call, we have to spill global refs
621             bool spillGlobEffects = (flags & GTF_CALL) ? true : false;
622
623             if (expr->gtOper == GT_ASG)
624             {
625                 GenTree* lhs = expr->gtGetOp1();
626                 // If we are assigning to a global ref, we have to spill global refs on stack.
627                 // TODO-1stClassStructs: Previously, spillGlobEffects was set to true for
628                 // GT_INITBLK and GT_COPYBLK, but this is overly conservative, and should be
629                 // revisited. (Note that it was NOT set to true for GT_COPYOBJ.)
630                 if (!expr->OperIsBlkOp())
631                 {
632                     // If we are assigning to a global ref, we have to spill global refs on stack
633                     if ((lhs->gtFlags & GTF_GLOB_REF) != 0)
634                     {
635                         spillGlobEffects = true;
636                     }
637                 }
638                 else if ((lhs->OperIsBlk() && !lhs->AsBlk()->HasGCPtr()) ||
639                          ((lhs->OperGet() == GT_LCL_VAR) &&
640                           (lvaTable[lhs->AsLclVarCommon()->gtLclNum].lvStructGcCount == 0)))
641                 {
642                     spillGlobEffects = true;
643                 }
644             }
645
646             impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt"));
647         }
648         else
649         {
650             impSpillSpecialSideEff();
651         }
652     }
653
654     impAppendStmtCheck(stmt, chkLevel);
655
656     /* Point 'prev' at the previous node, so that we can walk backwards */
657
658     stmt->gtPrev = impTreeLast;
659
660     /* Append the expression statement to the list */
661
662     impTreeLast->gtNext = stmt;
663     impTreeLast         = stmt;
664
665 #ifdef FEATURE_SIMD
666     impMarkContiguousSIMDFieldAssignments(stmt);
667 #endif
668
669     /* Once we set impCurStmtOffs in an appended tree, we are ready to
670        report the following offsets. So reset impCurStmtOffs */
671
672     if (impTreeLast->gtStmt.gtStmtILoffsx == impCurStmtOffs)
673     {
674         impCurStmtOffsSet(BAD_IL_OFFSET);
675     }
676
677 #ifdef DEBUG
678     if (impLastILoffsStmt == nullptr)
679     {
680         impLastILoffsStmt = stmt;
681     }
682
683     if (verbose)
684     {
685         printf("\n\n");
686         gtDispTree(stmt);
687     }
688 #endif
689 }
690
691 /*****************************************************************************
692  *
693  *  Insert the given GT_STMT "stmt" before GT_STMT "stmtBefore"
694  */
695
696 inline void Compiler::impInsertStmtBefore(GenTreePtr stmt, GenTreePtr stmtBefore)
697 {
698     assert(stmt->gtOper == GT_STMT);
699     assert(stmtBefore->gtOper == GT_STMT);
700
701     GenTreePtr stmtPrev = stmtBefore->gtPrev;
702     stmt->gtPrev        = stmtPrev;
703     stmt->gtNext        = stmtBefore;
704     stmtPrev->gtNext    = stmt;
705     stmtBefore->gtPrev  = stmt;
706 }
707
708 /*****************************************************************************
709  *
710  *  Append the given expression tree to the current block's tree list.
711  *  Return the newly created statement.
712  */
713
714 GenTreePtr Compiler::impAppendTree(GenTreePtr tree, unsigned chkLevel, IL_OFFSETX offset)
715 {
716     assert(tree);
717
718     /* Allocate an 'expression statement' node */
719
720     GenTreePtr expr = gtNewStmt(tree, offset);
721
722     /* Append the statement to the current block's stmt list */
723
724     impAppendStmt(expr, chkLevel);
725
726     return expr;
727 }
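
// For example, impAssignTempGen below appends the temp assignment it builds with
//
//     impAppendTree(asg, curLevel, impCurStmtOffs);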
728
729 /*****************************************************************************
730  *
731  *  Insert the given expression tree before GT_STMT "stmtBefore"
732  */
733
734 void Compiler::impInsertTreeBefore(GenTreePtr tree, IL_OFFSETX offset, GenTreePtr stmtBefore)
735 {
736     assert(stmtBefore->gtOper == GT_STMT);
737
738     /* Allocate an 'expression statement' node */
739
740     GenTreePtr expr = gtNewStmt(tree, offset);
741
742     /* Insert the statement before 'stmtBefore' */
743
744     impInsertStmtBefore(expr, stmtBefore);
745 }
746
747 /*****************************************************************************
748  *
749  *  Append an assignment of the given value to a temp to the current tree list.
750  *  curLevel is the stack level for which the spill to the temp is being done.
751  */
752
753 void Compiler::impAssignTempGen(unsigned    tmp,
754                                 GenTreePtr  val,
755                                 unsigned    curLevel,
756                                 GenTreePtr* pAfterStmt, /* = NULL */
757                                 IL_OFFSETX  ilOffset,   /* = BAD_IL_OFFSET */
758                                 BasicBlock* block       /* = NULL */
759                                 )
760 {
761     GenTreePtr asg = gtNewTempAssign(tmp, val);
762
763     if (!asg->IsNothingNode())
764     {
765         if (pAfterStmt)
766         {
767             GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
768             *pAfterStmt        = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
769         }
770         else
771         {
772             impAppendTree(asg, curLevel, impCurStmtOffs);
773         }
774     }
775 }
776
777 /*****************************************************************************
778  * same as above, but handle the valueclass case too
779  */
780
781 void Compiler::impAssignTempGen(unsigned             tmpNum,
782                                 GenTreePtr           val,
783                                 CORINFO_CLASS_HANDLE structType,
784                                 unsigned             curLevel,
785                                 GenTreePtr*          pAfterStmt, /* = NULL */
786                                 IL_OFFSETX           ilOffset,   /* = BAD_IL_OFFSET */
787                                 BasicBlock*          block       /* = NULL */
788                                 )
789 {
790     GenTreePtr asg;
791
792     if (varTypeIsStruct(val))
793     {
794         assert(tmpNum < lvaCount);
795         assert(structType != NO_CLASS_HANDLE);
796
797         // If the method is non-verifiable, this assert may not hold, so ignore it
798         // when verification is turned on, since any block that tries to use the temp
799         // would have failed verification anyway.
800         var_types varType = lvaTable[tmpNum].lvType;
801         assert(tiVerificationNeeded || varType == TYP_UNDEF || varTypeIsStruct(varType));
802         lvaSetStruct(tmpNum, structType, false);
803
804         // Now, set the type of the struct value. Note that lvaSetStruct may modify the type
805         // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType)
806         // that has been passed in for the value being assigned to the temp, in which case we
807         // need to set 'val' to that same type.
808         // Note also that if we always normalized the types of any node that might be a struct
809         // type, this would not be necessary - but that requires additional JIT/EE interface
810         // calls that may not actually be required - e.g. if we only access a field of a struct.
811
812         val->gtType = lvaTable[tmpNum].lvType;
813
814         GenTreePtr dst = gtNewLclvNode(tmpNum, val->gtType);
815         asg            = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, block);
816     }
817     else
818     {
819         asg = gtNewTempAssign(tmpNum, val);
820     }
821
822     if (!asg->IsNothingNode())
823     {
824         if (pAfterStmt)
825         {
826             GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
827             *pAfterStmt        = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
828         }
829         else
830         {
831             impAppendTree(asg, curLevel, impCurStmtOffs);
832         }
833     }
834 }
835
836 /*****************************************************************************
837  *
838  *  Pop the given number of values from the stack and return a list node with
839  *  their values.
840  *  The 'prefixTree' argument may optionally contain an argument
841  *  list that is prepended to the list returned from this function.
842  *
843  *  The notion of prepended is a bit misleading in that the list is backwards
844  *  from the way I would expect: The first element popped is at the end of
845  *  the returned list, and prefixTree is 'before' that, meaning closer to
846  *  the end of the list.  To get to prefixTree, you have to walk to the
847  *  end of the list.
848  *
849  *  For ARG_ORDER_R2L prefixTree is only used to insert extra arguments, as
850  *  such we reverse its meaning such that returnValue has a reversed
851  *  prefixTree at the head of the list.
852  */
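//
//  Worked example (illustrative): with ARG_ORDER_R2L, popping entries e1, e2, e3
//  (e1 popped first, i.e. e1 was the stack top) with prefixTree = [p1, p2] yields
//  [p2, p1, e3, e2, e1] -- the reversed prefixTree at the head, followed by the
//  popped values with the first-popped entry last.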
853
854 GenTreeArgList* Compiler::impPopList(unsigned          count,
855                                      unsigned*         flagsPtr,
856                                      CORINFO_SIG_INFO* sig,
857                                      GenTreeArgList*   prefixTree)
858 {
859     assert(sig == nullptr || count == sig->numArgs);
860
861     unsigned             flags = 0;
862     CORINFO_CLASS_HANDLE structType;
863     GenTreeArgList*      treeList;
864
865     if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
866     {
867         treeList = nullptr;
868     }
869     else
870     { // ARG_ORDER_L2R
871         treeList = prefixTree;
872     }
873
874     while (count--)
875     {
876         StackEntry se   = impPopStack();
877         typeInfo   ti   = se.seTypeInfo;
878         GenTreePtr temp = se.val;
879
880         if (varTypeIsStruct(temp))
881         {
882             // Morph trees that aren't already OBJs or MKREFANY to be OBJs
883             assert(ti.IsType(TI_STRUCT));
884             structType = ti.GetClassHandleForValueClass();
885             temp       = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL);
886         }
887
888         /* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */
889         flags |= temp->gtFlags;
890         treeList = gtNewListNode(temp, treeList);
891     }
892
893     *flagsPtr = flags;
894
895     if (sig != nullptr)
896     {
897         if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
898             sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR)
899         {
900             // Make sure that all valuetypes (including enums) that we push are loaded.
901             // This is to guarantee that if a GC is triggered from the prestub of this method,
902             // all valuetypes in the method signature are already loaded.
903             // We need to be able to find the size of the valuetypes, but we cannot
904             // do a class-load from within GC.
905             info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass);
906         }
907
908         CORINFO_ARG_LIST_HANDLE argLst = sig->args;
909         CORINFO_CLASS_HANDLE    argClass;
910         CORINFO_CLASS_HANDLE    argRealClass;
911         GenTreeArgList*         args;
912         unsigned                sigSize;
913
914         for (args = treeList, count = sig->numArgs; count > 0; args = args->Rest(), count--)
915         {
916             PREFIX_ASSUME(args != nullptr);
917
918             CorInfoType corType = strip(info.compCompHnd->getArgType(sig, argLst, &argClass));
919
920             // insert implied casts (from float to double or double to float)
921
922             if (corType == CORINFO_TYPE_DOUBLE && args->Current()->TypeGet() == TYP_FLOAT)
923             {
924                 args->Current() = gtNewCastNode(TYP_DOUBLE, args->Current(), TYP_DOUBLE);
925             }
926             else if (corType == CORINFO_TYPE_FLOAT && args->Current()->TypeGet() == TYP_DOUBLE)
927             {
928                 args->Current() = gtNewCastNode(TYP_FLOAT, args->Current(), TYP_FLOAT);
929             }
930
931             // insert any widening or narrowing casts for backwards compatibility
932
933             args->Current() = impImplicitIorI4Cast(args->Current(), JITtype2varType(corType));
934
935             if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR &&
936                 corType != CORINFO_TYPE_VAR && (argRealClass = info.compCompHnd->getArgClass(sig, argLst)) != nullptr)
937             {
938                 // Everett MC++ could generate IL with mismatched valuetypes. It used to work with the Everett JIT,
939                 // but it stopped working in Whidbey when we started passing simple valuetypes as underlying
940                 // primitive types.
941                 // We will try to adjust for this case here to avoid breaking customers' code (see VSW 485789 for
942                 // details).
943                 if (corType == CORINFO_TYPE_VALUECLASS && !varTypeIsStruct(args->Current()))
944                 {
945                     args->Current() = impNormStructVal(args->Current(), argRealClass, (unsigned)CHECK_SPILL_ALL, true);
946                 }
947
948                 // Make sure that all valuetypes (including enums) that we push are loaded.
949                 // This is to guarantee that if a GC is triggered from the prestub of this method,
950                 // all valuetypes in the method signature are already loaded.
951                 // We need to be able to find the size of the valuetypes, but we cannot
952                 // do a class-load from within GC.
953                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass);
954             }
955
956             argLst = info.compCompHnd->getArgNext(argLst);
957         }
958     }
959
960     if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
961     {
962         // Prepend the prefixTree
963
964         // Simple in-place reversal to place treeList
965         // at the end of a reversed prefixTree
966         while (prefixTree != nullptr)
967         {
968             GenTreeArgList* next = prefixTree->Rest();
969             prefixTree->Rest()   = treeList;
970             treeList             = prefixTree;
971             prefixTree           = next;
972         }
973     }
974     return treeList;
975 }
976
977 /*****************************************************************************
978  *
979  *  Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.)
980  *  The first "skipReverseCount" items are not reversed.
981  */
982
983 GenTreeArgList* Compiler::impPopRevList(unsigned          count,
984                                         unsigned*         flagsPtr,
985                                         CORINFO_SIG_INFO* sig,
986                                         unsigned          skipReverseCount)
987
988 {
989     assert(skipReverseCount <= count);
990
991     GenTreeArgList* list = impPopList(count, flagsPtr, sig);
992
993     // reverse the list
994     if (list == nullptr || skipReverseCount == count)
995     {
996         return list;
997     }
998
999     GenTreeArgList* ptr          = nullptr; // Initialized to the first node that needs to be reversed
1000     GenTreeArgList* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed
1001
1002     if (skipReverseCount == 0)
1003     {
1004         ptr = list;
1005     }
1006     else
1007     {
1008         lastSkipNode = list;
1009         // Get to the first node that needs to be reversed
1010         for (unsigned i = 0; i < skipReverseCount - 1; i++)
1011         {
1012             lastSkipNode = lastSkipNode->Rest();
1013         }
1014
1015         PREFIX_ASSUME(lastSkipNode != nullptr);
1016         ptr = lastSkipNode->Rest();
1017     }
1018
1019     GenTreeArgList* reversedList = nullptr;
1020
1021     do
1022     {
1023         GenTreeArgList* tmp = ptr->Rest();
1024         ptr->Rest()         = reversedList;
1025         reversedList        = ptr;
1026         ptr                 = tmp;
1027     } while (ptr != nullptr);
1028
1029     if (skipReverseCount)
1030     {
1031         lastSkipNode->Rest() = reversedList;
1032         return list;
1033     }
1034     else
1035     {
1036         return reversedList;
1037     }
1038 }
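
// Worked example (illustrative): given a list [n1, n2, n3, n4] from impPopList and
// skipReverseCount == 1, n1 stays in place and the tail is reversed, producing
// [n1, n4, n3, n2]; with skipReverseCount == 0 the whole list becomes
// [n4, n3, n2, n1].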
1039
1040 /*****************************************************************************
1041    Assign (copy) the structure from 'src' to 'dest'.  The structure is a value
1042    class of type 'structHnd'.  It returns the tree that should be appended to the
1043    statement list that represents the assignment.
1044    Temp assignments may be appended to impTreeList if spilling is necessary.
1045    curLevel is the stack level for which a spill may be being done.
1046  */
1047
1048 GenTreePtr Compiler::impAssignStruct(GenTreePtr           dest,
1049                                      GenTreePtr           src,
1050                                      CORINFO_CLASS_HANDLE structHnd,
1051                                      unsigned             curLevel,
1052                                      GenTreePtr*          pAfterStmt, /* = NULL */
1053                                      BasicBlock*          block       /* = NULL */
1054                                      )
1055 {
1056     assert(varTypeIsStruct(dest));
1057
1058     while (dest->gtOper == GT_COMMA)
1059     {
1060         assert(varTypeIsStruct(dest->gtOp.gtOp2)); // Second thing is the struct
1061
1062         // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree.
1063         if (pAfterStmt)
1064         {
1065             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(dest->gtOp.gtOp1, impCurStmtOffs));
1066         }
1067         else
1068         {
1069             impAppendTree(dest->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1070         }
1071
1072         // set dest to the second thing
1073         dest = dest->gtOp.gtOp2;
1074     }
1075
1076     assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD ||
1077            dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX);
1078
1079     if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR &&
1080         src->gtLclVarCommon.gtLclNum == dest->gtLclVarCommon.gtLclNum)
1081     {
1082         // Make this a NOP
1083         return gtNewNothingNode();
1084     }
1085
1086     // TODO-1stClassStructs: Avoid creating an address if it is not needed,
1087     // or re-creating a Blk node if it is.
1088     GenTreePtr destAddr;
1089
1090     if (dest->gtOper == GT_IND || dest->OperIsBlk())
1091     {
1092         destAddr = dest->gtOp.gtOp1;
1093     }
1094     else
1095     {
1096         destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
1097     }
1098
1099     return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, block));
1100 }
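
// For example, impAssignTempGen above builds the destination lclVar and calls
//
//     asg = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, block);
//
// and then appends (or inserts after *pAfterStmt) the tree returned here.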
1101
1102 /*****************************************************************************/
1103
1104 GenTreePtr Compiler::impAssignStructPtr(GenTreePtr           destAddr,
1105                                         GenTreePtr           src,
1106                                         CORINFO_CLASS_HANDLE structHnd,
1107                                         unsigned             curLevel,
1108                                         GenTreePtr*          pAfterStmt, /* = NULL */
1109                                         BasicBlock*          block       /* = NULL */
1110                                         )
1111 {
1112     var_types  destType;
1113     GenTreePtr dest      = nullptr;
1114     unsigned   destFlags = 0;
1115
1116 #if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1117     assert(varTypeIsStruct(src) || (src->gtOper == GT_ADDR && src->TypeGet() == TYP_BYREF));
1118     // TODO-ARM-BUG: Does ARM need this?
1119     // TODO-ARM64-BUG: Does ARM64 need this?
1120     assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1121            src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1122            src->gtOper == GT_COMMA || src->gtOper == GT_ADDR ||
1123            (src->TypeGet() != TYP_STRUCT && (GenTree::OperIsSIMD(src->gtOper) || src->gtOper == GT_LCL_FLD)));
1124 #else  // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1125     assert(varTypeIsStruct(src));
1126
1127     assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1128            src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1129            src->gtOper == GT_COMMA ||
1130            (src->TypeGet() != TYP_STRUCT && (GenTree::OperIsSIMD(src->gtOper) || src->gtOper == GT_LCL_FLD)));
1131 #endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1132     if (destAddr->OperGet() == GT_ADDR)
1133     {
1134         GenTree* destNode = destAddr->gtGetOp1();
1135         // If the actual destination is a local (for non-LEGACY_BACKEND), or already a block node, or is a node that
1136         // will be morphed, don't insert an OBJ(ADDR).
1137         if (destNode->gtOper == GT_INDEX || destNode->OperIsBlk()
1138 #ifndef LEGACY_BACKEND
1139             || ((destNode->OperGet() == GT_LCL_VAR) && (destNode->TypeGet() == src->TypeGet()))
1140 #endif // !LEGACY_BACKEND
1141                 )
1142         {
1143             dest = destNode;
1144         }
1145         destType = destNode->TypeGet();
1146     }
1147     else
1148     {
1149         destType = src->TypeGet();
1150     }
1151
1152     var_types asgType = src->TypeGet();
1153
1154     if (src->gtOper == GT_CALL)
1155     {
1156         if (src->AsCall()->TreatAsHasRetBufArg(this))
1157         {
1158             // Case of call returning a struct via hidden retbuf arg
1159
1160             // insert the return value buffer into the argument list as first byref parameter
1161             src->gtCall.gtCallArgs = gtNewListNode(destAddr, src->gtCall.gtCallArgs);
1162
1163             // now returns void, not a struct
1164             src->gtType = TYP_VOID;
1165
1166             // return the morphed call node
1167             return src;
1168         }
1169         else
1170         {
1171             // Case of call returning a struct in one or more registers.
1172
1173             var_types returnType = (var_types)src->gtCall.gtReturnType;
1174
1175             // We won't use a return buffer, so change src->gtType to 'returnType'
1176             src->gtType = genActualType(returnType);
1177
1178             // First we try to change this to "LclVar/LclFld = call"
1179             //
1180             if ((destAddr->gtOper == GT_ADDR) && (destAddr->gtOp.gtOp1->gtOper == GT_LCL_VAR))
1181             {
1182                 // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD.
1183                 // That is, the IR will be of the form lclVar = call for multi-reg return
1184                 //
1185                 GenTreePtr lcl = destAddr->gtOp.gtOp1;
1186                 if (src->AsCall()->HasMultiRegRetVal())
1187                 {
1188                     // Mark the struct LclVar as used in a MultiReg return context
1189                     //  which currently makes it non promotable.
1190                     // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1191                     // handle multireg returns.
1192                     lcl->gtFlags |= GTF_DONT_CSE;
1193                     lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1194                 }
1195                 else // The call result is not a multireg return
1196                 {
1197                     // We change this to a GT_LCL_FLD (from a GT_ADDR of a GT_LCL_VAR)
1198                     lcl->ChangeOper(GT_LCL_FLD);
1199                     fgLclFldAssign(lcl->gtLclVarCommon.gtLclNum);
1200                 }
1201
1202                 lcl->gtType = src->gtType;
1203                 asgType     = src->gtType;
1204                 dest        = lcl;
1205
1206 #if defined(_TARGET_ARM_)
1207                 // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case,
1208                 // but that method has not been updated to include ARM.
1209                 impMarkLclDstNotPromotable(lcl->gtLclVarCommon.gtLclNum, src, structHnd);
1210                 lcl->gtFlags |= GTF_DONT_CSE;
1211 #elif defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1212                 // Not allowed for FEATURE_CORECLR which is the only SKU available for System V OSs.
1213                 assert(!src->gtCall.IsVarargs() && "varargs not allowed for System V OSs.");
1214
1215                 // Make the struct non promotable. The eightbytes could contain multiple fields.
1216                 // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1217                 // handle multireg returns.
1218                 // TODO-Cleanup: Why is this needed here? It seems that this will be set even for
1219                 // non-multireg returns.
1220                 lcl->gtFlags |= GTF_DONT_CSE;
1221                 lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1222 #endif
1223             }
1224             else // we don't have a GT_ADDR of a GT_LCL_VAR
1225             {
1226                 // !!! The destination could be on stack. !!!
1227                 // This flag will let us choose the correct write barrier.
1228                 asgType   = returnType;
1229                 destFlags = GTF_IND_TGTANYWHERE;
1230             }
1231         }
1232     }
1233     else if (src->gtOper == GT_RET_EXPR)
1234     {
1235         GenTreePtr call = src->gtRetExpr.gtInlineCandidate;
1236         noway_assert(call->gtOper == GT_CALL);
1237
1238         if (call->AsCall()->HasRetBufArg())
1239         {
1240             // insert the return value buffer into the argument list as first byref parameter
1241             call->gtCall.gtCallArgs = gtNewListNode(destAddr, call->gtCall.gtCallArgs);
1242
1243             // now returns void, not a struct
1244             src->gtType  = TYP_VOID;
1245             call->gtType = TYP_VOID;
1246
1247             // We have already appended the write to 'dest' via the GT_CALL's args,
1248             // so now we just return an empty node (pruning the GT_RET_EXPR)
1249             return src;
1250         }
1251         else
1252         {
1253             // Case of inline method returning a struct in one or more registers.
1254             //
1255             var_types returnType = (var_types)call->gtCall.gtReturnType;
1256
1257             // We won't need a return buffer
1258             asgType      = returnType;
1259             src->gtType  = genActualType(returnType);
1260             call->gtType = src->gtType;
1261
1262             // If we've changed the type, and it no longer matches a local destination,
1263             // we must use an indirection.
1264             if ((dest != nullptr) && (dest->OperGet() == GT_LCL_VAR) && (dest->TypeGet() != asgType))
1265             {
1266                 dest = nullptr;
1267             }
1268
1269             // !!! The destination could be on stack. !!!
1270             // This flag will let us choose the correct write barrier.
1271             destFlags = GTF_IND_TGTANYWHERE;
1272         }
1273     }
1274     else if (src->OperIsBlk())
1275     {
1276         asgType = impNormStructType(structHnd);
1277         if (src->gtOper == GT_OBJ)
1278         {
1279             assert(src->gtObj.gtClass == structHnd);
1280         }
1281     }
1282     else if (src->gtOper == GT_INDEX)
1283     {
1284         asgType = impNormStructType(structHnd);
1285         assert(src->gtIndex.gtStructElemClass == structHnd);
1286     }
1287     else if (src->gtOper == GT_MKREFANY)
1288     {
1289         // Since we are assigning the result of a GT_MKREFANY,
1290         // "destAddr" must point to a refany.
1291
1292         GenTreePtr destAddrClone;
1293         destAddr =
1294             impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment"));
1295
1296         assert(offsetof(CORINFO_RefAny, dataPtr) == 0);
1297         assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF);
1298         GetZeroOffsetFieldMap()->Set(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));
1299         GenTreePtr     ptrSlot         = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr);
1300         GenTreeIntCon* typeFieldOffset = gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL);
1301         typeFieldOffset->gtFieldSeq    = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField());
1302         GenTreePtr typeSlot =
1303             gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset));
1304
1305         // append the assign of the pointer value
1306         GenTreePtr asg = gtNewAssignNode(ptrSlot, src->gtOp.gtOp1);
1307         if (pAfterStmt)
1308         {
1309             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(asg, impCurStmtOffs));
1310         }
1311         else
1312         {
1313             impAppendTree(asg, curLevel, impCurStmtOffs);
1314         }
1315
1316         // return the assign of the type value, to be appended
1317         return gtNewAssignNode(typeSlot, src->gtOp.gtOp2);
1318     }
1319     else if (src->gtOper == GT_COMMA)
1320     {
1321         // The second thing is the struct or its address.
1322         assert(varTypeIsStruct(src->gtOp.gtOp2) || src->gtOp.gtOp2->gtType == TYP_BYREF);
1323         if (pAfterStmt)
1324         {
1325             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(src->gtOp.gtOp1, impCurStmtOffs));
1326         }
1327         else
1328         {
1329             impAppendTree(src->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1330         }
1331
1332         // Evaluate the second thing using recursion.
1333         return impAssignStructPtr(destAddr, src->gtOp.gtOp2, structHnd, curLevel, pAfterStmt, block);
1334     }
1335     else if (src->IsLocal())
1336     {
1337         asgType = src->TypeGet();
1338     }
1339     else if (asgType == TYP_STRUCT)
1340     {
1341         asgType     = impNormStructType(structHnd);
1342         src->gtType = asgType;
1343 #ifdef LEGACY_BACKEND
1344         if (asgType == TYP_STRUCT)
1345         {
1346             GenTree* srcAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, src);
1347             src              = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
1348         }
1349 #endif
1350     }
1351     if (dest == nullptr)
1352     {
1353         // TODO-1stClassStructs: We shouldn't really need a block node as the destination
1354         // if this is a known struct type.
1355         if (asgType == TYP_STRUCT)
1356         {
1357             dest = gtNewObjNode(structHnd, destAddr);
1358             gtSetObjGcInfo(dest->AsObj());
1359             // Although an obj as a call argument was always assumed to be a globRef
1360             // (which is itself overly conservative), that is not true of the operands
1361             // of a block assignment.
1362             dest->gtFlags &= ~GTF_GLOB_REF;
1363             dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF);
1364         }
1365         else if (varTypeIsStruct(asgType))
1366         {
1367             dest = new (this, GT_BLK) GenTreeBlk(GT_BLK, asgType, destAddr, genTypeSize(asgType));
1368         }
1369         else
1370         {
1371             dest = gtNewOperNode(GT_IND, asgType, destAddr);
1372         }
1373     }
1374     else
1375     {
1376         dest->gtType = asgType;
1377     }
1378
1379     dest->gtFlags |= destFlags;
1380     destFlags = dest->gtFlags;
1381
1382     // return an assignment node, to be appended
1383     GenTree* asgNode = gtNewAssignNode(dest, src);
1384     gtBlockOpInit(asgNode, dest, src, false);
1385
1386     // TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs
1387     // of assignments.
1388     if ((destFlags & GTF_DONT_CSE) == 0)
1389     {
1390         dest->gtFlags &= ~(GTF_DONT_CSE);
1391     }
1392     return asgNode;
1393 }
1394
1395 /*****************************************************************************
1396    Given a struct value, and the class handle for that structure, return
1397    the expression for the address for that structure value.
1398
1399    willDeref - whether the caller guarantees that the returned pointer will be dereferenced.
1400 */
1401
1402 GenTreePtr Compiler::impGetStructAddr(GenTreePtr           structVal,
1403                                       CORINFO_CLASS_HANDLE structHnd,
1404                                       unsigned             curLevel,
1405                                       bool                 willDeref)
1406 {
1407     assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd));
1408
1409     var_types type = structVal->TypeGet();
1410
1411     genTreeOps oper = structVal->gtOper;
1412
1413     if (oper == GT_OBJ && willDeref)
1414     {
1415         assert(structVal->gtObj.gtClass == structHnd);
1416         return (structVal->gtObj.Addr());
1417     }
1418     else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY)
1419     {
1420         unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1421
1422         impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1423
1424         // The 'return value' is now the temp itself
1425
1426         type            = genActualType(lvaTable[tmpNum].TypeGet());
1427         GenTreePtr temp = gtNewLclvNode(tmpNum, type);
1428         temp            = gtNewOperNode(GT_ADDR, TYP_BYREF, temp);
1429         return temp;
1430     }
1431     else if (oper == GT_COMMA)
1432     {
1433         assert(structVal->gtOp.gtOp2->gtType == type); // The second operand is the struct
1434
1435         GenTreePtr oldTreeLast = impTreeLast;
1436         structVal->gtOp.gtOp2  = impGetStructAddr(structVal->gtOp.gtOp2, structHnd, curLevel, willDeref);
1437         structVal->gtType      = TYP_BYREF;
1438
1439         if (oldTreeLast != impTreeLast)
1440         {
1441             // Some temp assignment statement was placed on the statement list
1442             // for Op2, but that would be out of order with op1, so we need to
1443             // spill op1 onto the statement list after whatever was last
1444             // before we recursed on Op2 (i.e. before whatever Op2 appended).
1445             impInsertTreeBefore(structVal->gtOp.gtOp1, impCurStmtOffs, oldTreeLast->gtNext);
1446             structVal->gtOp.gtOp1 = gtNewNothingNode();
1447         }
1448
1449         return (structVal);
1450     }
1451
1452     return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1453 }
1454
1455 //------------------------------------------------------------------------
1456 // impNormStructType: Given a (known to be) struct class handle structHnd, normalize its type,
1457 //                    and optionally determine the GC layout of the struct.
1458 //
1459 // Arguments:
1460 //    structHnd       - The class handle for the struct type of interest.
1461 //    gcLayout        - (optional, default nullptr) - a BYTE pointer, allocated by the caller,
1462 //                      into which the gcLayout will be written.
1463 //    pNumGCVars      - (optional, default nullptr) - if non-null, a pointer to an unsigned,
1464 //                      which will be set to the number of GC fields in the struct.
1465 //    pSimdBaseType   - (optional, default nullptr) - if non-null, and the struct is a SIMD
1466 //                      type, set to the SIMD base type
1467 //
1468 // Return Value:
1469 //    The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*).
1470 //    The gcLayout will be returned using the pointers provided by the caller, if non-null.
1471 //    It may also modify the compFloatingPointUsed flag if the type is a SIMD type.
1472 //
1473 // Assumptions:
1474 //    The caller must set gcLayout to nullptr OR ensure that it is large enough
1475 //    (see ICorStaticInfo::getClassGClayout in corinfo.h).
1476 //
1477 // Notes:
1478 //    Normalizing the type involves examining the struct type to determine if it should
1479 //    be modified to one that is handled specially by the JIT, possibly being a candidate
1480 //    for full enregistration, e.g. TYP_SIMD16.
1481
1482 var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
1483                                       BYTE*                gcLayout,
1484                                       unsigned*            pNumGCVars,
1485                                       var_types*           pSimdBaseType)
1486 {
1487     assert(structHnd != NO_CLASS_HANDLE);
1488
1489     const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd);
1490     var_types   structType  = TYP_STRUCT;
1491
1492 #ifdef FEATURE_CORECLR
1493     const bool hasGCPtrs = (structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0;
1494 #else
1495     // Desktop CLR won't report FLG_CONTAINS_GC_PTR for RefAnyClass - need to check explicitly.
1496     const bool        isRefAny    = (structHnd == impGetRefAnyClass());
1497     const bool        hasGCPtrs   = isRefAny || ((structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0);
1498 #endif
1499
1500 #ifdef FEATURE_SIMD
1501     // Check to see if this is a SIMD type.
1502     if (featureSIMD && !hasGCPtrs)
1503     {
1504         unsigned originalSize = info.compCompHnd->getClassSize(structHnd);
1505
1506         if ((originalSize >= minSIMDStructBytes()) && (originalSize <= maxSIMDStructBytes()))
1507         {
1508             unsigned int sizeBytes;
1509             var_types    simdBaseType = getBaseTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
1510             if (simdBaseType != TYP_UNKNOWN)
1511             {
1512                 assert(sizeBytes == originalSize);
1513                 structType = getSIMDTypeForSize(sizeBytes);
1514                 if (pSimdBaseType != nullptr)
1515                 {
1516                     *pSimdBaseType = simdBaseType;
1517                 }
1518 #ifdef _TARGET_AMD64_
1519                 // Amd64: also indicate that we use floating point registers
1520                 compFloatingPointUsed = true;
1521 #endif
1522             }
1523         }
1524     }
1525 #endif // FEATURE_SIMD
1526
1527     // Fetch GC layout info if requested
1528     if (gcLayout != nullptr)
1529     {
1530         unsigned numGCVars = info.compCompHnd->getClassGClayout(structHnd, gcLayout);
1531
1532         // Verify that the quick test up above via the class attributes gave a
1533         // safe view of the type's GCness.
1534         //
1535         // Note there are cases where hasGCPtrs is true but getClassGClayout
1536         // does not report any gc fields.
1537         assert(hasGCPtrs || (numGCVars == 0));
1538
1539         if (pNumGCVars != nullptr)
1540         {
1541             *pNumGCVars = numGCVars;
1542         }
1543     }
1544     else
1545     {
1546         // Can't safely ask for number of GC pointers without also
1547         // asking for layout.
1548         assert(pNumGCVars == nullptr);
1549     }
1550
1551     return structType;
1552 }
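
// A minimal standalone sketch of the normalization decision above (illustrative
// only, not part of the JIT). 'kMinSIMDBytes', 'kMaxSIMDBytes' and
// 'SIMDTypeForSize' are hypothetical stand-ins for minSIMDStructBytes(),
// maxSIMDStructBytes() and getSIMDTypeForSize():
//
//   var_types NormalizeStructTypeSketch(unsigned size, bool hasGCPtrs, var_types simdBaseType)
//   {
//       if (hasGCPtrs || (simdBaseType == TYP_UNKNOWN))
//       {
//           return TYP_STRUCT; // contains GC pointers, or no recognized SIMD base type
//       }
//       if ((size < kMinSIMDBytes) || (size > kMaxSIMDBytes))
//       {
//           return TYP_STRUCT; // size outside the SIMD range
//       }
//       return SIMDTypeForSize(size); // e.g. 16 bytes -> TYP_SIMD16
//   }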
1553
1554 //****************************************************************************
1555 //  Given TYP_STRUCT value 'structVal', make sure it is 'canonical', that is,
1556 //  it is either an OBJ or a MKREFANY node, or a node (e.g. GT_INDEX) that will be morphed.
1557 //
1558 GenTreePtr Compiler::impNormStructVal(GenTreePtr           structVal,
1559                                       CORINFO_CLASS_HANDLE structHnd,
1560                                       unsigned             curLevel,
1561                                       bool                 forceNormalization /*=false*/)
1562 {
1563     assert(forceNormalization || varTypeIsStruct(structVal));
1564     assert(structHnd != NO_CLASS_HANDLE);
1565     var_types structType = structVal->TypeGet();
1566     bool      makeTemp   = false;
1567     if (structType == TYP_STRUCT)
1568     {
1569         structType = impNormStructType(structHnd);
1570     }
1571     bool                 alreadyNormalized = false;
1572     GenTreeLclVarCommon* structLcl         = nullptr;
1573
1574     genTreeOps oper = structVal->OperGet();
1575     switch (oper)
1576     {
1577         // GT_RETURN and GT_MKREFANY don't capture the handle.
1578         case GT_RETURN:
1579             break;
1580         case GT_MKREFANY:
1581             alreadyNormalized = true;
1582             break;
1583
1584         case GT_CALL:
1585             structVal->gtCall.gtRetClsHnd = structHnd;
1586             makeTemp                      = true;
1587             break;
1588
1589         case GT_RET_EXPR:
1590             structVal->gtRetExpr.gtRetClsHnd = structHnd;
1591             makeTemp                         = true;
1592             break;
1593
1594         case GT_ARGPLACE:
1595             structVal->gtArgPlace.gtArgPlaceClsHnd = structHnd;
1596             break;
1597
1598         case GT_INDEX:
1599             // This will be transformed to an OBJ later.
1600             alreadyNormalized                    = true;
1601             structVal->gtIndex.gtStructElemClass = structHnd;
1602             structVal->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(structHnd);
1603             break;
1604
1605         case GT_FIELD:
1606             // Wrap it in a GT_OBJ.
1607             structVal->gtType = structType;
1608             structVal         = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1609             break;
1610
1611         case GT_LCL_VAR:
1612         case GT_LCL_FLD:
1613             structLcl = structVal->AsLclVarCommon();
1614             // Wrap it in a GT_OBJ.
1615             structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1616             __fallthrough;
1617
1618         case GT_OBJ:
1619         case GT_BLK:
1620         case GT_DYN_BLK:
1621         case GT_ASG:
1622             // These should already have the appropriate type.
1623             assert(structVal->gtType == structType);
1624             alreadyNormalized = true;
1625             break;
1626
1627         case GT_IND:
1628             assert(structVal->gtType == structType);
1629             structVal         = gtNewObjNode(structHnd, structVal->gtGetOp1());
1630             alreadyNormalized = true;
1631             break;
1632
1633 #ifdef FEATURE_SIMD
1634         case GT_SIMD:
1635             assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
1636             break;
1637 #endif // FEATURE_SIMD
1638
1639         case GT_COMMA:
1640         {
1641             // The second operand (op2) is the block node.
1642             GenTree* blockNode = structVal->gtOp.gtOp2;
1643             assert(blockNode->gtType == structType);
1644             // It had better be a block node - any others should not occur here.
1645             assert(blockNode->OperIsBlk());
1646
1647             // Sink the GT_COMMA below the blockNode addr.
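            // That is, COMMA(op1, blk(addr)) is rewritten in place to
            // blk(COMMA(op1, addr)), reusing the same nodes, so the struct value
            // is once again rooted at a canonical block node.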
1648             GenTree* blockNodeAddr = blockNode->gtOp.gtOp1;
1649             assert(blockNodeAddr->gtType == TYP_BYREF);
1650             GenTree* commaNode    = structVal;
1651             commaNode->gtType     = TYP_BYREF;
1652             commaNode->gtOp.gtOp2 = blockNodeAddr;
1653             blockNode->gtOp.gtOp1 = commaNode;
1654             structVal             = blockNode;
1655             alreadyNormalized     = true;
1656         }
1657         break;
1658
1659         default:
1660             assert(!"Unexpected node in impNormStructVal()");
1661             break;
1662     }
1663     structVal->gtType  = structType;
1664     GenTree* structObj = structVal;
1665
1666     if (!alreadyNormalized || forceNormalization)
1667     {
1668         if (makeTemp)
1669         {
1670             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1671
1672             impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1673
1674             // The structVal is now the temp itself
1675
1676             structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon();
1677             // TODO-1stClassStructs: Avoid always wrapping in GT_OBJ.
1678             structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structLcl));
1679         }
1680         else if (varTypeIsStruct(structType) && !structVal->OperIsBlk())
1681         {
1682             // Wrap it in a GT_OBJ
1683             structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1684         }
1685     }
1686
1687     if (structLcl != nullptr)
1688     {
1689         // An OBJ on an ADDR(LCL_VAR) can never raise an exception,
1690         // so we don't set GTF_EXCEPT here.
1691         if (!lvaIsImplicitByRefLocal(structLcl->gtLclNum))
1692         {
1693             structObj->gtFlags &= ~GTF_GLOB_REF;
1694         }
1695     }
1696     else
1697     {
1698         // In general an OBJ is an indirection and could raise an exception.
1699         structObj->gtFlags |= GTF_EXCEPT;
1700     }
1701     return (structObj);
1702 }
1703
1704 /******************************************************************************/
1705 // Given a type token, generate code that will evaluate to the correct
1706 // handle representation of that token (type handle, field handle, or method handle)
1707 //
1708 // For most cases, the handle is determined at compile-time, and the code
1709 // generated is simply an embedded handle.
1710 //
1711 // Run-time lookup is required if the enclosing method is shared between instantiations
1712 // and the token refers to formal type parameters whose instantiation is not known
1713 // at compile-time.
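// For example, "typeof(T)" evaluated in a method body shared across
// reference-type instantiations cannot be resolved to a single handle at
// compile-time and therefore needs such a runtime lookup.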
1714 //
1715 GenTreePtr Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1716                                       BOOL*                   pRuntimeLookup /* = NULL */,
1717                                       BOOL                    mustRestoreHandle /* = FALSE */,
1718                                       BOOL                    importParent /* = FALSE */)
1719 {
1720     assert(!fgGlobalMorph);
1721
1722     CORINFO_GENERICHANDLE_RESULT embedInfo;
1723     info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo);
1724
1725     if (pRuntimeLookup)
1726     {
1727         *pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup;
1728     }
1729
1730     if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup)
1731     {
1732         switch (embedInfo.handleType)
1733         {
1734             case CORINFO_HANDLETYPE_CLASS:
1735                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle);
1736                 break;
1737
1738             case CORINFO_HANDLETYPE_METHOD:
1739                 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle);
1740                 break;
1741
1742             case CORINFO_HANDLETYPE_FIELD:
1743                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
1744                     info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle));
1745                 break;
1746
1747             default:
1748                 break;
1749         }
1750     }
1751
1752     return impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
1753                            embedInfo.compileTimeHandle);
1754 }
1755
1756 GenTreePtr Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1757                                      CORINFO_LOOKUP*         pLookup,
1758                                      unsigned                handleFlags,
1759                                      void*                   compileTimeHandle)
1760 {
1761     if (!pLookup->lookupKind.needsRuntimeLookup)
1762     {
1763         // No runtime lookup is required.
1764         // Access is direct or memory-indirect (of a fixed address) reference
1765
1766         CORINFO_GENERIC_HANDLE handle       = nullptr;
1767         void*                  pIndirection = nullptr;
1768         assert(pLookup->constLookup.accessType != IAT_PPVALUE);
1769
1770         if (pLookup->constLookup.accessType == IAT_VALUE)
1771         {
1772             handle = pLookup->constLookup.handle;
1773         }
1774         else if (pLookup->constLookup.accessType == IAT_PVALUE)
1775         {
1776             pIndirection = pLookup->constLookup.addr;
1777         }
1778         return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, 0, nullptr, compileTimeHandle);
1779     }
1780     else if (compIsForInlining())
1781     {
1782         // Don't import runtime lookups when inlining
1783         // Inlining has to be aborted in such a case
1784         compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1785         return nullptr;
1786     }
1787     else
1788     {
1789         // Need to use dictionary-based access which depends on the typeContext
1790         // which is only available at runtime, not at compile-time.
1791
1792         return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle);
1793     }
1794 }
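
// In the non-runtime-lookup path above, the two access types conceptually reduce
// to (a sketch, not the literal trees that gtNewIconEmbHndNode builds):
//
//   IAT_VALUE  : icon<handle>       - the handle itself is embedded in the code
//   IAT_PVALUE : ind(icon<&cell>)   - one indirection through a cell that the
//                                     runtime fills in with the handle
//
// IAT_PPVALUE (a double indirection) is asserted not to occur on this path.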
1795
1796 #ifdef FEATURE_READYTORUN_COMPILER
1797 GenTreePtr Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup,
1798                                                unsigned              handleFlags,
1799                                                void*                 compileTimeHandle)
1800 {
1801     CORINFO_GENERIC_HANDLE handle       = nullptr;
1802     void*                  pIndirection = nullptr;
1803     assert(pLookup->accessType != IAT_PPVALUE);
1804
1805     if (pLookup->accessType == IAT_VALUE)
1806     {
1807         handle = pLookup->handle;
1808     }
1809     else if (pLookup->accessType == IAT_PVALUE)
1810     {
1811         pIndirection = pLookup->addr;
1812     }
1813     return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, 0, nullptr, compileTimeHandle);
1814 }
1815
1816 GenTreePtr Compiler::impReadyToRunHelperToTree(
1817     CORINFO_RESOLVED_TOKEN* pResolvedToken,
1818     CorInfoHelpFunc         helper,
1819     var_types               type,
1820     GenTreeArgList*         args /* =NULL*/,
1821     CORINFO_LOOKUP_KIND*    pGenericLookupKind /* =NULL. Only used with generics */)
1822 {
1823     CORINFO_CONST_LOOKUP lookup;
1824 #if COR_JIT_EE_VERSION > 460
1825     if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup))
1826     {
1827         return nullptr;
1828     }
1829 #else
1830     info.compCompHnd->getReadyToRunHelper(pResolvedToken, helper, &lookup);
1831 #endif
1832
1833     GenTreePtr op1 = gtNewHelperCallNode(helper, type, GTF_EXCEPT, args);
1834
1835     op1->gtCall.setEntryPoint(lookup);
1836
1837     return op1;
1838 }
1839 #endif
1840
1841 GenTreePtr Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
1842 {
1843     GenTreePtr op1 = nullptr;
1844
1845     switch (pCallInfo->kind)
1846     {
1847         case CORINFO_CALL:
1848             op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod);
1849
1850 #ifdef FEATURE_READYTORUN_COMPILER
1851             if (opts.IsReadyToRun())
1852             {
1853                 op1->gtFptrVal.gtEntryPoint          = pCallInfo->codePointerLookup.constLookup;
1854                 op1->gtFptrVal.gtLdftnResolvedToken  = new (this, CMK_Unknown) CORINFO_RESOLVED_TOKEN;
1855                 *op1->gtFptrVal.gtLdftnResolvedToken = *pResolvedToken;
1856             }
1857             else
1858             {
1859                 op1->gtFptrVal.gtEntryPoint.addr = nullptr;
1860             }
1861 #endif
1862             break;
1863
1864         case CORINFO_CALL_CODE_POINTER:
1865             if (compIsForInlining())
1866             {
1867                 // Don't import runtime lookups when inlining
1868                 // Inlining has to be aborted in such a case
1869                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1870                 return nullptr;
1871             }
1872
1873             op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod);
1874             break;
1875
1876         default:
1877             noway_assert(!"unknown call kind");
1878             break;
1879     }
1880
1881     return op1;
1882 }
1883
1884 //------------------------------------------------------------------------
1885 // getRuntimeContextTree: find pointer to context for runtime lookup.
1886 //
1887 // Arguments:
1888 //    kind - lookup kind.
1889 //
1890 // Return Value:
1891 //    Return GenTree pointer to generic shared context.
1892 //
1893 // Notes:
1894 //    Reports the use of the generic context (sets lvaGenericsContextUsed).
1895
1896 GenTreePtr Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind)
1897 {
1898     GenTreePtr ctxTree = nullptr;
1899
1900     // Collectible types require that, for shared generic code, any use of the generic context parameter
1901     // is reported. (This is conservative; in some cases, particularly when the context parameter is
1902     // 'this', we would not need the eager reporting logic.)
1903     lvaGenericsContextUsed = true;
1904
1905     if (kind == CORINFO_LOOKUP_THISOBJ)
1906     {
1907         // this Object
1908         ctxTree = gtNewLclvNode(info.compThisArg, TYP_REF);
1909
1910         // Vtable pointer of this object
1911         ctxTree = gtNewOperNode(GT_IND, TYP_I_IMPL, ctxTree);
1912         ctxTree->gtFlags |= GTF_EXCEPT; // Null-pointer exception
1913         ctxTree->gtFlags |= GTF_IND_INVARIANT;
1914     }
1915     else
1916     {
1917         assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM);
1918
1919         ctxTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); // Exact method descriptor as passed in as last arg
1920     }
1921     return ctxTree;
1922 }
1923
1924 /*****************************************************************************/
1925 /* Import a dictionary lookup to access a handle in code shared between
1926    generic instantiations.
1927    The lookup depends on the typeContext which is only available at
1928    runtime, and not at compile-time.
1929    pLookup->token1 and pLookup->token2 specify the handle that is needed.
1930    The cases are:
1931
1932    1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the
1933       instantiation-specific handle, and the tokens to lookup the handle.
1934    2. pLookup->indirections != CORINFO_USEHELPER :
1935       2a. pLookup->testForNull == false : Dereference the instantiation-specific handle
1936           to get the handle.
1937       2b. pLookup->testForNull == true : Dereference the instantiation-specific handle.
1938           If it is non-NULL, it is the handle required. Else, call a helper
1939           to lookup the handle.
1940  */
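
// For case 2b, the code below builds trees that are roughly equivalent to the
// following sketch (the actual result is a GT_QMARK spilled to a temp):
//
//   slot   = ctx;                        // instantiation-specific context
//   ... walk pRuntimeLookup->indirections / offsets to reach the slot ...
//   handle = *slot;
//   result = (handle != 0) ? handle : helper(ctx, signature);   // slow path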
1941
1942 GenTreePtr Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1943                                             CORINFO_LOOKUP*         pLookup,
1944                                             void*                   compileTimeHandle)
1945 {
1946
1947     // This method can only be called from the importer instance of the Compiler.
1948     // In other words, it cannot be called by the Compiler instance created for the inlinee.
1949     assert(!compIsForInlining());
1950
1951     GenTreePtr ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);
1952
1953 #ifdef FEATURE_READYTORUN_COMPILER
1954     if (opts.IsReadyToRun())
1955     {
1956         return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
1957                                          gtNewArgList(ctxTree), &pLookup->lookupKind);
1958     }
1959 #endif
1960
1961     CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;
1962     // It's available only via the run-time helper function
1963     if (pRuntimeLookup->indirections == CORINFO_USEHELPER)
1964     {
1965         GenTreeArgList* helperArgs =
1966             gtNewArgList(ctxTree, gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, 0,
1967                                                       nullptr, compileTimeHandle));
1968
1969         return gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, GTF_EXCEPT, helperArgs);
1970     }
1971
1972     // Slot pointer
1973     GenTreePtr slotPtrTree = ctxTree;
1974
1975     if (pRuntimeLookup->testForNull)
1976     {
1977         slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
1978                                    nullptr DEBUGARG("impRuntimeLookup slot"));
1979     }
1980
1981     // Apply the repeated indirections
1982     for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
1983     {
1984         if (i != 0)
1985         {
1986             slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
1987             slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
1988             slotPtrTree->gtFlags |= GTF_IND_INVARIANT;
1989         }
1990         if (pRuntimeLookup->offsets[i] != 0)
1991         {
1992             slotPtrTree =
1993                 gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
1994         }
1995     }
1996
1997     // No null test required
1998     if (!pRuntimeLookup->testForNull)
1999     {
2000         if (pRuntimeLookup->indirections == 0)
2001         {
2002             return slotPtrTree;
2003         }
2004
2005         slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2006         slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2007
2008         if (!pRuntimeLookup->testForFixup)
2009         {
2010             return slotPtrTree;
2011         }
2012
2013         impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0"));
2014
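        // The value just loaded may still be an unresolved fixup: in that case its
        // lowest bit is set and (value - 1) points at the location holding the real
        // handle. The QMARK built below uses the value directly when the bit is
        // clear, and otherwise re-loads through (value - 1).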
2015         GenTreePtr op1 = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2016                                       nullptr DEBUGARG("impRuntimeLookup test"));
2017         op1 = impImplicitIorI4Cast(op1, TYP_INT); // downcast the pointer to a TYP_INT on 64-bit targets
2018
2019         // Use a GT_AND to check for the lowest bit and indirect if it is set
2020         GenTreePtr testTree = gtNewOperNode(GT_AND, TYP_INT, op1, gtNewIconNode(1));
2021         GenTreePtr relop    = gtNewOperNode(GT_EQ, TYP_INT, testTree, gtNewIconNode(0));
2022         relop->gtFlags |= GTF_RELOP_QMARK;
2023
2024         op1 = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2025                            nullptr DEBUGARG("impRuntimeLookup indir"));
2026         op1 = gtNewOperNode(GT_ADD, TYP_I_IMPL, op1, gtNewIconNode(-1, TYP_I_IMPL)); // subtract 1 from the pointer
2027         GenTreePtr indirTree = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
2028         GenTreePtr colon     = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL, slotPtrTree, indirTree);
2029
2030         GenTreePtr qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2031
2032         unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark0"));
2033         impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2034         return gtNewLclvNode(tmp, TYP_I_IMPL);
2035     }
2036
2037     assert(pRuntimeLookup->indirections != 0);
2038
2039     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1"));
2040
2041     // Extract the handle
2042     GenTreePtr handle = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2043     handle->gtFlags |= GTF_IND_NONFAULTING;
2044
2045     GenTreePtr handleCopy = impCloneExpr(handle, &handle, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2046                                          nullptr DEBUGARG("impRuntimeLookup typehandle"));
2047
2048     // Call to helper
2049     GenTreeArgList* helperArgs =
2050         gtNewArgList(ctxTree, gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, 0, nullptr,
2051                                                   compileTimeHandle));
2052     GenTreePtr helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, GTF_EXCEPT, helperArgs);
2053
2054     // Check for null and possibly call helper
2055     GenTreePtr relop = gtNewOperNode(GT_NE, TYP_INT, handle, gtNewIconNode(0, TYP_I_IMPL));
2056     relop->gtFlags |= GTF_RELOP_QMARK;
2057
2058     GenTreePtr colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL,
2059                                                          gtNewNothingNode(), // do nothing if nonnull
2060                                                          helperCall);
2061
2062     GenTreePtr qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2063
2064     unsigned tmp;
2065     if (handleCopy->IsLocal())
2066     {
2067         tmp = handleCopy->gtLclVarCommon.gtLclNum;
2068     }
2069     else
2070     {
2071         tmp = lvaGrabTemp(true DEBUGARG("spilling QMark1"));
2072     }
2073
2074     impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2075     return gtNewLclvNode(tmp, TYP_I_IMPL);
2076 }
2077
2078 /******************************************************************************
2079  *  Spills the stack at verCurrentState.esStack[level] and replaces it with a temp.
2080  *  If tnum!=BAD_VAR_NUM, the temp var used to replace the tree is tnum,
2081  *     else a new temp is grabbed.
2082  *  For structs (which can be pushed on the stack using obj, etc),
2083  *  special handling is needed
2084  */
2085
2086 struct RecursiveGuard
2087 {
2088 public:
2089     RecursiveGuard()
2090     {
2091         m_pAddress = nullptr;
2092     }
2093
2094     ~RecursiveGuard()
2095     {
2096         if (m_pAddress)
2097         {
2098             *m_pAddress = false;
2099         }
2100     }
2101
2102     void Init(bool* pAddress, bool bInitialize)
2103     {
2104         assert(pAddress && *pAddress == false && "Recursive guard violation");
2105         m_pAddress = pAddress;
2106
2107         if (bInitialize)
2108         {
2109             *m_pAddress = true;
2110         }
2111     }
2112
2113 protected:
2114     bool* m_pAddress;
2115 };
2116
2117 bool Compiler::impSpillStackEntry(unsigned level,
2118                                   unsigned tnum
2119 #ifdef DEBUG
2120                                   ,
2121                                   bool        bAssertOnRecursion,
2122                                   const char* reason
2123 #endif
2124                                   )
2125 {
2126
2127 #ifdef DEBUG
2128     RecursiveGuard guard;
2129     guard.Init(&impNestedStackSpill, bAssertOnRecursion);
2130 #endif
2131
2132     GenTreePtr tree = verCurrentState.esStack[level].val;
2133
2134     /* Allocate a temp if we haven't been asked to use a particular one */
2135
2136     if (tiVerificationNeeded)
2137     {
2138         // Ignore bad temp requests (they will happen with bad code and will be
2139         // caught when importing the destblock)
2140         if ((tnum != BAD_VAR_NUM && tnum >= lvaCount) && verNeedsVerification())
2141         {
2142             return false;
2143         }
2144     }
2145     else
2146     {
2147         if (tnum != BAD_VAR_NUM && (tnum >= lvaCount))
2148         {
2149             return false;
2150         }
2151     }
2152
2153     if (tnum == BAD_VAR_NUM)
2154     {
2155         tnum = lvaGrabTemp(true DEBUGARG(reason));
2156     }
2157     else if (tiVerificationNeeded && lvaTable[tnum].TypeGet() != TYP_UNDEF)
2158     {
2159         // if verification is needed and tnum's type is incompatible with
2160         // type on that stack, we grab a new temp. This is safe since
2161         // we will throw a verification exception in the dest block.
2162
2163         var_types valTyp = tree->TypeGet();
2164         var_types dstTyp = lvaTable[tnum].TypeGet();
2165
2166         // if the two types are different, we return. This will only happen with bad code and will
2167         // be caught when importing the destblock. We still allow int/byref and float/double differences.
2168         if ((genActualType(valTyp) != genActualType(dstTyp)) &&
2169             !(
2170 #ifndef _TARGET_64BIT_
2171                 (valTyp == TYP_I_IMPL && dstTyp == TYP_BYREF) || (valTyp == TYP_BYREF && dstTyp == TYP_I_IMPL) ||
2172 #endif // !_TARGET_64BIT_
2173                 (varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp))))
2174         {
2175             if (verNeedsVerification())
2176             {
2177                 return false;
2178             }
2179         }
2180     }
2181
2182     /* Assign the spilled entry to the temp */
2183     impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level);
2184
2185     // The tree type may be modified by impAssignTempGen, so use the type of the lclVar.
2186     var_types  type                    = genActualType(lvaTable[tnum].TypeGet());
2187     GenTreePtr temp                    = gtNewLclvNode(tnum, type);
2188     verCurrentState.esStack[level].val = temp;
2189
2190     return true;
2191 }
2192
2193 /*****************************************************************************
2194  *
2195  *  Ensure that the stack has only spilled values
2196  */
2197
2198 void Compiler::impSpillStackEnsure(bool spillLeaves)
2199 {
2200     assert(!spillLeaves || opts.compDbgCode);
2201
2202     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2203     {
2204         GenTreePtr tree = verCurrentState.esStack[level].val;
2205
2206         if (!spillLeaves && tree->OperIsLeaf())
2207         {
2208             continue;
2209         }
2210
2211         // Temps introduced by the importer itself don't need to be spilled
2212
2213         bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) && (tree->gtLclVarCommon.gtLclNum >= info.compLocalsCount);
2214
2215         if (isTempLcl)
2216         {
2217             continue;
2218         }
2219
2220         impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure"));
2221     }
2222 }
2223
2224 void Compiler::impSpillEvalStack()
2225 {
2226     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2227     {
2228         impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack"));
2229     }
2230 }
2231
2232 /*****************************************************************************
2233  *
2234  *  If the stack contains any trees with side effects in them, assign those
2235  *  trees to temps and append the assignments to the statement list.
2236  *  On return the stack is guaranteed to be empty.
2237  */
2238
2239 inline void Compiler::impEvalSideEffects()
2240 {
2241     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects"));
2242     verCurrentState.esStackDepth = 0;
2243 }
2244
2245 /*****************************************************************************
2246  *
2247  *  If the stack contains any trees with side effects in them, assign those
2248  *  trees to temps and replace them on the stack with refs to their temps.
2249  *  [0..chkLevel) is the portion of the stack which will be checked and spilled.
2250  */
2251
2252 inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason))
2253 {
2254     assert(chkLevel != (unsigned)CHECK_SPILL_NONE);
2255
2256     /* Before we make any appends to the tree list we must spill the
2257      * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */
2258
2259     impSpillSpecialSideEff();
2260
2261     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
2262     {
2263         chkLevel = verCurrentState.esStackDepth;
2264     }
2265
2266     assert(chkLevel <= verCurrentState.esStackDepth);
2267
2268     unsigned spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT;
2269
2270     for (unsigned i = 0; i < chkLevel; i++)
2271     {
2272         GenTreePtr tree = verCurrentState.esStack[i].val;
2273
2274         GenTreePtr lclVarTree;
2275
2276         if ((tree->gtFlags & spillFlags) != 0 ||
2277             (spillGlobEffects &&                        // Only consider the following when  spillGlobEffects == TRUE
2278              !impIsAddressInLocal(tree, &lclVarTree) && // No need to spill the GT_ADDR node on a local.
2279              gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or
2280                                            // lvAddrTaken flag.
2281         {
2282             impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason));
2283         }
2284     }
2285 }
2286
2287 /*****************************************************************************
2288  *
2289  *  If the stack contains any trees with special side effects in them, assign
2290  *  those trees to temps and replace them on the stack with refs to their temps.
2291  */
2292
2293 inline void Compiler::impSpillSpecialSideEff()
2294 {
2295     // Only exception objects need to be carefully handled
2296
2297     if (!compCurBB->bbCatchTyp)
2298     {
2299         return;
2300     }
2301
2302     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2303     {
2304         GenTreePtr tree = verCurrentState.esStack[level].val;
2305         // If the subtree contains an exception object (GT_CATCH_ARG), spill this stack entry.
2306         if (gtHasCatchArg(tree))
2307         {
2308             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff"));
2309         }
2310     }
2311 }
2312
2313 /*****************************************************************************
2314  *
2315  *  Spill all stack references to value classes (TYP_STRUCT nodes)
2316  */
2317
2318 void Compiler::impSpillValueClasses()
2319 {
2320     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2321     {
2322         GenTreePtr tree = verCurrentState.esStack[level].val;
2323
2324         if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT)
2325         {
2326             // Tree walk was aborted, which means that we found a
2327             // value class on the stack.  Need to spill that
2328             // stack entry.
2329
2330             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses"));
2331         }
2332     }
2333 }
2334
2335 /*****************************************************************************
2336  *
2337  *  Callback that checks if a tree node is TYP_STRUCT
2338  */
2339
2340 Compiler::fgWalkResult Compiler::impFindValueClasses(GenTreePtr* pTree, fgWalkData* data)
2341 {
2342     fgWalkResult walkResult = WALK_CONTINUE;
2343
2344     if ((*pTree)->gtType == TYP_STRUCT)
2345     {
2346         // Abort the walk and indicate that we found a value class
2347
2348         walkResult = WALK_ABORT;
2349     }
2350
2351     return walkResult;
2352 }
2353
2354 /*****************************************************************************
2355  *
2356  *  If the stack contains any trees with references to local #lclNum, assign
2357  *  those trees to temps and replace their place on the stack with refs to
2358  *  their temps.
2359  */
2360
2361 void Compiler::impSpillLclRefs(ssize_t lclNum)
2362 {
2363     /* Before we make any appends to the tree list we must spill the
2364      * "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */
2365
2366     impSpillSpecialSideEff();
2367
2368     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2369     {
2370         GenTreePtr tree = verCurrentState.esStack[level].val;
2371
2372         /* If the tree may throw an exception, and the block has a handler,
2373            then we need to spill assignments to the local if the local is
2374            live on entry to the handler.
2375            Just spill 'em all without considering the liveness */
2376
2377         bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT));
2378
2379         /* Skip the tree if it doesn't have an affected reference,
2380            unless xcptnCaught */
2381
2382         if (xcptnCaught || gtHasRef(tree, lclNum, false))
2383         {
2384             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs"));
2385         }
2386     }
2387 }
2388
2389 /*****************************************************************************
2390  *
2391  *  Push catch arg onto the stack.
2392  *  If there are jumps to the beginning of the handler, insert basic block
2393  *  and spill catch arg to a temp. Update the handler block if necessary.
2394  *
2395  *  Returns the basic block of the actual handler.
2396  */
2397
2398 BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd)
2399 {
2400     // Do not inject the basic block twice on reimport. This should be
2401     // hit only under JIT stress. See if the block is the one we injected.
2402     // Note that EH canonicalization can inject internal blocks here. We might
2403     // be able to re-use such a block (but we don't, right now).
2404     if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET)) ==
2405         (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET))
2406     {
2407         GenTreePtr tree = hndBlk->bbTreeList;
2408
2409         if (tree != nullptr && tree->gtOper == GT_STMT)
2410         {
2411             tree = tree->gtStmt.gtStmtExpr;
2412             assert(tree != nullptr);
2413
2414             if ((tree->gtOper == GT_ASG) && (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
2415                 (tree->gtOp.gtOp2->gtOper == GT_CATCH_ARG))
2416             {
2417                 tree = gtNewLclvNode(tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum, TYP_REF);
2418
2419                 impPushOnStack(tree, typeInfo(TI_REF, clsHnd));
2420
2421                 return hndBlk->bbNext;
2422             }
2423         }
2424
2425         // If we get here, it must have been some other kind of internal block. It's possible that
2426         // someone prepended something to our injected block, but that's unlikely.
2427     }
2428
2429     /* Push the exception address value on the stack */
2430     GenTreePtr arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF);
2431
2432     /* Mark the node as having a side-effect - i.e. cannot be
2433      * moved around since it is tied to a fixed location (EAX) */
2434     arg->gtFlags |= GTF_ORDER_SIDEEFF;
2435
2436     /* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */
2437     if (hndBlk->bbRefs > 1 || compStressCompile(STRESS_CATCH_ARG, 5))
2438     {
2439         if (hndBlk->bbRefs == 1)
2440         {
2441             hndBlk->bbRefs++;
2442         }
2443
2444         /* Create extra basic block for the spill */
2445         BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true);
2446         newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET;
2447         newBlk->setBBWeight(hndBlk->bbWeight);
2448         newBlk->bbCodeOffs = hndBlk->bbCodeOffs;
2449
2450         /* Account for the new link we are about to create */
2451         hndBlk->bbRefs++;
2452
2453         /* Spill into a temp */
2454         unsigned tempNum         = lvaGrabTemp(false DEBUGARG("SpillCatchArg"));
2455         lvaTable[tempNum].lvType = TYP_REF;
2456         arg                      = gtNewTempAssign(tempNum, arg);
2457
2458         hndBlk->bbStkTempsIn = tempNum;
2459
2460         /* Report the debug info. impImportBlockCode won't treat
2461          * the actual handler as an exception block and thus won't do it for us. */
2462         if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
2463         {
2464             impCurStmtOffs = newBlk->bbCodeOffs | IL_OFFSETX_STKBIT;
2465             arg            = gtNewStmt(arg, impCurStmtOffs);
2466         }
2467
2468         fgInsertStmtAtEnd(newBlk, arg);
2469
2470         arg = gtNewLclvNode(tempNum, TYP_REF);
2471     }
2472
2473     impPushOnStack(arg, typeInfo(TI_REF, clsHnd));
2474
2475     return hndBlk;
2476 }
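
// Net effect when the spill path above is taken (the handler has multiple
// incoming jumps, or the stress mode forces it): a new block is inserted just
// before the handler that performs
//
//   tmp = CATCH_ARG;        // in newBlk
//
// and the handler itself then starts with lclVar<tmp> on the stack instead of
// the raw GT_CATCH_ARG node.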
2477
2478 /*****************************************************************************
2479  *
2480  *  Given a tree, clone it. *pClone is set to the cloned tree.
2481  *  Returns the original tree if the cloning was easy,
2482  *   else returns the temp to which the tree had to be spilled.
2483  *  If the tree has side-effects, it will be spilled to a temp.
2484  */
2485
2486 GenTreePtr Compiler::impCloneExpr(GenTreePtr           tree,
2487                                   GenTreePtr*          pClone,
2488                                   CORINFO_CLASS_HANDLE structHnd,
2489                                   unsigned             curLevel,
2490                                   GenTreePtr* pAfterStmt DEBUGARG(const char* reason))
2491 {
2492     if (!(tree->gtFlags & GTF_GLOB_EFFECT))
2493     {
2494         GenTreePtr clone = gtClone(tree, true);
2495
2496         if (clone)
2497         {
2498             *pClone = clone;
2499             return tree;
2500         }
2501     }
2502
2503     /* Store the operand in a temp and return the temp */
2504
2505     unsigned temp = lvaGrabTemp(true DEBUGARG(reason));
2506
2507     // impAssignTempGen() may change tree->gtType to TYP_VOID for calls which
2508     // return a struct type. It also may modify the struct type to a more
2509     // specialized type (e.g. a SIMD type).  So we will get the type from
2510     // the lclVar AFTER calling impAssignTempGen().
2511
2512     impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtOffs);
2513     var_types type = genActualType(lvaTable[temp].TypeGet());
2514
2515     *pClone = gtNewLclvNode(temp, type);
2516     return gtNewLclvNode(temp, type);
2517 }
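
// Usage note: the caller gets back two trees that may each be evaluated exactly
// once - the return value and *pClone. If the expression had to be spilled, both
// are GT_LCL_VAR reads of the same temp; otherwise the original tree and a
// gtClone copy are returned.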
2518
2519 /*****************************************************************************
2520  * Remember the IL offset (including stack-empty info) for the trees we will
2521  * generate now.
2522  */
2523
2524 inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs)
2525 {
2526     if (compIsForInlining())
2527     {
2528         GenTreePtr callStmt = impInlineInfo->iciStmt;
2529         assert(callStmt->gtOper == GT_STMT);
2530         impCurStmtOffs = callStmt->gtStmt.gtStmtILoffsx;
2531     }
2532     else
2533     {
2534         assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2535         IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2536         impCurStmtOffs    = offs | stkBit;
2537     }
2538 }
2539
2540 /*****************************************************************************
2541  * Returns current IL offset with stack-empty and call-instruction info incorporated
2542  */
2543 inline IL_OFFSETX Compiler::impCurILOffset(IL_OFFSET offs, bool callInstruction)
2544 {
2545     if (compIsForInlining())
2546     {
2547         return BAD_IL_OFFSET;
2548     }
2549     else
2550     {
2551         assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2552         IL_OFFSETX stkBit             = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2553         IL_OFFSETX callInstructionBit = callInstruction ? IL_OFFSETX_CALLINSTRUCTIONBIT : 0;
2554         return offs | stkBit | callInstructionBit;
2555     }
2556 }
2557
2558 /*****************************************************************************
2559  *
2560  *  Remember the instr offset for the statements
2561  *
2562  *  When we do impAppendTree(tree), we can't set tree->gtStmtLastILoffs to
2563  *  impCurOpcOffs, if the append was done because of a partial stack spill,
2564  *  as some of the trees corresponding to code up to impCurOpcOffs might
2565  *  still be sitting on the stack.
2566  *  So we delay marking of gtStmtLastILoffs until impNoteLastILoffs().
2567  *  This should be called when an opcode finally/explicitly causes
2568  *  impAppendTree(tree) to be called (as opposed to being called because of
2569  *  a spill caused by the opcode)
2570  */
2571
2572 #ifdef DEBUG
2573
2574 void Compiler::impNoteLastILoffs()
2575 {
2576     if (impLastILoffsStmt == nullptr)
2577     {
2578         // We should have added a statement for the current basic block
2579         // Is this assert correct ?
2580
2581         assert(impTreeLast);
2582         assert(impTreeLast->gtOper == GT_STMT);
2583
2584         impTreeLast->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2585     }
2586     else
2587     {
2588         impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2589         impLastILoffsStmt                          = nullptr;
2590     }
2591 }
2592
2593 #endif // DEBUG
2594
2595 /*****************************************************************************
2596  * We don't create any GenTree (excluding spills) for a branch.
2597  * For debugging info, we need a placeholder so that we can note
2598  * the IL offset in gtStmt.gtStmtOffs. So append an empty statement.
2599  */
2600
2601 void Compiler::impNoteBranchOffs()
2602 {
2603     if (opts.compDbgCode)
2604     {
2605         impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
2606     }
2607 }
2608
2609 /*****************************************************************************
2610  * Locate the next stmt boundary for which we need to record info.
2611  * We will have to spill the stack at such boundaries if it is not
2612  * already empty.
2613  * Returns the next stmt boundary (after the start of the block)
2614  */
2615
2616 unsigned Compiler::impInitBlockLineInfo()
2617 {
2618     /* Assume the block does not correspond with any IL offset. This prevents
2619        us from reporting extra offsets. Extra mappings can cause confusing
2620        stepping, especially if the extra mapping is a jump-target, and the
2621        debugger does not ignore extra mappings, but instead rewinds to the
2622        nearest known offset */
2623
2624     impCurStmtOffsSet(BAD_IL_OFFSET);
2625
2626     if (compIsForInlining())
2627     {
2628         return ~0;
2629     }
2630
2631     IL_OFFSET blockOffs = compCurBB->bbCodeOffs;
2632
2633     if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES))
2634     {
2635         impCurStmtOffsSet(blockOffs);
2636     }
2637
2638     if (false && (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES))
2639     {
2640         impCurStmtOffsSet(blockOffs);
2641     }
2642
2643     /* Always report IL offset 0, or some tests get confused.
2644        Probably a good idea anyway */
2645
2646     if (blockOffs == 0)
2647     {
2648         impCurStmtOffsSet(blockOffs);
2649     }
2650
2651     if (!info.compStmtOffsetsCount)
2652     {
2653         return ~0;
2654     }
2655
2656     /* Find the lowest explicit stmt boundary within the block */
2657
2658     /* Start looking at an entry that is based on our instr offset */
2659
2660     unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize;
2661
2662     if (index >= info.compStmtOffsetsCount)
2663     {
2664         index = info.compStmtOffsetsCount - 1;
2665     }
2666
2667     /* If we've guessed too far, back up */
2668
2669     while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs)
2670     {
2671         index--;
2672     }
2673
2674     /* If we guessed short, advance ahead */
2675
2676     while (info.compStmtOffsets[index] < blockOffs)
2677     {
2678         index++;
2679
2680         if (index == info.compStmtOffsetsCount)
2681         {
2682             return info.compStmtOffsetsCount;
2683         }
2684     }
2685
2686     assert(index < info.compStmtOffsetsCount);
2687
2688     if (info.compStmtOffsets[index] == blockOffs)
2689     {
2690         /* There is an explicit boundary for the start of this basic block.
2691            So we will start with bbCodeOffs. Else we will wait until we
2692            get to the next explicit boundary */
2693
2694         impCurStmtOffsSet(blockOffs);
2695
2696         index++;
2697     }
2698
2699     return index;
2700 }
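
// Example of the starting-guess logic above: with info.compILCodeSize == 100,
// info.compStmtOffsetsCount == 4 and blockOffs == 60, the initial guess is
// index = (4 * 60) / 100 == 2; the two loops then move the index backward or
// forward until compStmtOffsets[index] is the first boundary at or after blockOffs.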
2701
2702 /*****************************************************************************/
2703
2704 static inline bool impOpcodeIsCallOpcode(OPCODE opcode)
2705 {
2706     switch (opcode)
2707     {
2708         case CEE_CALL:
2709         case CEE_CALLI:
2710         case CEE_CALLVIRT:
2711             return true;
2712
2713         default:
2714             return false;
2715     }
2716 }
2717
2718 /*****************************************************************************/
2719
2720 static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode)
2721 {
2722     switch (opcode)
2723     {
2724         case CEE_CALL:
2725         case CEE_CALLI:
2726         case CEE_CALLVIRT:
2727         case CEE_JMP:
2728         case CEE_NEWOBJ:
2729         case CEE_NEWARR:
2730             return true;
2731
2732         default:
2733             return false;
2734     }
2735 }
2736
2737 /*****************************************************************************/
2738
2739 // One might think it is worth caching these values, but results indicate
2740 // that it isn't.
2741 // In addition, caching them causes SuperPMI to be unable to completely
2742 // encapsulate an individual method context.
2743 CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass()
2744 {
2745     CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
2746     assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr);
2747     return refAnyClass;
2748 }
2749
2750 CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass()
2751 {
2752     CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE);
2753     assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr);
2754     return typeHandleClass;
2755 }
2756
2757 CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle()
2758 {
2759     CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE);
2760     assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr);
2761     return argIteratorClass;
2762 }
2763
2764 CORINFO_CLASS_HANDLE Compiler::impGetStringClass()
2765 {
2766     CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING);
2767     assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr);
2768     return stringClass;
2769 }
2770
2771 CORINFO_CLASS_HANDLE Compiler::impGetObjectClass()
2772 {
2773     CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT);
2774     assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr);
2775     return objectClass;
2776 }
2777
2778 /*****************************************************************************
2779  *  "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we
2780  *  set its type to TYP_BYREF when we create it. We know if it can be
2781  *  changed to TYP_I_IMPL only at the point where we use it
2782  */
2783
2784 /* static */
2785 void Compiler::impBashVarAddrsToI(GenTreePtr tree1, GenTreePtr tree2)
2786 {
2787     if (tree1->IsVarAddr())
2788     {
2789         tree1->gtType = TYP_I_IMPL;
2790     }
2791
2792     if (tree2 && tree2->IsVarAddr())
2793     {
2794         tree2->gtType = TYP_I_IMPL;
2795     }
2796 }
2797
2798 /*****************************************************************************
2799  *  TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want
2800  *  to make that an explicit cast in our trees, so any implicit casts that
2801  *  exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are
2802  *  turned into explicit casts here.
2803  *  We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0)
2804  */
2805
2806 GenTreePtr Compiler::impImplicitIorI4Cast(GenTreePtr tree, var_types dstTyp)
2807 {
2808     var_types currType   = genActualType(tree->gtType);
2809     var_types wantedType = genActualType(dstTyp);
2810
2811     if (wantedType != currType)
2812     {
2813         // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
2814         if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp))
2815         {
2816             if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->gtIntCon.gtIconVal == 0)))
2817             {
2818                 tree->gtType = TYP_I_IMPL;
2819             }
2820         }
2821 #ifdef _TARGET_64BIT_
2822         else if (varTypeIsI(wantedType) && (currType == TYP_INT))
2823         {
2824             // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF
2825             tree = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
2826         }
2827         else if ((wantedType == TYP_INT) && varTypeIsI(currType))
2828         {
2829             // Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT
2830             tree = gtNewCastNode(TYP_INT, tree, TYP_INT);
2831         }
2832 #endif // _TARGET_64BIT_
2833     }
2834
2835     return tree;
2836 }
2837
2838 /*****************************************************************************
2839  *  TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases,
2840  *  but we want to make that an explicit cast in our trees, so any implicit casts
2841  *  that exist in the IL are turned into explicit casts here.
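 *  For illustration: a TYP_FLOAT value used where the signature expects TYP_DOUBLE
 *  (or vice versa) gets an explicit GT_CAST to the destination floating-point type.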
2842  */
2843
2844 GenTreePtr Compiler::impImplicitR4orR8Cast(GenTreePtr tree, var_types dstTyp)
2845 {
2846 #ifndef LEGACY_BACKEND
2847     if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType))
2848     {
2849         tree = gtNewCastNode(dstTyp, tree, dstTyp);
2850     }
2851 #endif // !LEGACY_BACKEND
2852
2853     return tree;
2854 }
2855
2856 //------------------------------------------------------------------------
2857 // impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray
2858 //    with a GT_COPYBLK node.
2859 //
2860 // Arguments:
2861 //    sig - The InitializeArray signature.
2862 //
2863 // Return Value:
2864 //    A pointer to the newly created GT_COPYBLK node if the replacement succeeds or
2865 //    nullptr otherwise.
2866 //
2867 // Notes:
2868 //    The function recognizes the following IL pattern:
2869 //      ldc <length> or a list of ldc <lower bound>/<length>
2870 //      newarr or newobj
2871 //      dup
2872 //      ldtoken <field handle>
2873 //      call InitializeArray
2874 //    The lower bounds need not be constant except when the array rank is 1.
2875 //    The function recognizes all kinds of arrays thus enabling a small runtime
2876 //    such as CoreRT to skip providing an implementation for InitializeArray.
2877
2878 GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
2879 {
2880     assert(sig->numArgs == 2);
2881
2882     GenTreePtr fieldTokenNode = impStackTop(0).val;
2883     GenTreePtr arrayLocalNode = impStackTop(1).val;
2884
2885     //
2886     // Verify that the field token is known and valid.  Note that it's also
2887     // possible for the token to come from reflection, in which case we cannot do
2888     // the optimization and must therefore revert to calling the helper.  You can
2889     // see an example of this in bvt\DynIL\initarray2.exe (in Main).
2890     //
2891
2892     // Check to see if the ldtoken helper call is what we see here.
2893     if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->gtCall.gtCallType != CT_HELPER) ||
2894         (fieldTokenNode->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
2895     {
2896         return nullptr;
2897     }
2898
2899     // Strip helper call away
2900     fieldTokenNode = fieldTokenNode->gtCall.gtCallArgs->Current();
2901
2902     if (fieldTokenNode->gtOper == GT_IND)
2903     {
2904         fieldTokenNode = fieldTokenNode->gtOp.gtOp1;
2905     }
2906
2907     // Check for constant
2908     if (fieldTokenNode->gtOper != GT_CNS_INT)
2909     {
2910         return nullptr;
2911     }
2912
2913     CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->gtIntCon.gtCompileTimeHandle;
2914     if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
2915     {
2916         return nullptr;
2917     }
2918
2919     //
2920     // We need to get the number of elements in the array and the size of each element.
2921     // We verify that the newarr statement is exactly what we expect it to be.
2922     // If it's not then we just return NULL and we don't optimize this call
2923     //
2924
2925     //
2926     // It is possible that we don't have any statements in the block yet
2927     //
2928     if (impTreeLast->gtOper != GT_STMT)
2929     {
2930         assert(impTreeLast->gtOper == GT_BEG_STMTS);
2931         return nullptr;
2932     }
2933
2934     //
2935     // We start by looking at the last statement, making sure it's an assignment, and
2936     // that the target of the assignment is the array passed to InitializeArray.
2937     //
2938     GenTreePtr arrayAssignment = impTreeLast->gtStmt.gtStmtExpr;
2939     if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->gtOp.gtOp1->gtOper != GT_LCL_VAR) ||
2940         (arrayLocalNode->gtOper != GT_LCL_VAR) ||
2941         (arrayAssignment->gtOp.gtOp1->gtLclVarCommon.gtLclNum != arrayLocalNode->gtLclVarCommon.gtLclNum))
2942     {
2943         return nullptr;
2944     }
2945
2946     //
2947     // Make sure that the object being assigned is a helper call.
2948     //
2949
2950     GenTreePtr newArrayCall = arrayAssignment->gtOp.gtOp2;
2951     if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->gtCall.gtCallType != CT_HELPER))
2952     {
2953         return nullptr;
2954     }
2955
2956     //
2957     // Verify that it is one of the new array helpers.
2958     //
2959
2960     bool isMDArray = false;
2961
2962     if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) &&
2963         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) &&
2964         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) &&
2965         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8)
2966 #ifdef FEATURE_READYTORUN_COMPILER
2967         && newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)
2968 #endif
2969             )
2970     {
2971 #if COR_JIT_EE_VERSION > 460
2972         if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR_NONVARARG))
2973         {
2974             return nullptr;
2975         }
2976
2977         isMDArray = true;
2978 #endif
2979     }
2980
2981     CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->gtCall.compileTimeHelperArgumentHandle;
2982
2983     //
2984     // Make sure we found a compile time handle to the array
2985     //
2986
2987     if (!arrayClsHnd)
2988     {
2989         return nullptr;
2990     }
2991
2992     unsigned rank = 0;
2993     S_UINT32 numElements;
2994
2995     if (isMDArray)
2996     {
2997         rank = info.compCompHnd->getArrayRank(arrayClsHnd);
2998
2999         if (rank == 0)
3000         {
3001             return nullptr;
3002         }
3003
3004         GenTreeArgList* tokenArg = newArrayCall->gtCall.gtCallArgs;
3005         assert(tokenArg != nullptr);
3006         GenTreeArgList* numArgsArg = tokenArg->Rest();
3007         assert(numArgsArg != nullptr);
3008         GenTreeArgList* argsArg = numArgsArg->Rest();
3009         assert(argsArg != nullptr);
3010
3011         //
3012         // The number of arguments should be a constant between 1 and 64. The rank can't be 0
3013         // so at least one length must be present and the rank can't exceed 32 so there can
3014         // be at most 64 arguments - 32 lengths and 32 lower bounds.
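        // For illustration: a rank-2 array created with lower bounds passes 4 arguments
        // (lowerBound0, length0, lowerBound1, length1), i.e. numArgs == rank * 2; without
        // lower bounds it passes only (length0, length1), i.e. numArgs == rank.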
3015         //
3016
3017         if ((!numArgsArg->Current()->IsCnsIntOrI()) || (numArgsArg->Current()->AsIntCon()->IconValue() < 1) ||
3018             (numArgsArg->Current()->AsIntCon()->IconValue() > 64))
3019         {
3020             return nullptr;
3021         }
3022
3023         unsigned numArgs = static_cast<unsigned>(numArgsArg->Current()->AsIntCon()->IconValue());
3024         bool     lowerBoundsSpecified;
3025
3026         if (numArgs == rank * 2)
3027         {
3028             lowerBoundsSpecified = true;
3029         }
3030         else if (numArgs == rank)
3031         {
3032             lowerBoundsSpecified = false;
3033
3034             //
3035             // If the rank is 1 and a lower bound isn't specified then the runtime creates
3036             // a SDArray. Note that even if a lower bound is specified it can be 0 and then
3037             // we get a SDArray as well, see the for loop below.
3038             //
3039
3040             if (rank == 1)
3041             {
3042                 isMDArray = false;
3043             }
3044         }
3045         else
3046         {
3047             return nullptr;
3048         }
3049
3050         //
3051         // The rank is known to be at least 1 so we can start with numElements being 1
3052         // to avoid the need to special case the first dimension.
3053         //
3054
3055         numElements = S_UINT32(1);
3056
3057         struct Match
3058         {
3059             static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3060             {
3061                 return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) &&
3062                        IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3063             }
3064
3065             static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3066             {
3067                 return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) &&
3068                        (tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) &&
3069                        IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3070             }
3071
3072             static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs)
3073             {
3074                 return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) &&
3075                        (tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs);
3076             }
3077
3078             static bool IsComma(GenTree* tree)
3079             {
3080                 return (tree != nullptr) && (tree->OperGet() == GT_COMMA);
3081             }
3082         };
3083
3084         unsigned argIndex = 0;
3085         GenTree* comma;
3086
3087         for (comma = argsArg->Current(); Match::IsComma(comma); comma = comma->gtGetOp2())
3088         {
3089             if (lowerBoundsSpecified)
3090             {
3091                 //
3092                 // In general lower bounds can be ignored because they're not needed to
3093                 // calculate the total number of elements. But for single dimensional arrays
3094                 // we need to know if the lower bound is 0 because in this case the runtime
3095                 // creates a SDArray and this affects the way the array data offset is calculated.
3096                 //
3097
3098                 if (rank == 1)
3099                 {
3100                     GenTree* lowerBoundAssign = comma->gtGetOp1();
3101                     assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs));
3102                     GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2();
3103
3104                     if (lowerBoundNode->IsIntegralConst(0))
3105                     {
3106                         isMDArray = false;
3107                     }
3108                 }
3109
3110                 comma = comma->gtGetOp2();
3111                 argIndex++;
3112             }
3113
3114             GenTree* lengthNodeAssign = comma->gtGetOp1();
3115             assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs));
3116             GenTree* lengthNode = lengthNodeAssign->gtGetOp2();
3117
3118             if (!lengthNode->IsCnsIntOrI())
3119             {
3120                 return nullptr;
3121             }
3122
3123             numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue());
3124             argIndex++;
3125         }
3126
3127         assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs));
3128
3129         if (argIndex != numArgs)
3130         {
3131             return nullptr;
3132         }
3133     }
3134     else
3135     {
3136         //
3137         // Make sure there are exactly two arguments:  the array class and
3138         // the number of elements.
3139         //
3140
3141         GenTreePtr arrayLengthNode;
3142
3143         GenTreeArgList* args = newArrayCall->gtCall.gtCallArgs;
3144 #ifdef FEATURE_READYTORUN_COMPILER
3145         if (newArrayCall->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1))
3146         {
3147             // Array length is 1st argument for readytorun helper
3148             arrayLengthNode = args->Current();
3149         }
3150         else
3151 #endif
3152         {
3153             // Array length is 2nd argument for regular helper
3154             arrayLengthNode = args->Rest()->Current();
3155         }
3156
3157         //
3158         // Make sure that the number of elements looks valid.
3159         //
3160         if (arrayLengthNode->gtOper != GT_CNS_INT)
3161         {
3162             return nullptr;
3163         }
3164
3165         numElements = S_SIZE_T(arrayLengthNode->gtIntCon.gtIconVal);
3166
3167         if (!info.compCompHnd->isSDArray(arrayClsHnd))
3168         {
3169             return nullptr;
3170         }
3171     }
3172
3173     CORINFO_CLASS_HANDLE elemClsHnd;
3174     var_types            elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd));
3175
3176     //
3177     // Note that genTypeSize will return zero for non primitive types, which is exactly
3178     // what we want (size will then be 0, and we will catch this in the conditional below).
3179     // Note that we don't expect this to fail for valid binaries, so we assert in the
3180     // non-verification case (the verification case should not assert but rather correctly
3181     // handle bad binaries).  This assert is not guarding any specific invariant, but rather
3182     // saying that we don't expect this to happen, and if it is hit, we need to investigate
3183     // why.
3184     //
3185
3186     S_UINT32 elemSize(genTypeSize(elementType));
3187     S_UINT32 size = elemSize * S_UINT32(numElements);
3188
3189     if (size.IsOverflow())
3190     {
3191         return nullptr;
3192     }
3193
3194     if ((size.Value() == 0) || (varTypeIsGC(elementType)))
3195     {
3196         assert(verNeedsVerification());
3197         return nullptr;
3198     }
3199
3200     void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value());
3201     if (!initData)
3202     {
3203         return nullptr;
3204     }
3205
3206     //
3207     // At this point we are ready to commit to implementing the InitializeArray
3208     // intrinsic using a struct assignment.  Pop the arguments from the stack and
3209     // return the struct assignment node.
3210     //
3211
3212     impPopStack();
3213     impPopStack();
3214
3215     const unsigned blkSize = size.Value();
3216     GenTreePtr     dst;
3217
3218     if (isMDArray)
3219     {
3220         unsigned dataOffset = eeGetMDArrayDataOffset(elementType, rank);
3221
3222         dst = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
3223     }
3224     else
3225     {
3226         dst = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewIndexRef(elementType, arrayLocalNode, gtNewIconNode(0)));
3227     }
3228     GenTreePtr blk     = gtNewBlockVal(dst, blkSize);
3229     GenTreePtr srcAddr = gtNewIconHandleNode((size_t)initData, GTF_ICON_STATIC_HDL);
3230     GenTreePtr src     = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
3231
3232     return gtNewBlkOpNode(blk,     // dst
3233                           src,     // src
3234                           blkSize, // size
3235                           false,   // volatil
3236                           true);   // copyBlock
3237 }
3238
3239 /*****************************************************************************/
3240 // Returns the GenTree that should be used to do the intrinsic instead of the call.
3241 // Returns NULL if an intrinsic cannot be used
3242
3243 GenTreePtr Compiler::impIntrinsic(CORINFO_CLASS_HANDLE  clsHnd,
3244                                   CORINFO_METHOD_HANDLE method,
3245                                   CORINFO_SIG_INFO*     sig,
3246                                   int                   memberRef,
3247                                   bool                  readonlyCall,
3248                                   bool                  tailCall,
3249                                   CorInfoIntrinsics*    pIntrinsicID)
3250 {
3251     bool mustExpand = false;
3252 #if COR_JIT_EE_VERSION > 460
3253     CorInfoIntrinsics intrinsicID = info.compCompHnd->getIntrinsicID(method, &mustExpand);
3254 #else
3255     CorInfoIntrinsics intrinsicID = info.compCompHnd->getIntrinsicID(method);
3256 #endif
3257     *pIntrinsicID = intrinsicID;
3258
3259 #ifndef _TARGET_ARM_
3260     genTreeOps interlockedOperator;
3261 #endif
3262
3263     if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContext)
3264     {
3265         // must be done regardless of DbgCode and MinOpts
3266         return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL);
3267     }
3268 #ifdef _TARGET_64BIT_
3269     if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr)
3270     {
3271         // must be done regardless of DbgCode and MinOpts
3272         return gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL));
3273     }
3274 #else
3275     assert(intrinsicID != CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr);
3276 #endif
3277
3278     GenTreePtr retNode = nullptr;
3279
3280     //
3281     // We disable the inlining of intrinsics for MinOpts.
3282     //
3283     if (!mustExpand && (opts.compDbgCode || opts.MinOpts()))
3284     {
3285         *pIntrinsicID = CORINFO_INTRINSIC_Illegal;
3286         return retNode;
3287     }
3288
3289     // Currently we don't handle CORINFO_INTRINSIC_Exp because it does not
3290     // seem to work properly for Infinity values, and we don't handle
3291     // CORINFO_INTRINSIC_Pow because it needs a helper which we currently don't have
3292
3293     var_types callType = JITtype2varType(sig->retType);
3294
3295     /* First do the intrinsics which are always smaller than a call */
3296
3297     switch (intrinsicID)
3298     {
3299         GenTreePtr op1, op2;
3300
3301         case CORINFO_INTRINSIC_Sin:
3302         case CORINFO_INTRINSIC_Sqrt:
3303         case CORINFO_INTRINSIC_Abs:
3304         case CORINFO_INTRINSIC_Cos:
3305         case CORINFO_INTRINSIC_Round:
3306         case CORINFO_INTRINSIC_Cosh:
3307         case CORINFO_INTRINSIC_Sinh:
3308         case CORINFO_INTRINSIC_Tan:
3309         case CORINFO_INTRINSIC_Tanh:
3310         case CORINFO_INTRINSIC_Asin:
3311         case CORINFO_INTRINSIC_Acos:
3312         case CORINFO_INTRINSIC_Atan:
3313         case CORINFO_INTRINSIC_Atan2:
3314         case CORINFO_INTRINSIC_Log10:
3315         case CORINFO_INTRINSIC_Pow:
3316         case CORINFO_INTRINSIC_Exp:
3317         case CORINFO_INTRINSIC_Ceiling:
3318         case CORINFO_INTRINSIC_Floor:
3319
3320             // These are math intrinsics
3321
3322             assert(callType != TYP_STRUCT);
3323
3324             op1 = nullptr;
3325
3326 #if defined(LEGACY_BACKEND)
3327             if (IsTargetIntrinsic(intrinsicID))
3328 #elif !defined(_TARGET_X86_)
3329             // Intrinsics that are not implemented directly by target instructions will
3330             // be re-materialized as user calls in the rationalizer. For prefixed tail calls,
3331             // don't do this optimization, because
3332             //  a) For back-compatibility reasons on desktop .NET 4.6 / 4.6.1
3333             //  b) It would be a non-trivial task, or too late, to re-materialize a surviving
3334             //     tail-prefixed GT_INTRINSIC as a tail call in the rationalizer.
3335             if (!IsIntrinsicImplementedByUserCall(intrinsicID) || !tailCall)
3336 #else
3337             // On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation
3338             // of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad
3339             // code generation for certain EH constructs.
3340             if (!IsIntrinsicImplementedByUserCall(intrinsicID))
3341 #endif
3342             {
3343                 switch (sig->numArgs)
3344                 {
3345                     case 1:
3346                         op1 = impPopStack().val;
3347
3348 #if FEATURE_X87_DOUBLES
3349
3350                         // X87 stack doesn't differentiate between float/double
3351                         // so it doesn't need a cast, but everybody else does
3352                         // Just double check it is at least a FP type
3353                         noway_assert(varTypeIsFloating(op1));
3354
3355 #else // FEATURE_X87_DOUBLES
3356
3357                         if (op1->TypeGet() != callType)
3358                         {
3359                             op1 = gtNewCastNode(callType, op1, callType);
3360                         }
3361
3362 #endif // FEATURE_X87_DOUBLES
3363
3364                         op1 = new (this, GT_INTRINSIC)
3365                             GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3366                         break;
3367
3368                     case 2:
3369                         op2 = impPopStack().val;
3370                         op1 = impPopStack().val;
3371
3372 #if FEATURE_X87_DOUBLES
3373
3374                         // X87 stack doesn't differentiate between float/double
3375                         // so it doesn't need a cast, but everybody else does
3376                         // Just double check it is at least a FP type
3377                         noway_assert(varTypeIsFloating(op2));
3378                         noway_assert(varTypeIsFloating(op1));
3379
3380 #else // FEATURE_X87_DOUBLES
3381
3382                         if (op2->TypeGet() != callType)
3383                         {
3384                             op2 = gtNewCastNode(callType, op2, callType);
3385                         }
3386                         if (op1->TypeGet() != callType)
3387                         {
3388                             op1 = gtNewCastNode(callType, op1, callType);
3389                         }
3390
3391 #endif // FEATURE_X87_DOUBLES
3392
3393                         op1 = new (this, GT_INTRINSIC)
3394                             GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicID, method);
3395                         break;
3396
3397                     default:
3398                         NO_WAY("Unsupported number of args for Math Intrinsic");
3399                 }
3400
3401 #ifndef LEGACY_BACKEND
3402                 if (IsIntrinsicImplementedByUserCall(intrinsicID))
3403                 {
3404                     op1->gtFlags |= GTF_CALL;
3405                 }
3406 #endif
3407             }
3408
3409             retNode = op1;
3410             break;
3411
3412 #ifdef _TARGET_XARCH_
3413         // TODO-ARM-CQ: reenable treating Interlocked operation as intrinsic
3414         case CORINFO_INTRINSIC_InterlockedAdd32:
3415             interlockedOperator = GT_LOCKADD;
3416             goto InterlockedBinOpCommon;
3417         case CORINFO_INTRINSIC_InterlockedXAdd32:
3418             interlockedOperator = GT_XADD;
3419             goto InterlockedBinOpCommon;
3420         case CORINFO_INTRINSIC_InterlockedXchg32:
3421             interlockedOperator = GT_XCHG;
3422             goto InterlockedBinOpCommon;
3423
3424 #ifdef _TARGET_AMD64_
3425         case CORINFO_INTRINSIC_InterlockedAdd64:
3426             interlockedOperator = GT_LOCKADD;
3427             goto InterlockedBinOpCommon;
3428         case CORINFO_INTRINSIC_InterlockedXAdd64:
3429             interlockedOperator = GT_XADD;
3430             goto InterlockedBinOpCommon;
3431         case CORINFO_INTRINSIC_InterlockedXchg64:
3432             interlockedOperator = GT_XCHG;
3433             goto InterlockedBinOpCommon;
3434 #endif // _TARGET_AMD64_
3435
3436         InterlockedBinOpCommon:
3437             assert(callType != TYP_STRUCT);
3438             assert(sig->numArgs == 2);
3439
3440             op2 = impPopStack().val;
3441             op1 = impPopStack().val;
3442
3443             // This creates:
3444             //   val
3445             // XAdd
3446             //   addr
3447             //     field (for example)
3448             //
3449             // In the case where the first argument is the address of a local, we might
3450             // want to make this *not* make the var address-taken -- but atomic instructions
3451             // on a local are probably pretty useless anyway, so we probably don't care.
3452
3453             op1 = gtNewOperNode(interlockedOperator, genActualType(callType), op1, op2);
3454             op1->gtFlags |= GTF_GLOB_EFFECT;
3455             retNode = op1;
3456             break;
3457 #endif // _TARGET_XARCH_
3458
3459         case CORINFO_INTRINSIC_MemoryBarrier:
3460
3461             assert(sig->numArgs == 0);
3462
3463             op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
3464             op1->gtFlags |= GTF_GLOB_EFFECT;
3465             retNode = op1;
3466             break;
3467
3468 #ifdef _TARGET_XARCH_
3469         // TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic
3470         case CORINFO_INTRINSIC_InterlockedCmpXchg32:
3471 #ifdef _TARGET_AMD64_
3472         case CORINFO_INTRINSIC_InterlockedCmpXchg64:
3473 #endif
3474         {
3475             assert(callType != TYP_STRUCT);
3476             assert(sig->numArgs == 3);
3477             GenTreePtr op3;
3478
3479             op3 = impPopStack().val; // comparand
3480             op2 = impPopStack().val; // value
3481             op1 = impPopStack().val; // location
3482
3483             GenTreePtr node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3);
3484
3485             node->gtCmpXchg.gtOpLocation->gtFlags |= GTF_DONT_CSE;
3486             retNode = node;
3487             break;
3488         }
3489 #endif
3490
3491         case CORINFO_INTRINSIC_StringLength:
3492             op1 = impPopStack().val;
3493             if (!opts.MinOpts() && !opts.compDbgCode)
3494             {
3495                 GenTreeArrLen* arrLen =
3496                     new (this, GT_ARR_LENGTH) GenTreeArrLen(TYP_INT, op1, offsetof(CORINFO_String, stringLen));
3497                 op1 = arrLen;
3498             }
3499             else
3500             {
3501                 /* Create the expression "*(str_addr + stringLengthOffset)" */
3502                 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
3503                                     gtNewIconNode(offsetof(CORINFO_String, stringLen), TYP_I_IMPL));
3504                 op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
3505             }
3506             retNode = op1;
3507             break;
3508
3509         case CORINFO_INTRINSIC_StringGetChar:
3510             op2 = impPopStack().val;
3511             op1 = impPopStack().val;
3512             op1 = gtNewIndexRef(TYP_CHAR, op1, op2);
3513             op1->gtFlags |= GTF_INX_STRING_LAYOUT;
3514             retNode = op1;
3515             break;
3516
3517         case CORINFO_INTRINSIC_InitializeArray:
3518             retNode = impInitializeArrayIntrinsic(sig);
3519             break;
3520
3521         case CORINFO_INTRINSIC_Array_Address:
3522         case CORINFO_INTRINSIC_Array_Get:
3523         case CORINFO_INTRINSIC_Array_Set:
3524             retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, intrinsicID);
3525             break;
3526
3527         case CORINFO_INTRINSIC_GetTypeFromHandle:
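            // For illustration: "typeof(C)" typically imports as a type-handle-to-RuntimeType
            // helper call (from ldtoken) feeding this intrinsic; in that case we can drop the
            // GetTypeFromHandle call and return the helper call's result directly as TYP_REF.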
3528             op1 = impStackTop(0).val;
3529             if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3530                 gtIsTypeHandleToRuntimeTypeHelper(op1))
3531             {
3532                 op1 = impPopStack().val;
3533                 // Change call to return RuntimeType directly.
3534                 op1->gtType = TYP_REF;
3535                 retNode     = op1;
3536             }
3537             // Call the regular function.
3538             break;
3539
3540         case CORINFO_INTRINSIC_RTH_GetValueInternal:
3541             op1 = impStackTop(0).val;
3542             if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3543                 gtIsTypeHandleToRuntimeTypeHelper(op1))
3544             {
3545                 // Old tree
3546                 // Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle
3547                 //
3548                 // New tree
3549                 // TreeToGetNativeTypeHandle
3550
3551                 // Remove call to helper and return the native TypeHandle pointer that was the parameter
3552                 // to that helper.
3553
3554                 op1 = impPopStack().val;
3555
3556                 // Get native TypeHandle argument to old helper
3557                 op1 = op1->gtCall.gtCallArgs;
3558                 assert(op1->OperIsList());
3559                 assert(op1->gtOp.gtOp2 == nullptr);
3560                 op1     = op1->gtOp.gtOp1;
3561                 retNode = op1;
3562             }
3563             // Call the regular function.
3564             break;
3565
3566 #ifndef LEGACY_BACKEND
3567         case CORINFO_INTRINSIC_Object_GetType:
3568
3569             op1 = impPopStack().val;
3570             op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3571
3572             // Set the CALL flag to indicate that the operator is implemented by a call.
3573             // Set also the EXCEPTION flag because the native implementation of
3574             // CORINFO_INTRINSIC_Object_GetType intrinsic can throw NullReferenceException.
3575             op1->gtFlags |= (GTF_CALL | GTF_EXCEPT);
3576             retNode = op1;
3577             break;
3578 #endif
3579
3580         default:
3581             /* Unknown intrinsic */
3582             break;
3583     }
3584
3585     if (mustExpand)
3586     {
3587         if (retNode == nullptr)
3588         {
3589             NO_WAY("JIT must expand the intrinsic!");
3590         }
3591     }
3592
3593     return retNode;
3594 }
3595
3596 /*****************************************************************************/
3597
3598 GenTreePtr Compiler::impArrayAccessIntrinsic(
3599     CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, CorInfoIntrinsics intrinsicID)
3600 {
3601     /* If we are generating SMALL_CODE, we don't want to use intrinsics for
3602        the following, as they generate fatter code.
3603     */
3604
3605     if (compCodeOpt() == SMALL_CODE)
3606     {
3607         return nullptr;
3608     }
3609
3610     /* These intrinsics generate fatter (but faster) code and are only
3611        done if we don't need SMALL_CODE */
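    // For illustration: a C# element access like "a[i, j]" on a multi-dimensional array
    // compiles to a call to the array type's Get/Set/Address accessor; when profitable,
    // the code below replaces that call with a single GT_ARR_ELEM node (plus a GT_IND,
    // or an assignment, for Get/Set).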
3612
3613     unsigned rank = (intrinsicID == CORINFO_INTRINSIC_Array_Set) ? (sig->numArgs - 1) : sig->numArgs;
3614
3615     // The rank 1 case is special because it has to handle two array formats;
3616     // we will simply not do that case.
3617     if (rank > GT_ARR_MAX_RANK || rank <= 1)
3618     {
3619         return nullptr;
3620     }
3621
3622     CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr;
3623     var_types            elemType      = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd));
3624
3625     // For the ref case, we will only be able to inline if the types match
3626     // (the verifier checks for this; we don't care about the non-verified case) and the
3627     // type is final (so we don't need to do the cast).
3628     if ((intrinsicID != CORINFO_INTRINSIC_Array_Get) && !readonlyCall && varTypeIsGC(elemType))
3629     {
3630         // Get the call site signature
3631         CORINFO_SIG_INFO LocalSig;
3632         eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig);
3633         assert(LocalSig.hasThis());
3634
3635         CORINFO_CLASS_HANDLE actualElemClsHnd;
3636
3637         if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3638         {
3639             // Fetch the last argument, the one that indicates the type we are setting.
3640             CORINFO_ARG_LIST_HANDLE argType = LocalSig.args;
3641             for (unsigned r = 0; r < rank; r++)
3642             {
3643                 argType = info.compCompHnd->getArgNext(argType);
3644             }
3645
3646             typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType);
3647             actualElemClsHnd = argInfo.GetClassHandle();
3648         }
3649         else
3650         {
3651             assert(intrinsicID == CORINFO_INTRINSIC_Array_Address);
3652
3653             // Fetch the return type
3654             typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass);
3655             assert(retInfo.IsByRef());
3656             actualElemClsHnd = retInfo.GetClassHandle();
3657         }
3658
3659         // if it's not final, we can't do the optimization
3660         if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL))
3661         {
3662             return nullptr;
3663         }
3664     }
3665
3666     unsigned arrayElemSize;
3667     if (elemType == TYP_STRUCT)
3668     {
3669         assert(arrElemClsHnd);
3670
3671         arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd);
3672     }
3673     else
3674     {
3675         arrayElemSize = genTypeSize(elemType);
3676     }
3677
3678     if ((unsigned char)arrayElemSize != arrayElemSize)
3679     {
3680         // arrayElemSize would be truncated as an unsigned char.
3681         // This means the array element is too large. Don't do the optimization.
3682         return nullptr;
3683     }
3684
3685     GenTreePtr val = nullptr;
3686
3687     if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3688     {
3689         // Assignment of a struct is more work, and there are more gets than sets.
3690         if (elemType == TYP_STRUCT)
3691         {
3692             return nullptr;
3693         }
3694
3695         val = impPopStack().val;
3696         assert(genActualType(elemType) == genActualType(val->gtType) ||
3697                (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) ||
3698                (elemType == TYP_INT && val->gtType == TYP_BYREF) ||
3699                (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT));
3700     }
3701
3702     noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK);
3703
3704     GenTreePtr inds[GT_ARR_MAX_RANK];
3705     for (unsigned k = rank; k > 0; k--)
3706     {
3707         inds[k - 1] = impPopStack().val;
3708     }
3709
3710     GenTreePtr arr = impPopStack().val;
3711     assert(arr->gtType == TYP_REF);
3712
3713     GenTreePtr arrElem =
3714         new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank),
3715                                                static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]);
3716
3717     if (intrinsicID != CORINFO_INTRINSIC_Array_Address)
3718     {
3719         arrElem = gtNewOperNode(GT_IND, elemType, arrElem);
3720     }
3721
3722     if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3723     {
3724         assert(val != nullptr);
3725         return gtNewAssignNode(arrElem, val);
3726     }
3727     else
3728     {
3729         return arrElem;
3730     }
3731 }
3732
3733 BOOL Compiler::verMergeEntryStates(BasicBlock* block, bool* changed)
3734 {
3735     unsigned i;
3736
3737     // do some basic checks first
3738     if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth)
3739     {
3740         return FALSE;
3741     }
3742
3743     if (verCurrentState.esStackDepth > 0)
3744     {
3745         // merge stack types
3746         StackEntry* parentStack = block->bbStackOnEntry();
3747         StackEntry* childStack  = verCurrentState.esStack;
3748
3749         for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++)
3750         {
3751             if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == FALSE)
3752             {
3753                 return FALSE;
3754             }
3755         }
3756     }
3757
3758     // merge initialization status of this ptr
3759
3760     if (verTrackObjCtorInitState)
3761     {
3762         // If we're tracking the CtorInitState, then it must not be unknown in the current state.
3763         assert(verCurrentState.thisInitialized != TIS_Bottom);
3764
3765         // If the successor block's thisInit state is unknown, copy it from the current state.
3766         if (block->bbThisOnEntry() == TIS_Bottom)
3767         {
3768             *changed = true;
3769             verSetThisInit(block, verCurrentState.thisInitialized);
3770         }
3771         else if (verCurrentState.thisInitialized != block->bbThisOnEntry())
3772         {
3773             if (block->bbThisOnEntry() != TIS_Top)
3774             {
3775                 *changed = true;
3776                 verSetThisInit(block, TIS_Top);
3777
3778                 if (block->bbFlags & BBF_FAILED_VERIFICATION)
3779                 {
3780                     // The block is bad. Control can flow through the block to any handler that catches the
3781                     // verification exception, but the importer ignores bad blocks and therefore won't model
3782                     // this flow in the normal way. To complete the merge into the bad block, the new state
3783                     // needs to be manually pushed to the handlers that may be reached after the verification
3784                     // exception occurs.
3785                     //
3786                     // Usually, the new state was already propagated to the relevant handlers while processing
3787                     // the predecessors of the bad block. The exception is when the bad block is at the start
3788                     // of a try region, meaning it is protected by additional handlers that do not protect its
3789                     // predecessors.
3790                     //
3791                     if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0))
3792                     {
3793                         // Push TIS_Top to the handlers that protect the bad block. Note that this can cause
3794                         // recursive calls back into this code path (if successors of the current bad block are
3795                         // also bad blocks).
3796                         //
3797                         ThisInitState origTIS           = verCurrentState.thisInitialized;
3798                         verCurrentState.thisInitialized = TIS_Top;
3799                         impVerifyEHBlock(block, true);
3800                         verCurrentState.thisInitialized = origTIS;
3801                     }
3802                 }
3803             }
3804         }
3805     }
3806     else
3807     {
3808         assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom);
3809     }
3810
3811     return TRUE;
3812 }
3813
3814 /*****************************************************************************
3815  * 'logMsg' is true if a log message needs to be logged; false if the caller has
3816  *   already logged it (presumably in a more detailed fashion than done here)
3817  * 'bVerificationException' is true for a verification exception, false for a
3818  *   "call unauthorized by host" exception.
3819  */
3820
3821 void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg))
3822 {
3823     block->bbJumpKind = BBJ_THROW;
3824     block->bbFlags |= BBF_FAILED_VERIFICATION;
3825
3826     impCurStmtOffsSet(block->bbCodeOffs);
3827
3828 #ifdef DEBUG
3829     // we need this since BeginTreeList asserts otherwise
3830     impTreeList = impTreeLast = nullptr;
3831     block->bbFlags &= ~BBF_IMPORTED;
3832
3833     if (logMsg)
3834     {
3835         JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName,
3836                 block->bbCodeOffs, block->bbCodeOffsEnd));
3837         if (verbose)
3838         {
3839             printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs);
3840         }
3841     }
3842
3843     if (JitConfig.DebugBreakOnVerificationFailure())
3844     {
3845         DebugBreak();
3846     }
3847 #endif
3848
3849     impBeginTreeList();
3850
3851     // if the stack is non-empty evaluate all the side-effects
3852     if (verCurrentState.esStackDepth > 0)
3853     {
3854         impEvalSideEffects();
3855     }
3856     assert(verCurrentState.esStackDepth == 0);
3857
3858     GenTreePtr op1 = gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, GTF_EXCEPT,
3859                                          gtNewArgList(gtNewIconNode(block->bbCodeOffs)));
3860     // verCurrentState.esStackDepth = 0;
3861     impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
3862
3863     // The inliner is not able to handle methods that require a throw block, so
3864     // make sure this method never gets inlined.
3865     info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE);
3866 }
3867
3868 /*****************************************************************************
3869  *
3870  */
3871 void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg))
3872
3873 {
3874     // In AMD64, for historical reasons involving design limitations of JIT64, the VM has a
3875     // slightly different mechanism in which it calls the JIT to perform IL verification:
3876     // in the case of transparent methods the VM calls for a predicate IsVerifiable()
3877     // that consists of calling the JIT with the IMPORT_ONLY flag and with the IL verify flag on.
3878     // If the JIT determines the method is not verifiable, it should raise the exception to the VM and let
3879     // it bubble up until reported by the runtime.  Currently in RyuJIT, this method doesn't bubble
3880     // up the exception; instead it embeds a throw inside the offending basic block and lets it
3881     // fail at run time of the jitted method.
3882     //
3883     // For AMD64 we don't want this behavior when the JIT has been called only for verification (i.e.
3884     // with the IMPORT_ONLY and IL Verification flag set) because this won't actually generate code,
3885     // just try to find out whether to fail this method before even actually jitting it.  So, in case
3886     // we detect these two conditions, instead of generating a throw statement inside the offending
3887     // basic block, we immediately fail to JIT and notify the VM to make the IsVerifiable() predicate
3888     // to return false and make RyuJIT behave the same way JIT64 does.
3889     //
3890     // The rationale behind this workaround is to avoid modifying the VM and maintain compatibility between JIT64 and
3891     // RyuJIT for the time being until we completely replace JIT64.
3892     // TODO-ARM64-Cleanup:  We probably want to actually modify the VM in the future to avoid the unnecessary two passes.
3893
3894     // In AMD64 we must make sure we're behaving the same way as JIT64, meaning we should only raise the verification
3895     // exception if we are only importing and verifying.  The method verNeedsVerification() can also modify the
3896     // tiVerificationNeeded flag in the case it determines it can 'skip verification' during importation and defer it
3897     // to a runtime check. That's why we must assert one or the other (since the flag tiVerificationNeeded can
3898     // be turned off during importation).
3899     CLANG_FORMAT_COMMENT_ANCHOR;
3900
3901 #ifdef _TARGET_64BIT_
3902
3903 #ifdef DEBUG
3904     bool canSkipVerificationResult =
3905         info.compCompHnd->canSkipMethodVerification(info.compMethodHnd) != CORINFO_VERIFICATION_CANNOT_SKIP;
3906     assert(tiVerificationNeeded || canSkipVerificationResult);
3907 #endif // DEBUG
3908
3909     // Add the non verifiable flag to the compiler
3910     if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
3911     {
3912         tiIsVerifiableCode = FALSE;
3913     }
3914 #endif //_TARGET_64BIT_
3915     verResetCurrentState(block, &verCurrentState);
3916     verConvertBBToThrowVerificationException(block DEBUGARG(logMsg));
3917
3918 #ifdef DEBUG
3919     impNoteLastILoffs(); // Remember at which BC offset the tree was finished
3920 #endif                   // DEBUG
3921 }
3922
3923 /******************************************************************************/
3924 typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd)
3925 {
3926     assert(ciType < CORINFO_TYPE_COUNT);
3927
3928     typeInfo tiResult;
3929     switch (ciType)
3930     {
3931         case CORINFO_TYPE_STRING:
3932         case CORINFO_TYPE_CLASS:
3933             tiResult = verMakeTypeInfo(clsHnd);
3934             if (!tiResult.IsType(TI_REF))
3935             { // type must be consistent with element type
3936                 return typeInfo();
3937             }
3938             break;
3939
3940 #ifdef _TARGET_64BIT_
3941         case CORINFO_TYPE_NATIVEINT:
3942         case CORINFO_TYPE_NATIVEUINT:
3943             if (clsHnd)
3944             {
3945                 // If we have more precise information, use it
3946                 return verMakeTypeInfo(clsHnd);
3947             }
3948             else
3949             {
3950                 return typeInfo::nativeInt();
3951             }
3952             break;
3953 #endif // _TARGET_64BIT_
3954
3955         case CORINFO_TYPE_VALUECLASS:
3956         case CORINFO_TYPE_REFANY:
3957             tiResult = verMakeTypeInfo(clsHnd);
3958             // type must be consistent with element type
3959             if (!tiResult.IsValueClass())
3960             {
3961                 return typeInfo();
3962             }
3963             break;
3964         case CORINFO_TYPE_VAR:
3965             return verMakeTypeInfo(clsHnd);
3966
3967         case CORINFO_TYPE_PTR: // for now, pointers are treated as an error
3968         case CORINFO_TYPE_VOID:
3969             return typeInfo();
3970             break;
3971
3972         case CORINFO_TYPE_BYREF:
3973         {
3974             CORINFO_CLASS_HANDLE childClassHandle;
3975             CorInfoType          childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle);
3976             return ByRef(verMakeTypeInfo(childType, childClassHandle));
3977         }
3978         break;
3979
3980         default:
3981             if (clsHnd)
3982             { // If we have more precise information, use it
3983                 return typeInfo(TI_STRUCT, clsHnd);
3984             }
3985             else
3986             {
3987                 return typeInfo(JITtype2tiType(ciType));
3988             }
3989     }
3990     return tiResult;
3991 }
3992
3993 /******************************************************************************/
3994
3995 typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */)
3996 {
3997     if (clsHnd == nullptr)
3998     {
3999         return typeInfo();
4000     }
4001
4002     // Byrefs should only occur in method and local signatures, which are accessed
4003     // using ICorClassInfo and ICorClassInfo.getChildType.
4004     // So findClass() and getClassAttribs() should not be called for byrefs.
4005
4006     if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF)
4007     {
4008         assert(!"Did findClass() return a Byref?");
4009         return typeInfo();
4010     }
4011
4012     unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd);
4013
4014     if (attribs & CORINFO_FLG_VALUECLASS)
4015     {
4016         CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd);
4017
4018         // Meta-data validation should ensure that CORINFO_TYPE_BYREF does
4019         // not occur here, so we may want to change this to an assert instead.
4020         if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR)
4021         {
4022             return typeInfo();
4023         }
4024
4025 #ifdef _TARGET_64BIT_
4026         if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT)
4027         {
4028             return typeInfo::nativeInt();
4029         }
4030 #endif // _TARGET_64BIT_
4031
4032         if (t != CORINFO_TYPE_UNDEF)
4033         {
4034             return (typeInfo(JITtype2tiType(t)));
4035         }
4036         else if (bashStructToRef)
4037         {
4038             return (typeInfo(TI_REF, clsHnd));
4039         }
4040         else
4041         {
4042             return (typeInfo(TI_STRUCT, clsHnd));
4043         }
4044     }
4045     else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE)
4046     {
4047         // See comment in _typeInfo.h for why we do it this way.
4048         return (typeInfo(TI_REF, clsHnd, true));
4049     }
4050     else
4051     {
4052         return (typeInfo(TI_REF, clsHnd));
4053     }
4054 }
4055
4056 /******************************************************************************/
4057 BOOL Compiler::verIsSDArray(typeInfo ti)
4058 {
4059     if (ti.IsNullObjRef())
4060     { // nulls are SD arrays
4061         return TRUE;
4062     }
4063
4064     if (!ti.IsType(TI_REF))
4065     {
4066         return FALSE;
4067     }
4068
4069     if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef()))
4070     {
4071         return FALSE;
4072     }
4073     return TRUE;
4074 }
4075
4076 /******************************************************************************/
4077 /* Given 'arrayObjectType' which is an array type, fetch the element type. */
4078 /* Returns an error type if anything goes wrong */
4079
4080 typeInfo Compiler::verGetArrayElemType(typeInfo arrayObjectType)
4081 {
4082     assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case
4083
4084     if (!verIsSDArray(arrayObjectType))
4085     {
4086         return typeInfo();
4087     }
4088
4089     CORINFO_CLASS_HANDLE childClassHandle = nullptr;
4090     CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle);
4091
4092     return verMakeTypeInfo(ciType, childClassHandle);
4093 }
4094
4095 /*****************************************************************************
4096  */
4097 typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args)
4098 {
4099     CORINFO_CLASS_HANDLE classHandle;
4100     CorInfoType          ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle));
4101
4102     var_types type = JITtype2varType(ciType);
4103     if (varTypeIsGC(type))
4104     {
4105         // For efficiency, getArgType only returns something in classHandle for
4106         // value types.  For other types that have additional type info, you
4107         // have to call back explicitly
4108         classHandle = info.compCompHnd->getArgClass(sig, args);
4109         if (!classHandle)
4110         {
4111             NO_WAY("Could not figure out Class specified in argument or local signature");
4112         }
4113     }
4114
4115     return verMakeTypeInfo(ciType, classHandle);
4116 }
4117
4118 /*****************************************************************************/
4119
4120 // This does the expensive check to figure out whether the method
4121 // needs to be verified. It is called only when we fail verification,
4122 // just before throwing the verification exception.
4123
4124 BOOL Compiler::verNeedsVerification()
4125 {
4126     // If we have previously determined that verification is NOT needed
4127     // (for example in Compiler::compCompile), that means verification is really not needed.
4128     // Return the same decision we made before.
4129     // (Note: This literally means that tiVerificationNeeded can never go from 0 to 1.)
4130
4131     if (!tiVerificationNeeded)
4132     {
4133         return tiVerificationNeeded;
4134     }
4135
4136     assert(tiVerificationNeeded);
4137
4138     // Ok, we haven't concluded that verification is NOT needed. Consult the EE now to
4139     // obtain the answer.
4140     CorInfoCanSkipVerificationResult canSkipVerificationResult =
4141         info.compCompHnd->canSkipMethodVerification(info.compMethodHnd);
4142
4143     // canSkipVerification will return one of the following three values:
4144     //    CORINFO_VERIFICATION_CANNOT_SKIP = 0,       // Cannot skip verification during jit time.
4145     //    CORINFO_VERIFICATION_CAN_SKIP = 1,          // Can skip verification during jit time.
4146     //    CORINFO_VERIFICATION_RUNTIME_CHECK = 2,     // Skip verification during jit time,
4147     //     but need to insert a callout to the VM to ask during runtime
4148     //     whether to skip verification or not.
4149
4150     // Set tiRuntimeCalloutNeeded if canSkipVerification() instructs us to insert a callout for runtime check
4151     if (canSkipVerificationResult == CORINFO_VERIFICATION_RUNTIME_CHECK)
4152     {
4153         tiRuntimeCalloutNeeded = true;
4154     }
4155
4156     if (canSkipVerificationResult == CORINFO_VERIFICATION_DONT_JIT)
4157     {
4158         // Dev10 706080 - Testers don't like the assert, so just silence it
4159         // by not using the macros that invoke debugAssert.
4160         badCode();
4161     }
4162
4163     // When tiVerificationNeeded is true, JIT will do the verification during JIT time.
4164     // The following line means we will NOT do jit time verification if canSkipVerification
4165     // returns CORINFO_VERIFICATION_CAN_SKIP or CORINFO_VERIFICATION_RUNTIME_CHECK.
4166     tiVerificationNeeded = (canSkipVerificationResult == CORINFO_VERIFICATION_CANNOT_SKIP);
4167     return tiVerificationNeeded;
4168 }
4169
4170 BOOL Compiler::verIsByRefLike(const typeInfo& ti)
4171 {
4172     if (ti.IsByRef())
4173     {
4174         return TRUE;
4175     }
4176     if (!ti.IsType(TI_STRUCT))
4177     {
4178         return FALSE;
4179     }
4180     return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR;
4181 }
4182
4183 BOOL Compiler::verIsSafeToReturnByRef(const typeInfo& ti)
4184 {
4185     if (ti.IsPermanentHomeByRef())
4186     {
4187         return TRUE;
4188     }
4189     else
4190     {
4191         return FALSE;
4192     }
4193 }
4194
4195 BOOL Compiler::verIsBoxable(const typeInfo& ti)
4196 {
4197     return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables
4198             || ti.IsUnboxedGenericTypeVar() ||
4199             (ti.IsType(TI_STRUCT) &&
4200              // exclude byreflike structs
4201              !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR)));
4202 }
4203
4204 // Is it a boxed value type?
4205 bool Compiler::verIsBoxedValueType(typeInfo ti)
4206 {
4207     if (ti.GetType() == TI_REF)
4208     {
4209         CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef();
4210         return !!eeIsValueClass(clsHnd);
4211     }
4212     else
4213     {
4214         return false;
4215     }
4216 }
4217
4218 /*****************************************************************************
4219  *
4220  *  Check if a TailCall is legal.
4221  */
4222
4223 bool Compiler::verCheckTailCallConstraint(
4224     OPCODE                  opcode,
4225     CORINFO_RESOLVED_TOKEN* pResolvedToken,
4226     CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter?
4227     bool                    speculative                // If true, won't throw if verification fails. Instead it will
4228                                                        // return false to the caller.
4229                                                        // If false, it will throw.
4230     )
4231 {
4232     DWORD            mflags;
4233     CORINFO_SIG_INFO sig;
4234     unsigned int     popCount = 0; // we can't pop the stack since impImportCall needs it, so
4235                                    // this counter is used to keep track of how many items have been
4236                                    // virtually popped
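                                        // For example, a call with N declared arguments plus a 'this' pointer
                                        // ends up with popCount == N + 1; the tailcall is only accepted if the
                                        // evaluation stack holds exactly popCount items (checked at the end).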
4237
4238     CORINFO_METHOD_HANDLE methodHnd       = nullptr;
4239     CORINFO_CLASS_HANDLE  methodClassHnd  = nullptr;
4240     unsigned              methodClassFlgs = 0;
4241
4242     assert(impOpcodeIsCallOpcode(opcode));
4243
4244     if (compIsForInlining())
4245     {
4246         return false;
4247     }
4248
4249     // for calli, VerifyOrReturn that this is not a virtual method
4250     if (opcode == CEE_CALLI)
4251     {
4252         /* Get the call sig */
4253         eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4254
4255         // We don't know the target method, so we have to infer the flags, or
4256         // assume the worst-case.
4257         mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
4258     }
4259     else
4260     {
4261         methodHnd = pResolvedToken->hMethod;
4262
4263         mflags = info.compCompHnd->getMethodAttribs(methodHnd);
4264
4265         // When verifying generic code we pair the method handle with its
4266         // owning class to get the exact method signature.
4267         methodClassHnd = pResolvedToken->hClass;
4268         assert(methodClassHnd);
4269
4270         eeGetMethodSig(methodHnd, &sig, methodClassHnd);
4271
4272         // opcode specific check
4273         methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd);
4274     }
4275
4276     // We must have got the methodClassHnd if opcode is not CEE_CALLI
4277     assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI);
4278
4279     if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4280     {
4281         eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4282     }
4283
4284     // check compatibility of the arguments
4285     unsigned int argCount;
4286     argCount = sig.numArgs;
4287     CORINFO_ARG_LIST_HANDLE args;
4288     args = sig.args;
4289     while (argCount--)
4290     {
4291         typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack();
4292
4293         // check that the argument is not a byref for tailcalls
4294         VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative);
4295
4296         // For unsafe code, we might have parameters containing a pointer to a stack location.
4297         // Disallow the tailcall for this kind.
4298         CORINFO_CLASS_HANDLE classHandle;
4299         CorInfoType          ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle));
4300         VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative);
4301
4302         args = info.compCompHnd->getArgNext(args);
4303     }
4304
4305     // update popCount
4306     popCount += sig.numArgs;
4307
4308     // check for the 'this' pointer, which is present for non-static methods not called via NEWOBJ
4309     if (!(mflags & CORINFO_FLG_STATIC))
4310     {
4311         // Always update the popCount.
4312         // This is crucial for the stack calculation to be correct.
4313         typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4314         popCount++;
4315
4316         if (opcode == CEE_CALLI)
4317         {
4318             // For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object
4319             // on the stack.
4320             if (tiThis.IsValueClass())
4321             {
4322                 tiThis.MakeByRef();
4323             }
4324             VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative);
4325         }
4326         else
4327         {
4328             // Check type compatibility of the this argument
4329             typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd);
4330             if (tiDeclaredThis.IsValueClass())
4331             {
4332                 tiDeclaredThis.MakeByRef();
4333             }
4334
4335             VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative);
4336         }
4337     }
4338
4339     // Tail calls on constrained calls should be illegal too:
4340     // when instantiated at a value type, a constrained call may pass the address of a stack allocated value
4341     VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative);
4342
4343     // Get the exact view of the signature for an array method
4344     if (sig.retType != CORINFO_TYPE_VOID)
4345     {
4346         if (methodClassFlgs & CORINFO_FLG_ARRAY)
4347         {
4348             assert(opcode != CEE_CALLI);
4349             eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4350         }
4351     }
4352
4353     typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass);
4354     typeInfo tiCallerRetType =
4355         verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
4356
4357     // void return type gets morphed into the error type, so we have to treat them specially here
4358     if (sig.retType == CORINFO_TYPE_VOID)
4359     {
4360         VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch",
4361                                   speculative);
4362     }
4363     else
4364     {
4365         VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType),
4366                                                    NormaliseForStack(tiCallerRetType), true),
4367                                   "tailcall return mismatch", speculative);
4368     }
4369
4370     // for tailcall, stack must be empty
4371     VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative);
4372
4373     return true; // Yes, tailcall is legal
4374 }
4375
4376 /*****************************************************************************
4377  *
4378  *  Checks the IL verification rules for the call
4379  */
4380
4381 void Compiler::verVerifyCall(OPCODE                  opcode,
4382                              CORINFO_RESOLVED_TOKEN* pResolvedToken,
4383                              CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
4384                              bool                    tailCall,
4385                              bool                    readonlyCall,
4386                              const BYTE*             delegateCreateStart,
4387                              const BYTE*             codeAddr,
4388                              CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName))
4389 {
4390     DWORD             mflags;
4391     CORINFO_SIG_INFO* sig      = nullptr;
4392     unsigned int      popCount = 0; // we can't pop the stack since impImportCall needs it, so
4393                                     // this counter is used to keep track of how many items have been
4394                                     // virtually popped
4395
4396     // for calli, VerifyOrReturn that this is not a virtual method
4397     if (opcode == CEE_CALLI)
4398     {
4399         Verify(false, "Calli not verifiable");
4400         return;
4401     }
4402
4403     //<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item.
4404     mflags = callInfo->verMethodFlags;
4405
4406     sig = &callInfo->verSig;
4407
4408     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4409     {
4410         eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
4411     }
4412
4413     // opcode specific check
4414     unsigned methodClassFlgs = callInfo->classFlags;
4415     switch (opcode)
4416     {
4417         case CEE_CALLVIRT:
4418             // cannot do callvirt on valuetypes
4419             VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class");
4420             VerifyOrReturn(sig->hasThis(), "CallVirt on static method");
4421             break;
4422
4423         case CEE_NEWOBJ:
4424         {
4425             assert(!tailCall); // Importer should not allow this
4426             VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC),
4427                            "newobj must be on instance");
4428
4429             if (methodClassFlgs & CORINFO_FLG_DELEGATE)
4430             {
4431                 VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor");
4432                 typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
4433                 typeInfo tiDeclaredFtn =
4434                     verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack();
4435                 VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type");
4436
4437                 assert(popCount == 0);
4438                 typeInfo tiActualObj = impStackTop(1).seTypeInfo;
4439                 typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
4440
4441                 VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg");
4442                 VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch");
4443                 VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF),
4444                                "delegate object type mismatch");
4445
4446                 CORINFO_CLASS_HANDLE objTypeHandle =
4447                     tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef();
4448
4449                 // the method signature must be compatible with the delegate's invoke method
4450
4451                 // check that for virtual functions, the type of the object used to get the
4452                 // ftn ptr is the same as the type of the object passed to the delegate ctor.
4453                 // since this is a bit of work to determine in general, we pattern match stylized
4454                 // code sequences
4455
4456                 // the delegate creation code check, which used to be done later, is now done here
4457                 // so we can read delegateMethodRef directly from
4458                 // so we can read delegateMethodRef directly from the preceding
4459                 // LDFTN or CEE_LDVIRTFTN instruction sequence;
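                     // An illustrative example of such a stylized sequence (method and delegate
                     // names are placeholders):
                     //     dup
                     //     ldvirtftn  SomeClass::SomeVirtualMethod
                     //     newobj     instance void SomeDelegate::.ctor(object, native int)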
4460
4461                 mdMemberRef delegateMethodRef = mdMemberRefNil;
4462                 VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef),
4463                                "must create delegates with certain IL");
4464
4465                 CORINFO_RESOLVED_TOKEN delegateResolvedToken;
4466                 delegateResolvedToken.tokenContext = impTokenLookupContextHandle;
4467                 delegateResolvedToken.tokenScope   = info.compScopeHnd;
4468                 delegateResolvedToken.token        = delegateMethodRef;
4469                 delegateResolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
4470                 info.compCompHnd->resolveToken(&delegateResolvedToken);
4471
4472                 CORINFO_CALL_INFO delegateCallInfo;
4473                 eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */,
4474                               addVerifyFlag(CORINFO_CALLINFO_SECURITYCHECKS), &delegateCallInfo);
4475
4476                 BOOL isOpenDelegate = FALSE;
4477                 VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass,
4478                                                                       tiActualFtn.GetMethod(), pResolvedToken->hClass,
4479                                                                       &isOpenDelegate),
4480                                "function incompatible with delegate");
4481
4482                 // check the constraints on the target method
4483                 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass),
4484                                "delegate target has unsatisfied class constraints");
4485                 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass,
4486                                                                             tiActualFtn.GetMethod()),
4487                                "delegate target has unsatisfied method constraints");
4488
4489                 // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch)
4490                 // for additional verification rules for delegates
4491                 CORINFO_METHOD_HANDLE actualMethodHandle  = tiActualFtn.GetMethod();
4492                 DWORD                 actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle);
4493                 if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
4494                 {
4495
4496                     if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0)
4497 #ifdef DEBUG
4498                         && StrictCheckForNonVirtualCallToVirtualMethod()
4499 #endif
4500                             )
4501                     {
4502                         if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
4503                         {
4504                             VerifyOrReturn(tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly() ||
4505                                                verIsBoxedValueType(tiActualObj),
4506                                            "The 'this' parameter to the call must be either the calling method's "
4507                                            "'this' parameter or "
4508                                            "a boxed value type.");
4509                         }
4510                     }
4511                 }
4512
4513                 if (actualMethodAttribs & CORINFO_FLG_PROTECTED)
4514                 {
4515                     BOOL targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC;
4516
4517                     Verify(targetIsStatic || !isOpenDelegate,
4518                            "Unverifiable creation of an open instance delegate for a protected member.");
4519
4520                     CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic)
4521                                                                 ? info.compClassHnd
4522                                                                 : tiActualObj.GetClassHandleForObjRef();
4523
4524                     // In the case of protected methods, it is a requirement that the 'this'
4525                     // pointer be a subclass of the current context.  Perform this check.
4526                     Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
4527                            "Accessing protected method through wrong type.");
4528                 }
4529                 goto DONE_ARGS;
4530             }
4531         }
4532         // fall thru to default checks
4533         default:
4534             VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract");
4535     }
4536     VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)),
4537                    "can only newobj a delegate constructor");
4538
4539     // check compatibility of the arguments
4540     unsigned int argCount;
4541     argCount = sig->numArgs;
4542     CORINFO_ARG_LIST_HANDLE args;
4543     args = sig->args;
4544     while (argCount--)
4545     {
4546         typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo;
4547
4548         typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack();
4549         VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch");
4550
4551         args = info.compCompHnd->getArgNext(args);
4552     }
4553
4554 DONE_ARGS:
4555
4556     // update popCount
4557     popCount += sig->numArgs;
4558
4559     // check for the 'this' pointer, which is present for non-static methods not called via NEWOBJ
4560     CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
4561     if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ))
4562     {
4563         typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4564         popCount++;
4565
4566         // If it is null, we assume we can access it (since it will AV shortly)
4567         // If it is anything but a reference class, there is no hierarchy, so
4568         // again, we don't need the precise instance class to compute 'protected' access
4569         if (tiThis.IsType(TI_REF))
4570         {
4571             instanceClassHnd = tiThis.GetClassHandleForObjRef();
4572         }
4573
4574         // Check type compatibility of the this argument
4575         typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass);
4576         if (tiDeclaredThis.IsValueClass())
4577         {
4578             tiDeclaredThis.MakeByRef();
4579         }
4580
4581         // If this is a call to the base class .ctor, set thisPtr Init for
4582         // this block.
4583         if (mflags & CORINFO_FLG_CONSTRUCTOR)
4584         {
4585             if (verTrackObjCtorInitState && tiThis.IsThisPtr() &&
4586                 verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass))
4587             {
4588                 assert(verCurrentState.thisInitialized !=
4589                        TIS_Bottom); // This should never be the case just from the logic of the verifier.
4590                 VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit,
4591                                "Call to base class constructor when 'this' is possibly initialized");
4592                 // Otherwise, 'this' is now initialized.
4593                 verCurrentState.thisInitialized = TIS_Init;
4594                 tiThis.SetInitialisedObjRef();
4595             }
4596             else
4597             {
4598                 // We allow direct calls to value type constructors
4599                 // NB: we have to check that the contents of tiThis is a value type, otherwise we could use a
4600                 // constrained callvirt to illegally re-enter a .ctor on a value of reference type.
4601                 VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(),
4602                                "Bad call to a constructor");
4603             }
4604         }
4605
4606         if (pConstrainedResolvedToken != nullptr)
4607         {
4608             VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call");
4609
4610             typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass);
4611
4612             // We just dereference this and test for equality
4613             tiThis.DereferenceByRef();
4614             VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint),
4615                            "this type mismatch with constrained type operand");
4616
4617             // Now pretend the this type is the boxed constrained type, for the sake of subsequent checks
4618             tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass);
4619         }
4620
4621         // To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too
4622         if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef())
4623         {
4624             tiDeclaredThis.SetIsReadonlyByRef();
4625         }
4626
4627         VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch");
4628
4629         if (tiThis.IsByRef())
4630         {
4631             // Find the actual type where the method exists (as opposed to what is declared
4632             // in the metadata). This is to prevent passing a byref as the "this" argument
4633             // while calling methods like System.ValueType.GetHashCode() which expect boxed objects.
4634
4635             CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod);
4636             VerifyOrReturn(eeIsValueClass(actualClassHnd),
4637                            "Call to base type of valuetype (which is never a valuetype)");
4638         }
4639
4640         // Rules for non-virtual call to a non-final virtual method:
4641
4642         // Define:
4643         // The "this" pointer is considered to be "possibly written" if
4644         //   1. Its address has been taken (LDARGA 0) anywhere in the method.
4645         //   (or)
4646         //   2. It has been stored to (STARG.0) anywhere in the method.
4647
4648         // A non-virtual call to a non-final virtual method is only allowed if
4649         //   1. The this pointer passed to the callee is an instance of a boxed value type.
4650         //   (or)
4651         //   2. The this pointer passed to the callee is the current method's this pointer.
4652         //      (and) The current method's this pointer is not "possibly written".
4653
4654         // Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to
4655         // virtual methods.  (Luckily this does not affect .ctors, since they are not virtual).
4656         // This is stronger than is strictly needed, but implementing a laxer rule is significantly
4657         // hard and more error prone.
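             // For illustration (hypothetical C# source): a "base.VirtualMethod()" call compiles to a
             // non-virtual 'call' of a virtual method; under this rule it is verifiable only while the
             // value passed as 'this' is the caller's own, never-written 'this' pointer (or a boxed value type).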
4658
4659         if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0)
4660 #ifdef DEBUG
4661             && StrictCheckForNonVirtualCallToVirtualMethod()
4662 #endif
4663                 )
4664         {
4665             if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
4666             {
4667                 VerifyOrReturn(
4668                     tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly() || verIsBoxedValueType(tiThis),
4669                     "The 'this' parameter to the call must be either the calling method's 'this' parameter or "
4670                     "a boxed value type.");
4671             }
4672         }
4673     }
4674
4675     // check any constraints on the callee's class and type parameters
4676     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass),
4677                    "method has unsatisfied class constraints");
4678     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod),
4679                    "method has unsatisfied method constraints");
4680
4681     if (mflags & CORINFO_FLG_PROTECTED)
4682     {
4683         VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
4684                        "Can't access protected method");
4685     }
4686
4687     // Get the exact view of the signature for an array method
4688     if (sig->retType != CORINFO_TYPE_VOID)
4689     {
4690         eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass);
4691     }
4692
4693     // "readonly." prefixed calls only allowed for the Address operation on arrays.
4694     // The methods supported by array types are under the control of the EE
4695     // so we can trust that only the Address operation returns a byref.
4696     if (readonlyCall)
4697     {
4698         typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass);
4699         VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(),
4700                        "unexpected use of readonly prefix");
4701     }
4702
4703     // Verify the tailcall
4704     if (tailCall)
4705     {
4706         verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false);
4707     }
4708 }
4709
4710 /*****************************************************************************
4711  *  Checks that a delegate creation is done using the following pattern:
4712  *     dup
4713  *     ldvirtftn targetMemberRef
4714  *  OR
4715  *     ldftn targetMemberRef
4716  *
4717  * 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if
4718  *  not in this basic block)
4719  *
4720  *  targetMemberRef is read from the code sequence.
4721  *  targetMemberRef is validated iff verificationNeeded.
4722  */
4723
4724 BOOL Compiler::verCheckDelegateCreation(const BYTE*  delegateCreateStart,
4725                                         const BYTE*  codeAddr,
4726                                         mdMemberRef& targetMemberRef)
4727 {
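         // The token offsets used below follow the ECMA-335 encodings: 'ldftn' is a two-byte opcode
         // followed by a 4-byte token (token starts at offset 2), while 'dup' is a single byte that
         // precedes the two-byte 'ldvirtftn' (token starts at offset 3).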
4728     if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
4729     {
4730         targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]);
4731         return TRUE;
4732     }
4733     else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr))
4734     {
4735         targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]);
4736         return TRUE;
4737     }
4738
4739     return FALSE;
4740 }
4741
4742 typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType)
4743 {
4744     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
4745     typeInfo ptrVal     = verVerifyLDIND(tiTo, instrType);
4746     typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack();
4747     if (!tiCompatibleWith(value, normPtrVal, true))
4748     {
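             // The condition is already known to be false here; Verify is called purely to report the
             // verification failure when verification is enforced. In either case we record that an
             // unsafe cast was used.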
4749         Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch");
4750         compUnsafeCastUsed = true;
4751     }
4752     return ptrVal;
4753 }
4754
4755 typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType)
4756 {
4757     assert(!instrType.IsStruct());
4758
4759     typeInfo ptrVal;
4760     if (ptr.IsByRef())
4761     {
4762         ptrVal = DereferenceByRef(ptr);
4763         if (instrType.IsObjRef() && !ptrVal.IsObjRef())
4764         {
4765             Verify(false, "bad pointer");
4766             compUnsafeCastUsed = true;
4767         }
4768         else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal))
4769         {
4770             Verify(false, "pointer not consistent with instr");
4771             compUnsafeCastUsed = true;
4772         }
4773     }
4774     else
4775     {
4776         Verify(false, "pointer not byref");
4777         compUnsafeCastUsed = true;
4778     }
4779
4780     return ptrVal;
4781 }
4782
4783 // Verify that the field is used properly.  'tiThis' is NULL for statics,
4784 // 'fieldFlags' is the field's attributes, and mutator is TRUE if it is a
4785 // ld*flda or a st*fld.
4786 // 'enclosingClass' is given if we are accessing a field in some specific type.
4787
4788 void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN*   pResolvedToken,
4789                               const CORINFO_FIELD_INFO& fieldInfo,
4790                               const typeInfo*           tiThis,
4791                               BOOL                      mutator,
4792                               BOOL                      allowPlainStructAsThis)
4793 {
4794     CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass;
4795     unsigned             fieldFlags     = fieldInfo.fieldFlags;
4796     CORINFO_CLASS_HANDLE instanceClass =
4797         info.compClassHnd; // for statics, we imagine the instance is the current class.
4798
4799     bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0);
4800     if (mutator)
4801     {
4802         Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA-based static");
4803         if ((fieldFlags & CORINFO_FLG_FIELD_FINAL))
4804         {
4805             Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd &&
4806                        info.compIsStatic == isStaticField,
4807                    "bad use of initonly field (set or address taken)");
4808         }
4809     }
4810
4811     if (tiThis == nullptr)
4812     {
4813         Verify(isStaticField, "used static opcode with non-static field");
4814     }
4815     else
4816     {
4817         typeInfo tThis = *tiThis;
4818
4819         if (allowPlainStructAsThis && tThis.IsValueClass())
4820         {
4821             tThis.MakeByRef();
4822         }
4823
4824         // If it is null, we assume we can access it (since it will AV shortly)
4825         // If it is anything but a reference class, there is no hierarchy, so
4826         // again, we don't need the precise instance class to compute 'protected' access
4827         if (tiThis->IsType(TI_REF))
4828         {
4829             instanceClass = tiThis->GetClassHandleForObjRef();
4830         }
4831
4832         // Note that even if the field is static, we require that the this pointer
4833         // satisfy the same constraints as a non-static field.  This happens to
4834         // be simpler and seems reasonable
4835         typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass);
4836         if (tiDeclaredThis.IsValueClass())
4837         {
4838             tiDeclaredThis.MakeByRef();
4839
4840             // we allow read-only tThis, on any field access (even stores!), because if the
4841             // class implementor wants to prohibit stores he should make the field private.
4842             // we do this by setting the read-only bit on the type we compare tThis to.
4843             tiDeclaredThis.SetIsReadonlyByRef();
4844         }
4845         else if (verTrackObjCtorInitState && tThis.IsThisPtr())
4846         {
4847             // Any field access is legal on "uninitialized" this pointers.
4848             // The easiest way to implement this is to simply set the
4849             // initialized bit for the duration of the type check on the
4850             // field access only.  It does not change the state of the "this"
4851             // for the function as a whole. Note that the "tThis" is a copy
4852             // of the original "this" type (*tiThis) passed in.
4853             tThis.SetInitialisedObjRef();
4854         }
4855
4856         Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch");
4857     }
4858
4859     // Presently the JIT does not check that we don't store or take the address of init-only fields
4860     // since we cannot guarantee their immutability and it is not a security issue.
4861
4862     // check any constraints on the field's class --- accessing the field might cause a class constructor to run.
4863     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass),
4864                    "field has unsatisfied class constraints");
4865     if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED)
4866     {
4867         Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass),
4868                "Accessing protected method through wrong type.");
4869     }
4870 }
4871
4872 void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode)
4873 {
4874     if (tiOp1.IsNumberType())
4875     {
4876 #ifdef _TARGET_64BIT_
4877         Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch");
4878 #else  // _TARGET_64BIT
4879         // [10/17/2013] Consider changing this: to put on my verification lawyer hat,
4880         // this is non-conforming to the ECMA Spec: types don't have to be equivalent,
4881         // but compatible, since we can coalesce native int with int32 (see section III.1.5).
4882         Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch");
4883 #endif // !_TARGET_64BIT_
4884     }
4885     else if (tiOp1.IsObjRef())
4886     {
4887         switch (opcode)
4888         {
4889             case CEE_BEQ_S:
4890             case CEE_BEQ:
4891             case CEE_BNE_UN_S:
4892             case CEE_BNE_UN:
4893             case CEE_CEQ:
4894             case CEE_CGT_UN:
4895                 break;
4896             default:
4897                 Verify(FALSE, "Cond not allowed on object types");
4898         }
4899         Verify(tiOp2.IsObjRef(), "Cond type mismatch");
4900     }
4901     else if (tiOp1.IsByRef())
4902     {
4903         Verify(tiOp2.IsByRef(), "Cond type mismatch");
4904     }
4905     else
4906     {
4907         Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch");
4908     }
4909 }
4910
4911 void Compiler::verVerifyThisPtrInitialised()
4912 {
4913     if (verTrackObjCtorInitState)
4914     {
4915         Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized");
4916     }
4917 }
4918
4919 BOOL Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target)
4920 {
4921     // Either target == context, in which case we are calling an alternate .ctor,
4922     // Or target is the immediate parent of context
4923
4924     return ((target == context) || (target == info.compCompHnd->getParentType(context)));
4925 }
4926
4927 GenTreePtr Compiler::impImportLdvirtftn(GenTreePtr              thisPtr,
4928                                         CORINFO_RESOLVED_TOKEN* pResolvedToken,
4929                                         CORINFO_CALL_INFO*      pCallInfo)
4930 {
4931     if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE))
4932     {
4933         NO_WAY("Virtual call to a function added via EnC is not supported");
4934     }
4935
4936 #ifdef FEATURE_READYTORUN_COMPILER
4937     if (opts.IsReadyToRun() && !pCallInfo->exactContextNeedsRuntimeLookup)
4938     {
4939         GenTreeCall* call = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, GTF_EXCEPT,
4940                                                 gtNewArgList(thisPtr));
4941
4942         call->setEntryPoint(pCallInfo->codePointerLookup.constLookup);
4943
4944         return call;
4945     }
4946 #endif
4947
4948     // Get the exact descriptor for the static callsite
4949     GenTreePtr exactTypeDesc = impParentClassTokenToHandle(pResolvedToken);
4950     if (exactTypeDesc == nullptr)
4951     { // compDonotInline()
4952         return nullptr;
4953     }
4954
4955     GenTreePtr exactMethodDesc = impTokenToHandle(pResolvedToken);
4956     if (exactMethodDesc == nullptr)
4957     { // compDonotInline()
4958         return nullptr;
4959     }
4960
4961     GenTreeArgList* helpArgs = gtNewArgList(exactMethodDesc);
4962
4963     helpArgs = gtNewListNode(exactTypeDesc, helpArgs);
4964
4965     helpArgs = gtNewListNode(thisPtr, helpArgs);
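         // The helper argument list is now (thisPtr, exactTypeDesc, exactMethodDesc), in that order.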
4966
4967     // Call helper function.  This gets the target address of the final destination callsite.
4968
4969     return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, GTF_EXCEPT, helpArgs);
4970 }
4971
4972 /*****************************************************************************
4973  *
4974  *  Build and import a box node
4975  */
4976
4977 void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
4978 {
4979     // Get the tree for the type handle for the boxed object.  In the case
4980     // of shared generic code or ngen'd code this might be an embedded
4981     // computation.
4982     // Note we can only do this if the class constructor has been called;
4983     // we can always do it on primitive types.
4984
4985     GenTreePtr op1 = nullptr;
4986     GenTreePtr op2 = nullptr;
4987     var_types  lclTyp;
4988
4989     impSpillSpecialSideEff();
4990
4991     // Now get the expression to box from the stack.
4992     CORINFO_CLASS_HANDLE operCls;
4993     GenTreePtr           exprToBox = impPopStack(operCls).val;
4994
4995     CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
4996     if (boxHelper == CORINFO_HELP_BOX)
4997     {
4998         // we are doing 'normal' boxing.  This means that we can inline the box operation
4999         // Box(expr) gets morphed into
5000         // temp = new(clsHnd)
5001         // cpobj(temp+4, expr, clsHnd)
5002         // push temp
5003         // The code paths differ slightly below for structs and primitives because
5004         // "cpobj" differs in these cases.  In one case you get
5005         //    impAssignStructPtr(temp+4, expr, clsHnd)
5006         // and the other you get
5007         //    *(temp+4) = expr
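             // (The "+4" above is illustrative of a 32-bit layout; the code below actually offsets the
             // destination by sizeof(void*), i.e. just past the method table pointer of the boxed object.)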
5008
5009         if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM)
5010         {
5011             impBoxTemp = lvaGrabTemp(true DEBUGARG("Box Helper"));
5012         }
5013
5014         // needs to stay in use until this box expression is appended to
5015         // some other node.  We approximate this by keeping it alive until
5016         // the opcode stack becomes empty
5017         impBoxTempInUse = true;
5018
5019 #ifdef FEATURE_READYTORUN_COMPILER
5020         bool usingReadyToRunHelper = false;
5021
5022         if (opts.IsReadyToRun())
5023         {
5024             op1                   = impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
5025             usingReadyToRunHelper = (op1 != nullptr);
5026         }
5027
5028         if (!usingReadyToRunHelper)
5029 #endif
5030         {
5031             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
5032             // and the newfast call with a single call to a dynamic R2R cell that will:
5033             //      1) Load the context
5034             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
5035             //      3) Allocate and return the new object for boxing
5036             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
5037
5038             // Ensure that the value class is restored
5039             op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5040             if (op2 == nullptr)
5041             { // compDonotInline()
5042                 return;
5043             }
5044
5045             op1 = gtNewHelperCallNode(info.compCompHnd->getNewHelper(pResolvedToken, info.compMethodHnd), TYP_REF, 0,
5046                                       gtNewArgList(op2));
5047         }
5048
5049         /* Remember that this basic block contains 'new' of an object */
5050         compCurBB->bbFlags |= BBF_HAS_NEWOBJ;
5051
5052         GenTreePtr asg = gtNewTempAssign(impBoxTemp, op1);
5053
5054         GenTreePtr asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
5055
5056         op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
5057         op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
5058         op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2);
5059
5060         if (varTypeIsStruct(exprToBox))
5061         {
5062             assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls));
5063             op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL);
5064         }
5065         else
5066         {
5067             lclTyp = exprToBox->TypeGet();
5068             if (lclTyp == TYP_BYREF)
5069             {
5070                 lclTyp = TYP_I_IMPL;
5071             }
5072             CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass);
5073             if (impIsPrimitive(jitType))
5074             {
5075                 lclTyp = JITtype2varType(jitType);
5076             }
5077             assert(genActualType(exprToBox->TypeGet()) == genActualType(lclTyp) ||
5078                    varTypeIsFloating(lclTyp) == varTypeIsFloating(exprToBox->TypeGet()));
5079             var_types srcTyp = exprToBox->TypeGet();
5080             var_types dstTyp = lclTyp;
5081
5082             if (srcTyp != dstTyp)
5083             {
5084                 assert((varTypeIsFloating(srcTyp) && varTypeIsFloating(dstTyp)) ||
5085                        (varTypeIsIntegral(srcTyp) && varTypeIsIntegral(dstTyp)));
5086                 exprToBox = gtNewCastNode(dstTyp, exprToBox, dstTyp);
5087             }
5088             op1 = gtNewAssignNode(gtNewOperNode(GT_IND, lclTyp, op1), exprToBox);
5089         }
5090
5091         op2 = gtNewLclvNode(impBoxTemp, TYP_REF);
5092         op1 = gtNewOperNode(GT_COMMA, TYP_REF, op1, op2);
5093
5094         // Record that this is a "box" node.
5095         op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt);
5096
5097         // If it is a value class, mark the "box" node.  We can use this information
5098         // to optimise several cases:
5099         //    "box(x) == null" --> false
5100         //    "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod"
5101         //    "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod"
5102
5103         op1->gtFlags |= GTF_BOX_VALUE;
5104         assert(op1->IsBoxedValue());
5105         assert(asg->gtOper == GT_ASG);
5106     }
5107     else
5108     {
5109         // Don't optimize, just call the helper and be done with it
5110
5111         // Ensure that the value class is restored
5112         op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5113         if (op2 == nullptr)
5114         { // compDonotInline()
5115             return;
5116         }
5117
5118         GenTreeArgList* args = gtNewArgList(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true));
5119         op1                  = gtNewHelperCallNode(boxHelper, TYP_REF, GTF_EXCEPT, args);
5120     }
5121
5122     /* Push the result back on the stack, */
5123     /* even if clsHnd is a value class we want the TI_REF */
5124     typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass));
5125     impPushOnStack(op1, tiRetVal);
5126 }
5127
5128 //------------------------------------------------------------------------
5129 // impImportNewObjArray: Build and import `new` of multi-dimensional array
5130 //
5131 // Arguments:
5132 //    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5133 //                     by a call to CEEInfo::resolveToken().
5134 //    pCallInfo - The CORINFO_CALL_INFO that has been initialized
5135 //                by a call to CEEInfo::getCallInfo().
5136 //
5137 // Assumptions:
5138 //    The multi-dimensional array constructor arguments (array dimensions) are
5139 //    pushed on the IL stack on entry to this method.
5140 //
5141 // Notes:
5142 //    Multi-dimensional array constructors are imported as calls to a JIT
5143 //    helper, not as regular calls.
5144
5145 void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
5146 {
5147     GenTreePtr classHandle = impParentClassTokenToHandle(pResolvedToken);
5148     if (classHandle == nullptr)
5149     { // compDonotInline()
5150         return;
5151     }
5152
5153     assert(pCallInfo->sig.numArgs);
5154
5155     GenTreePtr      node;
5156     GenTreeArgList* args;
5157
5158     //
5159     // There are two different JIT helpers that can be used to allocate
5160     // multi-dimensional arrays:
5161     //
5162     // - CORINFO_HELP_NEW_MDARR - takes the array dimensions as varargs.
5163     //      This variant is deprecated. It should be eventually removed.
5164     //
5165     // - CORINFO_HELP_NEW_MDARR_NONVARARG - takes the array dimensions as
5166     //      pointer to block of int32s. This variant is more portable.
5167     //
5168     // The non-varargs helper is enabled for CoreRT only for now. Enabling this
5169     // unconditionally would require a ReadyToRun version bump.
5170     //
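         // For illustration (hypothetical C# source): "new int[d0, d1]" pushes d0 and d1 on the IL stack,
         // and on the non-varargs path below is imported roughly as
         //     CORINFO_HELP_NEW_MDARR_NONVARARG(arrayClsHnd, 2, &lvaNewObjArrayArgs)
         // where the lvaNewObjArrayArgs block temp holds the two int32 dimensions.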
5171     CLANG_FORMAT_COMMENT_ANCHOR;
5172
5173 #if COR_JIT_EE_VERSION > 460
5174     if (!opts.IsReadyToRun() || IsTargetAbi(CORINFO_CORERT_ABI))
5175     {
5176         LclVarDsc* newObjArrayArgsVar;
5177
5178         // Reuse the temp used to pass the array dimensions to avoid bloating
5179         // the stack frame in case there are multiple calls to multi-dim array
5180         // constructors within a single method.
5181         if (lvaNewObjArrayArgs == BAD_VAR_NUM)
5182         {
5183             lvaNewObjArrayArgs                       = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs"));
5184             lvaTable[lvaNewObjArrayArgs].lvType      = TYP_BLK;
5185             lvaTable[lvaNewObjArrayArgs].lvExactSize = 0;
5186         }
5187
5188         // Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers
5189         // for our call to CORINFO_HELP_NEW_MDARR_NONVARARG.
5190         lvaTable[lvaNewObjArrayArgs].lvExactSize =
5191             max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32));
5192
5193         // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects
5194         // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments
5195         // to one allocation at a time.
5196         impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray"));
5197
5198         //
5199         // The arguments of the CORINFO_HELP_NEW_MDARR_NONVARARG helper are:
5200         //  - Array class handle
5201         //  - Number of dimension arguments
5202         //  - Pointer to block of int32 dimensions - address  of lvaNewObjArrayArgs temp.
5203         //
5204
5205         node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5206         node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node);
5207
5208         // Pop dimension arguments from the stack one at a time and store it
5209         // into lvaNewObjArrayArgs temp.
5210         for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--)
5211         {
5212             GenTreePtr arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT);
5213
5214             GenTreePtr dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5215             dest            = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest);
5216             dest            = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest,
5217                                  new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i));
5218             dest = gtNewOperNode(GT_IND, TYP_INT, dest);
5219
5220             node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node);
5221         }
5222
5223         args = gtNewArgList(node);
5224
5225         // pass number of arguments to the helper
5226         args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5227
5228         args = gtNewListNode(classHandle, args);
5229
5230         node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR_NONVARARG, TYP_REF, 0, args);
5231     }
5232     else
5233 #endif
5234     {
5235         //
5236         // The varargs helper needs the type and method handles as last
5237         // and  last-1 param (this is a cdecl call, so args will be
5238         // pushed in reverse order on the CPU stack)
5239         //
5240
5241         args = gtNewArgList(classHandle);
5242
5243         // pass number of arguments to the helper
5244         args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5245
5246         unsigned argFlags = 0;
5247         args              = impPopList(pCallInfo->sig.numArgs, &argFlags, &pCallInfo->sig, args);
5248
5249         node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, 0, args);
5250
5251         // varargs, so we pop the arguments
5252         node->gtFlags |= GTF_CALL_POP_ARGS;
5253
5254 #ifdef DEBUG
5255         // At the present time we don't track Caller pop arguments
5256         // that have GC references in them
5257         for (GenTreeArgList* temp = args; temp; temp = temp->Rest())
5258         {
5259             assert(temp->Current()->gtType != TYP_REF);
5260         }
5261 #endif
5262     }
5263
5264     node->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5265     node->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass;
5266
5267     // Remember that this basic block contains 'new' of a md array
5268     compCurBB->bbFlags |= BBF_HAS_NEWARRAY;
5269
5270     impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass));
5271 }
5272
5273 GenTreePtr Compiler::impTransformThis(GenTreePtr              thisPtr,
5274                                       CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
5275                                       CORINFO_THIS_TRANSFORM  transform)
5276 {
5277     switch (transform)
5278     {
5279         case CORINFO_DEREF_THIS:
5280         {
5281             GenTreePtr obj = thisPtr;
5282
5283             // This does a LDIND on the obj, which should be a byref pointing to a ref
5284             impBashVarAddrsToI(obj);
5285             assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF);
5286             CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5287
5288             obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj);
5289             // ldind could point anywhere, for example a boxed class static int
5290             obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
5291
5292             return obj;
5293         }
5294
5295         case CORINFO_BOX_THIS:
5296         {
5297             // Constraint calls where there might be no
5298             // unboxed entry point require us to implement the call via helper.
5299             // These only occur when a possible target of the call
5300             // may have inherited an implementation of an interface
5301             // method from System.Object or System.ValueType.  The EE does not provide us with
5302             // "unboxed" versions of these methods.
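                 // For illustration (hypothetical C# source): calling ToString() through a constraint on a
                 // struct type parameter that does not override ToString() must box the value and dispatch
                 // to System.Object::ToString(), because no unboxed entry point exists.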
5303
5304             GenTreePtr obj = thisPtr;
5305
5306             assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL);
5307             obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj);
5308             obj->gtFlags |= GTF_EXCEPT;
5309
5310             CorInfoType jitTyp  = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5311             var_types   objType = JITtype2varType(jitTyp);
5312             if (impIsPrimitive(jitTyp))
5313             {
5314                 if (obj->OperIsBlk())
5315                 {
5316                     obj->ChangeOperUnchecked(GT_IND);
5317
5318                     // Obj could point anywhere, for example a boxed class static int
5319                     obj->gtFlags |= GTF_IND_TGTANYWHERE;
5320                     obj->gtOp.gtOp2 = nullptr; // must be zero for tree walkers
5321                 }
5322
5323                 obj->gtType = JITtype2varType(jitTyp);
5324                 assert(varTypeIsArithmetic(obj->gtType));
5325             }
5326
5327             // This pushes on the dereferenced byref
5328             // This is then used immediately to box.
5329             impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack());
5330
5331             // This pops off the byref-to-a-value-type remaining on the stack and
5332             // replaces it with a boxed object.
5333             // This is then used as the object to the virtual call immediately below.
5334             impImportAndPushBox(pConstrainedResolvedToken);
5335             if (compDonotInline())
5336             {
5337                 return nullptr;
5338             }
5339
5340             obj = impPopStack().val;
5341             return obj;
5342         }
5343         case CORINFO_NO_THIS_TRANSFORM:
5344         default:
5345             return thisPtr;
5346     }
5347 }
5348
5349 //------------------------------------------------------------------------
5350 // impCanPInvokeInline: examine information from a call to see if the call
5351 // qualifies as an inline pinvoke.
5352 //
5353 // Arguments:
5354 //    block      - block containing the call, or for inlinees, block
5355 //                 containing the call being inlined
5356 //
5357 // Return Value:
5358 //    true if this call qualifies as an inline pinvoke, false otherwise
5359 //
5360 // Notes:
5361 //    Checks basic legality and then a number of ambient conditions
5362 //    where we could pinvoke but choose not to
5363
5364 bool Compiler::impCanPInvokeInline(BasicBlock* block)
5365 {
5366     return impCanPInvokeInlineCallSite(block) && getInlinePInvokeEnabled() && (!opts.compDbgCode) &&
5367            (compCodeOpt() != SMALL_CODE) && (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
5368         ;
5369 }
5370
5371 //------------------------------------------------------------------------
5372 // impCanPInvokeInlineCallSite: basic legality checks using information
5373 // from a call to see if the call qualifies as an inline pinvoke.
5374 //
5375 // Arguments:
5376 //    block      - block containing the call, or for inlinees, block
5377 //                 containing the call being inlined
5378 //
5379 // Return Value:
5380 //    true if this call can legally qualify as an inline pinvoke, false otherwise
5381 //
5382 // Notes:
5383 //    For runtimes that support exception handling interop there are
5384 //    restrictions on using inline pinvoke in handler regions.
5385 //
5386 //    * We have to disable pinvoke inlining inside of filters because
5387 //    in case the main execution (i.e. in the try block) is inside
5388 //    unmanaged code, we cannot reuse the inlined stub (we still need
5389 //    the original state until we are in the catch handler)
5390 //
5391 //    * We disable pinvoke inlining inside handlers since the GSCookie
5392 //    is in the inlined Frame (see
5393 //    CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but
5394 //    this would not protect framelets/return-address of handlers.
5395 //
5396 //    These restrictions are currently also in place for CoreCLR but
5397 //    can be relaxed when coreclr/#8459 is addressed.
5398
5399 bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block)
5400 {
5401 #ifdef _TARGET_AMD64_
5402     // On x64, we disable pinvoke inlining inside of try regions.
5403     // Here is the comment from JIT64 explaining why:
5404     //
5405     //   [VSWhidbey: 611015] - because the jitted code links in the
5406     //   Frame (instead of the stub) we rely on the Frame not being
5407     //   'active' until inside the stub.  This normally happens by the
5408     //   stub setting the return address pointer in the Frame object
5409     //   inside the stub.  On a normal return, the return address
5410     //   pointer is zeroed out so the Frame can be safely re-used, but
5411     //   if an exception occurs, nobody zeros out the return address
5412     //   pointer.  Thus if we re-used the Frame object, it would go
5413     //   'active' as soon as we link it into the Frame chain.
5414     //
5415     //   Technically we only need to disable PInvoke inlining if we're
5416     //   in a handler or if we're in a try body with a catch or
5417     //   filter/except where other non-handler code in this method
5418     //   might run and try to re-use the dirty Frame object.
5419     //
5420     //   A desktop test case where this seems to matter is
5421     //   jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe
5422     const bool inX64Try = block->hasTryIndex();
5423 #else
5424     const bool inX64Try = false;
5425 #endif // _TARGET_AMD64_
5426
5427     return !inX64Try && !block->hasHndIndex();
5428 }
5429
5430 //------------------------------------------------------------------------
5431 // impCheckForPInvokeCall: examine a call to see if it is a pinvoke and, if so,
5432 // whether it can be expressed as an inline pinvoke.
5433 //
5434 // Arguments:
5435 //    call       - tree for the call
5436 //    methHnd    - handle for the method being called (may be null)
5437 //    sig        - signature of the method being called
5438 //    mflags     - method flags for the method being called
5439 //    block      - block containing the call, or for inlinees, block
5440 //                 containing the call being inlined
5441 //
5442 // Notes:
5443 //   Sets GTF_CALL_M_PINVOKE on the call for pinvokes.
5444 //
5445 //   Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the
5446 //   call passes a combination of legality and profitability checks.
5447 //
5448 //   If GTF_CALL_UNMANAGED is set, increments info.compCallUnmanaged
5449
5450 void Compiler::impCheckForPInvokeCall(
5451     GenTreePtr call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block)
5452 {
5453     CorInfoUnmanagedCallConv unmanagedCallConv;
5454
5455     // If VM flagged it as Pinvoke, flag the call node accordingly
5456     if ((mflags & CORINFO_FLG_PINVOKE) != 0)
5457     {
5458         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_PINVOKE;
5459     }
5460
5461     if (methHnd)
5462     {
5463         if ((mflags & CORINFO_FLG_PINVOKE) == 0 || (mflags & CORINFO_FLG_NOSECURITYWRAP) == 0)
5464         {
5465             return;
5466         }
5467
5468         unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd);
5469     }
5470     else
5471     {
5472         CorInfoCallConv callConv = CorInfoCallConv(sig->callConv & CORINFO_CALLCONV_MASK);
5473         if (callConv == CORINFO_CALLCONV_NATIVEVARARG)
5474         {
5475             // Used by the IL Stubs.
5476             callConv = CORINFO_CALLCONV_C;
5477         }
5478         static_assert_no_msg((unsigned)CORINFO_CALLCONV_C == (unsigned)CORINFO_UNMANAGED_CALLCONV_C);
5479         static_assert_no_msg((unsigned)CORINFO_CALLCONV_STDCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_STDCALL);
5480         static_assert_no_msg((unsigned)CORINFO_CALLCONV_THISCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_THISCALL);
5481         unmanagedCallConv = CorInfoUnmanagedCallConv(callConv);
5482
5483         assert(!call->gtCall.gtCallCookie);
5484     }
5485
5486     if (unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_C && unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_STDCALL &&
5487         unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_THISCALL)
5488     {
5489         return;
5490     }
5491     optNativeCallCount++;
5492
5493     if (opts.compMustInlinePInvokeCalli && methHnd == nullptr)
5494     {
5495         // Always inline pinvoke.
5496     }
5497     else
5498     {
5499         // Check legality and profitability.
5500         if (!impCanPInvokeInline(block))
5501         {
5502             return;
5503         }
5504
5505         if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig))
5506         {
5507             return;
5508         }
5509
5510         // Size-speed tradeoff: don't use inline pinvoke at rarely
5511         // executed call sites.  The non-inline version is more
5512         // compact.
5513         if (block->isRunRarely())
5514         {
5515             return;
5516         }
5517     }
5518
5519     JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s", info.compFullName));
5520
5521     call->gtFlags |= GTF_CALL_UNMANAGED;
5522     info.compCallUnmanaged++;
5523
5524     // AMD64 convention is same for native and managed
5525     if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_C)
5526     {
5527         call->gtFlags |= GTF_CALL_POP_ARGS;
5528     }
5529
5530     if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_THISCALL)
5531     {
5532         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL;
5533     }
5534 }
5535
5536 GenTreePtr Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, IL_OFFSETX ilOffset)
5537 {
5538     var_types callRetTyp = JITtype2varType(sig->retType);
5539
5540     /* The function pointer is on top of the stack - It may be a
5541      * complex expression. As it is evaluated after the args,
5542      * it may cause registered args to be spilled. Simply spill it.
5543      */
5544
5545     // Ignore the trivial case where the function pointer is already a local variable.
5546     if (impStackTop().val->gtOper != GT_LCL_VAR)
5547     {
5548         impSpillStackEntry(verCurrentState.esStackDepth - 1,
5549                            BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall"));
5550     }
5551
5552     /* Get the function pointer */
5553
5554     GenTreePtr fptr = impPopStack().val;
5555     assert(genActualType(fptr->gtType) == TYP_I_IMPL);
5556
5557 #ifdef DEBUG
5558     // This temporary must never be converted to a double in stress mode,
5559     // because that can introduce a call to the cast helper after the
5560     // arguments have already been evaluated.
5561
5562     if (fptr->OperGet() == GT_LCL_VAR)
5563     {
5564         lvaTable[fptr->gtLclVarCommon.gtLclNum].lvKeepType = 1;
5565     }
5566 #endif
5567
5568     /* Create the call node */
5569
5570     GenTreePtr call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
5571
5572     call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
5573
5574     return call;
5575 }
5576
5577 /*****************************************************************************/
5578
5579 void Compiler::impPopArgsForUnmanagedCall(GenTreePtr call, CORINFO_SIG_INFO* sig)
5580 {
5581     assert(call->gtFlags & GTF_CALL_UNMANAGED);
5582
5583     /* Since we push the arguments in reverse order (i.e. right -> left)
5584      * spill any side effects from the stack
5585      *
5586      * OBS: If there is only one side effect we do not need to spill it
5587      *      thus we have to spill all side-effects except last one
5588      */
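    // For example, if three of the arguments being reversed carry GTF_SIDE_EFFECT,
    // the two at the lower stack levels are spilled into temps and only the topmost
    // one is left on the stack; a single side effect needs no spill at all.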
5589
5590     unsigned lastLevelWithSideEffects = UINT_MAX;
5591
5592     unsigned argsToReverse = sig->numArgs;
5593
5594     // For "thiscall", the first argument goes in a register. Since its
5595     // order does not need to be changed, we do not need to spill it
5596
5597     if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
5598     {
5599         assert(argsToReverse);
5600         argsToReverse--;
5601     }
5602
5603 #ifndef _TARGET_X86_
5604     // Don't reverse args on ARM or x64 - first four args always placed in regs in order
5605     argsToReverse = 0;
5606 #endif
5607
5608     for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++)
5609     {
5610         if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF)
5611         {
5612             assert(lastLevelWithSideEffects == UINT_MAX);
5613
5614             impSpillStackEntry(level,
5615                                BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect"));
5616         }
5617         else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT)
5618         {
5619             if (lastLevelWithSideEffects != UINT_MAX)
5620             {
5621                 /* We had a previous side effect - must spill it */
5622                 impSpillStackEntry(lastLevelWithSideEffects,
5623                                    BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect"));
5624
5625                 /* Record the level for the current side effect in case we will spill it */
5626                 lastLevelWithSideEffects = level;
5627             }
5628             else
5629             {
5630                 /* This is the first side effect encountered - record its level */
5631
5632                 lastLevelWithSideEffects = level;
5633             }
5634         }
5635     }
5636
5637     /* The argument list is now "clean" - no out-of-order side effects
5638      * Pop the argument list in reverse order */
5639
5640     unsigned   argFlags = 0;
5641     GenTreePtr args     = call->gtCall.gtCallArgs =
5642         impPopRevList(sig->numArgs, &argFlags, sig, sig->numArgs - argsToReverse);
5643
5644     if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
5645     {
5646         GenTreePtr thisPtr = args->Current();
5647         impBashVarAddrsToI(thisPtr);
5648         assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF);
5649     }
5650
5651     if (args)
5652     {
5653         call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5654     }
5655 }
5656
5657 //------------------------------------------------------------------------
5658 // impInitClass: Build a node to initialize the class before accessing the
5659 //               field if necessary
5660 //
5661 // Arguments:
5662 //    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5663 //                     by a call to CEEInfo::resolveToken().
5664 //
5665 // Return Value: If needed, a pointer to the node that will perform the class
5666 //               initialization.  Otherwise, nullptr.
5667 //
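// Notes:
//    The returned node is either a call to the CORINFO_HELP_INITCLASS helper
//    (when the class handle requires a runtime lookup) or the shared static
//    constructor helper obtained from fgGetSharedCCtor.
//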
5668
5669 GenTreePtr Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken)
5670 {
5671     CorInfoInitClassResult initClassResult =
5672         info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle);
5673
5674     if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0)
5675     {
5676         return nullptr;
5677     }
5678     BOOL runtimeLookup;
5679
5680     GenTreePtr node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup);
5681
5682     if (node == nullptr)
5683     {
5684         assert(compDonotInline());
5685         return nullptr;
5686     }
5687
5688     if (runtimeLookup)
5689     {
5690         node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, 0, gtNewArgList(node));
5691     }
5692     else
5693     {
5694         // Call the shared non-GC static helper, as it's the fastest
5695         node = fgGetSharedCCtor(pResolvedToken->hClass);
5696     }
5697
5698     return node;
5699 }
5700
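// impImportStaticReadOnlyField: read the current value of a static read-only field
// directly from its address at jit time and return it as a constant node, e.g. an
// int-typed field becomes a GT_CNS_INT node and a double-typed field a GT_CNS_DBL node.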
5701 GenTreePtr Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp)
5702 {
5703     GenTreePtr op1 = nullptr;
5704
5705     switch (lclTyp)
5706     {
5707         int     ival;
5708         __int64 lval;
5709         double  dval;
5710
5711         case TYP_BOOL:
5712             ival = *((bool*)fldAddr);
5713             goto IVAL_COMMON;
5714
5715         case TYP_BYTE:
5716             ival = *((signed char*)fldAddr);
5717             goto IVAL_COMMON;
5718
5719         case TYP_UBYTE:
5720             ival = *((unsigned char*)fldAddr);
5721             goto IVAL_COMMON;
5722
5723         case TYP_SHORT:
5724             ival = *((short*)fldAddr);
5725             goto IVAL_COMMON;
5726
5727         case TYP_CHAR:
5728         case TYP_USHORT:
5729             ival = *((unsigned short*)fldAddr);
5730             goto IVAL_COMMON;
5731
5732         case TYP_UINT:
5733         case TYP_INT:
5734             ival = *((int*)fldAddr);
5735         IVAL_COMMON:
5736             op1 = gtNewIconNode(ival);
5737             break;
5738
5739         case TYP_LONG:
5740         case TYP_ULONG:
5741             lval = *((__int64*)fldAddr);
5742             op1  = gtNewLconNode(lval);
5743             break;
5744
5745         case TYP_FLOAT:
5746             dval = *((float*)fldAddr);
5747             op1  = gtNewDconNode(dval);
5748 #if !FEATURE_X87_DOUBLES
5749             // X87 stack doesn't differentiate between float/double
5750             // so R4 is treated as R8, but everybody else does
5751             op1->gtType = TYP_FLOAT;
5752 #endif // FEATURE_X87_DOUBLES
5753             break;
5754
5755         case TYP_DOUBLE:
5756             dval = *((double*)fldAddr);
5757             op1  = gtNewDconNode(dval);
5758             break;
5759
5760         default:
5761             assert(!"Unexpected lclTyp");
5762             break;
5763     }
5764
5765     return op1;
5766 }
5767
5768 GenTreePtr Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
5769                                                 CORINFO_ACCESS_FLAGS    access,
5770                                                 CORINFO_FIELD_INFO*     pFieldInfo,
5771                                                 var_types               lclTyp)
5772 {
5773     GenTreePtr op1;
5774
5775     switch (pFieldInfo->fieldAccessor)
5776     {
5777         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
5778         {
5779             assert(!compIsForInlining());
5780
5781             // We first call a special helper to get the statics base pointer
5782             op1 = impParentClassTokenToHandle(pResolvedToken);
5783
5784             // compIsForInlining() is false so we should never get NULL here
5785             assert(op1 != nullptr);
5786
5787             var_types type = TYP_BYREF;
5788
5789             switch (pFieldInfo->helper)
5790             {
5791                 case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
5792                     type = TYP_I_IMPL;
5793                     break;
5794                 case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
5795                 case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
5796                 case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
5797                     break;
5798                 default:
5799                     assert(!"unknown generic statics helper");
5800                     break;
5801             }
5802
5803             op1 = gtNewHelperCallNode(pFieldInfo->helper, type, 0, gtNewArgList(op1));
5804
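            // The address of the static is the statics base returned by the helper plus
            // the field's offset; the field sequence attached to the offset constant lets
            // later phases identify which static field is being addressed.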
5805             FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5806             op1              = gtNewOperNode(GT_ADD, type, op1,
5807                                 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
5808         }
5809         break;
5810
5811         case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
5812         {
5813 #ifdef FEATURE_READYTORUN_COMPILER
5814             if (opts.IsReadyToRun())
5815             {
5816                 unsigned callFlags = 0;
5817
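                // For beforefieldinit classes the exact point at which the class constructor
                // runs is not observable, so the static base helper call may be hoisted.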
5818                 if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
5819                 {
5820                     callFlags |= GTF_CALL_HOISTABLE;
5821                 }
5822
5823                 op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF, callFlags);
5824
5825                 op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
5826             }
5827             else
5828 #endif
5829             {
5830                 op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper);
5831             }
5832
5833             {
5834                 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5835                 op1              = gtNewOperNode(GT_ADD, op1->TypeGet(), op1,
5836                                     new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, pFieldInfo->offset, fs));
5837             }
5838             break;
5839         }
5840 #if COR_JIT_EE_VERSION > 460
5841         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
5842         {
5843 #ifdef FEATURE_READYTORUN_COMPILER
5844             noway_assert(opts.IsReadyToRun());
5845             CORINFO_LOOKUP_KIND kind = info.compCompHnd->getLocationOfThisType(info.compMethodHnd);
5846             assert(kind.needsRuntimeLookup);
5847
5848             GenTreePtr      ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);
5849             GenTreeArgList* args    = gtNewArgList(ctxTree);
5850
5851             unsigned callFlags = 0;
5852
5853             if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
5854             {
5855                 callFlags |= GTF_CALL_HOISTABLE;
5856             }
5857             var_types type = TYP_BYREF;
5858             op1            = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, callFlags, args);
5859
5860             op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
5861             FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5862             op1              = gtNewOperNode(GT_ADD, type, op1,
5863                                 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
5864 #else
5865             unreached();
5866 #endif // FEATURE_READYTORUN_COMPILER
5867         }
5868         break;
5869 #endif // COR_JIT_EE_VERSION > 460
5870         default:
5871         {
5872             if (!(access & CORINFO_ACCESS_ADDRESS))
5873             {
5874                 // In future, it may be better to just create the right tree here instead of folding it later.
5875                 op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField);
5876
5877                 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
5878                 {
5879                     op1->gtType = TYP_REF; // points at boxed object
5880                     FieldSeqNode* firstElemFldSeq =
5881                         GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
5882                     op1 =
5883                         gtNewOperNode(GT_ADD, TYP_BYREF, op1,
5884                                       new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), firstElemFldSeq));
5885
5886                     if (varTypeIsStruct(lclTyp))
5887                     {
5888                         // Constructor adds GTF_GLOB_REF.  Note that this is *not* GTF_EXCEPT.
5889                         op1 = gtNewObjNode(pFieldInfo->structType, op1);
5890                     }
5891                     else
5892                     {
5893                         op1 = gtNewOperNode(GT_IND, lclTyp, op1);
5894                         op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING;
5895                     }
5896                 }
5897
5898                 return op1;
5899             }
5900             else
5901             {
5902                 void** pFldAddr = nullptr;
5903                 void*  fldAddr  = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr);
5904
5905                 FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5906
5907                 /* Create the data member node */
5908                 if (pFldAddr == nullptr)
5909                 {
5910                     op1 = gtNewIconHandleNode((size_t)fldAddr, GTF_ICON_STATIC_HDL, fldSeq);
5911                 }
5912                 else
5913                 {
5914                     op1 = gtNewIconHandleNode((size_t)pFldAddr, GTF_ICON_STATIC_HDL, fldSeq);
5915
5916                     // There are two cases here, either the static is RVA based,
5917                     // in which case the type of the FIELD node is not a GC type
5918                     // and the handle to the RVA is a TYP_I_IMPL.  Or the FIELD node is
5919                     // a GC type and the handle to it is a TYP_BYREF in the GC heap
5920                     // because handles to statics now go into the large object heap
5921
5922                     var_types handleTyp = (var_types)(varTypeIsGC(lclTyp) ? TYP_BYREF : TYP_I_IMPL);
5923                     op1                 = gtNewOperNode(GT_IND, handleTyp, op1);
5924                     op1->gtFlags |= GTF_IND_INVARIANT | GTF_IND_NONFAULTING;
5925                 }
5926             }
5927             break;
5928         }
5929     }
5930
5931     if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
5932     {
5933         op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
5934
5935         FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
5936
5937         op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
5938                             new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), fldSeq));
5939     }
5940
5941     if (!(access & CORINFO_ACCESS_ADDRESS))
5942     {
5943         op1 = gtNewOperNode(GT_IND, lclTyp, op1);
5944         op1->gtFlags |= GTF_GLOB_REF;
5945     }
5946
5947     return op1;
5948 }
5949
5950 // In general, try to call this before most of the verification work.  Most people expect the access
5951 // exceptions before the verification exceptions.  If you call this afterwards, that usually doesn't happen.  It
5952 // turns out that if you can't access something, we also consider you unverifiable for other reasons.
5953 void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
5954 {
5955     if (result != CORINFO_ACCESS_ALLOWED)
5956     {
5957         impHandleAccessAllowedInternal(result, helperCall);
5958     }
5959 }
5960
5961 void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
5962 {
5963     switch (result)
5964     {
5965         case CORINFO_ACCESS_ALLOWED:
5966             break;
5967         case CORINFO_ACCESS_ILLEGAL:
5968             // if we're verifying, then we need to reject the illegal access to ensure that we don't think the
5969             // method is verifiable.  Otherwise, delay the exception to runtime.
5970             if (compIsForImportOnly())
5971             {
5972                 info.compCompHnd->ThrowExceptionForHelper(helperCall);
5973             }
5974             else
5975             {
5976                 impInsertHelperCall(helperCall);
5977             }
5978             break;
5979         case CORINFO_ACCESS_RUNTIME_CHECK:
5980             impInsertHelperCall(helperCall);
5981             break;
5982     }
5983 }
5984
5985 void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo)
5986 {
5987     // Construct the argument list
5988     GenTreeArgList* args = nullptr;
5989     assert(helperInfo->helperNum != CORINFO_HELP_UNDEF);
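    // Walk the helper arguments from last to first, prepending each one, so that the
    // resulting argument list ends up in the original (first-to-last) order.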
5990     for (unsigned i = helperInfo->numArgs; i > 0; --i)
5991     {
5992         const CORINFO_HELPER_ARG& helperArg  = helperInfo->args[i - 1];
5993         GenTreePtr                currentArg = nullptr;
5994         switch (helperArg.argType)
5995         {
5996             case CORINFO_HELPER_ARG_TYPE_Field:
5997                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
5998                     info.compCompHnd->getFieldClass(helperArg.fieldHandle));
5999                 currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle);
6000                 break;
6001             case CORINFO_HELPER_ARG_TYPE_Method:
6002                 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle);
6003                 currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle);
6004                 break;
6005             case CORINFO_HELPER_ARG_TYPE_Class:
6006                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle);
6007                 currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle);
6008                 break;
6009             case CORINFO_HELPER_ARG_TYPE_Module:
6010                 currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle);
6011                 break;
6012             case CORINFO_HELPER_ARG_TYPE_Const:
6013                 currentArg = gtNewIconNode(helperArg.constant);
6014                 break;
6015             default:
6016                 NO_WAY("Illegal helper arg type");
6017         }
6018         args = (currentArg == nullptr) ? gtNewArgList(currentArg) : gtNewListNode(currentArg, args);
6019     }
6020
6021     /* TODO-Review:
6022      * Mark as CSE'able, and hoistable.  Consider marking hoistable unless you're in the inlinee.
6023      * Also, consider sticking this in the first basic block.
6024      */
6025     GenTreePtr callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, GTF_EXCEPT, args);
6026     impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6027 }
6028
6029 void Compiler::impInsertCalloutForDelegate(CORINFO_METHOD_HANDLE callerMethodHnd,
6030                                            CORINFO_METHOD_HANDLE calleeMethodHnd,
6031                                            CORINFO_CLASS_HANDLE  delegateTypeHnd)
6032 {
6033 #ifdef FEATURE_CORECLR
6034     if (!info.compCompHnd->isDelegateCreationAllowed(delegateTypeHnd, calleeMethodHnd))
6035     {
6036         // Call the JIT_DelegateSecurityCheck helper before calling the actual function.
6037         // This helper throws an exception if the CLR host disallows the call.
6038
6039         GenTreePtr helper = gtNewHelperCallNode(CORINFO_HELP_DELEGATE_SECURITY_CHECK, TYP_VOID, GTF_EXCEPT,
6040                                                 gtNewArgList(gtNewIconEmbClsHndNode(delegateTypeHnd),
6041                                                              gtNewIconEmbMethHndNode(calleeMethodHnd)));
6042         // Append the callout statement
6043         impAppendTree(helper, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6044     }
6045 #endif // FEATURE_CORECLR
6046 }
6047
6048 // Checks whether the return types of caller and callee are compatible
6049 // so that the callee can be tail called. Note that here we don't check
6050 // compatibility in the IL Verifier sense, but rather whether the return type
6051 // sizes are equal and the values are returned in the same return register.
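// For example, identical return types are trivially compatible, and on AMD64/ARM64 a
// void-returning caller may tail call a non-void callee (the call + pop + ret pattern
// allowed by Jit64).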
6052 bool Compiler::impTailCallRetTypeCompatible(var_types            callerRetType,
6053                                             CORINFO_CLASS_HANDLE callerRetTypeClass,
6054                                             var_types            calleeRetType,
6055                                             CORINFO_CLASS_HANDLE calleeRetTypeClass)
6056 {
6057     // Note that we can not relax this condition with genActualType() as the
6058     // calling convention dictates that the caller of a function with a small
6059     // typed return value is responsible for normalizing the return val.
6060     if (callerRetType == calleeRetType)
6061     {
6062         return true;
6063     }
6064
6065 #if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
6066     // Jit64 compat:
6067     if (callerRetType == TYP_VOID)
6068     {
6069         // This needs to be allowed to support the following IL pattern that Jit64 allows:
6070         //     tail.call
6071         //     pop
6072         //     ret
6073         //
6074         // Note that the above IL pattern is not valid as per IL verification rules.
6075         // Therefore, only full trust code can take advantage of this pattern.
6076         return true;
6077     }
6078
6079     // These checks return true if the return value type sizes are the same and
6080     // get returned in the same return register i.e. caller doesn't need to normalize
6081     // return value. Some of the tail calls permitted by below checks would have
6082     // been rejected by IL Verifier before we reached here.  Therefore, only full
6083     // trust code can make those tail calls.
6084     unsigned callerRetTypeSize = 0;
6085     unsigned calleeRetTypeSize = 0;
6086     bool     isCallerRetTypMBEnreg =
6087         VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize, true);
6088     bool isCalleeRetTypMBEnreg =
6089         VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize, true);
6090
6091     if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg)
6092     {
6093         return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
6094     }
6095 #endif // _TARGET_AMD64_ || _TARGET_ARM64_
6096
6097     return false;
6098 }
6099
6100 // For prefixFlags
6101 enum
6102 {
6103     PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix
6104     PREFIX_TAILCALL_IMPLICIT =
6105         0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix
6106     PREFIX_TAILCALL    = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT),
6107     PREFIX_VOLATILE    = 0x00000100,
6108     PREFIX_UNALIGNED   = 0x00001000,
6109     PREFIX_CONSTRAINED = 0x00010000,
6110     PREFIX_READONLY    = 0x00100000
6111 };
6112
6113 /********************************************************************************
6114  *
6115  * Returns true if the current opcode and the opcodes following it correspond
6116  * to a supported tail call IL pattern.
6117  *
6118  */
6119 bool Compiler::impIsTailCallILPattern(bool        tailPrefixed,
6120                                       OPCODE      curOpcode,
6121                                       const BYTE* codeAddrOfNextOpcode,
6122                                       const BYTE* codeEnd,
6123                                       bool        isRecursive,
6124                                       bool*       isCallPopAndRet /* = nullptr */)
6125 {
6126     // Bail out if the current opcode is not a call.
6127     if (!impOpcodeIsCallOpcode(curOpcode))
6128     {
6129         return false;
6130     }
6131
6132 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6133     // If shared ret tail opt is not enabled, we will enable
6134     // it for recursive methods.
6135     if (isRecursive)
6136 #endif
6137     {
6138         // We can actually handle the case where the ret is in a fallthrough block, as long as that is the only
6139         // part of the sequence. Make sure we don't go past the end of the IL, however.
6140         codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize);
6141     }
6142
6143     // Bail out if there is no next opcode after call
6144     if (codeAddrOfNextOpcode >= codeEnd)
6145     {
6146         return false;
6147     }
6148
6149     // Scan the opcodes to look for the following IL patterns if either
6150     //   i) the call is not tail prefixed (i.e. implicit tail call) or
6151     //  ii) if tail prefixed, IL verification is not needed for the method.
6152     //
6153     // Only in the above two cases we can allow the below tail call patterns
6154     // violating ECMA spec.
6155     //
6156     // Pattern1:
6157     //       call
6158     //       nop*
6159     //       ret
6160     //
6161     // Pattern2:
6162     //       call
6163     //       nop*
6164     //       pop
6165     //       nop*
6166     //       ret
6167     int    cntPop = 0;
6168     OPCODE nextOpcode;
6169
6170 #ifdef _TARGET_AMD64_
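    // Scan forward over any nops and at most one pop so that patterns like
    // call + nop* + [pop + nop*] + ret are still recognized.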
6171     do
6172     {
6173         nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6174         codeAddrOfNextOpcode += sizeof(__int8);
6175     } while ((codeAddrOfNextOpcode < codeEnd) &&         // Haven't reached end of method
6176              (!tailPrefixed || !tiVerificationNeeded) && // Not ".tail" prefixed or method requires no IL verification
6177              ((nextOpcode == CEE_NOP) || ((nextOpcode == CEE_POP) && (++cntPop == 1)))); // Next opcode = nop or exactly
6178                                                                                          // one pop seen so far.
6179 #else
6180     nextOpcode          = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6181 #endif
6182
6183     if (isCallPopAndRet)
6184     {
6185         // Allow call+pop+ret to be tail call optimized if caller ret type is void
6186         *isCallPopAndRet = (nextOpcode == CEE_RET) && (cntPop == 1);
6187     }
6188
6189 #ifdef _TARGET_AMD64_
6190     // Jit64 Compat:
6191     // Tail call IL pattern could be either of the following
6192     // 1) call/callvirt/calli + ret
6193     // 2) call/callvirt/calli + pop + ret in a method returning void.
6194     return (nextOpcode == CEE_RET) && ((cntPop == 0) || ((cntPop == 1) && (info.compRetType == TYP_VOID)));
6195 #else //!_TARGET_AMD64_
6196     return (nextOpcode == CEE_RET) && (cntPop == 0);
6197 #endif
6198 }
6199
6200 /*****************************************************************************
6201  *
6202  * Determine whether the call could be converted to an implicit tail call
6203  *
6204  */
6205 bool Compiler::impIsImplicitTailCallCandidate(
6206     OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive)
6207 {
6208
6209 #if FEATURE_TAILCALL_OPT
6210     if (!opts.compTailCallOpt)
6211     {
6212         return false;
6213     }
6214
6215     if (opts.compDbgCode || opts.MinOpts())
6216     {
6217         return false;
6218     }
6219
6220     // must not be tail prefixed
6221     if (prefixFlags & PREFIX_TAILCALL_EXPLICIT)
6222     {
6223         return false;
6224     }
6225
6226 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6227     // The block containing the call must be marked as BBJ_RETURN.
6228     // We allow shared ret tail call optimization on recursive calls even under
6229     // !FEATURE_TAILCALL_OPT_SHARED_RETURN.
6230     if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN))
6231         return false;
6232 #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN
6233
6234     // must be call+ret or call+pop+ret
6235     if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive))
6236     {
6237         return false;
6238     }
6239
6240     return true;
6241 #else
6242     return false;
6243 #endif // FEATURE_TAILCALL_OPT
6244 }
6245
6246 //------------------------------------------------------------------------
6247 // impImportCall: import a call-inspiring opcode
6248 //
6249 // Arguments:
6250 //    opcode                    - opcode that inspires the call
6251 //    pResolvedToken            - resolved token for the call target
6252 //    pConstrainedResolvedToken - resolved constraint token (or nullptr)
6253 //    newobjThis                - tree for this pointer or uninitialized newobj temp (or nullptr)
6254 //    prefixFlags               - IL prefix flags for the call
6255 //    callInfo                  - EE supplied info for the call
6256 //    rawILOffset               - IL offset of the opcode
6257 //
6258 // Returns:
6259 //    Type of the call's return value.
6260 //
6261 // Notes:
6262 //    opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ.
6263 //
6264 //    For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated
6265 //    uninitialized object.
6266
6267 #ifdef _PREFAST_
6268 #pragma warning(push)
6269 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
6270 #endif
6271
6272 var_types Compiler::impImportCall(OPCODE                  opcode,
6273                                   CORINFO_RESOLVED_TOKEN* pResolvedToken,
6274                                   CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
6275                                   GenTreePtr              newobjThis,
6276                                   int                     prefixFlags,
6277                                   CORINFO_CALL_INFO*      callInfo,
6278                                   IL_OFFSET               rawILOffset)
6279 {
6280     assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI);
6281
6282     IL_OFFSETX             ilOffset                       = impCurILOffset(rawILOffset, true);
6283     var_types              callRetTyp                     = TYP_COUNT;
6284     CORINFO_SIG_INFO*      sig                            = nullptr;
6285     CORINFO_METHOD_HANDLE  methHnd                        = nullptr;
6286     CORINFO_CLASS_HANDLE   clsHnd                         = nullptr;
6287     unsigned               clsFlags                       = 0;
6288     unsigned               mflags                         = 0;
6289     unsigned               argFlags                       = 0;
6290     GenTreePtr             call                           = nullptr;
6291     GenTreeArgList*        args                           = nullptr;
6292     CORINFO_THIS_TRANSFORM constraintCallThisTransform    = CORINFO_NO_THIS_TRANSFORM;
6293     CORINFO_CONTEXT_HANDLE exactContextHnd                = nullptr;
6294     BOOL                   exactContextNeedsRuntimeLookup = FALSE;
6295     bool                   canTailCall                    = true;
6296     const char*            szCanTailCallFailReason        = nullptr;
6297     int                    tailCall                       = prefixFlags & PREFIX_TAILCALL;
6298     bool                   readonlyCall                   = (prefixFlags & PREFIX_READONLY) != 0;
6299
6300     // Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could
6301     // do that before tailcalls, but that is probably not the intended
6302     // semantic. So just disallow tailcalls from synchronized methods.
6303     // Also, popping arguments in a varargs function is more work and NYI
6304     // If we have a security object, we have to keep our frame around for callers
6305     // to see any imperative security.
6306     if (info.compFlags & CORINFO_FLG_SYNCH)
6307     {
6308         canTailCall             = false;
6309         szCanTailCallFailReason = "Caller is synchronized";
6310     }
6311 #if !FEATURE_FIXED_OUT_ARGS
6312     else if (info.compIsVarArgs)
6313     {
6314         canTailCall             = false;
6315         szCanTailCallFailReason = "Caller is varargs";
6316     }
6317 #endif // FEATURE_FIXED_OUT_ARGS
6318     else if (opts.compNeedSecurityCheck)
6319     {
6320         canTailCall             = false;
6321         szCanTailCallFailReason = "Caller requires a security check.";
6322     }
6323
6324     // We only need to cast the return value of pinvoke inlined calls that return small types
6325
6326     // TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop
6327     // widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there.
6328     // The existing x64 JIT doesn't bother widening all types to int, so we have to assume for
6329     // the time being that the callee might be compiled by the other JIT and thus the return
6330     // value will need to be widened by us (or not widened at all...)
6331
6332     // ReadyToRun code sticks with default calling convention that does not widen small return types.
6333
6334     bool checkForSmallType  = opts.IsJit64Compat() || opts.IsReadyToRun();
6335     bool bIntrinsicImported = false;
6336
6337     CORINFO_SIG_INFO calliSig;
6338     GenTreeArgList*  extraArg = nullptr;
6339
6340     /*-------------------------------------------------------------------------
6341      * First create the call node
6342      */
6343
6344     if (opcode == CEE_CALLI)
6345     {
6346         /* Get the call site sig */
6347         eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &calliSig);
6348
6349         callRetTyp = JITtype2varType(calliSig.retType);
6350
6351         call = impImportIndirectCall(&calliSig, ilOffset);
6352
6353         // We don't know the target method, so we have to infer the flags, or
6354         // assume the worst-case.
6355         mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
6356
6357 #ifdef DEBUG
6358         if (verbose)
6359         {
6360             unsigned structSize =
6361                 (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0;
6362             printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
6363                    opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
6364         }
6365 #endif
6366         // This should be checked in impImportBlockCode.
6367         assert(!compIsForInlining() || !(impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY));
6368
6369         sig = &calliSig;
6370
6371 #ifdef DEBUG
6372         // We cannot lazily obtain the signature of a CALLI call because it has no method
6373         // handle that we can use, so we need to save its full call signature here.
6374         assert(call->gtCall.callSig == nullptr);
6375         call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
6376         *call->gtCall.callSig = calliSig;
6377 #endif // DEBUG
6378     }
6379     else // (opcode != CEE_CALLI)
6380     {
6381         CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Count;
6382
6383         // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to
6384         // supply the instantiation parameters necessary to make direct calls to underlying
6385         // shared generic code, rather than calling through instantiating stubs.  If the
6386         // returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT
6387         // must indeed pass an instantiation parameter.
6388
6389         methHnd = callInfo->hMethod;
6390
6391         sig        = &(callInfo->sig);
6392         callRetTyp = JITtype2varType(sig->retType);
6393
6394         mflags = callInfo->methodFlags;
6395
6396 #ifdef DEBUG
6397         if (verbose)
6398         {
6399             unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0;
6400             printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
6401                    opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
6402         }
6403 #endif
6404         if (compIsForInlining())
6405         {
6406             /* Does this call site have security boundary restrictions? */
6407
6408             if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
6409             {
6410                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
6411                 return callRetTyp;
6412             }
6413
6414             /* Does the inlinee need a security check token on the frame */
6415
6416             if (mflags & CORINFO_FLG_SECURITYCHECK)
6417             {
6418                 compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
6419                 return callRetTyp;
6420             }
6421
6422             /* Does the inlinee use StackCrawlMark */
6423
6424             if (mflags & CORINFO_FLG_DONT_INLINE_CALLER)
6425             {
6426                 compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK);
6427                 return callRetTyp;
6428             }
6429
6430             /* For now ignore delegate invoke */
6431
6432             if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
6433             {
6434                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_DELEGATE_INVOKE);
6435                 return callRetTyp;
6436             }
6437
6438             /* For now ignore varargs */
6439             if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6440             {
6441                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS);
6442                 return callRetTyp;
6443             }
6444
6445             if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
6446             {
6447                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
6448                 return callRetTyp;
6449             }
6450
6451             if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT))
6452             {
6453                 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL);
6454                 return callRetTyp;
6455             }
6456         }
6457
6458         clsHnd = pResolvedToken->hClass;
6459
6460         clsFlags = callInfo->classFlags;
6461
6462 #ifdef DEBUG
6463         // If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute.
6464
6465         // This recognition should really be done by knowing the methHnd of the relevant Mark method(s).
6466         // These should be in mscorlib.h, and available through a JIT/EE interface call.
6467         const char* modName;
6468         const char* className;
6469         const char* methodName;
6470         if ((className = eeGetClassName(clsHnd)) != nullptr &&
6471             strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 &&
6472             (methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0)
6473         {
6474             return impImportJitTestLabelMark(sig->numArgs);
6475         }
6476 #endif // DEBUG
6477
6478         // <NICE> Factor this into getCallInfo </NICE>
6479         if ((mflags & CORINFO_FLG_INTRINSIC) && !pConstrainedResolvedToken)
6480         {
6481             call = impIntrinsic(clsHnd, methHnd, sig, pResolvedToken->token, readonlyCall,
6482                                 (canTailCall && (tailCall != 0)), &intrinsicID);
6483
6484             if (call != nullptr)
6485             {
6486                 assert(!(mflags & CORINFO_FLG_VIRTUAL) || (mflags & CORINFO_FLG_FINAL) ||
6487                        (clsFlags & CORINFO_FLG_FINAL));
6488
6489 #ifdef FEATURE_READYTORUN_COMPILER
6490                 if (call->OperGet() == GT_INTRINSIC)
6491                 {
6492                     if (opts.IsReadyToRun())
6493                     {
6494                         noway_assert(callInfo->kind == CORINFO_CALL);
6495                         call->gtIntrinsic.gtEntryPoint = callInfo->codePointerLookup.constLookup;
6496                     }
6497                     else
6498                     {
6499                         call->gtIntrinsic.gtEntryPoint.addr = nullptr;
6500                     }
6501                 }
6502 #endif
6503
6504                 bIntrinsicImported = true;
6505                 goto DONE_CALL;
6506             }
6507         }
6508
6509 #ifdef FEATURE_SIMD
6510         if (featureSIMD)
6511         {
6512             call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, pResolvedToken->token);
6513             if (call != nullptr)
6514             {
6515                 bIntrinsicImported = true;
6516                 goto DONE_CALL;
6517             }
6518         }
6519 #endif // FEATURE_SIMD
6520
6521         if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT))
6522         {
6523             NO_WAY("Virtual call to a function added via EnC is not supported");
6524             goto DONE_CALL;
6525         }
6526
6527         if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT &&
6528             (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6529             (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG)
6530         {
6531             BADCODE("Bad calling convention");
6532         }
6533
6534         //-------------------------------------------------------------------------
6535         //  Construct the call node
6536         //
6537         // Work out what sort of call we're making.
6538         // Dispense with virtual calls implemented via LDVIRTFTN immediately.
6539
6540         constraintCallThisTransform = callInfo->thisTransform;
6541
6542         exactContextHnd                = callInfo->contextHandle;
6543         exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup;
6544
6545         // A recursive call is treated as a loop back to the beginning of the method.
6546         if (methHnd == info.compMethodHnd)
6547         {
6548 #ifdef DEBUG
6549             if (verbose)
6550             {
6551                 JITDUMP("\nFound recursive call in the method. Mark BB%02u to BB%02u as having a backward branch.\n",
6552                         fgFirstBB->bbNum, compCurBB->bbNum);
6553             }
6554 #endif
6555             fgMarkBackwardJump(fgFirstBB, compCurBB);
6556         }
6557
6558         switch (callInfo->kind)
6559         {
6560
6561             case CORINFO_VIRTUALCALL_STUB:
6562             {
6563                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6564                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6565                 if (callInfo->stubLookup.lookupKind.needsRuntimeLookup)
6566                 {
6567
6568                     if (compIsForInlining())
6569                     {
6570                         // Don't import runtime lookups when inlining
6571                         // Inlining has to be aborted in such a case
6572                         /* XXX Fri 3/20/2009
6573                          * By the way, this would never succeed.  If the handle lookup is into the generic
6574                          * dictionary for a candidate, you'll generate different dictionary offsets and the
6575                          * inlined code will crash.
6576                          *
6577                          * To anyone reviewing this code, when could this ever succeed in the future?  It'll
6578                          * always have a handle lookup.  These lookups are safe intra-module, but we're just
6579                          * failing here.
6580                          */
6581                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE);
6582                         return callRetTyp;
6583                     }
6584
6585                     GenTreePtr stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd);
6586                     assert(!compDonotInline());
6587
6588                     // This is the rough code to set up an indirect stub call
6589                     assert(stubAddr != nullptr);
6590
6591                     // The stubAddr may be a
6592                     // complex expression. As it is evaluated after the args,
6593                     // it may cause registered args to be spilled. Simply spill it.
6594
6595                     unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup"));
6596                     impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_ALL);
6597                     stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6598
6599                     // Create the actual call node
6600
6601                     assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6602                            (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6603
6604                     call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr);
6605
6606                     call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT);
6607                     call->gtFlags |= GTF_CALL_VIRT_STUB;
6608
6609 #ifdef _TARGET_X86_
6610                     // No tailcalls allowed for these yet...
6611                     canTailCall             = false;
6612                     szCanTailCallFailReason = "VirtualCall with runtime lookup";
6613 #endif
6614                 }
6615                 else
6616                 {
6617                     // OK, the stub is available at compile time.
6618
6619                     call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6620                     call->gtCall.gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr;
6621                     call->gtFlags |= GTF_CALL_VIRT_STUB;
6622                     assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE);
6623                     if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE)
6624                     {
6625                         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
6626                     }
6627                 }
6628
6629 #ifdef FEATURE_READYTORUN_COMPILER
6630                 if (opts.IsReadyToRun())
6631                 {
6632                     // Null check is sometimes needed for ready to run to handle
6633                     // non-virtual <-> virtual changes between versions
6634                     if (callInfo->nullInstanceCheck)
6635                     {
6636                         call->gtFlags |= GTF_CALL_NULLCHECK;
6637                     }
6638                 }
6639 #endif
6640
6641                 break;
6642             }
6643
6644             case CORINFO_VIRTUALCALL_VTABLE:
6645             {
6646                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6647                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6648                 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6649                 call->gtFlags |= GTF_CALL_VIRT_VTABLE;
6650                 break;
6651             }
6652
6653             case CORINFO_VIRTUALCALL_LDVIRTFTN:
6654             {
6655                 if (compIsForInlining())
6656                 {
6657                     compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN);
6658                     return callRetTyp;
6659                 }
6660
6661                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6662                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6663                 // OK, We've been told to call via LDVIRTFTN, so just
6664                 // take the call now....
6665
6666                 args = impPopList(sig->numArgs, &argFlags, sig);
6667
6668                 GenTreePtr thisPtr = impPopStack().val;
6669                 thisPtr            = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
6670                 if (compDonotInline())
6671                 {
6672                     return callRetTyp;
6673                 }
6674
6675                 // Clone the (possibly transformed) "this" pointer
6676                 GenTreePtr thisPtrCopy;
6677                 thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
6678                                        nullptr DEBUGARG("LDVIRTFTN this pointer"));
6679
6680                 GenTreePtr fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
6681                 if (compDonotInline())
6682                 {
6683                     return callRetTyp;
6684                 }
6685
6686                 thisPtr = nullptr; // can't reuse it
6687
6688                 // Now make an indirect call through the function pointer
6689
6690                 unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer"));
6691                 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
6692                 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6693
6694                 // Create the actual call node
6695
6696                 call                    = gtNewIndCallNode(fptr, callRetTyp, args, ilOffset);
6697                 call->gtCall.gtCallObjp = thisPtrCopy;
6698                 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6699
6700 #ifdef FEATURE_READYTORUN_COMPILER
6701                 if (opts.IsReadyToRun())
6702                 {
6703                     // Null check is needed for ready to run to handle
6704                     // non-virtual <-> virtual changes between versions
6705                     call->gtFlags |= GTF_CALL_NULLCHECK;
6706                 }
6707 #endif
6708
6709                 // Since we are jumping over some code, check that it's OK to skip that code
6710                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6711                        (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6712                 goto DONE;
6713             }
6714
6715             case CORINFO_CALL:
6716             {
6717                 // This is for a non-virtual, non-interface etc. call
6718                 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6719
6720                 // We remove the nullcheck for the GetType call intrinsic.
6721                 // TODO-CQ: JIT64 does not introduce the null check for many more helper calls
6722                 // and intrinsics.
6723                 if (callInfo->nullInstanceCheck &&
6724                     !((mflags & CORINFO_FLG_INTRINSIC) != 0 && (intrinsicID == CORINFO_INTRINSIC_Object_GetType)))
6725                 {
6726                     call->gtFlags |= GTF_CALL_NULLCHECK;
6727                 }
6728
6729 #ifdef FEATURE_READYTORUN_COMPILER
6730                 if (opts.IsReadyToRun())
6731                 {
6732                     call->gtCall.setEntryPoint(callInfo->codePointerLookup.constLookup);
6733                 }
6734 #endif
6735                 break;
6736             }
6737
6738             case CORINFO_CALL_CODE_POINTER:
6739             {
6740                 // The EE has asked us to call by computing a code pointer and then doing an
6741                 // indirect call.  This is because a runtime lookup is required to get the code entry point.
6742
6743                 // These calls always follow a uniform calling convention, i.e. no extra hidden params
6744                 assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0);
6745
6746                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG);
6747                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6748
6749                 GenTreePtr fptr =
6750                     impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod);
6751
6752                 if (compDonotInline())
6753                 {
6754                     return callRetTyp;
6755                 }
6756
6757                 // Now make an indirect call through the function pointer
6758
6759                 unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer"));
6760                 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
6761                 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6762
6763                 call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
6764                 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6765                 if (callInfo->nullInstanceCheck)
6766                 {
6767                     call->gtFlags |= GTF_CALL_NULLCHECK;
6768                 }
6769
6770                 break;
6771             }
6772
6773             default:
6774                 assert(!"unknown call kind");
6775                 break;
6776         }
6777
6778         //-------------------------------------------------------------------------
6779         // Set more flags
6780
6781         PREFIX_ASSUME(call != nullptr);
6782
6783         if (mflags & CORINFO_FLG_NOGCCHECK)
6784         {
6785             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK;
6786         }
6787
6788         // Mark the call if it's one of the ones we may treat as an intrinsic
6789         if (intrinsicID == CORINFO_INTRINSIC_Object_GetType || intrinsicID == CORINFO_INTRINSIC_TypeEQ ||
6790             intrinsicID == CORINFO_INTRINSIC_TypeNEQ || intrinsicID == CORINFO_INTRINSIC_GetCurrentManagedThread ||
6791             intrinsicID == CORINFO_INTRINSIC_GetManagedThreadId)
6792         {
6793             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC;
6794         }
6795     }
6796     assert(sig);
6797     assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set.
6798
6799     /* Some sanity checks */
6800
6801     // CALL_VIRT and NEWOBJ must have a THIS pointer
6802     assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS));
6803     // static bit and hasThis are negations of one another
6804     assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0));
6805     assert(call != nullptr);
6806
6807     /*-------------------------------------------------------------------------
6808      * Check special-cases etc
6809      */
6810
6811     /* Special case - Check if it is a call to Delegate.Invoke(). */
6812
6813     if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
6814     {
6815         assert(!compIsForInlining());
6816         assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6817         assert(mflags & CORINFO_FLG_FINAL);
6818
6819         /* Set the delegate flag */
6820         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV;
6821
6822         if (callInfo->secureDelegateInvoke)
6823         {
6824             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SECURE_DELEGATE_INV;
6825         }
6826
6827         if (opcode == CEE_CALLVIRT)
6828         {
6829             assert(mflags & CORINFO_FLG_FINAL);
6830
6831             /* It should have the GTF_CALL_NULLCHECK flag set. Reset it */
6832             assert(call->gtFlags & GTF_CALL_NULLCHECK);
6833             call->gtFlags &= ~GTF_CALL_NULLCHECK;
6834         }
6835     }
6836
6837     CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass;
6838     actualMethodRetTypeSigClass = sig->retTypeSigClass;
6839     if (varTypeIsStruct(callRetTyp))
6840     {
6841         callRetTyp   = impNormStructType(actualMethodRetTypeSigClass);
6842         call->gtType = callRetTyp;
6843     }
6844
6845 #if !FEATURE_VARARG
6846     /* Check for varargs */
6847     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
6848         (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6849     {
6850         BADCODE("Varargs not supported.");
6851     }
6852 #endif // !FEATURE_VARARG
6853
6854     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
6855         (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6856     {
6857         assert(!compIsForInlining());
6858
6859         /* Set the right flags */
6860
6861         call->gtFlags |= GTF_CALL_POP_ARGS;
6862         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VARARGS;
6863
6864         /* Can't allow tailcall for varargs as it is caller-pop. The caller
6865            will be expecting to pop a certain number of arguments, but if we
6866            tailcall to a function with a different number of arguments, we
6867            are hosed. There are ways around this (caller remembers esp value,
6868            varargs is not caller-pop, etc), but not worth it. */
6869         CLANG_FORMAT_COMMENT_ANCHOR;
6870
6871 #ifdef _TARGET_X86_
6872         if (canTailCall)
6873         {
6874             canTailCall             = false;
6875             szCanTailCallFailReason = "Callee is varargs";
6876         }
6877 #endif
6878
6879         /* Get the total number of arguments - this is already correct
6880          * for CALLI - for methods we have to get it from the call site */
6881
6882         if (opcode != CEE_CALLI)
6883         {
6884 #ifdef DEBUG
6885             unsigned numArgsDef = sig->numArgs;
6886 #endif
6887             eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, sig);
6888
6889 #ifdef DEBUG
6890             // We cannot lazily obtain the signature of a vararg call because using its method
6891             // handle will give us only the declared argument list, not the full argument list.
6892             assert(call->gtCall.callSig == nullptr);
6893             call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
6894             *call->gtCall.callSig = *sig;
6895 #endif
6896
6897             // For vararg calls we must be sure to load the return type of the
6898             // method actually being called, as well as the return types
6899             // specified in the vararg signature. With type equivalency, these types
6900             // may not be the same.
6901             if (sig->retTypeSigClass != actualMethodRetTypeSigClass)
6902             {
6903                 if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
6904                     sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR &&
6905                     sig->retType != CORINFO_TYPE_VAR)
6906                 {
6907                     // Make sure that all valuetypes (including enums) that we push are loaded.
6908                     // This is to guarantee that if a GC is triggered from the prestub of this method,
6909                     // all valuetypes in the method signature are already loaded.
6910                     // We need to be able to find the size of the valuetypes, but we cannot
6911                     // do a class-load from within GC.
6912                     info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass);
6913                 }
6914             }
6915
6916             assert(numArgsDef <= sig->numArgs);
6917         }
6918
6919         /* We will have "cookie" as the last argument but we cannot push
6920          * it on the operand stack because we may overflow, so we append it
6921          * to the arg list after we pop the other arguments */
6922     }
6923
6924     if (mflags & CORINFO_FLG_SECURITYCHECK)
6925     {
6926         assert(!compIsForInlining());
6927
6928         // Need security prolog/epilog callouts when there is
6929         // imperative security in the method. This is to give security a
6930         // chance to do any setup in the prolog and cleanup in the epilog if needed.
6931
6932         if (compIsForInlining())
6933         {
6934             // Cannot handle this if the method being imported is itself an inlinee,
6935             // because an inlinee method does not have its own frame.
6936
6937             compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
6938             return callRetTyp;
6939         }
6940         else
6941         {
6942             tiSecurityCalloutNeeded = true;
6943
6944             // If the current method calls a method which needs a security check,
6945             // (i.e. the method being compiled has imperative security)
6946             // we need to reserve a slot for the security object in
6947             // the current method's stack frame
6948             opts.compNeedSecurityCheck = true;
6949         }
6950     }
6951
6952     //--------------------------- Inline NDirect ------------------------------
6953
6954     // For inline cases we technically should look at both the current
6955     // block and the call site block (or just the latter if we've
6956     // fused the EH trees). However the block-related checks pertain to
6957     // EH and we currently won't inline a method with EH. So for
6958     // inlinees, just checking the call site block is sufficient.
6959     {
6960         // New lexical block here to avoid compilation errors because of GOTOs.
6961         BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
6962         impCheckForPInvokeCall(call, methHnd, sig, mflags, block);
6963     }
6964
6965     if (call->gtFlags & GTF_CALL_UNMANAGED)
6966     {
6967         // We set up the unmanaged call by linking the frame, disabling GC, etc.
6968         // This needs to be cleaned up on return.
6969         if (canTailCall)
6970         {
6971             canTailCall             = false;
6972             szCanTailCallFailReason = "Callee is native";
6973         }
6974
6975         checkForSmallType = true;
6976
6977         impPopArgsForUnmanagedCall(call, sig);
6978
6979         goto DONE;
6980     }
6981     else if ((opcode == CEE_CALLI) && (((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_STDCALL) ||
6982                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_C) ||
6983                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_THISCALL) ||
6984                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_FASTCALL)))
6985     {
6986         if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig))
6987         {
6988             // Normally this only happens with inlining.
6989             // However, a generic method (or type) being NGENd into another module
6990             // can run into this issue as well.  There's no easy fall-back for NGEN,
6991             // so instead we fall back to the JIT.
6992             if (compIsForInlining())
6993             {
6994                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE);
6995             }
6996             else
6997             {
6998                 IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)");
6999             }
7000
7001             return callRetTyp;
7002         }
7003
7004         GenTreePtr cookie = eeGetPInvokeCookie(sig);
7005
7006         // This cookie is required to be either a simple GT_CNS_INT or
7007         // an indirection of a GT_CNS_INT
7008         //
7009         GenTreePtr cookieConst = cookie;
7010         if (cookie->gtOper == GT_IND)
7011         {
7012             cookieConst = cookie->gtOp.gtOp1;
7013         }
7014         assert(cookieConst->gtOper == GT_CNS_INT);
7015
7016         // Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that
7017         // we won't allow this tree to participate in any CSE logic
7018         //
7019         cookie->gtFlags |= GTF_DONT_CSE;
7020         cookieConst->gtFlags |= GTF_DONT_CSE;
7021
7022         call->gtCall.gtCallCookie = cookie;
7023
7024         if (canTailCall)
7025         {
7026             canTailCall             = false;
7027             szCanTailCallFailReason = "PInvoke calli";
7028         }
7029     }
7030
7031     /*-------------------------------------------------------------------------
7032      * Create the argument list
7033      */
7034
7035     //-------------------------------------------------------------------------
7036     // Special case - for varargs we have an implicit last argument
7037
7038     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
7039     {
7040         assert(!compIsForInlining());
7041
7042         void *varCookie, *pVarCookie;
7043         if (!info.compCompHnd->canGetVarArgsHandle(sig))
7044         {
7045             compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE);
7046             return callRetTyp;
7047         }
7048
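        // getVarArgsHandle returns the cookie either directly (varCookie) or through an
        // indirection cell (pVarCookie); exactly one of the two is non-null, and
        // gtNewIconEmbHndNode builds the matching constant or indirection tree.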
7049         varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie);
7050         assert((!varCookie) != (!pVarCookie));
7051         GenTreePtr cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL);
7052
7053         assert(extraArg == nullptr);
7054         extraArg = gtNewArgList(cookie);
7055     }
7056
7057     //-------------------------------------------------------------------------
7058     // Extra arg for shared generic code and array methods
7059     //
7060     // Extra argument containing instantiation information is passed in the
7061     // following circumstances:
7062     // (a) To the "Address" method on array classes; the extra parameter is
7063     //     the array's type handle (a TypeDesc)
7064     // (b) To shared-code instance methods in generic structs; the extra parameter
7065     //     is the struct's type handle (a vtable ptr)
7066     // (c) To shared-code per-instantiation non-generic static methods in generic
7067     //     classes and structs; the extra parameter is the type handle
7068     // (d) To shared-code generic methods; the extra parameter is an
7069     //     exact-instantiation MethodDesc
7070     //
7071     // We also set the exact type context associated with the call so we can
7072     // inline the call correctly later on.
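    //
    // For illustration (hypothetical C# source): a shared-code generic method such as
    //     static T Identity<T>(T x) where T : class => x;
    // has a single native body shared by all reference-type instantiations, so each call
    // site passes the exact-instantiation MethodDesc (case (d) above) as the hidden argument.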
7073
7074     if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE)
7075     {
7076         assert(call->gtCall.gtCallType == CT_USER_FUNC);
7077         if (clsHnd == nullptr)
7078         {
7079             NO_WAY("CALLI on parameterized type");
7080         }
7081
7082         assert(opcode != CEE_CALLI);
7083
7084         GenTreePtr instParam;
7085         BOOL       runtimeLookup;
7086
7087         // Instantiated generic method
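        // The low bits of exactContextHnd (CORINFO_CONTEXTFLAGS_MASK) tag the context as
        // describing either a method or a class; mask them off to recover the raw handle.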
7088         if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD)
7089         {
7090             CORINFO_METHOD_HANDLE exactMethodHandle =
7091                 (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7092
7093             if (!exactContextNeedsRuntimeLookup)
7094             {
7095 #ifdef FEATURE_READYTORUN_COMPILER
7096                 if (opts.IsReadyToRun())
7097                 {
7098                     instParam =
7099                         impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle);
7100                     if (instParam == nullptr)
7101                     {
7102                         return callRetTyp;
7103                     }
7104                 }
7105                 else
7106 #endif
7107                 {
7108                     instParam = gtNewIconEmbMethHndNode(exactMethodHandle);
7109                     info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle);
7110                 }
7111             }
7112             else
7113             {
7114                 instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7115                 if (instParam == nullptr)
7116                 {
7117                     return callRetTyp;
7118                 }
7119             }
7120         }
7121
7122         // otherwise must be an instance method in a generic struct,
7123         // a static method in a generic type, or a runtime-generated array method
7124         else
7125         {
7126             assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS);
7127             CORINFO_CLASS_HANDLE exactClassHandle =
7128                 (CORINFO_CLASS_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7129
7130             if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0)
7131             {
7132                 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD);
7133                 return callRetTyp;
7134             }
7135
7136             if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall)
7137             {
7138                 // We indicate "readonly" to the Address operation by using a null
7139                 // instParam.
7140                 instParam = gtNewIconNode(0, TYP_REF);
7141             }
7142
7143             if (!exactContextNeedsRuntimeLookup)
7144             {
7145 #ifdef FEATURE_READYTORUN_COMPILER
7146                 if (opts.IsReadyToRun())
7147                 {
7148                     instParam =
7149                         impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle);
7150                     if (instParam == nullptr)
7151                     {
7152                         return callRetTyp;
7153                     }
7154                 }
7155                 else
7156 #endif
7157                 {
7158                     instParam = gtNewIconEmbClsHndNode(exactClassHandle);
7159                     info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle);
7160                 }
7161             }
7162             else
7163             {
7164                 instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7165                 if (instParam == nullptr)
7166                 {
7167                     return callRetTyp;
7168                 }
7169             }
7170         }
7171
7172         assert(extraArg == nullptr);
7173         extraArg = gtNewArgList(instParam);
7174     }
7175
7176     // Inlining may need the exact type context (exactContextHnd) if we're inlining shared generic code, in particular
7177     // to inline 'polytypic' operations such as static field accesses, type tests and method calls which
7178     // rely on the exact context. The exactContextHnd is passed back to the JitInterface at appropriate points.
7179     // exactContextHnd is not currently required when inlining shared generic code into shared
7180     // generic code, since the inliner aborts whenever shared code polytypic operations are encountered
7181     // (e.g. anything marked needsRuntimeLookup)
7182     if (exactContextNeedsRuntimeLookup)
7183     {
7184         exactContextHnd = nullptr;
7185     }
7186
7187     //-------------------------------------------------------------------------
7188     // The main group of arguments
7189
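    // Pop the declared arguments off the evaluation stack and attach them to the call;
    // extraArg (the vararg cookie or instantiation parameter, if any) is folded into the list.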
7190     args = call->gtCall.gtCallArgs = impPopList(sig->numArgs, &argFlags, sig, extraArg);
7191
7192     if (args)
7193     {
7194         call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
7195     }
7196
7197     //-------------------------------------------------------------------------
7198     // The "this" pointer
7199
7200     if (!(mflags & CORINFO_FLG_STATIC) && !((opcode == CEE_NEWOBJ) && (newobjThis == nullptr)))
7201     {
7202         GenTreePtr obj;
7203
7204         if (opcode == CEE_NEWOBJ)
7205         {
7206             obj = newobjThis;
7207         }
7208         else
7209         {
7210             obj = impPopStack().val;
7211             obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform);
7212             if (compDonotInline())
7213             {
7214                 return callRetTyp;
7215             }
7216         }
7217
7218         /* Is this a virtual or interface call? */
7219
7220         if ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT)
7221         {
7222             /* only true object pointers can be virtual */
7223
7224             assert(obj->gtType == TYP_REF);
7225         }
7226         else
7227         {
7228             if (impIsThis(obj))
7229             {
7230                 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS;
7231             }
7232         }
7233
7234         /* Store the "this" value in the call */
7235
7236         call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT;
7237         call->gtCall.gtCallObjp = obj;
7238     }
7239
7240     //-------------------------------------------------------------------------
7241     // The "this" pointer for "newobj"
7242
7243     if (opcode == CEE_NEWOBJ)
7244     {
7245         if (clsFlags & CORINFO_FLG_VAROBJSIZE)
7246         {
7247             assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately
7248             // This is a 'new' of a variable-sized object, where
7249             // the constructor is to return the object.  In this case
7250             // the constructor claims to return VOID but we know it
7251             // actually returns the new object.
7252             assert(callRetTyp == TYP_VOID);
7253             callRetTyp   = TYP_REF;
7254             call->gtType = TYP_REF;
7255             impSpillSpecialSideEff();
7256
7257             impPushOnStack(call, typeInfo(TI_REF, clsHnd));
7258         }
7259         else
7260         {
7261             if (clsFlags & CORINFO_FLG_DELEGATE)
7262             {
7263                 // The new inliner morphs it in impImportCall.
7264                 // This will allow us to inline the call to the delegate constructor.
7265                 call = fgOptimizeDelegateConstructor(call, &exactContextHnd);
7266             }
7267
7268             if (!bIntrinsicImported)
7269             {
7270
7271 #if defined(DEBUG) || defined(INLINE_DATA)
7272
7273                 // Keep track of the raw IL offset of the call
7274                 call->gtCall.gtRawILOffset = rawILOffset;
7275
7276 #endif // defined(DEBUG) || defined(INLINE_DATA)
7277
7278                 // Is it an inline candidate?
7279                 impMarkInlineCandidate(call, exactContextHnd, callInfo);
7280             }
7281
7282             // append the call node.
7283             impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7284
7285             // Now push the value of the 'new' onto the stack
7286
7287             // This is a 'new' of a non-variable sized object.
7288             // Append the new node (op1) to the statement list,
7289             // and then push the local holding the value of this
7290             // new instruction on the stack.
7291
7292             if (clsFlags & CORINFO_FLG_VALUECLASS)
7293             {
7294                 assert(newobjThis->gtOper == GT_ADDR && newobjThis->gtOp.gtOp1->gtOper == GT_LCL_VAR);
7295
7296                 unsigned tmp = newobjThis->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
7297                 impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack());
7298             }
7299             else
7300             {
7301                 if (newobjThis->gtOper == GT_COMMA)
7302                 {
7303                     // In coreclr the callout can be inserted even if verification is disabled
7304                     // so we cannot rely on tiVerificationNeeded alone
7305
7306                     // We must have inserted the callout. Get the real newobj.
7307                     newobjThis = newobjThis->gtOp.gtOp2;
7308                 }
7309
7310                 assert(newobjThis->gtOper == GT_LCL_VAR);
7311                 impPushOnStack(gtNewLclvNode(newobjThis->gtLclVarCommon.gtLclNum, TYP_REF), typeInfo(TI_REF, clsHnd));
7312             }
7313         }
7314         return callRetTyp;
7315     }
7316
7317 DONE:
7318
7319     if (tailCall)
7320     {
7321         // This check cannot be performed for implicit tail calls for the reason
7322         // that impIsImplicitTailCallCandidate() is not checking whether return
7323         // types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT.
7324         // As a result it is possible that in the following case, we find that
7325         // the type stack is non-empty if Callee() is considered for implicit
7326         // tail calling.
7327         //      int Caller(..) { .... void Callee(); ret val; ... }
7328         //
7329         // Note that we cannot check return type compatibility before impImportCall()
7330         // as we don't have the required info, and would need to duplicate some of the
7331         // logic of impImportCall().
7332         //
7333         // For implicit tail calls, we perform this check after return types are
7334         // known to be compatible.
7335         if ((tailCall & PREFIX_TAILCALL_EXPLICIT) && (verCurrentState.esStackDepth != 0))
7336         {
7337             BADCODE("Stack should be empty after tailcall");
7338         }
7339
7340         // Note that we cannot relax this condition with genActualType() as
7341         // the calling convention dictates that the caller of a function with
7342         // a small-typed return value is responsible for normalizing the return value.
7343
7344         if (canTailCall &&
7345             !impTailCallRetTypeCompatible(info.compRetType, info.compMethodInfo->args.retTypeClass, callRetTyp,
7346                                           callInfo->sig.retTypeClass))
7347         {
7348             canTailCall             = false;
7349             szCanTailCallFailReason = "Return types are not tail call compatible";
7350         }
7351
7352         // Stack empty check for implicit tail calls.
7353         if (canTailCall && (tailCall & PREFIX_TAILCALL_IMPLICIT) && (verCurrentState.esStackDepth != 0))
7354         {
7355 #ifdef _TARGET_AMD64_
7356             // JIT64 Compatibility:  Opportunistic tail call stack mismatch throws a VerificationException
7357             // in JIT64, not an InvalidProgramException.
7358             Verify(false, "Stack should be empty after tailcall");
7359 #else  // _TARGET_64BIT_
7360             BADCODE("Stack should be empty after tailcall");
7361 #endif //!_TARGET_64BIT_
7362         }
7363
7364         // assert(compCurBB is not a catch, finally or filter block);
7365         // assert(compCurBB is not a try block protected by a finally block);
7366
7367         // Check for permission to tailcall
7368         bool explicitTailCall = (tailCall & PREFIX_TAILCALL_EXPLICIT) != 0;
7369
7370         assert(!explicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN);
7371
7372         if (canTailCall)
7373         {
7374             // True virtual or indirect calls shouldn't pass in a callee handle.
7375             CORINFO_METHOD_HANDLE exactCalleeHnd = ((call->gtCall.gtCallType != CT_USER_FUNC) ||
7376                                                     ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT))
7377                                                        ? nullptr
7378                                                        : methHnd;
7379             GenTreePtr thisArg = call->gtCall.gtCallObjp;
7380
7381             if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, explicitTailCall))
7382             {
7383                 canTailCall = true;
7384                 if (explicitTailCall)
7385                 {
7386                     // In case of explicit tail calls, mark it so that it is not considered
7387                     // for in-lining.
7388                     call->gtCall.gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL;
7389 #ifdef DEBUG
7390                     if (verbose)
7391                     {
7392                         printf("\nGTF_CALL_M_EXPLICIT_TAILCALL bit set for call ");
7393                         printTreeID(call);
7394                         printf("\n");
7395                     }
7396 #endif
7397                 }
7398                 else
7399                 {
7400 #if FEATURE_TAILCALL_OPT
7401                     // Must be an implicit tail call.
7402                     assert((tailCall & PREFIX_TAILCALL_IMPLICIT) != 0);
7403
7404                     // It is possible that a call node is both an inline candidate and marked
7405                     // for opportunistic tail calling.  In-lining happens before morphing of
7406                     // trees.  If in-lining of an in-line candidate gets aborted for whatever
7407                     // reason, it will survive to the morphing stage at which point it will be
7408                     // transformed into a tail call after performing additional checks.
7409
7410                     call->gtCall.gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL;
7411 #ifdef DEBUG
7412                     if (verbose)
7413                     {
7414                         printf("\nGTF_CALL_M_IMPLICIT_TAILCALL bit set for call ");
7415                         printTreeID(call);
7416                         printf("\n");
7417                     }
7418 #endif
7419
7420 #else //! FEATURE_TAILCALL_OPT
7421                     NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls");
7422
7423 #endif // FEATURE_TAILCALL_OPT
7424                 }
7425
7426                 // we can't report success just yet...
7427             }
7428             else
7429             {
7430                 canTailCall = false;
7431 // canTailCall reported its reasons already
7432 #ifdef DEBUG
7433                 if (verbose)
7434                 {
7435                     printf("\ninfo.compCompHnd->canTailCall returned false for call ");
7436                     printTreeID(call);
7437                     printf("\n");
7438                 }
7439 #endif
7440             }
7441         }
7442         else
7443         {
7444             // If this assert fires it means that canTailCall was set to false without setting a reason!
7445             assert(szCanTailCallFailReason != nullptr);
7446
7447 #ifdef DEBUG
7448             if (verbose)
7449             {
7450                 printf("\nRejecting %splicit tail call for call ", explicitTailCall ? "ex" : "im");
7451                 printTreeID(call);
7452                 printf(": %s\n", szCanTailCallFailReason);
7453             }
7454 #endif
7455             info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, explicitTailCall, TAILCALL_FAIL,
7456                                                      szCanTailCallFailReason);
7457         }
7458     }
7459
7460 // Note: we assume that small return types are already normalized by the managed callee
7461 // or by the pinvoke stub for calls to unmanaged code.
7462
7463 DONE_CALL:
7464
7465     if (!bIntrinsicImported)
7466     {
7467         //
7468         // Things needed to be checked when bIntrinsicImported is false.
7469         //
7470
7471         assert(call->gtOper == GT_CALL);
7472         assert(sig != nullptr);
7473
7474         // Tail calls require us to save the call site's sig info so we can obtain an argument
7475         // copying thunk from the EE later on.
7476         if (call->gtCall.callSig == nullptr)
7477         {
7478             call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7479             *call->gtCall.callSig = *sig;
7480         }
7481
7482         if (compIsForInlining() && opcode == CEE_CALLVIRT)
7483         {
7484             GenTreePtr callObj = call->gtCall.gtCallObjp;
7485             assert(callObj != nullptr);
7486
7487             unsigned callKind = call->gtFlags & GTF_CALL_VIRT_KIND_MASK;
7488
7489             if (((callKind != GTF_CALL_NONVIRT) || (call->gtFlags & GTF_CALL_NULLCHECK)) &&
7490                 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(call->gtCall.gtCallArgs, callObj,
7491                                                                    impInlineInfo->inlArgInfo))
7492             {
7493                 impInlineInfo->thisDereferencedFirst = true;
7494             }
7495         }
7496
7497 #if defined(DEBUG) || defined(INLINE_DATA)
7498
7499         // Keep track of the raw IL offset of the call
7500         call->gtCall.gtRawILOffset = rawILOffset;
7501
7502 #endif // defined(DEBUG) || defined(INLINE_DATA)
7503
7504         // Is it an inline candidate?
7505         impMarkInlineCandidate(call, exactContextHnd, callInfo);
7506     }
7507
7508     // Push or append the result of the call
7509     if (callRetTyp == TYP_VOID)
7510     {
7511         if (opcode == CEE_NEWOBJ)
7512         {
7513             // we actually did push something, so don't spill the thing we just pushed.
7514             assert(verCurrentState.esStackDepth > 0);
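            // A spill level of esStackDepth - 1 spills everything except the top-of-stack
            // entry, i.e. the value that was just pushed for this newobj.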
7515             impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtOffs);
7516         }
7517         else
7518         {
7519             impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7520         }
7521     }
7522     else
7523     {
7524         impSpillSpecialSideEff();
7525
7526         if (clsFlags & CORINFO_FLG_ARRAY)
7527         {
7528             eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
7529         }
7530
7531         // Find the return type used for verification by interpreting the method signature.
7532         // NB: we are clobbering the already established sig.
7533         if (tiVerificationNeeded)
7534         {
7535             // Actually, we never get the sig for the original method.
7536             sig = &(callInfo->verSig);
7537         }
7538
7539         typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass);
7540         tiRetVal.NormaliseForStack();
7541
7542         // The CEE_READONLY prefix modifies the verification semantics of an Address
7543         // operation on an array type.
7544         if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall && tiRetVal.IsByRef())
7545         {
7546             tiRetVal.SetIsReadonlyByRef();
7547         }
7548
7549         if (tiVerificationNeeded)
7550         {
7551             // We assume all calls return permanent home byrefs. If they
7552             // didn't they wouldn't be verifiable. This is also covering
7553             // the Address() helper for multidimensional arrays.
7554             if (tiRetVal.IsByRef())
7555             {
7556                 tiRetVal.SetIsPermanentHomeByRef();
7557             }
7558         }
7559
7560         if (call->gtOper == GT_CALL)
7561         {
7562             // Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call)
7563             if (varTypeIsStruct(callRetTyp))
7564             {
7565                 call = impFixupCallStructReturn(call, sig->retTypeClass);
7566             }
7567
7568             if ((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0)
7569             {
7570                 assert(opts.OptEnabled(CLFLG_INLINING));
7571
7572                 // Make the call its own tree (spill the stack if needed).
7573                 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7574
7575                 // TODO: Still using the widened type.
7576                 call = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp));
7577             }
7578             else
7579             {
7580                 // For non-candidates we must also spill, since we
7581                 // might have locals live on the eval stack that this
7582                 // call can modify.
7583                 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call"));
7584             }
7585         }
7586
7587         if (!bIntrinsicImported)
7588         {
7589             //-------------------------------------------------------------------------
7590             //
7591             /* If the call is of a small type and the callee is managed, the callee will normalize the result
7592                 before returning.
7593                 However, we need to normalize small type values returned by unmanaged
7594                 functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here
7595                 if we use the shorter inlined pinvoke stub. */
7596
7597             if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT))
7598             {
7599                 call = gtNewCastNode(genActualType(callRetTyp), call, callRetTyp);
7600             }
7601         }
7602
7603         impPushOnStack(call, tiRetVal);
7604     }
7605
7606     // VSD functions get a new call target each time we call getCallInfo, so clear the cache.
7607     // Also, the call info cache for CALLI instructions is largely incomplete, so clear it out.
7608     // if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB))
7609     //  callInfoCache.uncacheCallInfo();
7610
7611     return callRetTyp;
7612 }
7613 #ifdef _PREFAST_
7614 #pragma warning(pop)
7615 #endif
7616
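//------------------------------------------------------------------------
// impMethodInfo_hasRetBuffArg: check whether calls to the given method must
// pass a hidden return buffer argument for the struct value they return.
//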
7617 bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo)
7618 {
7619     CorInfoType corType = methInfo->args.retType;
7620
7621     if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY))
7622     {
7623         // We have some kind of STRUCT being returned
7624
7625         structPassingKind howToReturnStruct = SPK_Unknown;
7626
7627         var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, &howToReturnStruct);
7628
7629         if (howToReturnStruct == SPK_ByReference)
7630         {
7631             return true;
7632         }
7633     }
7634
7635     return false;
7636 }
7637
7638 #ifdef DEBUG
7639 //
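// impImportJitTestLabelMark: imports the JIT test-label intrinsic. Pops the label
// constant (and an optional numeric argument), then the expression being annotated,
// records the label as node test data, and pushes the annotated expression back.
//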
7640 var_types Compiler::impImportJitTestLabelMark(int numArgs)
7641 {
7642     TestLabelAndNum tlAndN;
7643     if (numArgs == 2)
7644     {
7645         tlAndN.m_num  = 0;
7646         StackEntry se = impPopStack();
7647         assert(se.seTypeInfo.GetType() == TI_INT);
7648         GenTreePtr val = se.val;
7649         assert(val->IsCnsIntOrI());
7650         tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
7651     }
7652     else if (numArgs == 3)
7653     {
7654         StackEntry se = impPopStack();
7655         assert(se.seTypeInfo.GetType() == TI_INT);
7656         GenTreePtr val = se.val;
7657         assert(val->IsCnsIntOrI());
7658         tlAndN.m_num = val->AsIntConCommon()->IconValue();
7659         se           = impPopStack();
7660         assert(se.seTypeInfo.GetType() == TI_INT);
7661         val = se.val;
7662         assert(val->IsCnsIntOrI());
7663         tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
7664     }
7665     else
7666     {
7667         assert(false);
7668     }
7669
7670     StackEntry expSe = impPopStack();
7671     GenTreePtr node  = expSe.val;
7672
7673     // There are a small number of special cases, where we actually put the annotation on a subnode.
7674     if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100)
7675     {
7676         // A loop hoist annotation with value >= 100 means that the expression should be a static field access,
7677         // a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some
7678         // offset within the static field block whose address is returned by the helper call.
7679         // The annotation is saying that this address calculation, but not the entire access, should be hoisted.
7680         GenTreePtr helperCall = nullptr;
7681         assert(node->OperGet() == GT_IND);
7682         tlAndN.m_num -= 100;
7683         GetNodeTestData()->Set(node->gtOp.gtOp1, tlAndN);
7684         GetNodeTestData()->Remove(node);
7685     }
7686     else
7687     {
7688         GetNodeTestData()->Set(node, tlAndN);
7689     }
7690
7691     impPushOnStack(node, expSe.seTypeInfo);
7692     return node->TypeGet();
7693 }
7694 #endif // DEBUG
7695
7696 //-----------------------------------------------------------------------------------
7697 //  impFixupCallStructReturn: For a call node that returns a struct type either
7698 //  adjust the return type to an enregisterable type, or set the flag to indicate
7699 //  struct return via retbuf arg.
7700 //
7701 //  Arguments:
7702 //    call       -  GT_CALL GenTree node
7703 //    retClsHnd  -  Class handle of return type of the call
7704 //
7705 //  Return Value:
7706 //    Returns new GenTree node after fixing struct return of call node
7707 //
7708 GenTreePtr Compiler::impFixupCallStructReturn(GenTreePtr call, CORINFO_CLASS_HANDLE retClsHnd)
7709 {
7710     assert(call->gtOper == GT_CALL);
7711
7712     if (!varTypeIsStruct(call))
7713     {
7714         return call;
7715     }
7716
7717     call->gtCall.gtRetClsHnd = retClsHnd;
7718
7719     GenTreeCall* callNode = call->AsCall();
7720
7721 #if FEATURE_MULTIREG_RET
7722     // Initialize Return type descriptor of call node
7723     ReturnTypeDesc* retTypeDesc = callNode->GetReturnTypeDesc();
7724     retTypeDesc->InitializeStructReturnType(this, retClsHnd);
7725 #endif // FEATURE_MULTIREG_RET
7726
7727 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
7728
7729     // Not allowed for FEATURE_CORECLR which is the only SKU available for System V OSs.
7730     assert(!callNode->IsVarargs() && "varargs not allowed for System V OSs.");
7731
7732     // The return type will remain as the incoming struct type unless normalized to a
7733     // single eightbyte return type below.
7734     callNode->gtReturnType = call->gtType;
7735
7736     unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7737     if (retRegCount != 0)
7738     {
7739         if (retRegCount == 1)
7740         {
7741             // struct returned in a single register
7742             callNode->gtReturnType = retTypeDesc->GetReturnRegType(0);
7743         }
7744         else
7745         {
7746             // must be a struct returned in two registers
7747             assert(retRegCount == 2);
7748
7749             if ((!callNode->CanTailCall()) && (!callNode->IsInlineCandidate()))
7750             {
7751                 // Force a call returning multi-reg struct to be always of the IR form
7752                 //   tmp = call
7753                 //
7754                 // No need to assign a multi-reg struct to a local var if:
7755                 //  - It is a tail call or
7756                 //  - The call is marked for in-lining later
7757                 return impAssignMultiRegTypeToVar(call, retClsHnd);
7758             }
7759         }
7760     }
7761     else
7762     {
7763         // struct not returned in registers, i.e. returned via hidden retbuf arg.
7764         callNode->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
7765     }
7766
7767 #else // not FEATURE_UNIX_AMD64_STRUCT_PASSING
7768
7769 #if FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
7770     // There is no fixup necessary if the return type is a HFA struct.
7771     // HFA structs are returned in registers for ARM32 and ARM64
7772     //
7773     if (!call->gtCall.IsVarargs() && IsHfa(retClsHnd))
7774     {
7775         if (call->gtCall.CanTailCall())
7776         {
7777             if (info.compIsVarArgs)
7778             {
7779                 // We cannot tail call because control needs to return to fixup the calling
7780                 // convention for result return.
7781                 call->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
7782             }
7783             else
7784             {
7785                 // If we can tail call returning HFA, then don't assign it to
7786                 // a variable back and forth.
7787                 return call;
7788             }
7789         }
7790
7791         if (call->gtFlags & GTF_CALL_INLINE_CANDIDATE)
7792         {
7793             return call;
7794         }
7795
7796         unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7797         if (retRegCount >= 2)
7798         {
7799             return impAssignMultiRegTypeToVar(call, retClsHnd);
7800         }
7801     }
7802 #endif // _TARGET_ARM_
7803
7804     // Check for TYP_STRUCT type that wraps a primitive type
7805     // Such structs are returned using a single register
7806     // and we change the return type on those calls here.
7807     //
7808     structPassingKind howToReturnStruct;
7809     var_types         returnType = getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
7810
7811     if (howToReturnStruct == SPK_ByReference)
7812     {
7813         assert(returnType == TYP_UNKNOWN);
7814         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
7815     }
7816     else
7817     {
7818         assert(returnType != TYP_UNKNOWN);
7819         call->gtCall.gtReturnType = returnType;
7820
7821         // ToDo: Refactor this common code sequence into its own method as it is used 4+ times
7822         if ((returnType == TYP_LONG) && (compLongUsed == false))
7823         {
7824             compLongUsed = true;
7825         }
7826         else if (((returnType == TYP_FLOAT) || (returnType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
7827         {
7828             compFloatingPointUsed = true;
7829         }
7830
7831 #if FEATURE_MULTIREG_RET
7832         unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7833         assert(retRegCount != 0);
7834
7835         if (retRegCount >= 2)
7836         {
7837             if ((!callNode->CanTailCall()) && (!callNode->IsInlineCandidate()))
7838             {
7839                 // Force a call returning multi-reg struct to be always of the IR form
7840                 //   tmp = call
7841                 //
7842                 // No need to assign a multi-reg struct to a local var if:
7843                 //  - It is a tail call or
7844                 //  - The call is marked for in-lining later
7845                 return impAssignMultiRegTypeToVar(call, retClsHnd);
7846             }
7847         }
7848 #endif // FEATURE_MULTIREG_RET
7849     }
7850
7851 #endif // not FEATURE_UNIX_AMD64_STRUCT_PASSING
7852
7853     return call;
7854 }
7855
7856 /*****************************************************************************
7857    For struct return values, re-type the operand in the case where the ABI
7858    does not use a struct return buffer
7859    Note that this method is only called for !_TARGET_X86_
7860  */
7861
7862 GenTreePtr Compiler::impFixupStructReturnType(GenTreePtr op, CORINFO_CLASS_HANDLE retClsHnd)
7863 {
7864     assert(varTypeIsStruct(info.compRetType));
7865     assert(info.compRetBuffArg == BAD_VAR_NUM);
7866
7867 #if defined(_TARGET_XARCH_)
7868
7869 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
7870     // No VarArgs for CoreCLR on x64 Unix
7871     assert(!info.compIsVarArgs);
7872
7873     // Is method returning a multi-reg struct?
7874     if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd))
7875     {
7876         // In case of multi-reg struct return, we force IR to be one of the following:
7877         // GT_RETURN(lclvar) or GT_RETURN(call).  If op is anything other than a
7878         // lclvar or call, it is assigned to a temp to create: temp = op and GT_RETURN(tmp).
7879
7880         if (op->gtOper == GT_LCL_VAR)
7881         {
7882             // Make sure that this struct stays in memory and doesn't get promoted.
7883             unsigned lclNum                  = op->gtLclVarCommon.gtLclNum;
7884             lvaTable[lclNum].lvIsMultiRegRet = true;
7885
7886             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
7887             op->gtFlags |= GTF_DONT_CSE;
7888
7889             return op;
7890         }
7891
7892         if (op->gtOper == GT_CALL)
7893         {
7894             return op;
7895         }
7896
7897         return impAssignMultiRegTypeToVar(op, retClsHnd);
7898     }
7899 #else  // !FEATURE_UNIX_AMD64_STRUCT_PASSING
7900     assert(info.compRetNativeType != TYP_STRUCT);
7901 #endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
7902
7903 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
7904
7905     if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd))
7906     {
7907         if (op->gtOper == GT_LCL_VAR)
7908         {
7909             // This LCL_VAR is an HFA return value, it stays as a TYP_STRUCT
7910             unsigned lclNum = op->gtLclVarCommon.gtLclNum;
7911             // Make sure this struct type stays as struct so that we can return it as an HFA
7912             lvaTable[lclNum].lvIsMultiRegRet = true;
7913
7914             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
7915             op->gtFlags |= GTF_DONT_CSE;
7916
7917             return op;
7918         }
7919
7920         if (op->gtOper == GT_CALL)
7921         {
7922             if (op->gtCall.IsVarargs())
7923             {
7924                 // We cannot tail call because control needs to return to fixup the calling
7925                 // convention for result return.
7926                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
7927                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
7928             }
7929             else
7930             {
7931                 return op;
7932             }
7933         }
7934         return impAssignMultiRegTypeToVar(op, retClsHnd);
7935     }
7936
7937 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM64_)
7938
7939     // Is method returning a multi-reg struct?
7940     if (IsMultiRegReturnedType(retClsHnd))
7941     {
7942         if (op->gtOper == GT_LCL_VAR)
7943         {
7944             // This LCL_VAR stays as a TYP_STRUCT
7945             unsigned lclNum = op->gtLclVarCommon.gtLclNum;
7946
7947             // Make sure this struct type is not struct promoted
7948             lvaTable[lclNum].lvIsMultiRegRet = true;
7949
7950             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
7951             op->gtFlags |= GTF_DONT_CSE;
7952
7953             return op;
7954         }
7955
7956         if (op->gtOper == GT_CALL)
7957         {
7958             if (op->gtCall.IsVarargs())
7959             {
7960                 // We cannot tail call because control needs to return to fixup the calling
7961                 // convention for result return.
7962                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
7963                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
7964             }
7965             else
7966             {
7967                 return op;
7968             }
7969         }
7970         return impAssignMultiRegTypeToVar(op, retClsHnd);
7971     }
7972
7973 #endif //  FEATURE_MULTIREG_RET && FEATURE_HFA
7974
7975 REDO_RETURN_NODE:
7976     // adjust the type away from struct to integral
7977     // and no normalizing
7978     if (op->gtOper == GT_LCL_VAR)
7979     {
7980         op->ChangeOper(GT_LCL_FLD);
7981     }
7982     else if (op->gtOper == GT_OBJ)
7983     {
7984         GenTreePtr op1 = op->AsObj()->Addr();
7985
7986         // We will fold away OBJ/ADDR
7987         // except for OBJ/ADDR/INDEX
7988         //     as the array type influences the array element's offset
7989         //     Later in this method we change op->gtType to info.compRetNativeType
7990         //     This is not correct when op is a GT_INDEX as the starting offset
7991         //     for the array elements 'elemOffs' is different for an array of
7992         //     TYP_REF than an array of TYP_STRUCT (which simply wraps a TYP_REF)
7993         //     Also refer to the GTF_INX_REFARR_LAYOUT flag
7994         //
7995         if ((op1->gtOper == GT_ADDR) && (op1->gtOp.gtOp1->gtOper != GT_INDEX))
7996         {
7997             // Change '*(&X)' to 'X' and see if we can do better
7998             op = op1->gtOp.gtOp1;
7999             goto REDO_RETURN_NODE;
8000         }
8001         op->gtObj.gtClass = NO_CLASS_HANDLE;
8002         op->ChangeOperUnchecked(GT_IND);
8003         op->gtFlags |= GTF_IND_TGTANYWHERE;
8004     }
8005     else if (op->gtOper == GT_CALL)
8006     {
8007         if (op->AsCall()->TreatAsHasRetBufArg(this))
8008         {
8009             // This must be one of those 'special' helpers that don't
8010             // really have a return buffer, but instead use it as a way
8011             // to keep the trees cleaner with fewer address-taken temps.
8012             //
8013             // Well now we have to materialize the return buffer as
8014             // an address-taken temp. Then we can return the temp.
8015             //
8016             // NOTE: this code assumes that since the call directly
8017             // feeds the return, then the call must be returning the
8018             // same structure/class/type.
8019             //
8020             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer"));
8021
8022             // No need to spill anything as we're about to return.
8023             impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE);
8024
8025             // Don't create both a GT_ADDR & GT_OBJ just to undo all of that; instead,
8026             // jump directly to a GT_LCL_FLD.
8027             op = gtNewLclvNode(tmpNum, info.compRetNativeType);
8028             op->ChangeOper(GT_LCL_FLD);
8029         }
8030         else
8031         {
8032             assert(info.compRetNativeType == op->gtCall.gtReturnType);
8033
8034             // Don't change the gtType of the node just yet, it will get changed later.
8035             return op;
8036         }
8037     }
8038     else if (op->gtOper == GT_COMMA)
8039     {
8040         op->gtOp.gtOp2 = impFixupStructReturnType(op->gtOp.gtOp2, retClsHnd);
8041     }
8042
8043     op->gtType = info.compRetNativeType;
8044
8045     return op;
8046 }
8047
8048 /*****************************************************************************
8049    CEE_LEAVE may be jumping out of a protected block, viz, a catch or a
8050    finally-protected try. We find the finally blocks protecting the current
8051    offset (in order) by walking over the complete exception table and
8052    finding enclosing clauses. This assumes that the table is sorted.
8053    This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS.
8054
8055    If we are leaving a catch handler, we need to attach the
8056    CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks.
8057
8058    After this function, the BBJ_LEAVE block has been converted to a different type.
8059  */
8060
8061 #if !FEATURE_EH_FUNCLETS
8062
8063 void Compiler::impImportLeave(BasicBlock* block)
8064 {
8065 #ifdef DEBUG
8066     if (verbose)
8067     {
8068         printf("\nBefore import CEE_LEAVE:\n");
8069         fgDispBasicBlocks();
8070         fgDispHandlerTab();
8071     }
8072 #endif // DEBUG
8073
8074     bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8075     unsigned    blkAddr         = block->bbCodeOffs;
8076     BasicBlock* leaveTarget     = block->bbJumpDest;
8077     unsigned    jmpAddr         = leaveTarget->bbCodeOffs;
8078
8079     // LEAVE clears the stack: spill side effects and set the stack depth to 0
8080
8081     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8082     verCurrentState.esStackDepth = 0;
8083
8084     assert(block->bbJumpKind == BBJ_LEAVE);
8085     assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary
8086
8087     BasicBlock* step         = DUMMY_INIT(NULL);
8088     unsigned    encFinallies = 0; // Number of enclosing finallies.
8089     GenTreePtr  endCatches   = NULL;
8090     GenTreePtr  endLFin      = NULL; // The statement tree to indicate the end of locally-invoked finally.
8091
8092     unsigned  XTnum;
8093     EHblkDsc* HBtab;
8094
8095     for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8096     {
8097         // Grab the handler offsets
8098
8099         IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8100         IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8101         IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8102         IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8103
8104         /* Is this a catch-handler we are CEE_LEAVEing out of?
8105          * If so, we need to call CORINFO_HELP_ENDCATCH.
8106          */
8107
8108         if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8109         {
8110             // Can't CEE_LEAVE out of a finally/fault handler
8111             if (HBtab->HasFinallyOrFaultHandler())
8112                 BADCODE("leave out of fault/finally block");
8113
8114             // Create the call to CORINFO_HELP_ENDCATCH
8115             GenTreePtr endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID);
8116
8117             // Make a list of all the currently pending endCatches
8118             if (endCatches)
8119                 endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch);
8120             else
8121                 endCatches = endCatch;
8122
8123 #ifdef DEBUG
8124             if (verbose)
8125             {
8126                 printf("impImportLeave - BB%02u jumping out of catch handler EH#%u, adding call to "
8127                        "CORINFO_HELP_ENDCATCH\n",
8128                        block->bbNum, XTnum);
8129             }
8130 #endif
8131         }
8132         else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8133                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8134         {
8135             /* This is a finally-protected try we are jumping out of */
8136
8137             /* If there are any pending endCatches, and we have already
8138                jumped out of a finally-protected try, then the endCatches
8139                have to be put in a block in an outer try for async
8140                exceptions to work correctly.
8141                Otherwise, just append them to the original block */
8142
8143             BasicBlock* callBlock;
8144
8145             assert(!encFinallies == !endLFin); // if we have finallies, we better have an endLFin tree, and vice-versa
8146
8147             if (encFinallies == 0)
8148             {
8149                 assert(step == DUMMY_INIT(NULL));
8150                 callBlock             = block;
8151                 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8152
8153                 if (endCatches)
8154                     impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8155
8156 #ifdef DEBUG
8157                 if (verbose)
8158                 {
8159                     printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY "
8160                            "block BB%02u [%08p]\n",
8161                            callBlock->bbNum, dspPtr(callBlock));
8162                 }
8163 #endif
8164             }
8165             else
8166             {
8167                 assert(step != DUMMY_INIT(NULL));
8168
8169                 /* Calling the finally block */
8170                 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step);
8171                 assert(step->bbJumpKind == BBJ_ALWAYS);
8172                 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8173                                               // finally in the chain)
8174                 step->bbJumpDest->bbRefs++;
8175
8176                 /* The new block will inherit this block's weight */
8177                 callBlock->setBBWeight(block->bbWeight);
8178                 callBlock->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8179
8180 #ifdef DEBUG
8181                 if (verbose)
8182                 {
8183                     printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block BB%02u "
8184                            "[%08p]\n",
8185                            callBlock->bbNum, dspPtr(callBlock));
8186                 }
8187 #endif
8188
8189                 GenTreePtr lastStmt;
8190
8191                 if (endCatches)
8192                 {
8193                     lastStmt         = gtNewStmt(endCatches);
8194                     endLFin->gtNext  = lastStmt;
8195                     lastStmt->gtPrev = endLFin;
8196                 }
8197                 else
8198                 {
8199                     lastStmt = endLFin;
8200                 }
8201
8202                 // note that this sets BBF_IMPORTED on the block
8203                 impEndTreeList(callBlock, endLFin, lastStmt);
8204             }
8205
8206             step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8207             /* The new block will inherit this block's weight */
8208             step->setBBWeight(block->bbWeight);
8209             step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8210
8211 #ifdef DEBUG
8212             if (verbose)
8213             {
8214                 printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block "
8215                        "BB%02u [%08p]\n",
8216                        step->bbNum, dspPtr(step));
8217             }
8218 #endif
8219
8220             unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel;
8221             assert(finallyNesting <= compHndBBtabCount);
8222
8223             callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8224             endLFin               = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting);
8225             endLFin               = gtNewStmt(endLFin);
8226             endCatches            = NULL;
8227
8228             encFinallies++;
8229
8230             invalidatePreds = true;
8231         }
8232     }
8233
8234     /* Append any remaining endCatches, if any */
8235
8236     assert(!encFinallies == !endLFin);
8237
8238     if (encFinallies == 0)
8239     {
8240         assert(step == DUMMY_INIT(NULL));
8241         block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8242
8243         if (endCatches)
8244             impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8245
8246 #ifdef DEBUG
8247         if (verbose)
8248         {
8249             printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS "
8250                    "block BB%02u [%08p]\n",
8251                    block->bbNum, dspPtr(block));
8252         }
8253 #endif
8254     }
8255     else
8256     {
8257         // If leaveTarget is the start of another try block, we want to make sure that
8258         // we do not insert finalStep into that try block. Hence, we find the enclosing
8259         // try block.
8260         unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget);
8261
8262         // Insert a new BB either in the try region indicated by tryIndex or
8263         // the handler region indicated by leaveTarget->bbHndIndex,
8264         // depending on which is the inner region.
8265         BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step);
8266         finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS;
8267         step->bbJumpDest = finalStep;
8268
8269         /* The new block will inherit this block's weight */
8270         finalStep->setBBWeight(block->bbWeight);
8271         finalStep->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8272
8273 #ifdef DEBUG
8274         if (verbose)
8275         {
8276             printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block BB%02u [%08p]\n",
8277                    encFinallies, finalStep->bbNum, dspPtr(finalStep));
8278         }
8279 #endif
8280
8281         GenTreePtr lastStmt;
8282
8283         if (endCatches)
8284         {
8285             lastStmt         = gtNewStmt(endCatches);
8286             endLFin->gtNext  = lastStmt;
8287             lastStmt->gtPrev = endLFin;
8288         }
8289         else
8290         {
8291             lastStmt = endLFin;
8292         }
8293
8294         impEndTreeList(finalStep, endLFin, lastStmt);
8295
8296         finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
8297
8298         // Queue up the jump target for importing
8299
8300         impImportBlockPending(leaveTarget);
8301
8302         invalidatePreds = true;
8303     }
8304
8305     if (invalidatePreds && fgComputePredsDone)
8306     {
8307         JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
8308         fgRemovePreds();
8309     }
8310
8311 #ifdef DEBUG
8312     fgVerifyHandlerTab();
8313
8314     if (verbose)
8315     {
8316         printf("\nAfter import CEE_LEAVE:\n");
8317         fgDispBasicBlocks();
8318         fgDispHandlerTab();
8319     }
8320 #endif // DEBUG
8321 }
8322
8323 #else // FEATURE_EH_FUNCLETS
8324
8325 void Compiler::impImportLeave(BasicBlock* block)
8326 {
8327 #ifdef DEBUG
8328     if (verbose)
8329     {
8330         printf("\nBefore import CEE_LEAVE in BB%02u (targeting BB%02u):\n", block->bbNum, block->bbJumpDest->bbNum);
8331         fgDispBasicBlocks();
8332         fgDispHandlerTab();
8333     }
8334 #endif // DEBUG
8335
8336     bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8337     unsigned    blkAddr         = block->bbCodeOffs;
8338     BasicBlock* leaveTarget     = block->bbJumpDest;
8339     unsigned    jmpAddr         = leaveTarget->bbCodeOffs;
8340
8341     // LEAVE clears the stack: spill any side effects and set the stack depth to 0
8342
8343     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8344     verCurrentState.esStackDepth = 0;
8345
8346     assert(block->bbJumpKind == BBJ_LEAVE);
8347     assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary
8348
8349     BasicBlock* step = nullptr;
8350
8351     enum StepType
8352     {
8353         // No step type; step == NULL.
8354         ST_None,
8355
8356         // Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair?
8357         // That is, is step->bbJumpDest where a finally will return to?
8358         ST_FinallyReturn,
8359
8360         // The step block is a catch return.
8361         ST_Catch,
8362
8363         // The step block is in a "try", created as the target for a finally return or the target for a catch return.
8364         ST_Try
8365     };
8366     StepType stepType = ST_None;
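    // Illustrative note (added commentary, not from the original source): for a leave that first
    // exits a catch handler and then a finally-protected try, the step block starts out as the
    // BBJ_EHCATCHRET block (ST_Catch), becomes the BBJ_ALWAYS block paired with the new
    // BBJ_CALLFINALLY (ST_FinallyReturn), and may later become a block placed inside a
    // catch-protected try (ST_Try), matching the three cases handled in the loop below.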
8367
8368     unsigned  XTnum;
8369     EHblkDsc* HBtab;
8370
8371     for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8372     {
8373         // Grab the handler offsets
8374
8375         IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8376         IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8377         IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8378         IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8379
8380         /* Is this a catch-handler we are CEE_LEAVEing out of?
8381          */
8382
8383         if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8384         {
8385             // Can't CEE_LEAVE out of a finally/fault handler
8386             if (HBtab->HasFinallyOrFaultHandler())
8387             {
8388                 BADCODE("leave out of fault/finally block");
8389             }
8390
8391             /* We are jumping out of a catch */
8392
8393             if (step == nullptr)
8394             {
8395                 step             = block;
8396                 step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET
8397                 stepType         = ST_Catch;
8398
8399 #ifdef DEBUG
8400                 if (verbose)
8401                 {
8402                     printf("impImportLeave - jumping out of a catch (EH#%u), convert block BB%02u to BBJ_EHCATCHRET "
8403                            "block\n",
8404                            XTnum, step->bbNum);
8405                 }
8406 #endif
8407             }
8408             else
8409             {
8410                 BasicBlock* exitBlock;
8411
8412                 /* Create a new catch exit block in the catch region for the existing step block to jump to in this
8413                  * scope */
8414                 exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step);
8415
8416                 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
8417                 step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch
8418                                               // exit) returns to this block
8419                 step->bbJumpDest->bbRefs++;
8420
8421 #if defined(_TARGET_ARM_)
8422                 if (stepType == ST_FinallyReturn)
8423                 {
8424                     assert(step->bbJumpKind == BBJ_ALWAYS);
8425                     // Mark the target of a finally return
8426                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8427                 }
8428 #endif // defined(_TARGET_ARM_)
8429
8430                 /* The new block will inherit this block's weight */
8431                 exitBlock->setBBWeight(block->bbWeight);
8432                 exitBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8433
8434                 /* This exit block is the new step */
8435                 step     = exitBlock;
8436                 stepType = ST_Catch;
8437
8438                 invalidatePreds = true;
8439
8440 #ifdef DEBUG
8441                 if (verbose)
8442                 {
8443                     printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block BB%02u\n", XTnum,
8444                            exitBlock->bbNum);
8445                 }
8446 #endif
8447             }
8448         }
8449         else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8450                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8451         {
8452             /* We are jumping out of a finally-protected try */
8453
8454             BasicBlock* callBlock;
8455
8456             if (step == nullptr)
8457             {
8458 #if FEATURE_EH_CALLFINALLY_THUNKS
8459
8460                 // Put the call to the finally in the enclosing region.
8461                 unsigned callFinallyTryIndex =
8462                     (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
8463                 unsigned callFinallyHndIndex =
8464                     (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
8465                 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block);
8466
8467                 // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because
8468                 // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE,
8469                 // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the
8470                 // next block, and flow optimizations will remove it.
8471                 block->bbJumpKind = BBJ_ALWAYS;
8472                 block->bbJumpDest = callBlock;
8473                 block->bbJumpDest->bbRefs++;
8474
8475                 /* The new block will inherit this block's weight */
8476                 callBlock->setBBWeight(block->bbWeight);
8477                 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8478
8479 #ifdef DEBUG
8480                 if (verbose)
8481                 {
8482                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
8483                            "BBJ_ALWAYS, add BBJ_CALLFINALLY block BB%02u\n",
8484                            XTnum, block->bbNum, callBlock->bbNum);
8485                 }
8486 #endif
8487
8488 #else // !FEATURE_EH_CALLFINALLY_THUNKS
8489
8490                 callBlock             = block;
8491                 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8492
8493 #ifdef DEBUG
8494                 if (verbose)
8495                 {
8496                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
8497                            "BBJ_CALLFINALLY block\n",
8498                            XTnum, callBlock->bbNum);
8499                 }
8500 #endif
8501
8502 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
8503             }
8504             else
8505             {
8506                 // Calling the finally block. We already have a step block that is either the call-to-finally from a
8507                 // more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by
8508                 // a 'finally'), or the step block is the return from a catch.
8509                 //
8510                 // Due to ThreadAbortException, we can't have the catch return target the call-to-finally block
8511                 // directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will
8512                 // automatically re-raise the exception, using the return address of the catch (that is, the target
8513                 // block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will
8514                 // refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64,
8515                 // we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a
8516                 // finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a
8517                 // BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly
8518                 // within the 'try' region protected by the finally, since we generate code in such a way that execution
8519                 // never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on
8520                 // stack walks.)
8521
8522                 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
8523
8524 #if FEATURE_EH_CALLFINALLY_THUNKS
8525                 if (step->bbJumpKind == BBJ_EHCATCHRET)
8526                 {
8527                     // Need to create another step block in the 'try' region that will actually branch to the
8528                     // call-to-finally thunk.
8529                     BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
8530                     step->bbJumpDest  = step2;
8531                     step->bbJumpDest->bbRefs++;
8532                     step2->setBBWeight(block->bbWeight);
8533                     step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8534
8535 #ifdef DEBUG
8536                     if (verbose)
8537                     {
8538                         printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is "
8539                                "BBJ_EHCATCHRET (BB%02u), new BBJ_ALWAYS step-step block BB%02u\n",
8540                                XTnum, step->bbNum, step2->bbNum);
8541                     }
8542 #endif
8543
8544                     step = step2;
8545                     assert(stepType == ST_Catch); // Leave it as catch type for now.
8546                 }
8547 #endif // FEATURE_EH_CALLFINALLY_THUNKS
8548
8549 #if FEATURE_EH_CALLFINALLY_THUNKS
8550                 unsigned callFinallyTryIndex =
8551                     (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
8552                 unsigned callFinallyHndIndex =
8553                     (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
8554 #else  // !FEATURE_EH_CALLFINALLY_THUNKS
8555                 unsigned callFinallyTryIndex = XTnum + 1;
8556                 unsigned callFinallyHndIndex = 0; // don't care
8557 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
8558
8559                 callBlock        = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step);
8560                 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8561                                               // finally in the chain)
8562                 step->bbJumpDest->bbRefs++;
8563
8564 #if defined(_TARGET_ARM_)
8565                 if (stepType == ST_FinallyReturn)
8566                 {
8567                     assert(step->bbJumpKind == BBJ_ALWAYS);
8568                     // Mark the target of a finally return
8569                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8570                 }
8571 #endif // defined(_TARGET_ARM_)
8572
8573                 /* The new block will inherit this block's weight */
8574                 callBlock->setBBWeight(block->bbWeight);
8575                 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8576
8577 #ifdef DEBUG
8578                 if (verbose)
8579                 {
8580                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY block "
8581                            "BB%02u\n",
8582                            XTnum, callBlock->bbNum);
8583                 }
8584 #endif
8585             }
8586
8587             step     = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8588             stepType = ST_FinallyReturn;
8589
8590             /* The new block will inherit this block's weight */
8591             step->setBBWeight(block->bbWeight);
8592             step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8593
8594 #ifdef DEBUG
8595             if (verbose)
8596             {
8597                 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) "
8598                        "block BB%02u\n",
8599                        XTnum, step->bbNum);
8600             }
8601 #endif
8602
8603             callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8604
8605             invalidatePreds = true;
8606         }
8607         else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8608                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8609         {
8610             // We are jumping out of a catch-protected try.
8611             //
8612             // If we are returning from a call to a finally, then we must have a step block within a try
8613             // that is protected by a catch. This is so when unwinding from that finally (e.g., if code within the
8614             // finally raises an exception), the VM will find this step block, notice that it is in a protected region,
8615             // and invoke the appropriate catch.
8616             //
8617             // We also need to handle a special case with the handling of ThreadAbortException. If a try/catch
8618             // catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception),
8619             // and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM,
8620             // the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target
8621             // address of the catch return as the new exception address. That is, the re-raised exception appears to
8622             // occur at the catch return address. If this exception return address skips an enclosing try/catch that
8623             // catches ThreadAbortException, then the enclosing try/catch will not catch the exception, as it should.
8624             // For example:
8625             //
8626             // try {
8627             //    try {
8628             //       // something here raises ThreadAbortException
8629             //       LEAVE LABEL_1; // no need to stop at LABEL_2
8630             //    } catch (Exception) {
8631             //       // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so
8632             //       // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode.
8633             //       // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised
8634             //       // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only
8635             //       // need to do this transformation if the current EH block is a try/catch that catches
8636             //       // ThreadAbortException (or one of its parents), however we might not be able to find that
8637             //       // information, so currently we do it for all catch types.
8638             //       LEAVE LABEL_1; // Convert this to LEAVE LABEL2;
8639             //    }
8640             //    LABEL_2: LEAVE LABEL_1; // inserted by this step creation code
8641             // } catch (ThreadAbortException) {
8642             // }
8643             // LABEL_1:
8644             //
8645             // Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C#
8646             // compiler.
8647
8648             if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch))
8649             {
8650                 BasicBlock* catchStep;
8651
8652                 assert(step);
8653
8654                 if (stepType == ST_FinallyReturn)
8655                 {
8656                     assert(step->bbJumpKind == BBJ_ALWAYS);
8657                 }
8658                 else
8659                 {
8660                     assert(stepType == ST_Catch);
8661                     assert(step->bbJumpKind == BBJ_EHCATCHRET);
8662                 }
8663
8664                 /* Create a new exit block in the try region for the existing step block to jump to in this scope */
8665                 catchStep        = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
8666                 step->bbJumpDest = catchStep;
8667                 step->bbJumpDest->bbRefs++;
8668
8669 #if defined(_TARGET_ARM_)
8670                 if (stepType == ST_FinallyReturn)
8671                 {
8672                     // Mark the target of a finally return
8673                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8674                 }
8675 #endif // defined(_TARGET_ARM_)
8676
8677                 /* The new block will inherit this block's weight */
8678                 catchStep->setBBWeight(block->bbWeight);
8679                 catchStep->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8680
8681 #ifdef DEBUG
8682                 if (verbose)
8683                 {
8684                     if (stepType == ST_FinallyReturn)
8685                     {
8686                         printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new "
8687                                "BBJ_ALWAYS block BB%02u\n",
8688                                XTnum, catchStep->bbNum);
8689                     }
8690                     else
8691                     {
8692                         assert(stepType == ST_Catch);
8693                         printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new "
8694                                "BBJ_ALWAYS block BB%02u\n",
8695                                XTnum, catchStep->bbNum);
8696                     }
8697                 }
8698 #endif // DEBUG
8699
8700                 /* This block is the new step */
8701                 step     = catchStep;
8702                 stepType = ST_Try;
8703
8704                 invalidatePreds = true;
8705             }
8706         }
8707     }
8708
8709     if (step == nullptr)
8710     {
8711         block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8712
8713 #ifdef DEBUG
8714         if (verbose)
8715         {
8716             printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE "
8717                    "block BB%02u to BBJ_ALWAYS\n",
8718                    block->bbNum);
8719         }
8720 #endif
8721     }
8722     else
8723     {
8724         step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
8725
8726 #if defined(_TARGET_ARM_)
8727         if (stepType == ST_FinallyReturn)
8728         {
8729             assert(step->bbJumpKind == BBJ_ALWAYS);
8730             // Mark the target of a finally return
8731             step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8732         }
8733 #endif // defined(_TARGET_ARM_)
8734
8735 #ifdef DEBUG
8736         if (verbose)
8737         {
8738             printf("impImportLeave - final destination of step blocks set to BB%02u\n", leaveTarget->bbNum);
8739         }
8740 #endif
8741
8742         // Queue up the jump target for importing
8743
8744         impImportBlockPending(leaveTarget);
8745     }
8746
8747     if (invalidatePreds && fgComputePredsDone)
8748     {
8749         JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
8750         fgRemovePreds();
8751     }
8752
8753 #ifdef DEBUG
8754     fgVerifyHandlerTab();
8755
8756     if (verbose)
8757     {
8758         printf("\nAfter import CEE_LEAVE:\n");
8759         fgDispBasicBlocks();
8760         fgDispHandlerTab();
8761     }
8762 #endif // DEBUG
8763 }
8764
8765 #endif // FEATURE_EH_FUNCLETS
8766
8767 /*****************************************************************************/
8768 // This is called when reimporting a leave block. It resets the JumpKind,
8769 // JumpDest, and bbNext to the original values
8770
8771 void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr)
8772 {
8773 #if FEATURE_EH_FUNCLETS
8774     // With EH Funclets, while importing leave opcode we create another block ending with BBJ_ALWAYS (call it B1)
8775     // With EH Funclets, while importing the leave opcode we create another block ending with BBJ_ALWAYS (call it B1)
8776     // and the block containing the leave (say B0) is marked as BBJ_CALLFINALLY. If for some reason we reimport B0,
8777     // it is reset (in this routine) by marking it as ending with BBJ_LEAVE, and further down, when B0 is reimported, we
8778     // create another BBJ_ALWAYS (call it B2). In this process B1 gets orphaned, and any blocks to which B1 is the
8779     // only predecessor are also considered orphans and attempted to be deleted.
8780     //  try  {
8781     //     ....
8782     //     try
8783     //     {
8784     //         ....
8785     //         leave OUTSIDE;  // B0 is the block containing this leave, following this would be B1
8786     //     } finally { }
8787     //  } finally { }
8788     //  OUTSIDE:
8789     //
8790     // In the above nested try-finally example, we create a step block (call it Bstep) which branches to the block
8791     // that a finally would branch to (and such a block is marked as a finally target). Block B1 branches to the step block.
8792     // Because of the re-import of B0, Bstep is also orphaned. Since Bstep is a finally target it cannot be removed. To
8793     // work around this we will duplicate B0 (call it B0Dup) before resetting it. B0Dup is marked as BBJ_CALLFINALLY and
8794     // only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now, during orphan block deletion, B0Dup and B1
8795     // will be treated as a pair and handled correctly.
8796     if (block->bbJumpKind == BBJ_CALLFINALLY)
8797     {
8798         BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind);
8799         dupBlock->bbFlags    = block->bbFlags;
8800         dupBlock->bbJumpDest = block->bbJumpDest;
8801         dupBlock->copyEHRegion(block);
8802         dupBlock->bbCatchTyp = block->bbCatchTyp;
8803
8804         // Mark this block as
8805         //  a) not referenced by any other block to make sure that it gets deleted
8806         //  b) weight zero
8807         //  c) prevent from being imported
8808         //  d) as internal
8809         //  e) as rarely run
8810         dupBlock->bbRefs   = 0;
8811         dupBlock->bbWeight = 0;
8812         dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY;
8813
8814         // Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS
8815         // will be next to each other.
8816         fgInsertBBafter(block, dupBlock);
8817
8818 #ifdef DEBUG
8819         if (verbose)
8820         {
8821             printf("New Basic Block BB%02u duplicate of BB%02u created.\n", dupBlock->bbNum, block->bbNum);
8822         }
8823 #endif
8824     }
8825 #endif // FEATURE_EH_FUNCLETS
8826
8827     block->bbJumpKind = BBJ_LEAVE;
8828     fgInitBBLookup();
8829     block->bbJumpDest = fgLookupBB(jmpAddr);
8830
8831     // We will leave the BBJ_ALWAYS block we introduced. When it's reimported
8832     // the BBJ_ALWAYS block will be unreachable, and will be removed after. The
8833     // reason we don't want to remove the block at this point is that if we call
8834     // fgInitBBLookup() again we will do it wrong as the BBJ_ALWAYS block won't be
8835     // added and the linked list length will be different than fgBBcount.
8836 }
8837
8838 /*****************************************************************************/
8839 // Get the first non-prefix opcode. Used for verification of valid combinations
8840 // of prefixes and actual opcodes.
8841
8842 static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp)
8843 {
8844     while (codeAddr < codeEndp)
8845     {
8846         OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
8847         codeAddr += sizeof(__int8);
8848
8849         if (opcode == CEE_PREFIX1)
8850         {
8851             if (codeAddr >= codeEndp)
8852             {
8853                 break;
8854             }
8855             opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
8856             codeAddr += sizeof(__int8);
8857         }
8858
8859         switch (opcode)
8860         {
8861             case CEE_UNALIGNED:
8862             case CEE_VOLATILE:
8863             case CEE_TAILCALL:
8864             case CEE_CONSTRAINED:
8865             case CEE_READONLY:
8866                 break;
8867             default:
8868                 return opcode;
8869         }
8870
8871         codeAddr += opcodeSizes[opcode];
8872     }
8873
8874     return CEE_ILLEGAL;
8875 }
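// For example (added commentary, not from the original source): for the byte sequence encoding
// "volatile. unaligned. 1 ldind.i4", the loop above skips both prefixes (stepping over the
// unaligned. alignment operand via opcodeSizes[]) and returns CEE_LDIND_I4; if the stream ends
// while still inside a prefix, it returns CEE_ILLEGAL.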
8876
8877 /*****************************************************************************/
8878 // Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes
8879
8880 static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix)
8881 {
8882     OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
8883
8884     if (!(
8885             // The opcodes of all the ldind and stind instructions happen to be contiguous, except stind.i.
8886             ((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) ||
8887             (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) ||
8888             (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) ||
8889             // volatile. prefix is allowed with the ldsfld and stsfld
8890             (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD)))))
8891     {
8892         BADCODE("Invalid opcode for unaligned. or volatile. prefix");
8893     }
8894 }
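// For example (added commentary, not from the original source): "volatile. ldsfld" and
// "unaligned. 1 ldind.i4" pass this check, while "volatile. add" or "unaligned. 1 ldsfld"
// hit the BADCODE path above, since ldsfld/stsfld are only permitted after the volatile. prefix.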
8895
8896 /*****************************************************************************/
8897
8898 #ifdef DEBUG
8899
8900 #undef RETURN // undef contracts RETURN macro
8901
8902 enum controlFlow_t
8903 {
8904     NEXT,
8905     CALL,
8906     RETURN,
8907     THROW,
8908     BRANCH,
8909     COND_BRANCH,
8910     BREAK,
8911     PHI,
8912     META,
8913 };
8914
8915 const static controlFlow_t controlFlow[] = {
8916 #define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow,
8917 #include "opcode.def"
8918 #undef OPDEF
8919 };
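// Illustrative note (added commentary, not from the original source): the table is built from
// the "flow" column of opcode.def and is indexed by OPCODE value, so, for example,
// controlFlow[CEE_RET] is RETURN and controlFlow[CEE_BR] is BRANCH.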
8920
8921 #endif // DEBUG
8922
8923 /*****************************************************************************
8924  *  Determine the result type of an arithmetic operation
8925  *  On 64-bit targets, inserts upcasts when native int is mixed with int32
8926  */
8927 var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTreePtr* pOp1, GenTreePtr* pOp2)
8928 {
8929     var_types  type = TYP_UNDEF;
8930     GenTreePtr op1 = *pOp1, op2 = *pOp2;
8931
8932     // Arithmetic operations are generally only allowed with
8933     // primitive types, but certain operations are allowed
8934     // with byrefs
8935
8936     if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
8937     {
8938         if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF))
8939         {
8940             // byref1-byref2 => gives a native int
8941             type = TYP_I_IMPL;
8942         }
8943         else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF))
8944         {
8945             // [native] int - byref => gives a native int
8946
8947             //
8948             // The reason is that it is possible, in managed C++,
8949             // to have a tree like this:
8950             //
8951             //              -
8952             //             / \
8953             //            /   \
8954             //           /     \
8955             //          /       \
8956             // const(h) int     addr byref
8957             //
8958             // <BUGNUM> VSW 318822 </BUGNUM>
8959             //
8960             // So here we decide to make the resulting type to be a native int.
8961             CLANG_FORMAT_COMMENT_ANCHOR;
8962
8963 #ifdef _TARGET_64BIT_
8964             if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
8965             {
8966                 // insert an explicit upcast
8967                 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
8968             }
8969 #endif // _TARGET_64BIT_
8970
8971             type = TYP_I_IMPL;
8972         }
8973         else
8974         {
8975             // byref - [native] int => gives a byref
8976             assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet()));
8977
8978 #ifdef _TARGET_64BIT_
8979             if ((genActualType(op2->TypeGet()) != TYP_I_IMPL))
8980             {
8981                 // insert an explicit upcast
8982                 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
8983             }
8984 #endif // _TARGET_64BIT_
8985
8986             type = TYP_BYREF;
8987         }
8988     }
8989     else if ((oper == GT_ADD) &&
8990              (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
8991     {
8992         // byref + [native] int => gives a byref
8993         // (or)
8994         // [native] int + byref => gives a byref
8995
8996         // only one can be a byref : byref op byref not allowed
8997         assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF);
8998         assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet()));
8999
9000 #ifdef _TARGET_64BIT_
9001         if (genActualType(op2->TypeGet()) == TYP_BYREF)
9002         {
9003             if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9004             {
9005                 // insert an explicit upcast
9006                 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9007             }
9008         }
9009         else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9010         {
9011             // insert an explicit upcast
9012             op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9013         }
9014 #endif // _TARGET_64BIT_
9015
9016         type = TYP_BYREF;
9017     }
9018 #ifdef _TARGET_64BIT_
9019     else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL)
9020     {
9021         assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9022
9023         // int + long => gives long
9024         // long + int => gives long
9025         // we get this because in the IL the long isn't Int64, it's just IntPtr
9026
9027         if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9028         {
9029             // insert an explicit upcast
9030             op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9031         }
9032         else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9033         {
9034             // insert an explicit upcast
9035             op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9036         }
9037
9038         type = TYP_I_IMPL;
9039     }
9040 #else  // 32-bit TARGET
9041     else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG)
9042     {
9043         assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9044
9045         // int + long => gives long
9046         // long + int => gives long
9047
9048         type = TYP_LONG;
9049     }
9050 #endif // _TARGET_64BIT_
9051     else
9052     {
9053         // int + int => gives an int
9054         assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF);
9055
9056         assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
9057                varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
9058
9059         type = genActualType(op1->gtType);
9060
9061 #if FEATURE_X87_DOUBLES
9062
9063         // For x87, since we only have 1 size of registers, prefer double
9064         // For everybody else, be more precise
9065         if (type == TYP_FLOAT)
9066             type = TYP_DOUBLE;
9067
9068 #else // !FEATURE_X87_DOUBLES
9069
9070         // If both operands are TYP_FLOAT, then leave it as TYP_FLOAT.
9071         // Otherwise, turn floats into doubles
9072         if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT))
9073         {
9074             assert(genActualType(op2->gtType) == TYP_DOUBLE);
9075             type = TYP_DOUBLE;
9076         }
9077
9078 #endif // FEATURE_X87_DOUBLES
9079     }
9080
9081 #if FEATURE_X87_DOUBLES
9082     assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_LONG || type == TYP_INT);
9083 #else  // FEATURE_X87_DOUBLES
9084     assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT);
9085 #endif // FEATURE_X87_DOUBLES
9086
9087     return type;
9088 }
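// Worked example (added commentary, not from the original source): on a 64-bit target, for
// "ldarg.0; ldc.i4 8; add" where arg 0 is a byref, op2 is a 32-bit int, so the code above
// inserts a cast of op2 to TYP_I_IMPL and the result type of the addition is TYP_BYREF;
// subtracting one byref from another instead yields TYP_I_IMPL.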
9089
9090 /*****************************************************************************
9091  * Casting Helper Function to service both CEE_CASTCLASS and CEE_ISINST
9092  *
9093  * typeRef contains the token, op1 contains the value being cast,
9094  * and op2 contains code that creates the type handle corresponding to typeRef
9095  * isCastClass = true means CEE_CASTCLASS, false means CEE_ISINST
9096  */
9097 GenTreePtr Compiler::impCastClassOrIsInstToTree(GenTreePtr              op1,
9098                                                 GenTreePtr              op2,
9099                                                 CORINFO_RESOLVED_TOKEN* pResolvedToken,
9100                                                 bool                    isCastClass)
9101 {
9102     bool expandInline;
9103
9104     assert(op1->TypeGet() == TYP_REF);
9105
9106     CorInfoHelpFunc helper = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass);
9107
9108     if (isCastClass)
9109     {
9110         // We only want to expand inline the normal CHKCASTCLASS helper;
9111         expandInline = (helper == CORINFO_HELP_CHKCASTCLASS);
9112     }
9113     else
9114     {
9115         if (helper == CORINFO_HELP_ISINSTANCEOFCLASS)
9116         {
9117             // Get the Class Handle and class attributes for the type we are casting to
9118             //
9119             DWORD flags = info.compCompHnd->getClassAttribs(pResolvedToken->hClass);
9120
9121             //
9122             // If the class handle is marked as final we can also expand the IsInst check inline
9123             //
9124             expandInline = ((flags & CORINFO_FLG_FINAL) != 0);
9125
9126             //
9127             // But don't expand inline these two cases
9128             //
9129             if (flags & CORINFO_FLG_MARSHAL_BYREF)
9130             {
9131                 expandInline = false;
9132             }
9133             else if (flags & CORINFO_FLG_CONTEXTFUL)
9134             {
9135                 expandInline = false;
9136             }
9137         }
9138         else
9139         {
9140             //
9141             // We can't expand inline any other helpers
9142             //
9143             expandInline = false;
9144         }
9145     }
9146
9147     if (expandInline)
9148     {
9149         if (compCurBB->isRunRarely())
9150         {
9151             expandInline = false; // not worth the code expansion in a rarely run block
9152         }
9153
9154         if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals())
9155         {
9156             expandInline = false; // not worth creating an untracked local variable
9157         }
9158     }
9159
9160     if (!expandInline)
9161     {
9162         // If we CSE this class handle we prevent assertionProp from making SubType assertions
9163         // so instead we force the CSE logic to not consider CSE-ing this class handle.
9164         //
9165         op2->gtFlags |= GTF_DONT_CSE;
9166
9167         return gtNewHelperCallNode(helper, TYP_REF, 0, gtNewArgList(op2, op1));
9168     }
9169
9170     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2"));
9171
9172     GenTreePtr temp;
9173     GenTreePtr condMT;
9174     //
9175     // expand the methodtable match:
9176     //
9177     //  condMT ==>   GT_NE
9178     //               /    \
9179     //           GT_IND   op2 (typically CNS_INT)
9180     //              |
9181     //           op1Copy
9182     //
9183
9184     // This can replace op1 with a GT_COMMA that evaluates op1 into a local
9185     //
9186     op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1"));
9187     //
9188     // op1 is now known to be a non-complex tree
9189     // thus we can use gtClone(op1) from now on
9190     //
9191
9192     GenTreePtr op2Var = op2;
9193     if (isCastClass)
9194     {
9195         op2Var                                                  = fgInsertCommaFormTemp(&op2);
9196         lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true;
9197     }
9198     temp = gtNewOperNode(GT_IND, TYP_I_IMPL, temp);
9199     temp->gtFlags |= GTF_EXCEPT;
9200     condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2);
9201
9202     GenTreePtr condNull;
9203     //
9204     // expand the null check:
9205     //
9206     //  condNull ==>   GT_EQ
9207     //                 /    \
9208     //             op1Copy CNS_INT
9209     //                      null
9210     //
9211     condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF));
9212
9213     //
9214     // expand the true and false trees for the condMT
9215     //
9216     GenTreePtr condFalse = gtClone(op1);
9217     GenTreePtr condTrue;
9218     if (isCastClass)
9219     {
9220         //
9221         // use the special helper that skips the cases checked by our inlined cast
9222         //
9223         helper = CORINFO_HELP_CHKCASTCLASS_SPECIAL;
9224
9225         condTrue = gtNewHelperCallNode(helper, TYP_REF, 0, gtNewArgList(op2Var, gtClone(op1)));
9226     }
9227     else
9228     {
9229         condTrue = gtNewIconNode(0, TYP_REF);
9230     }
9231
9232 #define USE_QMARK_TREES
9233
9234 #ifdef USE_QMARK_TREES
9235     GenTreePtr qmarkMT;
9236     //
9237     // Generate first QMARK - COLON tree
9238     //
9239     //  qmarkMT ==>   GT_QMARK
9240     //                 /     \
9241     //            condMT   GT_COLON
9242     //                      /     \
9243     //                condFalse  condTrue
9244     //
9245     temp    = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse);
9246     qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp);
9247     condMT->gtFlags |= GTF_RELOP_QMARK;
9248
9249     GenTreePtr qmarkNull;
9250     //
9251     // Generate second QMARK - COLON tree
9252     //
9253     //  qmarkNull ==>  GT_QMARK
9254     //                 /     \
9255     //           condNull  GT_COLON
9256     //                      /     \
9257     //                qmarkMT   op1Copy
9258     //
9259     temp      = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT);
9260     qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp);
9261     qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF;
9262     condNull->gtFlags |= GTF_RELOP_QMARK;
9263
9264     // Make QMark node a top level node by spilling it.
9265     unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2"));
9266     impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE);
9267     return gtNewLclvNode(tmp, TYP_REF);
9268 #endif
9269 }
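// Summary of the inline expansion built above (added commentary, not from the original source),
// written as pseudo-code over the two QMARK/COLON trees:
//
//    result = (op1 == null)
//                 ? op1                                            // null stays null
//                 : (*(void**)op1 != op2)                          // method table mismatch
//                       ? (isCastClass ? CORINFO_HELP_CHKCASTCLASS_SPECIAL(op2, op1) : null)
//                       : op1;
//
// The whole expression is then spilled to a temp so that the QMARK remains a top-level node.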
9270
9271 #ifndef DEBUG
9272 #define assertImp(cond) ((void)0)
9273 #else
9274 #define assertImp(cond)                                                                                                \
9275     do                                                                                                                 \
9276     {                                                                                                                  \
9277         if (!(cond))                                                                                                   \
9278         {                                                                                                              \
9279             const int cchAssertImpBuf = 600;                                                                           \
9280             char*     assertImpBuf    = (char*)alloca(cchAssertImpBuf);                                                \
9281             _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1,                                            \
9282                         "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond,         \
9283                         impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL",                      \
9284                         op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth);                     \
9285             assertAbort(assertImpBuf, __FILE__, __LINE__);                                                             \
9286         }                                                                                                              \
9287     } while (0)
9288 #endif // DEBUG
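// Illustrative usage (added commentary, not from the original source): inside impImportBlockCode
// below, a check such as
//
//     assertImp(op1->gtType == TYP_REF);
//
// compiles to a no-op in release builds; in debug builds a failure produces an assert message
// that includes the current opcode name, IL offset, operand types, and stack depth.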
9289
9290 #ifdef _PREFAST_
9291 #pragma warning(push)
9292 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
9293 #endif
9294 /*****************************************************************************
9295  *  Import the instr for the given basic block
9296  */
9297 void Compiler::impImportBlockCode(BasicBlock* block)
9298 {
9299 #define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind)
9300
9301 #ifdef DEBUG
9302
9303     if (verbose)
9304     {
9305         printf("\nImporting BB%02u (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName);
9306     }
9307 #endif
9308
9309     unsigned  nxtStmtIndex = impInitBlockLineInfo();
9310     IL_OFFSET nxtStmtOffs;
9311
9312     GenTreePtr                   arrayNodeFrom, arrayNodeTo, arrayNodeToIndex;
9313     bool                         expandInline;
9314     CorInfoHelpFunc              helper;
9315     CorInfoIsAccessAllowedResult accessAllowedResult;
9316     CORINFO_HELPER_DESC          calloutHelper;
9317     const BYTE*                  lastLoadToken = nullptr;
9318
9319     // reject cyclic constraints
9320     if (tiVerificationNeeded)
9321     {
9322         Verify(!info.hasCircularClassConstraints, "Method parent has circular class type parameter constraints.");
9323         Verify(!info.hasCircularMethodConstraints, "Method has circular method type parameter constraints.");
9324     }
9325
9326     /* Get the tree list started */
9327
9328     impBeginTreeList();
9329
9330     /* Walk the opcodes that comprise the basic block */
9331
9332     const BYTE* codeAddr = info.compCode + block->bbCodeOffs;
9333     const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd;
9334
9335     IL_OFFSET opcodeOffs    = block->bbCodeOffs;
9336     IL_OFFSET lastSpillOffs = opcodeOffs;
9337
9338     signed jmpDist;
9339
9340     /* remember the start of the delegate creation sequence (used for verification) */
9341     const BYTE* delegateCreateStart = nullptr;
9342
9343     int  prefixFlags = 0;
9344     bool explicitTailCall, constraintCall, readonlyCall;
9345
9346     bool     insertLdloc = false; // set by CEE_DUP and cleared by following store
9347     typeInfo tiRetVal;
9348
9349     unsigned numArgs = info.compArgsCount;
9350
9351     /* Now process all the opcodes in the block */
9352
9353     var_types callTyp    = TYP_COUNT;
9354     OPCODE    prevOpcode = CEE_ILLEGAL;
9355
9356     if (block->bbCatchTyp)
9357     {
9358         if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
9359         {
9360             impCurStmtOffsSet(block->bbCodeOffs);
9361         }
9362
9363         // We will spill the GT_CATCH_ARG and the input of the BB_QMARK block
9364         // to a temp. This is a trade off for code simplicity
9365         impSpillSpecialSideEff();
9366     }
9367
9368     while (codeAddr < codeEndp)
9369     {
9370         bool                   usingReadyToRunHelper = false;
9371         CORINFO_RESOLVED_TOKEN resolvedToken;
9372         CORINFO_RESOLVED_TOKEN constrainedResolvedToken;
9373         CORINFO_CALL_INFO      callInfo;
9374         CORINFO_FIELD_INFO     fieldInfo;
9375
9376         tiRetVal = typeInfo(); // Default type info
9377
9378         //---------------------------------------------------------------------
9379
9380         /* We need to restrict the max tree depth as many of the Compiler
9381            functions are recursive. We do this by spilling the stack */
9382
9383         if (verCurrentState.esStackDepth)
9384         {
9385             /* Has it been a while since we last saw a non-empty stack (which
9386                guarantees that the tree depth isn't accumulating)? */
9387
9388             if ((opcodeOffs - lastSpillOffs) > 200)
9389             {
9390                 impSpillStackEnsure();
9391                 lastSpillOffs = opcodeOffs;
9392             }
9393         }
9394         else
9395         {
9396             lastSpillOffs   = opcodeOffs;
9397             impBoxTempInUse = false; // nothing on the stack, box temp OK to use again
9398         }
9399
9400         /* Compute the current instr offset */
9401
9402         opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
9403
9404 #ifndef DEBUG
9405         if (opts.compDbgInfo)
9406 #endif
9407         {
9408             if (!compIsForInlining())
9409             {
9410                 nxtStmtOffs =
9411                     (nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET;
9412
9413                 /* Have we reached the next stmt boundary ? */
9414
9415                 if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs)
9416                 {
9417                     assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]);
9418
9419                     if (verCurrentState.esStackDepth != 0 && opts.compDbgCode)
9420                     {
9421                         /* We need to provide accurate IP-mapping at this point.
9422                            So spill anything on the stack so that it will form
9423                            gtStmts with the correct stmt offset noted */
9424
9425                         impSpillStackEnsure(true);
9426                     }
9427
9428                     // Has impCurStmtOffs been reported in any tree?
9429
9430                     if (impCurStmtOffs != BAD_IL_OFFSET && opts.compDbgCode)
9431                     {
9432                         GenTreePtr placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
9433                         impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9434
9435                         assert(impCurStmtOffs == BAD_IL_OFFSET);
9436                     }
9437
9438                     if (impCurStmtOffs == BAD_IL_OFFSET)
9439                     {
9440                         /* Make sure that nxtStmtIndex is in sync with opcodeOffs.
9441                            If opcodeOffs has gone past nxtStmtIndex, catch up */
9442
9443                         while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount &&
9444                                info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs)
9445                         {
9446                             nxtStmtIndex++;
9447                         }
9448
9449                         /* Go to the new stmt */
9450
9451                         impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]);
9452
9453                         /* Update the stmt boundary index */
9454
9455                         nxtStmtIndex++;
9456                         assert(nxtStmtIndex <= info.compStmtOffsetsCount);
9457
9458                         /* Are there any more line# entries after this one? */
9459
9460                         if (nxtStmtIndex < info.compStmtOffsetsCount)
9461                         {
9462                             /* Remember where the next line# starts */
9463
9464                             nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex];
9465                         }
9466                         else
9467                         {
9468                             /* No more line# entries */
9469
9470                             nxtStmtOffs = BAD_IL_OFFSET;
9471                         }
9472                     }
9473                 }
9474                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) &&
9475                          (verCurrentState.esStackDepth == 0))
9476                 {
9477                     /* At stack-empty locations, we have already added the tree to
9478                        the stmt list with the last offset. We just need to update
9479                        impCurStmtOffs
9480                      */
9481
9482                     impCurStmtOffsSet(opcodeOffs);
9483                 }
9484                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) &&
9485                          impOpcodeIsCallSiteBoundary(prevOpcode))
9486                 {
9487                     /* Make sure we have a type cached */
9488                     assert(callTyp != TYP_COUNT);
9489
9490                     if (callTyp == TYP_VOID)
9491                     {
9492                         impCurStmtOffsSet(opcodeOffs);
9493                     }
9494                     else if (opts.compDbgCode)
9495                     {
9496                         impSpillStackEnsure(true);
9497                         impCurStmtOffsSet(opcodeOffs);
9498                     }
9499                 }
9500                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP))
9501                 {
9502                     if (opts.compDbgCode)
9503                     {
9504                         impSpillStackEnsure(true);
9505                     }
9506
9507                     impCurStmtOffsSet(opcodeOffs);
9508                 }
9509
9510                 assert(impCurStmtOffs == BAD_IL_OFFSET || nxtStmtOffs == BAD_IL_OFFSET ||
9511                        jitGetILoffs(impCurStmtOffs) <= nxtStmtOffs);
9512             }
9513         }
9514
9515         CORINFO_CLASS_HANDLE clsHnd       = DUMMY_INIT(NULL);
9516         CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL);
9517         CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL);
9518
9519         var_types       lclTyp, ovflType = TYP_UNKNOWN;
9520         GenTreePtr      op1           = DUMMY_INIT(NULL);
9521         GenTreePtr      op2           = DUMMY_INIT(NULL);
9522         GenTreeArgList* args          = nullptr; // What good do these "DUMMY_INIT"s do?
9523         GenTreePtr      newObjThisPtr = DUMMY_INIT(NULL);
9524         bool            uns           = DUMMY_INIT(false);
9525
9526         /* Get the next opcode and the size of its parameters */
9527
9528         OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
9529         codeAddr += sizeof(__int8);
9530
9531 #ifdef DEBUG
9532         impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
9533         JITDUMP("\n    [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs);
9534 #endif
9535
9536     DECODE_OPCODE:
9537
9538         // Return if any previous code has caused inline to fail.
9539         if (compDonotInline())
9540         {
9541             return;
9542         }
9543
9544         /* Get the size of additional parameters */
9545
9546         signed int sz = opcodeSizes[opcode];
9547
9548 #ifdef DEBUG
9549         clsHnd  = NO_CLASS_HANDLE;
9550         lclTyp  = TYP_COUNT;
9551         callTyp = TYP_COUNT;
9552
9553         impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
9554         impCurOpcName = opcodeNames[opcode];
9555
9556         if (verbose && (opcode != CEE_PREFIX1))
9557         {
9558             printf("%s", impCurOpcName);
9559         }
9560
9561         /* Use assertImp() to display the opcode */
9562
9563         op1 = op2 = nullptr;
9564 #endif
9565
9566         /* See what kind of an opcode we have, then */
9567
9568         unsigned mflags   = 0;
9569         unsigned clsFlags = 0;
9570
9571         switch (opcode)
9572         {
9573             unsigned  lclNum;
9574             var_types type;
9575
9576             GenTreePtr op3;
9577             genTreeOps oper;
9578             unsigned   size;
9579
9580             int val;
9581
9582             CORINFO_SIG_INFO     sig;
9583             unsigned             flags;
9584             IL_OFFSET            jmpAddr;
9585             bool                 ovfl, unordered, callNode;
9586             bool                 ldstruct;
9587             CORINFO_CLASS_HANDLE tokenType;
9588
9589             union {
9590                 int     intVal;
9591                 float   fltVal;
9592                 __int64 lngVal;
9593                 double  dblVal;
9594             } cval;
9595
9596             case CEE_PREFIX1:
9597                 opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
9598                 codeAddr += sizeof(__int8);
9599                 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
9600                 goto DECODE_OPCODE;
9601
9602             SPILL_APPEND:
9603
9604                 // We need to call impSpillLclRefs() for a struct type lclVar.
9605                 // This is done for non-block assignments in the handling of stloc.
9606                 if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtOp.gtOp1) &&
9607                     (op1->gtOp.gtOp1->gtOper == GT_LCL_VAR))
9608                 {
9609                     impSpillLclRefs(op1->gtOp.gtOp1->AsLclVarCommon()->gtLclNum);
9610                 }
9611
9612                 /* Append 'op1' to the list of statements */
9613                 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
9614                 goto DONE_APPEND;
9615
9616             APPEND:
9617
9618                 /* Append 'op1' to the list of statements */
9619
9620                 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9621                 goto DONE_APPEND;
9622
9623             DONE_APPEND:
9624
9625 #ifdef DEBUG
9626                 // Remember at which BC offset the tree was finished
9627                 impNoteLastILoffs();
9628 #endif
9629                 break;
9630
9631             case CEE_LDNULL:
9632                 impPushNullObjRefOnStack();
9633                 break;
9634
9635             case CEE_LDC_I4_M1:
9636             case CEE_LDC_I4_0:
9637             case CEE_LDC_I4_1:
9638             case CEE_LDC_I4_2:
9639             case CEE_LDC_I4_3:
9640             case CEE_LDC_I4_4:
9641             case CEE_LDC_I4_5:
9642             case CEE_LDC_I4_6:
9643             case CEE_LDC_I4_7:
9644             case CEE_LDC_I4_8:
9645                 cval.intVal = (opcode - CEE_LDC_I4_0);
9646                 assert(-1 <= cval.intVal && cval.intVal <= 8);
9647                 goto PUSH_I4CON;
9648
9649             case CEE_LDC_I4_S:
9650                 cval.intVal = getI1LittleEndian(codeAddr);
9651                 goto PUSH_I4CON;
9652             case CEE_LDC_I4:
9653                 cval.intVal = getI4LittleEndian(codeAddr);
9654                 goto PUSH_I4CON;
9655             PUSH_I4CON:
9656                 JITDUMP(" %d", cval.intVal);
9657                 impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT));
9658                 break;
9659
9660             case CEE_LDC_I8:
9661                 cval.lngVal = getI8LittleEndian(codeAddr);
9662                 JITDUMP(" 0x%016llx", cval.lngVal);
9663                 impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG));
9664                 break;
9665
9666             case CEE_LDC_R8:
9667                 cval.dblVal = getR8LittleEndian(codeAddr);
9668                 JITDUMP(" %#.17g", cval.dblVal);
9669                 impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE));
9670                 break;
9671
9672             case CEE_LDC_R4:
9673                 cval.dblVal = getR4LittleEndian(codeAddr);
9674                 JITDUMP(" %#.17g", cval.dblVal);
9675                 {
9676                     GenTreePtr cnsOp = gtNewDconNode(cval.dblVal);
9677 #if !FEATURE_X87_DOUBLES
9678                     // The x87 FP stack doesn't differentiate between float and double,
9679                     // so there R4 is treated as R8; every other target keeps TYP_FLOAT.
9680                     cnsOp->gtType = TYP_FLOAT;
9681 #endif // FEATURE_X87_DOUBLES
9682                     impPushOnStack(cnsOp, typeInfo(TI_DOUBLE));
9683                 }
9684                 break;
9685
9686             case CEE_LDSTR:
9687
9688                 if (compIsForInlining())
9689                 {
9690                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_NO_CALLEE_LDSTR)
9691                     {
9692                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_LDSTR_RESTRICTION);
9693                         return;
9694                     }
9695                 }
9696
9697                 val = getU4LittleEndian(codeAddr);
9698                 JITDUMP(" %08X", val);
9699                 if (tiVerificationNeeded)
9700                 {
9701                     Verify(info.compCompHnd->isValidStringRef(info.compScopeHnd, val), "bad string");
9702                     tiRetVal = typeInfo(TI_REF, impGetStringClass());
9703                 }
9704                 impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal);
9705
9706                 break;
9707
9708             case CEE_LDARG:
9709                 lclNum = getU2LittleEndian(codeAddr);
9710                 JITDUMP(" %u", lclNum);
9711                 impLoadArg(lclNum, opcodeOffs + sz + 1);
9712                 break;
9713
9714             case CEE_LDARG_S:
9715                 lclNum = getU1LittleEndian(codeAddr);
9716                 JITDUMP(" %u", lclNum);
9717                 impLoadArg(lclNum, opcodeOffs + sz + 1);
9718                 break;
9719
9720             case CEE_LDARG_0:
9721             case CEE_LDARG_1:
9722             case CEE_LDARG_2:
9723             case CEE_LDARG_3:
9724                 lclNum = (opcode - CEE_LDARG_0);
9725                 assert(lclNum >= 0 && lclNum < 4);
9726                 impLoadArg(lclNum, opcodeOffs + sz + 1);
9727                 break;
9728
9729             case CEE_LDLOC:
9730                 lclNum = getU2LittleEndian(codeAddr);
9731                 JITDUMP(" %u", lclNum);
9732                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9733                 break;
9734
9735             case CEE_LDLOC_S:
9736                 lclNum = getU1LittleEndian(codeAddr);
9737                 JITDUMP(" %u", lclNum);
9738                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9739                 break;
9740
9741             case CEE_LDLOC_0:
9742             case CEE_LDLOC_1:
9743             case CEE_LDLOC_2:
9744             case CEE_LDLOC_3:
9745                 lclNum = (opcode - CEE_LDLOC_0);
9746                 assert(lclNum >= 0 && lclNum < 4);
9747                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9748                 break;
9749
9750             case CEE_STARG:
9751                 lclNum = getU2LittleEndian(codeAddr);
9752                 goto STARG;
9753
9754             case CEE_STARG_S:
9755                 lclNum = getU1LittleEndian(codeAddr);
9756             STARG:
9757                 JITDUMP(" %u", lclNum);
9758
9759                 if (tiVerificationNeeded)
9760                 {
9761                     Verify(lclNum < info.compILargsCount, "bad arg num");
9762                 }
9763
9764                 if (compIsForInlining())
9765                 {
9766                     op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
9767                     noway_assert(op1->gtOper == GT_LCL_VAR);
9768                     lclNum = op1->AsLclVar()->gtLclNum;
9769
9770                     goto VAR_ST_VALID;
9771                 }
9772
9773                 lclNum = compMapILargNum(lclNum); // account for possible hidden param
9774                 assertImp(lclNum < numArgs);
9775
9776                 if (lclNum == info.compThisArg)
9777                 {
9778                     lclNum = lvaArg0Var;
9779                 }
9780                 lvaTable[lclNum].lvArgWrite = 1;
9781
9782                 if (tiVerificationNeeded)
9783                 {
9784                     typeInfo& tiLclVar = lvaTable[lclNum].lvVerTypeInfo;
9785                     Verify(tiCompatibleWith(impStackTop().seTypeInfo, NormaliseForStack(tiLclVar), true),
9786                            "type mismatch");
9787
9788                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
9789                     {
9790                         Verify(!tiLclVar.IsThisPtr(), "storing to uninit this ptr");
9791                     }
9792                 }
9793
9794                 goto VAR_ST;
9795
9796             case CEE_STLOC:
9797                 lclNum = getU2LittleEndian(codeAddr);
9798                 JITDUMP(" %u", lclNum);
9799                 goto LOC_ST;
9800
9801             case CEE_STLOC_S:
9802                 lclNum = getU1LittleEndian(codeAddr);
9803                 JITDUMP(" %u", lclNum);
9804                 goto LOC_ST;
9805
9806             case CEE_STLOC_0:
9807             case CEE_STLOC_1:
9808             case CEE_STLOC_2:
9809             case CEE_STLOC_3:
9810                 lclNum = (opcode - CEE_STLOC_0);
9811                 assert(lclNum >= 0 && lclNum < 4);
9812
9813             LOC_ST:
9814                 if (tiVerificationNeeded)
9815                 {
9816                     Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
9817                     Verify(tiCompatibleWith(impStackTop().seTypeInfo,
9818                                             NormaliseForStack(lvaTable[lclNum + numArgs].lvVerTypeInfo), true),
9819                            "type mismatch");
9820                 }
9821
9822                 if (compIsForInlining())
9823                 {
9824                     lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
9825
9826                     /* Have we allocated a temp for this local? */
9827
9828                     lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp"));
9829
9830                     goto _PopValue;
9831                 }
9832
9833                 lclNum += numArgs;
9834
9835             VAR_ST:
9836
9837                 if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var)
9838                 {
9839                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
9840                     BADCODE("Bad IL");
9841                 }
9842
9843             VAR_ST_VALID:
9844
9845                 /* if it is a struct assignment, make certain we don't overflow the buffer */
9846                 assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd));
9847
9848                 if (lvaTable[lclNum].lvNormalizeOnLoad())
9849                 {
9850                     lclTyp = lvaGetRealType(lclNum);
9851                 }
9852                 else
9853                 {
9854                     lclTyp = lvaGetActualType(lclNum);
9855                 }
9856
9857             _PopValue:
9858                 /* Pop the value being assigned */
9859
9860                 {
9861                     StackEntry se = impPopStack(clsHnd);
9862                     op1           = se.val;
9863                     tiRetVal      = se.seTypeInfo;
9864                 }
9865
9866 #ifdef FEATURE_SIMD
9867                 if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet()))
9868                 {
9869                     assert(op1->TypeGet() == TYP_STRUCT);
9870                     op1->gtType = lclTyp;
9871                 }
9872 #endif // FEATURE_SIMD
9873
9874                 op1 = impImplicitIorI4Cast(op1, lclTyp);
9875
9876 #ifdef _TARGET_64BIT_
9877                 // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
9878                 if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT))
9879                 {
9880                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
9881                     op1 = gtNewCastNode(TYP_INT, op1, TYP_INT);
9882                 }
9883 #endif // _TARGET_64BIT_
9884
9885                 // We had better assign it a value of the correct type
9886                 assertImp(
9887                     genActualType(lclTyp) == genActualType(op1->gtType) ||
9888                     (genActualType(lclTyp) == TYP_I_IMPL && op1->IsVarAddr()) ||
9889                     (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) ||
9890                     (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) ||
9891                     (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) ||
9892                     ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF));
9893
9894                 /* If op1 is "&var" then its type is the transient "*" and it can
9895                    be used either as TYP_BYREF or TYP_I_IMPL */
9896
9897                 if (op1->IsVarAddr())
9898                 {
9899                     assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF);
9900
9901                     /* When "&var" is created, we assume it is a byref. If it is
9902                        being assigned to a TYP_I_IMPL var, change the type to
9903                        prevent unnecessary GC info */
9904
9905                     if (genActualType(lclTyp) == TYP_I_IMPL)
9906                     {
9907                         op1->gtType = TYP_I_IMPL;
9908                     }
9909                 }
9910
9911                 /* Filter out simple assignments to itself */
9912
9913                 if (op1->gtOper == GT_LCL_VAR && lclNum == op1->gtLclVarCommon.gtLclNum)
9914                 {
9915                     if (insertLdloc)
9916                     {
9917                         // This is a sequence of (ldloc, dup, stloc).  Can simplify
9918                         // to (ldloc, stloc).  Call impLoadVar below to reconstruct the ldloc node.
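                                 // For example, IL like "ldloc.0; dup; stloc.0" stores local 0 back
                                 // into itself; the store can be dropped and impLoadVar below simply
                                 // pushes the local's value again.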
9919                         CLANG_FORMAT_COMMENT_ANCHOR;
9920
9921 #ifdef DEBUG
9922                         if (tiVerificationNeeded)
9923                         {
9924                             assert(
9925                                 typeInfo::AreEquivalent(tiRetVal, NormaliseForStack(lvaTable[lclNum].lvVerTypeInfo)));
9926                         }
9927 #endif
9928
9929                         op1         = nullptr;
9930                         insertLdloc = false;
9931
9932                         impLoadVar(lclNum, opcodeOffs + sz + 1);
9933                         break;
9934                     }
9935                     else if (opts.compDbgCode)
9936                     {
9937                         op1 = gtNewNothingNode();
9938                         goto SPILL_APPEND;
9939                     }
9940                     else
9941                     {
9942                         break;
9943                     }
9944                 }
9945
9946                 /* Create the assignment node */
9947
9948                 op2 = gtNewLclvNode(lclNum, lclTyp, opcodeOffs + sz + 1);
9949
9950                 /* If the local is aliased, we need to spill calls and
9951                    indirections from the stack. */
9952
9953                 if ((lvaTable[lclNum].lvAddrExposed || lvaTable[lclNum].lvHasLdAddrOp) &&
9954                     verCurrentState.esStackDepth > 0)
9955                 {
9956                     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased"));
9957                 }
9958
9959                 /* Spill any refs to the local from the stack */
9960
9961                 impSpillLclRefs(lclNum);
9962
9963 #if !FEATURE_X87_DOUBLES
9964                 // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
9965                 // We insert a cast to the dest 'op2' type
9966                 //
9967                 if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
9968                     varTypeIsFloating(op2->gtType))
9969                 {
9970                     op1 = gtNewCastNode(op2->TypeGet(), op1, op2->TypeGet());
9971                 }
9972 #endif // !FEATURE_X87_DOUBLES
9973
9974                 if (varTypeIsStruct(lclTyp))
9975                 {
9976                     op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL);
9977                 }
9978                 else
9979                 {
9980                     // The code generator generates GC tracking information
9981                     // based on the RHS of the assignment.  Later the LHS (which is
9982                     // a BYREF) gets used and the emitter checks that that variable
9983                     // is being tracked.  It is not (since the RHS was an int and did
9984                     // not need tracking).  To keep this assert happy, we change the RHS
9985                     if (lclTyp == TYP_BYREF && !varTypeIsGC(op1->gtType))
9986                     {
9987                         op1->gtType = TYP_BYREF;
9988                     }
9989                     op1 = gtNewAssignNode(op2, op1);
9990                 }
9991
9992                 /* If insertLdloc is true, then we need to insert a ldloc following the
9993                    stloc.  This is done when converting a (dup, stloc) sequence into
9994                    a (stloc, ldloc) sequence. */
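                      /* For example, "ldc.i4.5; dup; stloc.0" is imported as if it were
                         "ldc.i4.5; stloc.0; ldloc.0": the store is appended here and the
                         reload is materialized by the impLoadVar call below. */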
9995
9996                 if (insertLdloc)
9997                 {
9998                     // From SPILL_APPEND
9999                     impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
10000
10001 #ifdef DEBUG
10002                     // From DONE_APPEND
10003                     impNoteLastILoffs();
10004 #endif
10005                     op1         = nullptr;
10006                     insertLdloc = false;
10007
10008                     impLoadVar(lclNum, opcodeOffs + sz + 1, tiRetVal);
10009                     break;
10010                 }
10011
10012                 goto SPILL_APPEND;
10013
10014             case CEE_LDLOCA:
10015                 lclNum = getU2LittleEndian(codeAddr);
10016                 goto LDLOCA;
10017
10018             case CEE_LDLOCA_S:
10019                 lclNum = getU1LittleEndian(codeAddr);
10020             LDLOCA:
10021                 JITDUMP(" %u", lclNum);
10022                 if (tiVerificationNeeded)
10023                 {
10024                     Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
10025                     Verify(info.compInitMem, "initLocals not set");
10026                 }
10027
10028                 if (compIsForInlining())
10029                 {
10030                     // Get the local type
10031                     lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
10032
10033                     /* Have we allocated a temp for this local? */
10034
10035                     lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp"));
10036
10037                     op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum));
10038
10039                     goto _PUSH_ADRVAR;
10040                 }
10041
10042                 lclNum += numArgs;
10043                 assertImp(lclNum < info.compLocalsCount);
10044                 goto ADRVAR;
10045
10046             case CEE_LDARGA:
10047                 lclNum = getU2LittleEndian(codeAddr);
10048                 goto LDARGA;
10049
10050             case CEE_LDARGA_S:
10051                 lclNum = getU1LittleEndian(codeAddr);
10052             LDARGA:
10053                 JITDUMP(" %u", lclNum);
10054                 Verify(lclNum < info.compILargsCount, "bad arg num");
10055
10056                 if (compIsForInlining())
10057                 {
10058                     // In IL, LDARGA(_S) is used to load the address (a managed byref) of a struct argument,
10059                     // typically followed by a ldfld to load one of its fields.
10060
10061                     op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
10062                     if (op1->gtOper != GT_LCL_VAR)
10063                     {
10064                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR);
10065                         return;
10066                     }
10067
10068                     assert(op1->gtOper == GT_LCL_VAR);
10069
10070                     goto _PUSH_ADRVAR;
10071                 }
10072
10073                 lclNum = compMapILargNum(lclNum); // account for possible hidden param
10074                 assertImp(lclNum < numArgs);
10075
10076                 if (lclNum == info.compThisArg)
10077                 {
10078                     lclNum = lvaArg0Var;
10079                 }
10080
10081                 goto ADRVAR;
10082
10083             ADRVAR:
10084
10085                 op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum), opcodeOffs + sz + 1);
10086
10087             _PUSH_ADRVAR:
10088                 assert(op1->gtOper == GT_LCL_VAR);
10089
10090                 /* Note that this is supposed to create the transient type "*"
10091                    which may be used as a TYP_I_IMPL. However we catch places
10092                    where it is used as a TYP_I_IMPL and change the node if needed.
10093                    Thus we are pessimistic and may report byrefs in the GC info
10094                    where it was not absolutely needed, but it is safer this way.
10095                  */
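                       /* For example, "ldloca.s 0" on an int local pushes an address that later
                          IL may use either as a managed byref (ldind/stind) or as a native int;
                          we start it out as TYP_BYREF and retype it where an I_IMPL use is seen. */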
10096                 op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10097
10098                 // &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar itself does
10099                 assert((op1->gtFlags & GTF_GLOB_REF) == 0);
10100
10101                 tiRetVal = lvaTable[lclNum].lvVerTypeInfo;
10102                 if (tiVerificationNeeded)
10103                 {
10104                     // Don't allow taking address of uninit this ptr.
10105                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
10106                     {
10107                         Verify(!tiRetVal.IsThisPtr(), "address of uninit this ptr");
10108                     }
10109
10110                     if (!tiRetVal.IsByRef())
10111                     {
10112                         tiRetVal.MakeByRef();
10113                     }
10114                     else
10115                     {
10116                         Verify(false, "byref to byref");
10117                     }
10118                 }
10119
10120                 impPushOnStack(op1, tiRetVal);
10121                 break;
10122
10123             case CEE_ARGLIST:
10124
10125                 if (!info.compIsVarArgs)
10126                 {
10127                     BADCODE("arglist in non-vararg method");
10128                 }
10129
10130                 if (tiVerificationNeeded)
10131                 {
10132                     tiRetVal = typeInfo(TI_STRUCT, impGetRuntimeArgumentHandle());
10133                 }
10134                 assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG);
10135
10136                 /* The ARGLIST cookie is a hidden 'last' parameter; we have already
10137                    adjusted the arg count because this is like fetching the last param */
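                      /* For example, a C#-style "__arglist" method typically passes this cookie to
                         System.ArgIterator, which then walks the variable arguments at run time. */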
10138                 assertImp(0 < numArgs);
10139                 assert(lvaTable[lvaVarargsHandleArg].lvAddrExposed);
10140                 lclNum = lvaVarargsHandleArg;
10141                 op1    = gtNewLclvNode(lclNum, TYP_I_IMPL, opcodeOffs + sz + 1);
10142                 op1    = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10143                 impPushOnStack(op1, tiRetVal);
10144                 break;
10145
10146             case CEE_ENDFINALLY:
10147
10148                 if (compIsForInlining())
10149                 {
10150                     assert(!"Shouldn't have exception handlers in the inliner!");
10151                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY);
10152                     return;
10153                 }
10154
10155                 if (verCurrentState.esStackDepth > 0)
10156                 {
10157                     impEvalSideEffects();
10158                 }
10159
10160                 if (info.compXcptnsCount == 0)
10161                 {
10162                     BADCODE("endfinally outside finally");
10163                 }
10164
10165                 assert(verCurrentState.esStackDepth == 0);
10166
10167                 op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr);
10168                 goto APPEND;
10169
10170             case CEE_ENDFILTER:
10171
10172                 if (compIsForInlining())
10173                 {
10174                     assert(!"Shouldn't have exception handlers in the inliner!");
10175                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER);
10176                     return;
10177                 }
10178
10179                 block->bbSetRunRarely(); // filters are rare
10180
10181                 if (info.compXcptnsCount == 0)
10182                 {
10183                     BADCODE("endfilter outside filter");
10184                 }
10185
10186                 if (tiVerificationNeeded)
10187                 {
10188                     Verify(impStackTop().seTypeInfo.IsType(TI_INT), "bad endfilt arg");
10189                 }
10190
10191                 op1 = impPopStack().val;
10192                 assertImp(op1->gtType == TYP_INT);
10193                 if (!bbInFilterILRange(block))
10194                 {
10195                     BADCODE("EndFilter outside a filter handler");
10196                 }
10197
10198                 /* Mark current bb as end of filter */
10199
10200                 assert(compCurBB->bbFlags & BBF_DONT_REMOVE);
10201                 assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET);
10202
10203                 /* Mark catch handler as successor */
10204
10205                 op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1);
10206                 if (verCurrentState.esStackDepth != 0)
10207                 {
10208                     verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__)
10209                                                 DEBUGARG(__LINE__));
10210                 }
10211                 goto APPEND;
10212
10213             case CEE_RET:
10214                 prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it
10215             RET:
10216                 if (!impReturnInstruction(block, prefixFlags, opcode))
10217                 {
10218                     return; // abort
10219                 }
10220                 else
10221                 {
10222                     break;
10223                 }
10224
10225             case CEE_JMP:
10226
10227                 assert(!compIsForInlining());
10228
10229                 if (tiVerificationNeeded)
10230                 {
10231                     Verify(false, "Invalid opcode: CEE_JMP");
10232                 }
10233
10234                 if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex())
10235                 {
10236                     /* CEE_JMP does not make sense in some "protected" regions. */
10237
10238                     BADCODE("Jmp not allowed in protected region");
10239                 }
10240
10241                 if (verCurrentState.esStackDepth != 0)
10242                 {
10243                     BADCODE("Stack must be empty after CEE_JMPs");
10244                 }
10245
10246                 _impResolveToken(CORINFO_TOKENKIND_Method);
10247
10248                 JITDUMP(" %08X", resolvedToken.token);
10249
10250                 /* The signature of the target has to be identical to ours.
10251                    At least check that argCnt and returnType match */
10252
10253                 eeGetMethodSig(resolvedToken.hMethod, &sig);
10254                 if (sig.numArgs != info.compMethodInfo->args.numArgs ||
10255                     sig.retType != info.compMethodInfo->args.retType ||
10256                     sig.callConv != info.compMethodInfo->args.callConv)
10257                 {
10258                     BADCODE("Incompatible target for CEE_JMPs");
10259                 }
10260
10261 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARMARCH_)
10262
10263                 op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod);
10264
10265                 /* Mark the basic block as being a JUMP instead of RETURN */
10266
10267                 block->bbFlags |= BBF_HAS_JMP;
10268
10269                 /* Set this flag to make sure register arguments have a location assigned
10270                  * even if we don't use them inside the method */
10271
10272                 compJmpOpUsed = true;
10273
10274                 fgNoStructPromotion = true;
10275
10276                 goto APPEND;
10277
10278 #else // !_TARGET_XARCH_ && !_TARGET_ARMARCH_
10279
10280                 // Import this just like a series of LDARGs + tail. + call + ret
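                      // Roughly, for a method with two args, "jmp target" is imported as if the IL were
                      //     ldarg.0; ldarg.1; tail. call target; ret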
10281
10282                 if (info.compIsVarArgs)
10283                 {
10284                     // For now we don't implement true tail calls, so this breaks varargs.
10285                     // Warn the user instead of generating bad code.
10286                     // This is a semi-temporary workaround for DevDiv 173860, until we can properly
10287                     // implement true tail calls.
10288                     IMPL_LIMITATION("varargs + CEE_JMP doesn't work yet");
10289                 }
10290
10291                 // First load up the arguments (0 - N)
10292                 for (unsigned argNum = 0; argNum < info.compILargsCount; argNum++)
10293                 {
10294                     impLoadArg(argNum, opcodeOffs + sz + 1);
10295                 }
10296
10297                 // Now generate the tail call
10298                 noway_assert(prefixFlags == 0);
10299                 prefixFlags = PREFIX_TAILCALL_EXPLICIT;
10300                 opcode      = CEE_CALL;
10301
10302                 eeGetCallInfo(&resolvedToken, NULL,
10303                               combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS), &callInfo);
10304
10305                 // All calls and delegates need a security callout.
10306                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
10307
10308                 callTyp = impImportCall(CEE_CALL, &resolvedToken, NULL, NULL, PREFIX_TAILCALL_EXPLICIT, &callInfo,
10309                                         opcodeOffs);
10310
10311                 // And finish with the ret
10312                 goto RET;
10313
10314 #endif // _TARGET_XARCH_ || _TARGET_ARMARCH_
10315
10316             case CEE_LDELEMA:
10317                 assertImp(sz == sizeof(unsigned));
10318
10319                 _impResolveToken(CORINFO_TOKENKIND_Class);
10320
10321                 JITDUMP(" %08X", resolvedToken.token);
10322
10323                 ldelemClsHnd = resolvedToken.hClass;
10324
10325                 if (tiVerificationNeeded)
10326                 {
10327                     typeInfo tiArray = impStackTop(1).seTypeInfo;
10328                     typeInfo tiIndex = impStackTop().seTypeInfo;
10329
10330                     // As per ECMA, the 'index' operand can be either an int32 or a native int.
10331                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10332
10333                     typeInfo arrayElemType = verMakeTypeInfo(ldelemClsHnd);
10334                     Verify(tiArray.IsNullObjRef() ||
10335                                typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElemType),
10336                            "bad array");
10337
10338                     tiRetVal = arrayElemType;
10339                     tiRetVal.MakeByRef();
10340                     if (prefixFlags & PREFIX_READONLY)
10341                     {
10342                         tiRetVal.SetIsReadonlyByRef();
10343                     }
10344
10345                     // an array interior pointer is always in the heap
10346                     tiRetVal.SetIsPermanentHomeByRef();
10347                 }
10348
10349                 // If it's a value class array we just do a simple address-of
10350                 if (eeIsValueClass(ldelemClsHnd))
10351                 {
10352                     CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd);
10353                     if (cit == CORINFO_TYPE_UNDEF)
10354                     {
10355                         lclTyp = TYP_STRUCT;
10356                     }
10357                     else
10358                     {
10359                         lclTyp = JITtype2varType(cit);
10360                     }
10361                     goto ARR_LD_POST_VERIFY;
10362                 }
10363
10364                 // Similarly, if it's a readonly access, we can do a simple address-of
10365                 // without doing a runtime type-check
10366                 if (prefixFlags & PREFIX_READONLY)
10367                 {
10368                     lclTyp = TYP_REF;
10369                     goto ARR_LD_POST_VERIFY;
10370                 }
10371
10372                 // Otherwise we need the full helper function with run-time type check
10373                 op1 = impTokenToHandle(&resolvedToken);
10374                 if (op1 == nullptr)
10375                 { // compDonotInline()
10376                     return;
10377                 }
10378
10379                 args = gtNewArgList(op1);                      // Type
10380                 args = gtNewListNode(impPopStack().val, args); // index
10381                 args = gtNewListNode(impPopStack().val, args); // array
10382                 op1  = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, GTF_EXCEPT, args);
10383
10384                 impPushOnStack(op1, tiRetVal);
10385                 break;
10386
10387             // ldelem for reference and value types
10388             case CEE_LDELEM:
10389                 assertImp(sz == sizeof(unsigned));
10390
10391                 _impResolveToken(CORINFO_TOKENKIND_Class);
10392
10393                 JITDUMP(" %08X", resolvedToken.token);
10394
10395                 ldelemClsHnd = resolvedToken.hClass;
10396
10397                 if (tiVerificationNeeded)
10398                 {
10399                     typeInfo tiArray = impStackTop(1).seTypeInfo;
10400                     typeInfo tiIndex = impStackTop().seTypeInfo;
10401
10402                     // As per ECMA, the 'index' operand can be either an int32 or a native int.
10403                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10404                     tiRetVal = verMakeTypeInfo(ldelemClsHnd);
10405
10406                     Verify(tiArray.IsNullObjRef() || tiCompatibleWith(verGetArrayElemType(tiArray), tiRetVal, false),
10407                            "type of array incompatible with type operand");
10408                     tiRetVal.NormaliseForStack();
10409                 }
10410
10411                 // If it's a reference type or generic variable type
10412                 // then just generate code as though it's a ldelem.ref instruction
10413                 if (!eeIsValueClass(ldelemClsHnd))
10414                 {
10415                     lclTyp = TYP_REF;
10416                     opcode = CEE_LDELEM_REF;
10417                 }
10418                 else
10419                 {
10420                     CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd);
10421                     lclTyp             = JITtype2varType(jitTyp);
10422                     tiRetVal           = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct
10423                     tiRetVal.NormaliseForStack();
10424                 }
10425                 goto ARR_LD_POST_VERIFY;
10426
10427             case CEE_LDELEM_I1:
10428                 lclTyp = TYP_BYTE;
10429                 goto ARR_LD;
10430             case CEE_LDELEM_I2:
10431                 lclTyp = TYP_SHORT;
10432                 goto ARR_LD;
10433             case CEE_LDELEM_I:
10434                 lclTyp = TYP_I_IMPL;
10435                 goto ARR_LD;
10436
10437             // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter
10438             // and treating it as TYP_INT avoids other asserts.
10439             case CEE_LDELEM_U4:
10440                 lclTyp = TYP_INT;
10441                 goto ARR_LD;
10442
10443             case CEE_LDELEM_I4:
10444                 lclTyp = TYP_INT;
10445                 goto ARR_LD;
10446             case CEE_LDELEM_I8:
10447                 lclTyp = TYP_LONG;
10448                 goto ARR_LD;
10449             case CEE_LDELEM_REF:
10450                 lclTyp = TYP_REF;
10451                 goto ARR_LD;
10452             case CEE_LDELEM_R4:
10453                 lclTyp = TYP_FLOAT;
10454                 goto ARR_LD;
10455             case CEE_LDELEM_R8:
10456                 lclTyp = TYP_DOUBLE;
10457                 goto ARR_LD;
10458             case CEE_LDELEM_U1:
10459                 lclTyp = TYP_UBYTE;
10460                 goto ARR_LD;
10461             case CEE_LDELEM_U2:
10462                 lclTyp = TYP_CHAR;
10463                 goto ARR_LD;
10464
10465             ARR_LD:
10466
10467                 if (tiVerificationNeeded)
10468                 {
10469                     typeInfo tiArray = impStackTop(1).seTypeInfo;
10470                     typeInfo tiIndex = impStackTop().seTypeInfo;
10471
10472                     // As per ECMA, the 'index' operand can be either an int32 or a native int.
10473                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10474                     if (tiArray.IsNullObjRef())
10475                     {
10476                         if (lclTyp == TYP_REF)
10477                         { // we will say a deref of a null array yields a null ref
10478                             tiRetVal = typeInfo(TI_NULL);
10479                         }
10480                         else
10481                         {
10482                             tiRetVal = typeInfo(lclTyp);
10483                         }
10484                     }
10485                     else
10486                     {
10487                         tiRetVal             = verGetArrayElemType(tiArray);
10488                         typeInfo arrayElemTi = typeInfo(lclTyp);
10489 #ifdef _TARGET_64BIT_
10490                         if (opcode == CEE_LDELEM_I)
10491                         {
10492                             arrayElemTi = typeInfo::nativeInt();
10493                         }
10494
10495                         if (lclTyp != TYP_REF && lclTyp != TYP_STRUCT)
10496                         {
10497                             Verify(typeInfo::AreEquivalent(tiRetVal, arrayElemTi), "bad array");
10498                         }
10499                         else
10500 #endif // _TARGET_64BIT_
10501                         {
10502                             Verify(tiRetVal.IsType(arrayElemTi.GetType()), "bad array");
10503                         }
10504                     }
10505                     tiRetVal.NormaliseForStack();
10506                 }
10507             ARR_LD_POST_VERIFY:
10508
10509                 /* Pull the index value and array address */
10510                 op2 = impPopStack().val;
10511                 op1 = impPopStack().val;
10512                 assertImp(op1->gtType == TYP_REF);
10513
10514                 /* Check for null pointer - in the inliner case we simply abort */
10515
10516                 if (compIsForInlining())
10517                 {
10518                     if (op1->gtOper == GT_CNS_INT)
10519                     {
10520                         compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM);
10521                         return;
10522                     }
10523                 }
10524
10525                 op1 = impCheckForNullPointer(op1);
10526
10527                 /* Mark the block as containing an index expression */
10528
10529                 if (op1->gtOper == GT_LCL_VAR)
10530                 {
10531                     if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD)
10532                     {
10533                         block->bbFlags |= BBF_HAS_IDX_LEN;
10534                         optMethodFlags |= OMF_HAS_ARRAYREF;
10535                     }
10536                 }
10537
10538                 /* Create the index node and push it on the stack */
10539
10540                 op1 = gtNewIndexRef(lclTyp, op1, op2);
10541
10542                 ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT);
10543
10544                 if ((opcode == CEE_LDELEMA) || ldstruct ||
10545                     (ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd)))
10546                 {
10547                     assert(ldelemClsHnd != DUMMY_INIT(NULL));
10548
10549                     // remember the element size
10550                     if (lclTyp == TYP_REF)
10551                     {
10552                         op1->gtIndex.gtIndElemSize = sizeof(void*);
10553                     }
10554                     else
10555                     {
10556                         // If ldelemClsHnd is precisely a primitive type, use that; otherwise, preserve the struct type.
10557                         if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF)
10558                         {
10559                             op1->gtIndex.gtStructElemClass = ldelemClsHnd;
10560                         }
10561                         assert(lclTyp != TYP_STRUCT || op1->gtIndex.gtStructElemClass != nullptr);
10562                         if (lclTyp == TYP_STRUCT)
10563                         {
10564                             size                       = info.compCompHnd->getClassSize(ldelemClsHnd);
10565                             op1->gtIndex.gtIndElemSize = size;
10566                             op1->gtType                = lclTyp;
10567                         }
10568                     }
10569
10570                     if ((opcode == CEE_LDELEMA) || ldstruct)
10571                     {
10572                         // wrap it in a &
10573                         lclTyp = TYP_BYREF;
10574
10575                         op1 = gtNewOperNode(GT_ADDR, lclTyp, op1);
10576                     }
10577                     else
10578                     {
10579                         assert(lclTyp != TYP_STRUCT);
10580                     }
10581                 }
10582
10583                 if (ldstruct)
10584                 {
10585                     // Create an OBJ for the result
10586                     op1 = gtNewObjNode(ldelemClsHnd, op1);
10587                     op1->gtFlags |= GTF_EXCEPT;
10588                 }
10589                 impPushOnStack(op1, tiRetVal);
10590                 break;
10591
10592             // stelem for reference and value types
10593             case CEE_STELEM:
10594
10595                 assertImp(sz == sizeof(unsigned));
10596
10597                 _impResolveToken(CORINFO_TOKENKIND_Class);
10598
10599                 JITDUMP(" %08X", resolvedToken.token);
10600
10601                 stelemClsHnd = resolvedToken.hClass;
10602
10603                 if (tiVerificationNeeded)
10604                 {
10605                     typeInfo tiArray = impStackTop(2).seTypeInfo;
10606                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
10607                     typeInfo tiValue = impStackTop().seTypeInfo;
10608
10609                     // As per ECMA, the 'index' operand can be either an int32 or a native int.
10610                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10611                     typeInfo arrayElem = verMakeTypeInfo(stelemClsHnd);
10612
10613                     Verify(tiArray.IsNullObjRef() || tiCompatibleWith(arrayElem, verGetArrayElemType(tiArray), false),
10614                            "type operand incompatible with array element type");
10615                     arrayElem.NormaliseForStack();
10616                     Verify(tiCompatibleWith(tiValue, arrayElem, true), "value incompatible with type operand");
10617                 }
10618
10619                 // If it's a reference type just behave as though it's a stelem.ref instruction
10620                 if (!eeIsValueClass(stelemClsHnd))
10621                 {
10622                     goto STELEM_REF_POST_VERIFY;
10623                 }
10624
10625                 // Otherwise extract the type
10626                 {
10627                     CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd);
10628                     lclTyp             = JITtype2varType(jitTyp);
10629                     goto ARR_ST_POST_VERIFY;
10630                 }
10631
10632             case CEE_STELEM_REF:
10633
10634                 if (tiVerificationNeeded)
10635                 {
10636                     typeInfo tiArray = impStackTop(2).seTypeInfo;
10637                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
10638                     typeInfo tiValue = impStackTop().seTypeInfo;
10639
10640                     // As per ECMA, the 'index' operand can be either an int32 or a native int.
10641                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10642                     Verify(tiValue.IsObjRef(), "bad value");
10643
10644                     // We only check that it is an object reference; the helper does additional checks.
10645                     Verify(tiArray.IsNullObjRef() || verGetArrayElemType(tiArray).IsType(TI_REF), "bad array");
10646                 }
10647
10648                 arrayNodeTo      = impStackTop(2).val;
10649                 arrayNodeToIndex = impStackTop(1).val;
10650                 arrayNodeFrom    = impStackTop().val;
10651
10652                 //
10653                 // Note that it is not legal to optimize away CORINFO_HELP_ARRADDR_ST in
10654                 // many cases because of covariance, i.e. foo[] can be cast to object[].
10655                 //
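                      // For example, after "object[] a = new string[1];", the store "a[0] = new object()"
                      // must raise ArrayTypeMismatchException, which the helper's element-type check provides.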
10656
10657                 // Check for assignment to the same array, i.e. arrLcl[i] = arrLcl[j].
10658                 // This does not need CORINFO_HELP_ARRADDR_ST.
10659
10660                 if (arrayNodeFrom->OperGet() == GT_INDEX && arrayNodeFrom->gtOp.gtOp1->gtOper == GT_LCL_VAR &&
10661                     arrayNodeTo->gtOper == GT_LCL_VAR &&
10662                     arrayNodeTo->gtLclVarCommon.gtLclNum == arrayNodeFrom->gtOp.gtOp1->gtLclVarCommon.gtLclNum &&
10663                     !lvaTable[arrayNodeTo->gtLclVarCommon.gtLclNum].lvAddrExposed)
10664                 {
10665                     lclTyp = TYP_REF;
10666                     goto ARR_ST_POST_VERIFY;
10667                 }
10668
10669                 // Check for assignment of NULL. This does not need CORINFO_HELP_ARRADDR_ST
10670
10671                 if (arrayNodeFrom->OperGet() == GT_CNS_INT)
10672                 {
10673                     assert(arrayNodeFrom->gtType == TYP_REF && arrayNodeFrom->gtIntCon.gtIconVal == 0);
10674
10675                     lclTyp = TYP_REF;
10676                     goto ARR_ST_POST_VERIFY;
10677                 }
10678
10679             STELEM_REF_POST_VERIFY:
10680
10681                 /* Call a helper function to do the assignment */
10682                 op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, 0, impPopList(3, &flags, nullptr));
10683
10684                 goto SPILL_APPEND;
10685
10686             case CEE_STELEM_I1:
10687                 lclTyp = TYP_BYTE;
10688                 goto ARR_ST;
10689             case CEE_STELEM_I2:
10690                 lclTyp = TYP_SHORT;
10691                 goto ARR_ST;
10692             case CEE_STELEM_I:
10693                 lclTyp = TYP_I_IMPL;
10694                 goto ARR_ST;
10695             case CEE_STELEM_I4:
10696                 lclTyp = TYP_INT;
10697                 goto ARR_ST;
10698             case CEE_STELEM_I8:
10699                 lclTyp = TYP_LONG;
10700                 goto ARR_ST;
10701             case CEE_STELEM_R4:
10702                 lclTyp = TYP_FLOAT;
10703                 goto ARR_ST;
10704             case CEE_STELEM_R8:
10705                 lclTyp = TYP_DOUBLE;
10706                 goto ARR_ST;
10707
10708             ARR_ST:
10709
10710                 if (tiVerificationNeeded)
10711                 {
10712                     typeInfo tiArray = impStackTop(2).seTypeInfo;
10713                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
10714                     typeInfo tiValue = impStackTop().seTypeInfo;
10715
10716                     // As per ECMA, the 'index' operand can be either an int32 or a native int.
10717                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10718                     typeInfo arrayElem = typeInfo(lclTyp);
10719 #ifdef _TARGET_64BIT_
10720                     if (opcode == CEE_STELEM_I)
10721                     {
10722                         arrayElem = typeInfo::nativeInt();
10723                     }
10724 #endif // _TARGET_64BIT_
10725                     Verify(tiArray.IsNullObjRef() || typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElem),
10726                            "bad array");
10727
10728                     Verify(tiCompatibleWith(NormaliseForStack(tiValue), arrayElem.NormaliseForStack(), true),
10729                            "bad value");
10730                 }
10731
10732             ARR_ST_POST_VERIFY:
10733                 /* The strict order of evaluation is LHS-operands, RHS-operands,
10734                    range-check, and then assignment. However, codegen currently
10735                    does the range-check before evaluating the RHS-operands. So to
10736                    maintain strict ordering, we spill the stack. */
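                      /* For example, in "arr[i] = SomeCall()", the call must be evaluated before the
                         range check on arr[i]; spilling it to a temp here preserves that order even
                         though codegen emits the range check before the RHS. */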
10737
10738                 if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT)
10739                 {
10740                     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
10741                                                    "Strict ordering of exceptions for Array store"));
10742                 }
10743
10744                 /* Pull the new value from the stack */
10745                 op2 = impPopStack().val;
10746
10747                 /* Pull the index value */
10748                 op1 = impPopStack().val;
10749
10750                 /* Pull the array address */
10751                 op3 = impPopStack().val;
10752
10753                 assertImp(op3->gtType == TYP_REF);
10754                 if (op2->IsVarAddr())
10755                 {
10756                     op2->gtType = TYP_I_IMPL;
10757                 }
10758
10759                 op3 = impCheckForNullPointer(op3);
10760
10761                 // Mark the block as containing an index expression
10762
10763                 if (op3->gtOper == GT_LCL_VAR)
10764                 {
10765                     if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD)
10766                     {
10767                         block->bbFlags |= BBF_HAS_IDX_LEN;
10768                         optMethodFlags |= OMF_HAS_ARRAYREF;
10769                     }
10770                 }
10771
10772                 /* Create the index node */
10773
10774                 op1 = gtNewIndexRef(lclTyp, op3, op1);
10775
10776                 /* Create the assignment node and append it */
10777
10778                 if (lclTyp == TYP_STRUCT)
10779                 {
10780                     assert(stelemClsHnd != DUMMY_INIT(NULL));
10781
10782                     op1->gtIndex.gtStructElemClass = stelemClsHnd;
10783                     op1->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(stelemClsHnd);
10784                 }
10785                 if (varTypeIsStruct(op1))
10786                 {
10787                     op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL);
10788                 }
10789                 else
10790                 {
10791                     op2 = impImplicitR4orR8Cast(op2, op1->TypeGet());
10792                     op1 = gtNewAssignNode(op1, op2);
10793                 }
10794
10795                 /* Mark the expression as containing an assignment */
10796
10797                 op1->gtFlags |= GTF_ASG;
10798
10799                 goto SPILL_APPEND;
10800
10801             case CEE_ADD:
10802                 oper = GT_ADD;
10803                 goto MATH_OP2;
10804
10805             case CEE_ADD_OVF:
10806                 uns = false;
10807                 goto ADD_OVF;
10808             case CEE_ADD_OVF_UN:
10809                 uns = true;
10810                 goto ADD_OVF;
10811
10812             ADD_OVF:
10813                 ovfl     = true;
10814                 callNode = false;
10815                 oper     = GT_ADD;
10816                 goto MATH_OP2_FLAGS;
10817
10818             case CEE_SUB:
10819                 oper = GT_SUB;
10820                 goto MATH_OP2;
10821
10822             case CEE_SUB_OVF:
10823                 uns = false;
10824                 goto SUB_OVF;
10825             case CEE_SUB_OVF_UN:
10826                 uns = true;
10827                 goto SUB_OVF;
10828
10829             SUB_OVF:
10830                 ovfl     = true;
10831                 callNode = false;
10832                 oper     = GT_SUB;
10833                 goto MATH_OP2_FLAGS;
10834
10835             case CEE_MUL:
10836                 oper = GT_MUL;
10837                 goto MATH_MAYBE_CALL_NO_OVF;
10838
10839             case CEE_MUL_OVF:
10840                 uns = false;
10841                 goto MUL_OVF;
10842             case CEE_MUL_OVF_UN:
10843                 uns = true;
10844                 goto MUL_OVF;
10845
10846             MUL_OVF:
10847                 ovfl = true;
10848                 oper = GT_MUL;
10849                 goto MATH_MAYBE_CALL_OVF;
10850
10851             // Other binary math operations
10852
10853             case CEE_DIV:
10854                 oper = GT_DIV;
10855                 goto MATH_MAYBE_CALL_NO_OVF;
10856
10857             case CEE_DIV_UN:
10858                 oper = GT_UDIV;
10859                 goto MATH_MAYBE_CALL_NO_OVF;
10860
10861             case CEE_REM:
10862                 oper = GT_MOD;
10863                 goto MATH_MAYBE_CALL_NO_OVF;
10864
10865             case CEE_REM_UN:
10866                 oper = GT_UMOD;
10867                 goto MATH_MAYBE_CALL_NO_OVF;
10868
10869             MATH_MAYBE_CALL_NO_OVF:
10870                 ovfl = false;
10871             MATH_MAYBE_CALL_OVF:
10872                 // The morpher has some complex logic about when to turn differently
10873                 // typed nodes on different platforms into helper calls. We
10874                 // need to either duplicate that logic here, or just
10875                 // pessimistically make all the nodes large enough to become
10876                 // call nodes.  Since call nodes aren't that much larger and
10877                 // these opcodes are infrequent enough, we choose the latter.
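                // Illustrative example: a 64-bit CEE_DIV on a 32-bit target is later
                // morphed into a helper call (e.g. CORINFO_HELP_LDIV), so the node
                // allocated below must be large enough to be rewritten in place into a
                // GT_CALL.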
10878                 callNode = true;
10879                 goto MATH_OP2_FLAGS;
10880
10881             case CEE_AND:
10882                 oper = GT_AND;
10883                 goto MATH_OP2;
10884             case CEE_OR:
10885                 oper = GT_OR;
10886                 goto MATH_OP2;
10887             case CEE_XOR:
10888                 oper = GT_XOR;
10889                 goto MATH_OP2;
10890
10891             MATH_OP2: // For default values of 'ovfl' and 'callNode'
10892
10893                 ovfl     = false;
10894                 callNode = false;
10895
10896             MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set
10897
10898                 /* Pull two values and push back the result */
10899
10900                 if (tiVerificationNeeded)
10901                 {
10902                     const typeInfo& tiOp1 = impStackTop(1).seTypeInfo;
10903                     const typeInfo& tiOp2 = impStackTop().seTypeInfo;
10904
10905                     Verify(tiCompatibleWith(tiOp1, tiOp2, true), "different arg type");
10906                     if (oper == GT_ADD || oper == GT_DIV || oper == GT_SUB || oper == GT_MUL || oper == GT_MOD)
10907                     {
10908                         Verify(tiOp1.IsNumberType(), "not number");
10909                     }
10910                     else
10911                     {
10912                         Verify(tiOp1.IsIntegerType(), "not integer");
10913                     }
10914
10915                     Verify(!ovfl || tiOp1.IsIntegerType(), "not integer");
10916
10917                     tiRetVal = tiOp1;
10918
10919 #ifdef _TARGET_64BIT_
10920                     if (tiOp2.IsNativeIntType())
10921                     {
10922                         tiRetVal = tiOp2;
10923                     }
10924 #endif // _TARGET_64BIT_
10925                 }
10926
10927                 op2 = impPopStack().val;
10928                 op1 = impPopStack().val;
10929
10930 #if !CPU_HAS_FP_SUPPORT
10931                 if (varTypeIsFloating(op1->gtType))
10932                 {
10933                     callNode = true;
10934                 }
10935 #endif
10936                 /* Can't do arithmetic with references */
10937                 assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF);
10938
10939                 // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change it if it's a true byref; it only
10940                 // does so if the address points into the stack)
10941                 impBashVarAddrsToI(op1, op2);
10942
10943                 type = impGetByRefResultType(oper, uns, &op1, &op2);
10944
10945                 assert(!ovfl || !varTypeIsFloating(op1->gtType));
10946
10947                 /* Special case: "int+0", "int-0", "int*1", "int/1" */
10948
10949                 if (op2->gtOper == GT_CNS_INT)
10950                 {
10951                     if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) ||
10952                         (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV)))
10953
10954                     {
10955                         impPushOnStack(op1, tiRetVal);
10956                         break;
10957                     }
10958                 }
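                // For example (illustrative): IL like "ldloc.0; ldc.i4.0; add" produced
                // by "x + 0" is folded here by simply re-pushing 'x' instead of building
                // a GT_ADD node.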
10959
10960 #if !FEATURE_X87_DOUBLES
10961                 // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand
10962                 //
10963                 if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))
10964                 {
10965                     if (op1->TypeGet() != type)
10966                     {
10967                         // We insert a cast of op1 to 'type'
10968                         op1 = gtNewCastNode(type, op1, type);
10969                     }
10970                     if (op2->TypeGet() != type)
10971                     {
10972                         // We insert a cast of op2 to 'type'
10973                         op2 = gtNewCastNode(type, op2, type);
10974                     }
10975                 }
10976 #endif // !FEATURE_X87_DOUBLES
10977
10978 #if SMALL_TREE_NODES
10979                 if (callNode)
10980                 {
10981                     /* These operators can later be transformed into 'GT_CALL' */
10982
10983                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]);
10984 #ifndef _TARGET_ARM_
10985                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]);
10986                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]);
10987                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]);
10988                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]);
10989 #endif
10990                     // It's tempting to use LargeOpOpcode() here, but this logic is *not* saying
10991                     // that we'll need to transform into a general large node, but rather specifically
10992                     // to a call: by doing it this way, things keep working if there are multiple sizes,
10993                     // and a CALL is no longer the largest.
10994                     // That said, as of now it *is* a large node, so we'll do this with an assert rather
10995                     // than an "if".
10996                     assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE);
10997                     op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true));
10998                 }
10999                 else
11000 #endif // SMALL_TREE_NODES
11001                 {
11002                     op1 = gtNewOperNode(oper, type, op1, op2);
11003                 }
11004
11005                 /* Special case: integer/long division may throw an exception */
11006
11007                 if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow())
11008                 {
11009                     op1->gtFlags |= GTF_EXCEPT;
11010                 }
11011
11012                 if (ovfl)
11013                 {
11014                     assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL);
11015                     if (ovflType != TYP_UNKNOWN)
11016                     {
11017                         op1->gtType = ovflType;
11018                     }
11019                     op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW);
11020                     if (uns)
11021                     {
11022                         op1->gtFlags |= GTF_UNSIGNED;
11023                     }
11024                 }
11025
11026                 impPushOnStack(op1, tiRetVal);
11027                 break;
11028
11029             case CEE_SHL:
11030                 oper = GT_LSH;
11031                 goto CEE_SH_OP2;
11032
11033             case CEE_SHR:
11034                 oper = GT_RSH;
11035                 goto CEE_SH_OP2;
11036             case CEE_SHR_UN:
11037                 oper = GT_RSZ;
11038                 goto CEE_SH_OP2;
11039
11040             CEE_SH_OP2:
11041                 if (tiVerificationNeeded)
11042                 {
11043                     const typeInfo& tiVal   = impStackTop(1).seTypeInfo;
11044                     const typeInfo& tiShift = impStackTop(0).seTypeInfo;
11045                     Verify(tiVal.IsIntegerType() && tiShift.IsType(TI_INT), "Bad shift args");
11046                     tiRetVal = tiVal;
11047                 }
11048                 op2 = impPopStack().val;
11049                 op1 = impPopStack().val; // operand to be shifted
11050                 impBashVarAddrsToI(op1, op2);
11051
11052                 type = genActualType(op1->TypeGet());
11053                 op1  = gtNewOperNode(oper, type, op1, op2);
11054
11055                 impPushOnStack(op1, tiRetVal);
11056                 break;
11057
11058             case CEE_NOT:
11059                 if (tiVerificationNeeded)
11060                 {
11061                     tiRetVal = impStackTop().seTypeInfo;
11062                     Verify(tiRetVal.IsIntegerType(), "bad int value");
11063                 }
11064
11065                 op1 = impPopStack().val;
11066                 impBashVarAddrsToI(op1, nullptr);
11067                 type = genActualType(op1->TypeGet());
11068                 impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal);
11069                 break;
11070
11071             case CEE_CKFINITE:
11072                 if (tiVerificationNeeded)
11073                 {
11074                     tiRetVal = impStackTop().seTypeInfo;
11075                     Verify(tiRetVal.IsType(TI_DOUBLE), "bad R value");
11076                 }
11077                 op1  = impPopStack().val;
11078                 type = op1->TypeGet();
11079                 op1  = gtNewOperNode(GT_CKFINITE, type, op1);
11080                 op1->gtFlags |= GTF_EXCEPT;
11081
11082                 impPushOnStack(op1, tiRetVal);
11083                 break;
11084
11085             case CEE_LEAVE:
11086
11087                 val     = getI4LittleEndian(codeAddr); // jump distance
11088                 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val);
11089                 goto LEAVE;
11090
11091             case CEE_LEAVE_S:
11092                 val     = getI1LittleEndian(codeAddr); // jump distance
11093                 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val);
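                // Illustrative: in both forms the operand is a signed distance from the
                // start of the next instruction, so "leave.s +5" at IL offset 0x10
                // targets 0x10 + 2 + 5 = 0x17 (opcode byte + 1-byte operand + distance).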
11094
11095             LEAVE:
11096
11097                 if (compIsForInlining())
11098                 {
11099                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE);
11100                     return;
11101                 }
11102
11103                 JITDUMP(" %04X", jmpAddr);
11104                 if (block->bbJumpKind != BBJ_LEAVE)
11105                 {
11106                     impResetLeaveBlock(block, jmpAddr);
11107                 }
11108
11109                 assert(jmpAddr == block->bbJumpDest->bbCodeOffs);
11110                 impImportLeave(block);
11111                 impNoteBranchOffs();
11112
11113                 break;
11114
11115             case CEE_BR:
11116             case CEE_BR_S:
11117                 jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
11118
11119                 if (compIsForInlining() && jmpDist == 0)
11120                 {
11121                     break; /* NOP */
11122                 }
11123
11124                 impNoteBranchOffs();
11125                 break;
11126
11127             case CEE_BRTRUE:
11128             case CEE_BRTRUE_S:
11129             case CEE_BRFALSE:
11130             case CEE_BRFALSE_S:
11131
11132                 /* Pop the comparand (now there's a neat term) from the stack */
11133                 if (tiVerificationNeeded)
11134                 {
11135                     typeInfo& tiVal = impStackTop().seTypeInfo;
11136                     Verify(tiVal.IsObjRef() || tiVal.IsByRef() || tiVal.IsIntegerType() || tiVal.IsMethod(),
11137                            "bad value");
11138                 }
11139
11140                 op1  = impPopStack().val;
11141                 type = op1->TypeGet();
11142
11143                 // brfalse and brtrue are only allowed on I4, refs, and byrefs.
11144                 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11145                 {
11146                     block->bbJumpKind = BBJ_NONE;
11147
11148                     if (op1->gtFlags & GTF_GLOB_EFFECT)
11149                     {
11150                         op1 = gtUnusedValNode(op1);
11151                         goto SPILL_APPEND;
11152                     }
11153                     else
11154                     {
11155                         break;
11156                     }
11157                 }
11158
11159                 if (op1->OperIsCompare())
11160                 {
11161                     if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S)
11162                     {
11163                         // Flip the sense of the compare
11164
11165                         op1 = gtReverseCond(op1);
11166                     }
11167                 }
11168                 else
11169                 {
11170                     /* We'll compare against an equally-sized integer 0 */
11171                     /* For small types, we always compare against int   */
11172                     op2 = gtNewZeroConNode(genActualType(op1->gtType));
11173
11174                     /* Create the comparison operator and try to fold it */
11175
11176                     oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? GT_NE : GT_EQ;
11177                     op1  = gtNewOperNode(oper, TYP_INT, op1, op2);
11178                 }
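                // Illustrative: "ldloc.0; brtrue L" on an int local becomes
                // GT_JTRUE(GT_NE(loc0, 0)) below; if the operand is already a relop
                // (say from a preceding ceq), that relop is reused directly, reversed
                // for brfalse.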
11179
11180             // fall through
11181
11182             COND_JUMP:
11183
11184                 /* Fold comparison if we can */
11185
11186                 op1 = gtFoldExpr(op1);
11187
11188                 /* Try to fold the really simple cases like 'iconst *, ifne/ifeq'*/
11189                 /* Don't make any blocks unreachable in import only mode */
11190
11191                 if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly())
11192                 {
11193                     /* gtFoldExpr() should prevent this as we don't want to make any blocks
11194                        unreachable under compDbgCode */
11195                     assert(!opts.compDbgCode);
11196
11197                     BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->gtIntCon.gtIconVal ? BBJ_ALWAYS : BBJ_NONE);
11198                     assertImp((block->bbJumpKind == BBJ_COND)            // normal case
11199                               || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the
11200                                                                          // block for the second time
11201
11202                     block->bbJumpKind = foldedJumpKind;
11203 #ifdef DEBUG
11204                     if (verbose)
11205                     {
11206                         if (op1->gtIntCon.gtIconVal)
11207                         {
11208                             printf("\nThe conditional jump becomes an unconditional jump to BB%02u\n",
11209                                    block->bbJumpDest->bbNum);
11210                         }
11211                         else
11212                         {
11213                             printf("\nThe block falls through into the next BB%02u\n", block->bbNext->bbNum);
11214                         }
11215                     }
11216 #endif
11217                     break;
11218                 }
11219
11220                 op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1);
11221
11222                 /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt'
11223                    in impImportBlock(block). For correct line numbers, spill stack. */
11224
11225                 if (opts.compDbgCode && impCurStmtOffs != BAD_IL_OFFSET)
11226                 {
11227                     impSpillStackEnsure(true);
11228                 }
11229
11230                 goto SPILL_APPEND;
11231
11232             case CEE_CEQ:
11233                 oper = GT_EQ;
11234                 uns  = false;
11235                 goto CMP_2_OPs;
11236             case CEE_CGT_UN:
11237                 oper = GT_GT;
11238                 uns  = true;
11239                 goto CMP_2_OPs;
11240             case CEE_CGT:
11241                 oper = GT_GT;
11242                 uns  = false;
11243                 goto CMP_2_OPs;
11244             case CEE_CLT_UN:
11245                 oper = GT_LT;
11246                 uns  = true;
11247                 goto CMP_2_OPs;
11248             case CEE_CLT:
11249                 oper = GT_LT;
11250                 uns  = false;
11251                 goto CMP_2_OPs;
11252
11253             CMP_2_OPs:
11254                 if (tiVerificationNeeded)
11255                 {
11256                     verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
11257                     tiRetVal = typeInfo(TI_INT);
11258                 }
11259
11260                 op2 = impPopStack().val;
11261                 op1 = impPopStack().val;
11262
11263 #ifdef _TARGET_64BIT_
11264                 if (varTypeIsI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_INT))
11265                 {
11266                     op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11267                 }
11268                 else if (varTypeIsI(op2->TypeGet()) && (genActualType(op1->TypeGet()) == TYP_INT))
11269                 {
11270                     op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11271                 }
11272 #endif // _TARGET_64BIT_
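                // Illustrative: comparing a native int against an int32 value widens
                // the 32-bit side to TYP_I_IMPL first; 'uns' selects a zero-extending
                // (TYP_U_IMPL) rather than sign-extending widening, so e.g. cgt.un and
                // cgt extend the 32-bit operand differently.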
11273
11274                 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
11275                           varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
11276                           varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
11277
11278                 /* Create the comparison node */
11279
11280                 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
11281
11282                 /* TODO: setting both flags when only one is appropriate */
11283                 if (opcode == CEE_CGT_UN || opcode == CEE_CLT_UN)
11284                 {
11285                     op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED;
11286                 }
11287
11288                 impPushOnStack(op1, tiRetVal);
11289                 break;
11290
11291             case CEE_BEQ_S:
11292             case CEE_BEQ:
11293                 oper = GT_EQ;
11294                 goto CMP_2_OPs_AND_BR;
11295
11296             case CEE_BGE_S:
11297             case CEE_BGE:
11298                 oper = GT_GE;
11299                 goto CMP_2_OPs_AND_BR;
11300
11301             case CEE_BGE_UN_S:
11302             case CEE_BGE_UN:
11303                 oper = GT_GE;
11304                 goto CMP_2_OPs_AND_BR_UN;
11305
11306             case CEE_BGT_S:
11307             case CEE_BGT:
11308                 oper = GT_GT;
11309                 goto CMP_2_OPs_AND_BR;
11310
11311             case CEE_BGT_UN_S:
11312             case CEE_BGT_UN:
11313                 oper = GT_GT;
11314                 goto CMP_2_OPs_AND_BR_UN;
11315
11316             case CEE_BLE_S:
11317             case CEE_BLE:
11318                 oper = GT_LE;
11319                 goto CMP_2_OPs_AND_BR;
11320
11321             case CEE_BLE_UN_S:
11322             case CEE_BLE_UN:
11323                 oper = GT_LE;
11324                 goto CMP_2_OPs_AND_BR_UN;
11325
11326             case CEE_BLT_S:
11327             case CEE_BLT:
11328                 oper = GT_LT;
11329                 goto CMP_2_OPs_AND_BR;
11330
11331             case CEE_BLT_UN_S:
11332             case CEE_BLT_UN:
11333                 oper = GT_LT;
11334                 goto CMP_2_OPs_AND_BR_UN;
11335
11336             case CEE_BNE_UN_S:
11337             case CEE_BNE_UN:
11338                 oper = GT_NE;
11339                 goto CMP_2_OPs_AND_BR_UN;
11340
11341             CMP_2_OPs_AND_BR_UN:
11342                 uns       = true;
11343                 unordered = true;
11344                 goto CMP_2_OPs_AND_BR_ALL;
11345             CMP_2_OPs_AND_BR:
11346                 uns       = false;
11347                 unordered = false;
11348                 goto CMP_2_OPs_AND_BR_ALL;
11349             CMP_2_OPs_AND_BR_ALL:
11350
11351                 if (tiVerificationNeeded)
11352                 {
11353                     verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
11354                 }
11355
11356                 /* Pull two values */
11357                 op2 = impPopStack().val;
11358                 op1 = impPopStack().val;
11359
11360 #ifdef _TARGET_64BIT_
11361                 if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT))
11362                 {
11363                     op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11364                 }
11365                 else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT))
11366                 {
11367                     op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11368                 }
11369 #endif // _TARGET_64BIT_
11370
11371                 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
11372                           varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
11373                           varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
11374
11375                 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11376                 {
11377                     block->bbJumpKind = BBJ_NONE;
11378
11379                     if (op1->gtFlags & GTF_GLOB_EFFECT)
11380                     {
11381                         impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11382                                                        "Branch to next Optimization, op1 side effect"));
11383                         impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
11384                     }
11385                     if (op2->gtFlags & GTF_GLOB_EFFECT)
11386                     {
11387                         impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11388                                                        "Branch to next Optimization, op2 side effect"));
11389                         impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
11390                     }
11391
11392 #ifdef DEBUG
11393                     if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT)
11394                     {
11395                         impNoteLastILoffs();
11396                     }
11397 #endif
11398                     break;
11399                 }
11400 #if !FEATURE_X87_DOUBLES
11401                 // We can generate a compare of different-sized floating point op1 and op2;
11402                 // we insert a cast if needed
11403                 //
11404                 if (varTypeIsFloating(op1->TypeGet()))
11405                 {
11406                     if (op1->TypeGet() != op2->TypeGet())
11407                     {
11408                         assert(varTypeIsFloating(op2->TypeGet()));
11409
11410                         // say op1=double, op2=float. To avoid loss of precision
11411                         // while comparing, op2 is converted to double and double
11412                         // comparison is done.
11413                         if (op1->TypeGet() == TYP_DOUBLE)
11414                         {
11415                             // We insert a cast of op2 to TYP_DOUBLE
11416                             op2 = gtNewCastNode(TYP_DOUBLE, op2, TYP_DOUBLE);
11417                         }
11418                         else if (op2->TypeGet() == TYP_DOUBLE)
11419                         {
11420                             // We insert a cast of op1 to TYP_DOUBLE
11421                             op1 = gtNewCastNode(TYP_DOUBLE, op1, TYP_DOUBLE);
11422                         }
11423                     }
11424                 }
11425 #endif // !FEATURE_X87_DOUBLES
11426
11427                 /* Create and append the operator */
11428
11429                 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
11430
11431                 if (uns)
11432                 {
11433                     op1->gtFlags |= GTF_UNSIGNED;
11434                 }
11435
11436                 if (unordered)
11437                 {
11438                     op1->gtFlags |= GTF_RELOP_NAN_UN;
11439                 }
11440
11441                 goto COND_JUMP;
11442
11443             case CEE_SWITCH:
11444                 assert(!compIsForInlining());
11445
11446                 if (tiVerificationNeeded)
11447                 {
11448                     Verify(impStackTop().seTypeInfo.IsType(TI_INT), "Bad switch val");
11449                 }
11450                 /* Pop the switch value off the stack */
11451                 op1 = impPopStack().val;
11452                 assertImp(genActualTypeIsIntOrI(op1->TypeGet()));
11453
11454 #ifdef _TARGET_64BIT_
11455                 // Widen 'op1' on 64-bit targets
11456                 if (op1->TypeGet() != TYP_I_IMPL)
11457                 {
11458                     if (op1->OperGet() == GT_CNS_INT)
11459                     {
11460                         op1->gtType = TYP_I_IMPL;
11461                     }
11462                     else
11463                     {
11464                         op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
11465                     }
11466                 }
11467 #endif // _TARGET_64BIT_
11468                 assert(genActualType(op1->TypeGet()) == TYP_I_IMPL);
11469
11470                 /* We can create a switch node */
11471
11472                 op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1);
11473
11474                 val = (int)getU4LittleEndian(codeAddr);
11475                 codeAddr += 4 + val * 4; // skip over the switch-table
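                // Illustrative: the switch operand is a 4-byte case count N followed by
                // N 4-byte relative targets, so e.g. a 3-case switch occupies
                // 4 + 3*4 = 16 operand bytes; only the operand bytes are skipped here,
                // since the targets were already handled when the basic blocks were built.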
11476
11477                 goto SPILL_APPEND;
11478
11479             /************************** Casting OPCODES ***************************/
11480
11481             case CEE_CONV_OVF_I1:
11482                 lclTyp = TYP_BYTE;
11483                 goto CONV_OVF;
11484             case CEE_CONV_OVF_I2:
11485                 lclTyp = TYP_SHORT;
11486                 goto CONV_OVF;
11487             case CEE_CONV_OVF_I:
11488                 lclTyp = TYP_I_IMPL;
11489                 goto CONV_OVF;
11490             case CEE_CONV_OVF_I4:
11491                 lclTyp = TYP_INT;
11492                 goto CONV_OVF;
11493             case CEE_CONV_OVF_I8:
11494                 lclTyp = TYP_LONG;
11495                 goto CONV_OVF;
11496
11497             case CEE_CONV_OVF_U1:
11498                 lclTyp = TYP_UBYTE;
11499                 goto CONV_OVF;
11500             case CEE_CONV_OVF_U2:
11501                 lclTyp = TYP_CHAR;
11502                 goto CONV_OVF;
11503             case CEE_CONV_OVF_U:
11504                 lclTyp = TYP_U_IMPL;
11505                 goto CONV_OVF;
11506             case CEE_CONV_OVF_U4:
11507                 lclTyp = TYP_UINT;
11508                 goto CONV_OVF;
11509             case CEE_CONV_OVF_U8:
11510                 lclTyp = TYP_ULONG;
11511                 goto CONV_OVF;
11512
11513             case CEE_CONV_OVF_I1_UN:
11514                 lclTyp = TYP_BYTE;
11515                 goto CONV_OVF_UN;
11516             case CEE_CONV_OVF_I2_UN:
11517                 lclTyp = TYP_SHORT;
11518                 goto CONV_OVF_UN;
11519             case CEE_CONV_OVF_I_UN:
11520                 lclTyp = TYP_I_IMPL;
11521                 goto CONV_OVF_UN;
11522             case CEE_CONV_OVF_I4_UN:
11523                 lclTyp = TYP_INT;
11524                 goto CONV_OVF_UN;
11525             case CEE_CONV_OVF_I8_UN:
11526                 lclTyp = TYP_LONG;
11527                 goto CONV_OVF_UN;
11528
11529             case CEE_CONV_OVF_U1_UN:
11530                 lclTyp = TYP_UBYTE;
11531                 goto CONV_OVF_UN;
11532             case CEE_CONV_OVF_U2_UN:
11533                 lclTyp = TYP_CHAR;
11534                 goto CONV_OVF_UN;
11535             case CEE_CONV_OVF_U_UN:
11536                 lclTyp = TYP_U_IMPL;
11537                 goto CONV_OVF_UN;
11538             case CEE_CONV_OVF_U4_UN:
11539                 lclTyp = TYP_UINT;
11540                 goto CONV_OVF_UN;
11541             case CEE_CONV_OVF_U8_UN:
11542                 lclTyp = TYP_ULONG;
11543                 goto CONV_OVF_UN;
11544
11545             CONV_OVF_UN:
11546                 uns = true;
11547                 goto CONV_OVF_COMMON;
11548             CONV_OVF:
11549                 uns = false;
11550                 goto CONV_OVF_COMMON;
11551
11552             CONV_OVF_COMMON:
11553                 ovfl = true;
11554                 goto _CONV;
11555
11556             case CEE_CONV_I1:
11557                 lclTyp = TYP_BYTE;
11558                 goto CONV;
11559             case CEE_CONV_I2:
11560                 lclTyp = TYP_SHORT;
11561                 goto CONV;
11562             case CEE_CONV_I:
11563                 lclTyp = TYP_I_IMPL;
11564                 goto CONV;
11565             case CEE_CONV_I4:
11566                 lclTyp = TYP_INT;
11567                 goto CONV;
11568             case CEE_CONV_I8:
11569                 lclTyp = TYP_LONG;
11570                 goto CONV;
11571
11572             case CEE_CONV_U1:
11573                 lclTyp = TYP_UBYTE;
11574                 goto CONV;
11575             case CEE_CONV_U2:
11576                 lclTyp = TYP_CHAR;
11577                 goto CONV;
11578 #if (REGSIZE_BYTES == 8)
11579             case CEE_CONV_U:
11580                 lclTyp = TYP_U_IMPL;
11581                 goto CONV_UN;
11582 #else
11583             case CEE_CONV_U:
11584                 lclTyp = TYP_U_IMPL;
11585                 goto CONV;
11586 #endif
11587             case CEE_CONV_U4:
11588                 lclTyp = TYP_UINT;
11589                 goto CONV;
11590             case CEE_CONV_U8:
11591                 lclTyp = TYP_ULONG;
11592                 goto CONV_UN;
11593
11594             case CEE_CONV_R4:
11595                 lclTyp = TYP_FLOAT;
11596                 goto CONV;
11597             case CEE_CONV_R8:
11598                 lclTyp = TYP_DOUBLE;
11599                 goto CONV;
11600
11601             case CEE_CONV_R_UN:
11602                 lclTyp = TYP_DOUBLE;
11603                 goto CONV_UN;
11604
11605             CONV_UN:
11606                 uns  = true;
11607                 ovfl = false;
11608                 goto _CONV;
11609
11610             CONV:
11611                 uns  = false;
11612                 ovfl = false;
11613                 goto _CONV;
11614
11615             _CONV:
11616                 // just check that we have a number on the stack
11617                 if (tiVerificationNeeded)
11618                 {
11619                     const typeInfo& tiVal = impStackTop().seTypeInfo;
11620                     Verify(tiVal.IsNumberType(), "bad arg");
11621
11622 #ifdef _TARGET_64BIT_
11623                     bool isNative = false;
11624
11625                     switch (opcode)
11626                     {
11627                         case CEE_CONV_OVF_I:
11628                         case CEE_CONV_OVF_I_UN:
11629                         case CEE_CONV_I:
11630                         case CEE_CONV_OVF_U:
11631                         case CEE_CONV_OVF_U_UN:
11632                         case CEE_CONV_U:
11633                             isNative = true;
11634                         default:
11635                             // leave 'isNative' = false;
11636                             break;
11637                     }
11638                     if (isNative)
11639                     {
11640                         tiRetVal = typeInfo::nativeInt();
11641                     }
11642                     else
11643 #endif // _TARGET_64BIT_
11644                     {
11645                         tiRetVal = typeInfo(lclTyp).NormaliseForStack();
11646                     }
11647                 }
11648
11649                 // Only conversions from FLOAT or DOUBLE to an integer type,
11650                 // and conversions from ULONG (or LONG on ARM) to DOUBLE, are morphed to calls
11651
11652                 if (varTypeIsFloating(lclTyp))
11653                 {
11654                     callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl
11655 #ifdef _TARGET_64BIT_
11656                                // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK?
11657                                // TYP_BYREF could be used as TYP_I_IMPL which is long.
11658                                // TODO-CQ: remove this when we lower casts long/ulong --> float/double
11659                                // and generate SSE2 code instead of going through helper calls.
11660                                || (impStackTop().val->TypeGet() == TYP_BYREF)
11661 #endif
11662                         ;
11663                 }
11664                 else
11665                 {
11666                     callNode = varTypeIsFloating(impStackTop().val->TypeGet());
11667                 }
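                // Illustrative: "conv.r.un" on a 32-bit uint is treated as
                // uint -> long -> double, and ULONG (or LONG on ARM) -> floating
                // conversions are morphed into helper calls, so the cast node may need
                // to grow into a call (hence gtNewCastNodeL below).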
11668
11669                 // At this point uns, ovf, callNode all set
11670
11671                 op1 = impPopStack().val;
11672                 impBashVarAddrsToI(op1);
11673
11674                 if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND)
11675                 {
11676                     op2 = op1->gtOp.gtOp2;
11677
11678                     if (op2->gtOper == GT_CNS_INT)
11679                     {
11680                         ssize_t ival = op2->gtIntCon.gtIconVal;
11681                         ssize_t mask, umask;
11682
11683                         switch (lclTyp)
11684                         {
11685                             case TYP_BYTE:
11686                             case TYP_UBYTE:
11687                                 mask  = 0x00FF;
11688                                 umask = 0x007F;
11689                                 break;
11690                             case TYP_CHAR:
11691                             case TYP_SHORT:
11692                                 mask  = 0xFFFF;
11693                                 umask = 0x7FFF;
11694                                 break;
11695
11696                             default:
11697                                 assert(!"unexpected type");
11698                                 return;
11699                         }
11700
11701                         if (((ival & umask) == ival) || ((ival & mask) == ival && uns))
11702                         {
11703                             /* Toss the cast, it's a waste of time */
11704
11705                             impPushOnStack(op1, tiRetVal);
11706                             break;
11707                         }
11708                         else if (ival == mask)
11709                         {
11710                             /* Toss the masking, it's a waste of time, since
11711                                we sign-extend from the small value anyway */
11712
11713                             op1 = op1->gtOp.gtOp1;
11714                         }
11715                     }
11716                 }
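                // For example (illustrative): "conv.i1" applied to "x & 0x7F" is dropped
                // entirely since the masked value already fits in a signed byte, while
                // "conv.i1" applied to "x & 0xFF" drops the mask instead and keeps the
                // cast, which performs the same truncation/sign-extension.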
11717
11718                 /*  The 'op2' sub-operand of a cast is the 'real' type number,
11719                     since the result of a cast to one of the 'small' integer
11720                     types is an integer.
11721                  */
11722
11723                 type = genActualType(lclTyp);
11724
11725 #if SMALL_TREE_NODES
11726                 if (callNode)
11727                 {
11728                     op1 = gtNewCastNodeL(type, op1, lclTyp);
11729                 }
11730                 else
11731 #endif // SMALL_TREE_NODES
11732                 {
11733                     op1 = gtNewCastNode(type, op1, lclTyp);
11734                 }
11735
11736                 if (ovfl)
11737                 {
11738                     op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT);
11739                 }
11740                 if (uns)
11741                 {
11742                     op1->gtFlags |= GTF_UNSIGNED;
11743                 }
11744                 impPushOnStack(op1, tiRetVal);
11745                 break;
11746
11747             case CEE_NEG:
11748                 if (tiVerificationNeeded)
11749                 {
11750                     tiRetVal = impStackTop().seTypeInfo;
11751                     Verify(tiRetVal.IsNumberType(), "Bad arg");
11752                 }
11753
11754                 op1 = impPopStack().val;
11755                 impBashVarAddrsToI(op1, nullptr);
11756                 impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal);
11757                 break;
11758
11759             case CEE_POP:
11760                 if (tiVerificationNeeded)
11761                 {
11762                     impStackTop(0);
11763                 }
11764
11765                 /* Pull the top value from the stack */
11766
11767                 op1 = impPopStack(clsHnd).val;
11768
11769                 /* Get hold of the type of the value being duplicated */
11770
11771                 lclTyp = genActualType(op1->gtType);
11772
11773                 /* Does the value have any side effects? */
11774
11775                 if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode)
11776                 {
11777                     // Since we are throwing away the value, just normalize
11778                     // it to its address.  This is more efficient.
11779
11780                     if (varTypeIsStruct(op1))
11781                     {
11782 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
11783                         // Non-calls, such as obj or ret_expr, have to go through this.
11784                         // Calls with large struct return value have to go through this.
11785                         // Helper calls with small struct return value also have to go
11786                         // through this since they do not follow Unix calling convention.
11787                         if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd) ||
11788                             op1->AsCall()->gtCallType == CT_HELPER)
11789 #endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
11790                         {
11791                             op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false);
11792                         }
11793                     }
11794
11795                     // If op1 is non-overflow cast, throw it away since it is useless.
11796                     // Another reason for throwing away the useless cast is in the context of
11797                     // implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)).
11798                     // The cast gets added as part of importing GT_CALL, which gets in the way
11799                     // of fgMorphCall() on the forms of tail call nodes that we assert.
11800                     if ((op1->gtOper == GT_CAST) && !op1->gtOverflow())
11801                     {
11802                         op1 = op1->gtOp.gtOp1;
11803                     }
11804
11805                     // If 'op1' is an expression, create an assignment node.
11806                     // This helps analyses (like CSE) work correctly.
11807
11808                     if (op1->gtOper != GT_CALL)
11809                     {
11810                         op1 = gtUnusedValNode(op1);
11811                     }
11812
11813                     /* Append the value to the tree list */
11814                     goto SPILL_APPEND;
11815                 }
11816
11817                 /* No side effects - just throw the <BEEP> thing away */
11818                 break;
11819
11820             case CEE_DUP:
11821
11822                 if (tiVerificationNeeded)
11823                 {
11824                     // Dup could be the beginning of a delegate creation sequence; remember that
11825                     delegateCreateStart = codeAddr - 1;
11826                     impStackTop(0);
11827                 }
11828
11829                 // Convert a (dup, stloc) sequence into a (stloc, ldloc) sequence in the following cases:
11830                 // - If this is non-debug code - so that CSE will recognize the two as equal.
11831                 //   This helps eliminate a redundant bounds check in cases such as:
11832                 //       ariba[i+3] += some_value;
11833                 // - If the top of the stack is a non-leaf that may be expensive to clone.
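                // Illustrative sketch of the rewrite (assumed IL):
                //     dup          -->   stloc.N
                //     stloc.N      -->   ldloc.N
                // Both consumers then read the same local, which CSE can recognize as
                // equal values.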
11834
11835                 if (codeAddr < codeEndp)
11836                 {
11837                     OPCODE nextOpcode = (OPCODE)getU1LittleEndian(codeAddr);
11838                     if (impIsAnySTLOC(nextOpcode))
11839                     {
11840                         if (!opts.compDbgCode)
11841                         {
11842                             insertLdloc = true;
11843                             break;
11844                         }
11845                         GenTree* stackTop = impStackTop().val;
11846                         if (!stackTop->IsIntegralConst(0) && !stackTop->IsFPZero() && !stackTop->IsLocal())
11847                         {
11848                             insertLdloc = true;
11849                             break;
11850                         }
11851                     }
11852                 }
11853
11854                 /* Pull the top value from the stack */
11855                 op1 = impPopStack(tiRetVal);
11856
11857                 /* Clone the value */
11858                 op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL,
11859                                    nullptr DEBUGARG("DUP instruction"));
11860
11861                 /* Either the tree started with no global effects, or impCloneExpr
11862                    evaluated the tree to a temp and returned two copies of that
11863                    temp. Either way, neither op1 nor op2 should have side effects.
11864                 */
11865                 assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT));
11866
11867                 /* Push the tree/temp back on the stack */
11868                 impPushOnStack(op1, tiRetVal);
11869
11870                 /* Push the copy on the stack */
11871                 impPushOnStack(op2, tiRetVal);
11872
11873                 break;
11874
11875             case CEE_STIND_I1:
11876                 lclTyp = TYP_BYTE;
11877                 goto STIND;
11878             case CEE_STIND_I2:
11879                 lclTyp = TYP_SHORT;
11880                 goto STIND;
11881             case CEE_STIND_I4:
11882                 lclTyp = TYP_INT;
11883                 goto STIND;
11884             case CEE_STIND_I8:
11885                 lclTyp = TYP_LONG;
11886                 goto STIND;
11887             case CEE_STIND_I:
11888                 lclTyp = TYP_I_IMPL;
11889                 goto STIND;
11890             case CEE_STIND_REF:
11891                 lclTyp = TYP_REF;
11892                 goto STIND;
11893             case CEE_STIND_R4:
11894                 lclTyp = TYP_FLOAT;
11895                 goto STIND;
11896             case CEE_STIND_R8:
11897                 lclTyp = TYP_DOUBLE;
11898                 goto STIND;
11899             STIND:
11900
11901                 if (tiVerificationNeeded)
11902                 {
11903                     typeInfo instrType(lclTyp);
11904 #ifdef _TARGET_64BIT_
11905                     if (opcode == CEE_STIND_I)
11906                     {
11907                         instrType = typeInfo::nativeInt();
11908                     }
11909 #endif // _TARGET_64BIT_
11910                     verVerifySTIND(impStackTop(1).seTypeInfo, impStackTop(0).seTypeInfo, instrType);
11911                 }
11912                 else
11913                 {
11914                     compUnsafeCastUsed = true; // Have to go conservative
11915                 }
11916
11917             STIND_POST_VERIFY:
11918
11919                 op2 = impPopStack().val; // value to store
11920                 op1 = impPopStack().val; // address to store to
11921
11922                 // you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF
11923                 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
11924
11925                 impBashVarAddrsToI(op1, op2);
11926
11927                 op2 = impImplicitR4orR8Cast(op2, lclTyp);
11928
11929 #ifdef _TARGET_64BIT_
11930                 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
11931                 if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
11932                 {
11933                     op2->gtType = TYP_I_IMPL;
11934                 }
11935                 else
11936                 {
11937                     // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
11938                     //
11939                     if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
11940                     {
11941                         assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
11942                         op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
11943                     }
11944                     // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
11945                     //
11946                     if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
11947                     {
11948                         assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
11949                         op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
11950                     }
11951                 }
11952 #endif // _TARGET_64BIT_
11953
11954                 if (opcode == CEE_STIND_REF)
11955                 {
11956                     // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF
11957                     assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType));
11958                     lclTyp = genActualType(op2->TypeGet());
11959                 }
11960
11961 // Check target type.
11962 #ifdef DEBUG
11963                 if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF)
11964                 {
11965                     if (op2->gtType == TYP_BYREF)
11966                     {
11967                         assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL);
11968                     }
11969                     else if (lclTyp == TYP_BYREF)
11970                     {
11971                         assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType));
11972                     }
11973                 }
11974                 else
11975                 {
11976                     assertImp(genActualType(op2->gtType) == genActualType(lclTyp) ||
11977                               ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) ||
11978                               (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp)));
11979                 }
11980 #endif
11981
11982                 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
11983
11984                 // stind could point anywhere, for example a boxed class static int
11985                 op1->gtFlags |= GTF_IND_TGTANYWHERE;
11986
11987                 if (prefixFlags & PREFIX_VOLATILE)
11988                 {
11989                     assert(op1->OperGet() == GT_IND);
11990                     op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
11991                     op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
11992                     op1->gtFlags |= GTF_IND_VOLATILE;
11993                 }
11994
11995                 if (prefixFlags & PREFIX_UNALIGNED)
11996                 {
11997                     assert(op1->OperGet() == GT_IND);
11998                     op1->gtFlags |= GTF_IND_UNALIGNED;
11999                 }
12000
12001                 op1 = gtNewAssignNode(op1, op2);
12002                 op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
12003
12004                 // Spill side-effects AND global-data-accesses
12005                 if (verCurrentState.esStackDepth > 0)
12006                 {
12007                     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND"));
12008                 }
12009
12010                 goto APPEND;
12011
12012             case CEE_LDIND_I1:
12013                 lclTyp = TYP_BYTE;
12014                 goto LDIND;
12015             case CEE_LDIND_I2:
12016                 lclTyp = TYP_SHORT;
12017                 goto LDIND;
12018             case CEE_LDIND_U4:
12019             case CEE_LDIND_I4:
12020                 lclTyp = TYP_INT;
12021                 goto LDIND;
12022             case CEE_LDIND_I8:
12023                 lclTyp = TYP_LONG;
12024                 goto LDIND;
12025             case CEE_LDIND_REF:
12026                 lclTyp = TYP_REF;
12027                 goto LDIND;
12028             case CEE_LDIND_I:
12029                 lclTyp = TYP_I_IMPL;
12030                 goto LDIND;
12031             case CEE_LDIND_R4:
12032                 lclTyp = TYP_FLOAT;
12033                 goto LDIND;
12034             case CEE_LDIND_R8:
12035                 lclTyp = TYP_DOUBLE;
12036                 goto LDIND;
12037             case CEE_LDIND_U1:
12038                 lclTyp = TYP_UBYTE;
12039                 goto LDIND;
12040             case CEE_LDIND_U2:
12041                 lclTyp = TYP_CHAR;
12042                 goto LDIND;
12043             LDIND:
12044
12045                 if (tiVerificationNeeded)
12046                 {
12047                     typeInfo lclTiType(lclTyp);
12048 #ifdef _TARGET_64BIT_
12049                     if (opcode == CEE_LDIND_I)
12050                     {
12051                         lclTiType = typeInfo::nativeInt();
12052                     }
12053 #endif // _TARGET_64BIT_
12054                     tiRetVal = verVerifyLDIND(impStackTop().seTypeInfo, lclTiType);
12055                     tiRetVal.NormaliseForStack();
12056                 }
12057                 else
12058                 {
12059                     compUnsafeCastUsed = true; // Have to go conservative
12060                 }
12061
12062             LDIND_POST_VERIFY:
12063
12064                 op1 = impPopStack().val; // address to load from
12065                 impBashVarAddrsToI(op1);
12066
12067 #ifdef _TARGET_64BIT_
12068                 // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12069                 //
12070                 if (genActualType(op1->gtType) == TYP_INT)
12071                 {
12072                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12073                     op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
12074                 }
12075 #endif
12076
12077                 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12078
12079                 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12080
12081                 // ldind could point anywhere, for example a boxed class static int
12082                 op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
12083
12084                 if (prefixFlags & PREFIX_VOLATILE)
12085                 {
12086                     assert(op1->OperGet() == GT_IND);
12087                     op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
12088                     op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12089                     op1->gtFlags |= GTF_IND_VOLATILE;
12090                 }
12091
12092                 if (prefixFlags & PREFIX_UNALIGNED)
12093                 {
12094                     assert(op1->OperGet() == GT_IND);
12095                     op1->gtFlags |= GTF_IND_UNALIGNED;
12096                 }
12097
12098                 impPushOnStack(op1, tiRetVal);
12099
12100                 break;
12101
12102             case CEE_UNALIGNED:
12103
12104                 assert(sz == 1);
12105                 val = getU1LittleEndian(codeAddr);
12106                 ++codeAddr;
12107                 JITDUMP(" %u", val);
12108                 if ((val != 1) && (val != 2) && (val != 4))
12109                 {
12110                     BADCODE("Alignment unaligned. must be 1, 2, or 4");
12111                 }
12112
12113                 Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. prefixes");
12114                 prefixFlags |= PREFIX_UNALIGNED;
12115
12116                 impValidateMemoryAccessOpcode(codeAddr, codeEndp, false);
12117
12118             PREFIX:
12119                 opcode = (OPCODE)getU1LittleEndian(codeAddr);
12120                 codeAddr += sizeof(__int8);
12121                 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
12122                 goto DECODE_OPCODE;
12123
12124             case CEE_VOLATILE:
12125
12126                 Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes");
12127                 prefixFlags |= PREFIX_VOLATILE;
12128
12129                 impValidateMemoryAccessOpcode(codeAddr, codeEndp, true);
12130
12131                 assert(sz == 0);
12132                 goto PREFIX;
12133
12134             case CEE_LDFTN:
12135             {
12136                 // Need to do a lookup here so that we perform an access check
12137                 // and do a NOWAY if protections are violated
12138                 _impResolveToken(CORINFO_TOKENKIND_Method);
12139
12140                 JITDUMP(" %08X", resolvedToken.token);
12141
12142                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12143                               addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN)),
12144                               &callInfo);
12145
12146                 // This check really only applies to intrinsic Array.Address methods
12147                 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12148                 {
12149                     NO_WAY("Currently do not support LDFTN of Parameterized functions");
12150                 }
12151
12152                 // Do this before DO_LDFTN since CEE_LDVIRTFN does it on its own.
12153                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12154
12155                 if (tiVerificationNeeded)
12156                 {
12157                     // LDFTN could start the beginning of a delegate creation sequence, remember that
12158                     delegateCreateStart = codeAddr - 2;
12159
12160                     // check any constraints on the callee's class and type parameters
12161                     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12162                                    "method has unsatisfied class constraints");
12163                     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12164                                                                                 resolvedToken.hMethod),
12165                                    "method has unsatisfied method constraints");
12166
12167                     mflags = callInfo.verMethodFlags;
12168                     Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDFTN on a constructor");
12169                 }
12170
12171             DO_LDFTN:
12172                 op1 = impMethodPointer(&resolvedToken, &callInfo);
12173                 if (compDonotInline())
12174                 {
12175                     return;
12176                 }
12177
12178                 impPushOnStack(op1, typeInfo(resolvedToken.hMethod));
12179
12180                 break;
12181             }
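                  // Illustrative only (the class, method and delegate names are placeholders): ldftn typically
                  // appears in a delegate creation sequence such as
                  //     ldarg.0                                          // target object (or ldnull for a static target)
                  //     ldftn    instance void C::M()
                  //     newobj   instance void System.Action::.ctor(object, native int)
                  // which is why delegateCreateStart is recorded above when verification is needed.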
12182
12183             case CEE_LDVIRTFTN:
12184             {
12185                 /* Get the method token */
12186
12187                 _impResolveToken(CORINFO_TOKENKIND_Method);
12188
12189                 JITDUMP(" %08X", resolvedToken.token);
12190
12191                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */,
12192                               addVerifyFlag(combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN),
12193                                                     CORINFO_CALLINFO_CALLVIRT)),
12194                               &callInfo);
12195
12196                 // This check really only applies to intrinsic Array.Address methods
12197                 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12198                 {
12199                     NO_WAY("Currently do not support LDFTN of Parameterized functions");
12200                 }
12201
12202                 mflags = callInfo.methodFlags;
12203
12204                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12205
12206                 if (compIsForInlining())
12207                 {
12208                     if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12209                     {
12210                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL);
12211                         return;
12212                     }
12213                 }
12214
12215                 CORINFO_SIG_INFO& ftnSig = callInfo.sig;
12216
12217                 if (tiVerificationNeeded)
12218                 {
12219
12220                     Verify(ftnSig.hasThis(), "ldvirtftn on a static method");
12221                     Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDVIRTFTN on a constructor");
12222
12223                     // JIT32 verifier rejects verifiable ldvirtftn pattern
12224                     typeInfo declType =
12225                         verMakeTypeInfo(resolvedToken.hClass, true); // Change TI_STRUCT to TI_REF when necessary
12226
12227                     typeInfo arg = impStackTop().seTypeInfo;
12228                     Verify((arg.IsType(TI_REF) || arg.IsType(TI_NULL)) && tiCompatibleWith(arg, declType, true),
12229                            "bad ldvirtftn");
12230
12231                     CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
12232                     if (!(arg.IsType(TI_NULL) || (mflags & CORINFO_FLG_STATIC)))
12233                     {
12234                         instanceClassHnd = arg.GetClassHandleForObjRef();
12235                     }
12236
12237                     // check any constraints on the method's class and type parameters
12238                     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12239                                    "method has unsatisfied class constraints");
12240                     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12241                                                                                 resolvedToken.hMethod),
12242                                    "method has unsatisfied method constraints");
12243
12244                     if (mflags & CORINFO_FLG_PROTECTED)
12245                     {
12246                         Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
12247                                "Accessing protected method through wrong type.");
12248                     }
12249                 }
12250
12251                 /* Get the object-ref */
12252                 op1 = impPopStack().val;
12253                 assertImp(op1->gtType == TYP_REF);
12254
12255                 if (opts.IsReadyToRun())
12256                 {
12257                     if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN)
12258                     {
12259                         if (op1->gtFlags & GTF_SIDE_EFFECT)
12260                         {
12261                             op1 = gtUnusedValNode(op1);
12262                             impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12263                         }
12264                         goto DO_LDFTN;
12265                     }
12266                 }
12267                 else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12268                 {
12269                     if (op1->gtFlags & GTF_SIDE_EFFECT)
12270                     {
12271                         op1 = gtUnusedValNode(op1);
12272                         impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12273                     }
12274                     goto DO_LDFTN;
12275                 }
12276
12277                 GenTreePtr fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo);
12278                 if (compDonotInline())
12279                 {
12280                     return;
12281                 }
12282
12283                 impPushOnStack(fptr, typeInfo(resolvedToken.hMethod));
12284
12285                 break;
12286             }
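                  // Illustrative only ("SomeDelegate" is a placeholder): the verifiable ldvirtftn pattern is
                  //     dup
                  //     ldvirtftn  instance string System.Object::ToString()
                  //     newobj     instance void SomeDelegate::.ctor(object, native int)
                  // i.e. the object reference is duplicated so one copy feeds the delegate constructor.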
12287
12288             case CEE_CONSTRAINED:
12289
12290                 assertImp(sz == sizeof(unsigned));
12291                 impResolveToken(codeAddr, &constrainedResolvedToken, CORINFO_TOKENKIND_Constrained);
12292                 codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually
12293                 JITDUMP(" (%08X) ", constrainedResolvedToken.token);
12294
12295                 Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes");
12296                 prefixFlags |= PREFIX_CONSTRAINED;
12297
12298                 {
12299                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12300                     if (actualOpcode != CEE_CALLVIRT)
12301                     {
12302                         BADCODE("constrained. has to be followed by callvirt");
12303                     }
12304                 }
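                      // Illustrative only: a typical constrained call sequence is
                      //     constrained. !!T     (or a specific value type)
                      //     callvirt     instance string System.Object::ToString()
                      // which is why anything other than callvirt after the prefix is rejected above.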
12305
12306                 goto PREFIX;
12307
12308             case CEE_READONLY:
12309                 JITDUMP(" readonly.");
12310
12311                 Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes");
12312                 prefixFlags |= PREFIX_READONLY;
12313
12314                 {
12315                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12316                     if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode))
12317                     {
12318                         BADCODE("readonly. has to be followed by ldelema or call");
12319                     }
12320                 }
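                      // Illustrative only: readonly. precedes ldelema (or a call to the intrinsic Array.Address
                      // method) and yields a controlled-mutability managed pointer, allowing the array covariance
                      // type check to be skipped, e.g.
                      //     readonly. ldelema  !!T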
12321
12322                 assert(sz == 0);
12323                 goto PREFIX;
12324
12325             case CEE_TAILCALL:
12326                 JITDUMP(" tail.");
12327
12328                 Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes");
12329                 prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
12330
12331                 {
12332                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12333                     if (!impOpcodeIsCallOpcode(actualOpcode))
12334                     {
12335                         BADCODE("tailcall. has to be followed by call, callvirt or calli");
12336                     }
12337                 }
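                      // Illustrative only: an explicit tail call has the shape
                      //     tail. call  int32 C::M(int32)
                      //     ret
                      // i.e. the prefixed call must be immediately followed by ret.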
12338                 assert(sz == 0);
12339                 goto PREFIX;
12340
12341             case CEE_NEWOBJ:
12342
12343                 /* Since we will implicitly insert newObjThisPtr at the start of the
12344                    argument list, spill any GTF_ORDER_SIDEEFF */
12345                 impSpillSpecialSideEff();
12346
12347                 /* NEWOBJ does not respond to TAIL */
12348                 prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT;
12349
12350                 /* NEWOBJ does not respond to CONSTRAINED */
12351                 prefixFlags &= ~PREFIX_CONSTRAINED;
12352
12353 #if COR_JIT_EE_VERSION > 460
12354                 _impResolveToken(CORINFO_TOKENKIND_NewObj);
12355 #else
12356                 _impResolveToken(CORINFO_TOKENKIND_Method);
12357 #endif
12358
12359                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12360                               addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM)),
12361                               &callInfo);
12362
12363                 if (compIsForInlining())
12364                 {
12365                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
12366                     {
12367                         // Check to see if this call violates the boundary.
12368                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
12369                         return;
12370                     }
12371                 }
12372
12373                 mflags = callInfo.methodFlags;
12374
12375                 if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0)
12376                 {
12377                     BADCODE("newobj on static or abstract method");
12378                 }
12379
12380                 // Insert the security callout before any actual code is generated
12381                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12382
12383                 // There are three different cases for new
12384                 // Object size is variable (depends on arguments)
12385                 //      1) Object is an array (arrays treated specially by the EE)
12386                 //      2) Object is some other variable sized object (e.g. String)
12387                 //      3) Class Size can be determined beforehand (normal case)
12388                 // In the first case, we need to call a NEWOBJ helper (multinewarray)
12389                 // In the second case we call the constructor with a '0' this pointer
12390                 // In the third case we allocate the memory, then call the constructor
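                      // Illustrative examples of the three cases (tokens are placeholders, not from this method):
                      //     1) newobj instance void int32[0...,0...]::.ctor(int32, int32)   // multi-dim array
                      //     2) newobj instance void System.String::.ctor(char, int32)       // variable-size, non-array
                      //     3) newobj instance void MyClass::.ctor()                         // fixed-size, normal case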
12391
12392                 clsFlags = callInfo.classFlags;
12393                 if (clsFlags & CORINFO_FLG_ARRAY)
12394                 {
12395                     if (tiVerificationNeeded)
12396                     {
12397                         CORINFO_CLASS_HANDLE elemTypeHnd;
12398                         INDEBUG(CorInfoType corType =)
12399                         info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
12400                         assert(!(elemTypeHnd == nullptr && corType == CORINFO_TYPE_VALUECLASS));
12401                         Verify(elemTypeHnd == nullptr ||
12402                                    !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
12403                                "newarr of byref-like objects");
12404                         verVerifyCall(opcode, &resolvedToken, nullptr, ((prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0),
12405                                       ((prefixFlags & PREFIX_READONLY) != 0), delegateCreateStart, codeAddr - 1,
12406                                       &callInfo DEBUGARG(info.compFullName));
12407                     }
12408                     // Arrays need to call the NEWOBJ helper.
12409                     assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE);
12410
12411                     impImportNewObjArray(&resolvedToken, &callInfo);
12412                     if (compDonotInline())
12413                     {
12414                         return;
12415                     }
12416
12417                     callTyp = TYP_REF;
12418                     break;
12419                 }
12420                 // At present this can only be String
12421                 else if (clsFlags & CORINFO_FLG_VAROBJSIZE)
12422                 {
12423                     if (IsTargetAbi(CORINFO_CORERT_ABI))
12424                     {
12425                         // The dummy argument does not exist in CoreRT
12426                         newObjThisPtr = nullptr;
12427                     }
12428                     else
12429                     {
12430                         // This is the case for variable-sized objects that are not
12431                         // arrays.  In this case, call the constructor with a null 'this'
12432                         // pointer
12433                         newObjThisPtr = gtNewIconNode(0, TYP_REF);
12434                     }
12435
12436                     /* Remember that this basic block contains 'new' of an object */
12437                     block->bbFlags |= BBF_HAS_NEWOBJ;
12438                     optMethodFlags |= OMF_HAS_NEWOBJ;
12439                 }
12440                 else
12441                 {
12442                     // This is the normal case where the size of the object is
12443                     // fixed.  Allocate the memory and call the constructor.
12444
12445                     // Note: We cannot add a peep to avoid use of temp here
12446                 // because we don't have enough interference info to detect when
12447                 // sources and destination interfere, e.g. s = new S(ref);
12448
12449                 // TODO: Find the correct place to introduce a general
12450                     // reverse copy prop for struct return values from newobj or
12451                     // any function returning structs.
12452
12453                     /* get a temporary for the new object */
12454                     lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp"));
12455
12456                     // In the value class case we only need clsHnd for size calcs.
12457                     //
12458                     // The lookup of the code pointer will be handled by CALL in this case
12459                     if (clsFlags & CORINFO_FLG_VALUECLASS)
12460                     {
12461                         if (compIsForInlining())
12462                         {
12463                             // If value class has GC fields, inform the inliner. It may choose to
12464                             // bail out on the inline.
12465                             DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
12466                             if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
12467                             {
12468                                 compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
12469                                 if (compInlineResult->IsFailure())
12470                                 {
12471                                     return;
12472                                 }
12473
12474                                 // Do further notification in the case where the call site is rare;
12475                                 // some policies do not track the relative hotness of call sites for
12476                                 // "always" inline cases.
12477                                 if (impInlineInfo->iciBlock->isRunRarely())
12478                                 {
12479                                     compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
12480                                     if (compInlineResult->IsFailure())
12481                                     {
12482                                         return;
12483                                     }
12484                                 }
12485                             }
12486                         }
12487
12488                         CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
12489                         unsigned    size   = info.compCompHnd->getClassSize(resolvedToken.hClass);
12490
12491                         if (impIsPrimitive(jitTyp))
12492                         {
12493                             lvaTable[lclNum].lvType = JITtype2varType(jitTyp);
12494                         }
12495                         else
12496                         {
12497                             // The local variable itself is the allocated space.
12498                             // Here we need the unsafe value cls check, since the address of the struct is taken for further
12499                             // use and is potentially exploitable.
12500                             lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */);
12501                         }
12502
12503                         // Append a tree to zero-out the temp
12504                         newObjThisPtr = gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet());
12505
12506                         newObjThisPtr = gtNewBlkOpNode(newObjThisPtr,    // Dest
12507                                                        gtNewIconNode(0), // Value
12508                                                        size,             // Size
12509                                                        false,            // isVolatile
12510                                                        false);           // not copyBlock
12511                         impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
12512
12513                         // Obtain the address of the temp
12514                         newObjThisPtr =
12515                             gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet()));
12516                     }
12517                     else
12518                     {
12519 #ifdef FEATURE_READYTORUN_COMPILER
12520                         if (opts.IsReadyToRun())
12521                         {
12522                             op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
12523                             usingReadyToRunHelper = (op1 != nullptr);
12524                         }
12525
12526                         if (!usingReadyToRunHelper)
12527 #endif
12528                         {
12529                             op1 = impParentClassTokenToHandle(&resolvedToken, nullptr, TRUE);
12530                             if (op1 == nullptr)
12531                             { // compDonotInline()
12532                                 return;
12533                             }
12534
12535                             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
12536                             // and the newfast call with a single call to a dynamic R2R cell that will:
12537                             //      1) Load the context
12538                             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate
12539                             //      stub
12540                             //      3) Allocate and return the new object
12541                             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
12542
12543                             op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd),
12544                                                     resolvedToken.hClass, TYP_REF, op1);
12545                         }
12546
12547                         // Remember that this basic block contains 'new' of an object
12548                         block->bbFlags |= BBF_HAS_NEWOBJ;
12549                         optMethodFlags |= OMF_HAS_NEWOBJ;
12550
12551                         // Append the assignment to the temp/local. Don't need to spill
12552                         // at all as we are just calling an EE-Jit helper which can only
12553                         // cause an (async) OutOfMemoryException.
12554
12555                         // We assign the newly allocated object (by a GT_ALLOCOBJ node)
12556                         // to a temp. Note that the pattern "temp = allocObj" is required
12557                         // by ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes
12558                         // without exhaustive walk over all expressions.
12559
12560                         impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE);
12561
12562                         newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF);
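                              // i.e. roughly:  tmpN = ALLOCOBJ(hClass);  with the constructor then invoked on tmpN
                              // once control reaches the CALL label below.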
12563                     }
12564                 }
12565                 goto CALL;
12566
12567             case CEE_CALLI:
12568
12569                 /* CALLI does not respond to CONSTRAINED */
12570                 prefixFlags &= ~PREFIX_CONSTRAINED;
12571
12572                 if (compIsForInlining())
12573                 {
12574                     // CALLI doesn't have a method handle, so assume the worst.
12575                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
12576                     {
12577                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_CALLI);
12578                         return;
12579                     }
12580                 }
12581
12582             // fall through
12583
12584             case CEE_CALLVIRT:
12585             case CEE_CALL:
12586
12587                 // We can't call getCallInfo on the token from a CALLI, but we need it in
12588                 // many other places.  We unfortunately embed that knowledge here.
12589                 if (opcode != CEE_CALLI)
12590                 {
12591                     _impResolveToken(CORINFO_TOKENKIND_Method);
12592
12593                     eeGetCallInfo(&resolvedToken,
12594                                   (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr,
12595                                   // this is how impImportCall invokes getCallInfo
12596                                   addVerifyFlag(
12597                                       combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS),
12598                                               (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT
12599                                                                        : CORINFO_CALLINFO_NONE)),
12600                                   &callInfo);
12601                 }
12602                 else
12603                 {
12604                     // Suppress uninitialized use warning.
12605                     memset(&resolvedToken, 0, sizeof(resolvedToken));
12606                     memset(&callInfo, 0, sizeof(callInfo));
12607
12608                     resolvedToken.token = getU4LittleEndian(codeAddr);
12609                 }
12610
12611             CALL: // memberRef should be set.
12612                 // newObjThisPtr should be set for CEE_NEWOBJ
12613
12614                 JITDUMP(" %08X", resolvedToken.token);
12615                 constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0;
12616
12617                 bool newBBcreatedForTailcallStress;
12618
12619                 newBBcreatedForTailcallStress = false;
12620
12621                 if (compIsForInlining())
12622                 {
12623                     if (compDonotInline())
12624                     {
12625                         return;
12626                     }
12627                     // We rule out inlinees with explicit tail calls in fgMakeBasicBlocks.
12628                     assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0);
12629                 }
12630                 else
12631                 {
12632                     if (compTailCallStress())
12633                     {
12634                         // Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()?
12635                         // Tail call stress only recognizes call+ret patterns and forces them to be
12636                         // explicit tail prefixed calls.  Also fgMakeBasicBlocks() under tail call stress
12637                         // doesn't import the 'ret' opcode following the call into the basic block containing
12638                         // the call; instead it imports it into a new basic block.  Note that fgMakeBasicBlocks()
12639                         // is already checking that there is an opcode following call and hence it is
12640                         // safe here to read next opcode without bounds check.
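                              // Illustrative only: the stressed pattern is simply
                              //     call  int32 C::M()
                              //     ret
                              // which, absent a user "tail." prefix, gets one added here under stress.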
12641                         newBBcreatedForTailcallStress =
12642                             impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL, (not a CEE_NEWOBJ). So, don't
12643                                                              // make it jump to RET.
12644                             (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET
12645
12646                         if (newBBcreatedForTailcallStress &&
12647                             !(prefixFlags & PREFIX_TAILCALL_EXPLICIT) && // User hasn't set "tail." prefix yet.
12648                             verCheckTailCallConstraint(opcode, &resolvedToken,
12649                                                        constraintCall ? &constrainedResolvedToken : nullptr,
12650                                                        true) // Is it legal to do a tailcall?
12651                             )
12652                         {
12653                             // Stress the tailcall.
12654                             JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)");
12655                             prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
12656                         }
12657                     }
12658
12659                     // Note that when running under tail call stress, a call will be marked as explicit tail prefixed and
12660                     // hence will not be considered for implicit tail calling.
12661                     bool isRecursive = (callInfo.hMethod == info.compMethodHnd);
12662                     if (impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive))
12663                     {
12664                         JITDUMP(" (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
12665                         prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
12666                     }
12667                 }
12668
12669                 // Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call).
12670                 explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
12671                 readonlyCall     = (prefixFlags & PREFIX_READONLY) != 0;
12672
12673                 if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ)
12674                 {
12675                     // All calls and delegates need a security callout.
12676                     // For delegates, this is the call to the delegate constructor, not the access check on the
12677                     // LD(virt)FTN.
12678                     impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12679
12680 #if 0 // DevDiv 410397 - This breaks too many obfuscated apps to do this in an in-place release
12681      
12682                 // DevDiv 291703 - we need to check for accessibility between the caller of InitializeArray
12683                 // and the field it is reading, thus it is now unverifiable to not immediately precede with
12684                 // ldtoken <field token>, and we now check accessibility
12685                 if ((callInfo.methodFlags & CORINFO_FLG_INTRINSIC) &&
12686                     (info.compCompHnd->getIntrinsicID(callInfo.hMethod) == CORINFO_INTRINSIC_InitializeArray))
12687                 {
12688                     if (prevOpcode != CEE_LDTOKEN)
12689                     {
12690                         Verify(prevOpcode == CEE_LDTOKEN, "Need ldtoken for InitializeArray");
12691                     }
12692                     else
12693                     {
12694                         assert(lastLoadToken != NULL);
12695                         // Now that we know we have a token, verify that it is accessible for loading
12696                         CORINFO_RESOLVED_TOKEN resolvedLoadField;
12697                         impResolveToken(lastLoadToken, &resolvedLoadField, CORINFO_TOKENKIND_Field);
12698                         eeGetFieldInfo(&resolvedLoadField, CORINFO_ACCESS_INIT_ARRAY, &fieldInfo);
12699                         impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
12700                     }
12701                 }
12702
12703 #endif // DevDiv 410397
12704                 }
12705
12706                 if (tiVerificationNeeded)
12707                 {
12708                     verVerifyCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
12709                                   explicitTailCall, readonlyCall, delegateCreateStart, codeAddr - 1,
12710                                   &callInfo DEBUGARG(info.compFullName));
12711                 }
12712
12713                 // Insert delegate callout here.
12714                 if (opcode == CEE_NEWOBJ && (mflags & CORINFO_FLG_CONSTRUCTOR) && (clsFlags & CORINFO_FLG_DELEGATE))
12715                 {
12716 #ifdef DEBUG
12717                     // We should do this only if verification is enabled
12718                     // If verification is disabled, delegateCreateStart will not be initialized correctly
12719                     if (tiVerificationNeeded)
12720                     {
12721                         mdMemberRef delegateMethodRef = mdMemberRefNil;
12722                         // We should get here only for well formed delegate creation.
12723                         assert(verCheckDelegateCreation(delegateCreateStart, codeAddr - 1, delegateMethodRef));
12724                     }
12725 #endif
12726
12727 #ifdef FEATURE_CORECLR
12728                     // In coreclr the delegate transparency rule needs to be enforced even if verification is disabled
12729                     typeInfo              tiActualFtn          = impStackTop(0).seTypeInfo;
12730                     CORINFO_METHOD_HANDLE delegateMethodHandle = tiActualFtn.GetMethod2();
12731
12732                     impInsertCalloutForDelegate(info.compMethodHnd, delegateMethodHandle, resolvedToken.hClass);
12733 #endif // FEATURE_CORECLR
12734                 }
12735
12736                 callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
12737                                         newObjThisPtr, prefixFlags, &callInfo, opcodeOffs);
12738                 if (compDonotInline())
12739                 {
12740                     return;
12741                 }
12742
12743                 if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we
12744                                                                        // have created a new BB after the "call"
12745                 // instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless.
12746                 {
12747                     assert(!compIsForInlining());
12748                     goto RET;
12749                 }
12750
12751                 break;
12752
12753             case CEE_LDFLD:
12754             case CEE_LDSFLD:
12755             case CEE_LDFLDA:
12756             case CEE_LDSFLDA:
12757             {
12758
12759                 BOOL isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA);
12760                 BOOL isLoadStatic  = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA);
12761
12762                 /* Get the CP_Fieldref index */
12763                 assertImp(sz == sizeof(unsigned));
12764
12765                 _impResolveToken(CORINFO_TOKENKIND_Field);
12766
12767                 JITDUMP(" %08X", resolvedToken.token);
12768
12769                 int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET;
12770
12771                 GenTreePtr           obj     = nullptr;
12772                 typeInfo*            tiObj   = nullptr;
12773                 CORINFO_CLASS_HANDLE objType = nullptr; // used for fields
12774
12775                 if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA)
12776                 {
12777                     tiObj = &impStackTop().seTypeInfo;
12778                     obj   = impPopStack(objType).val;
12779
12780                     if (impIsThis(obj))
12781                     {
12782                         aflags |= CORINFO_ACCESS_THIS;
12783
12784                         // An optimization for Contextful classes:
12785                         // we unwrap the proxy when we have a 'this reference'
12786
12787                         if (info.compUnwrapContextful)
12788                         {
12789                             aflags |= CORINFO_ACCESS_UNWRAP;
12790                         }
12791                     }
12792                 }
12793
12794                 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
12795
12796                 // Figure out the type of the member.  We always call canAccessField, so you always need this
12797                 // handle
12798                 CorInfoType ciType = fieldInfo.fieldType;
12799                 clsHnd             = fieldInfo.structType;
12800
12801                 lclTyp = JITtype2varType(ciType);
12802
12803 #ifdef _TARGET_AMD64_
12804                 noway_assert(varTypeIsIntegralOrI(lclTyp) || varTypeIsFloating(lclTyp) || lclTyp == TYP_STRUCT);
12805 #endif // _TARGET_AMD64_
12806
12807                 if (compIsForInlining())
12808                 {
12809                     switch (fieldInfo.fieldAccessor)
12810                     {
12811                         case CORINFO_FIELD_INSTANCE_HELPER:
12812                         case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
12813                         case CORINFO_FIELD_STATIC_ADDR_HELPER:
12814                         case CORINFO_FIELD_STATIC_TLS:
12815
12816                             compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER);
12817                             return;
12818
12819                         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
12820 #if COR_JIT_EE_VERSION > 460
12821                         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
12822 #endif
12823                             /* We may be able to inline the field accessors in specific instantiations of generic
12824                              * methods */
12825                             compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER);
12826                             return;
12827
12828                         default:
12829                             break;
12830                     }
12831
12832                     if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT &&
12833                         clsHnd)
12834                     {
12835                         if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) &&
12836                             !(info.compFlags & CORINFO_FLG_FORCEINLINE))
12837                         {
12838                             // Loading a static valuetype field usually will cause a JitHelper to be called
12839                             // for the static base. This will bloat the code.
12840                             compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS);
12841
12842                             if (compInlineResult->IsFailure())
12843                             {
12844                                 return;
12845                             }
12846                         }
12847                     }
12848                 }
12849
12850                 tiRetVal = verMakeTypeInfo(ciType, clsHnd);
12851                 if (isLoadAddress)
12852                 {
12853                     tiRetVal.MakeByRef();
12854                 }
12855                 else
12856                 {
12857                     tiRetVal.NormaliseForStack();
12858                 }
12859
12860                 // Perform this check always to ensure that we get field access exceptions even with
12861                 // SkipVerification.
12862                 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
12863
12864                 if (tiVerificationNeeded)
12865                 {
12866                     // You can also pass the unboxed struct to LDFLD
12867                     BOOL bAllowPlainValueTypeAsThis = FALSE;
12868                     if (opcode == CEE_LDFLD && impIsValueType(tiObj))
12869                     {
12870                         bAllowPlainValueTypeAsThis = TRUE;
12871                     }
12872
12873                     verVerifyField(&resolvedToken, fieldInfo, tiObj, isLoadAddress, bAllowPlainValueTypeAsThis);
12874
12875                     // If we're doing this on a heap object or from a 'safe' byref
12876                     // then the result is a safe byref too
12877                     if (isLoadAddress) // load address
12878                     {
12879                         if (fieldInfo.fieldFlags &
12880                             CORINFO_FLG_FIELD_STATIC) // statics marked as safe will have permanent home
12881                         {
12882                             if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_SAFESTATIC_BYREF_RETURN)
12883                             {
12884                                 tiRetVal.SetIsPermanentHomeByRef();
12885                             }
12886                         }
12887                         else if (tiObj->IsObjRef() || tiObj->IsPermanentHomeByRef())
12888                         {
12889                             // ldflda of byref is safe if done on a gc object or on  a
12890                             // safe byref
12891                             tiRetVal.SetIsPermanentHomeByRef();
12892                         }
12893                     }
12894                 }
12895                 else
12896                 {
12897                     // tiVerificationNeeded is false.
12898                     // Raise InvalidProgramException if static load accesses non-static field
12899                     if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
12900                     {
12901                         BADCODE("static access on an instance field");
12902                     }
12903                 }
12904
12905                 // We are using ldfld/a on a static field. We allow it, but need to evaluate any side effects of obj.
12906                 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
12907                 {
12908                     if (obj->gtFlags & GTF_SIDE_EFFECT)
12909                     {
12910                         obj = gtUnusedValNode(obj);
12911                         impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12912                     }
12913                     obj = nullptr;
12914                 }
12915
12916                 /* Preserve 'small' int types */
12917                 if (lclTyp > TYP_INT)
12918                 {
12919                     lclTyp = genActualType(lclTyp);
12920                 }
12921
12922                 bool usesHelper = false;
12923
12924                 switch (fieldInfo.fieldAccessor)
12925                 {
12926                     case CORINFO_FIELD_INSTANCE:
12927 #ifdef FEATURE_READYTORUN_COMPILER
12928                     case CORINFO_FIELD_INSTANCE_WITH_BASE:
12929 #endif
12930                     {
12931                         bool nullcheckNeeded = false;
12932
12933                         obj = impCheckForNullPointer(obj);
12934
12935                         if (isLoadAddress && (obj->gtType == TYP_BYREF) && fgAddrCouldBeNull(obj))
12936                         {
12937                             nullcheckNeeded = true;
12938                         }
12939
12940                         // If the object is a struct, what we really want is
12941                         // for the field to operate on the address of the struct.
12942                         if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj))
12943                         {
12944                             assert(opcode == CEE_LDFLD && objType != nullptr);
12945
12946                             obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true);
12947                         }
12948
12949                         /* Create the data member node */
12950                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset, nullcheckNeeded);
12951
12952 #ifdef FEATURE_READYTORUN_COMPILER
12953                         if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
12954                         {
12955                             op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
12956                         }
12957 #endif
12958
12959                         op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
12960
12961                         if (fgAddrCouldBeNull(obj))
12962                         {
12963                             op1->gtFlags |= GTF_EXCEPT;
12964                         }
12965
12966                         // If gtFldObj is a BYREF then our target is a value class and
12967                         // it could point anywhere, e.g. a boxed class static int
12968                         if (obj->gtType == TYP_BYREF)
12969                         {
12970                             op1->gtFlags |= GTF_IND_TGTANYWHERE;
12971                         }
12972
12973                         DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
12974                         if (StructHasOverlappingFields(typeFlags))
12975                         {
12976                             op1->gtField.gtFldMayOverlap = true;
12977                         }
12978
12979                         // wrap it in an address-of operator if necessary
12980                         if (isLoadAddress)
12981                         {
12982                             op1 = gtNewOperNode(GT_ADDR,
12983                                                 (var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1);
12984                         }
12985                         else
12986                         {
12987                             if (compIsForInlining() &&
12988                                 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, obj,
12989                                                                                    impInlineInfo->inlArgInfo))
12990                             {
12991                                 impInlineInfo->thisDereferencedFirst = true;
12992                             }
12993                         }
12994                     }
12995                     break;
12996
12997                     case CORINFO_FIELD_STATIC_TLS:
12998 #ifdef _TARGET_X86_
12999                         // Legacy TLS access is implemented as an intrinsic on x86 only
13000
13001                         /* Create the data member node */
13002                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
13003                         op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13004
13005                         if (isLoadAddress)
13006                         {
13007                             op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1);
13008                         }
13009                         break;
13010 #else
13011                         fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13012
13013                         __fallthrough;
13014 #endif
13015
13016                     case CORINFO_FIELD_STATIC_ADDR_HELPER:
13017                     case CORINFO_FIELD_INSTANCE_HELPER:
13018                     case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13019                         op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13020                                                clsHnd, nullptr);
13021                         usesHelper = true;
13022                         break;
13023
13024                     case CORINFO_FIELD_STATIC_ADDRESS:
13025                         // Replace static read-only fields with constant if possible
13026                         if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) &&
13027                             !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) &&
13028                             (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp)))
13029                         {
13030                             CorInfoInitClassResult initClassResult =
13031                                 info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd,
13032                                                             impTokenLookupContextHandle);
13033
13034                             if (initClassResult & CORINFO_INITCLASS_INITIALIZED)
13035                             {
13036                                 void** pFldAddr = nullptr;
13037                                 void*  fldAddr =
13038                                     info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr);
13039
13040                                 // We should always be able to access this static's address directly
13041                                 assert(pFldAddr == nullptr);
13042
13043                                 op1 = impImportStaticReadOnlyField(fldAddr, lclTyp);
13044                                 goto FIELD_DONE;
13045                             }
13046                         }
13047
13048                         __fallthrough;
13049
13050                     case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13051                     case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13052                     case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13053 #if COR_JIT_EE_VERSION > 460
13054                     case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13055 #endif
13056                         op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13057                                                          lclTyp);
13058                         break;
13059
13060                     case CORINFO_FIELD_INTRINSIC_ZERO:
13061                     {
13062                         assert(aflags & CORINFO_ACCESS_GET);
13063                         op1 = gtNewIconNode(0, lclTyp);
13064                         goto FIELD_DONE;
13065                     }
13066                     break;
13067
13068                     case CORINFO_FIELD_INTRINSIC_EMPTY_STRING:
13069                     {
13070                         assert(aflags & CORINFO_ACCESS_GET);
13071
13072                         LPVOID         pValue;
13073                         InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue);
13074                         op1                = gtNewStringLiteralNode(iat, pValue);
13075                         goto FIELD_DONE;
13076                     }
13077                     break;
13078
13079                     default:
13080                         assert(!"Unexpected fieldAccessor");
13081                 }
13082
13083                 if (!isLoadAddress)
13084                 {
13085
13086                     if (prefixFlags & PREFIX_VOLATILE)
13087                     {
13088                         op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
13089                         op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13090
13091                         if (!usesHelper)
13092                         {
13093                             assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13094                                    (op1->OperGet() == GT_OBJ));
13095                             op1->gtFlags |= GTF_IND_VOLATILE;
13096                         }
13097                     }
13098
13099                     if (prefixFlags & PREFIX_UNALIGNED)
13100                     {
13101                         if (!usesHelper)
13102                         {
13103                             assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13104                                    (op1->OperGet() == GT_OBJ));
13105                             op1->gtFlags |= GTF_IND_UNALIGNED;
13106                         }
13107                     }
13108                 }
13109
13110                 /* Check if the class needs explicit initialization */
13111
13112                 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13113                 {
13114                     GenTreePtr helperNode = impInitClass(&resolvedToken);
13115                     if (compDonotInline())
13116                     {
13117                         return;
13118                     }
13119                     if (helperNode != nullptr)
13120                     {
13121                         op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13122                     }
13123                 }
13124
13125             FIELD_DONE:
13126                 impPushOnStack(op1, tiRetVal);
13127             }
13128             break;
13129
13130             case CEE_STFLD:
13131             case CEE_STSFLD:
13132             {
13133
13134                 BOOL isStoreStatic = (opcode == CEE_STSFLD);
13135
13136                 CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type)
13137
13138                 /* Get the CP_Fieldref index */
13139
13140                 assertImp(sz == sizeof(unsigned));
13141
13142                 _impResolveToken(CORINFO_TOKENKIND_Field);
13143
13144                 JITDUMP(" %08X", resolvedToken.token);
13145
13146                 int        aflags = CORINFO_ACCESS_SET;
13147                 GenTreePtr obj    = nullptr;
13148                 typeInfo*  tiObj  = nullptr;
13149                 typeInfo   tiVal;
13150
13151                 /* Pull the value from the stack */
13152                 op2    = impPopStack(tiVal);
13153                 clsHnd = tiVal.GetClassHandle();
13154
13155                 if (opcode == CEE_STFLD)
13156                 {
13157                     tiObj = &impStackTop().seTypeInfo;
13158                     obj   = impPopStack().val;
13159
13160                     if (impIsThis(obj))
13161                     {
13162                         aflags |= CORINFO_ACCESS_THIS;
13163
13164                         // An optimization for Contextful classes:
13165                         // we unwrap the proxy when we have a 'this reference'
13166
13167                         if (info.compUnwrapContextful)
13168                         {
13169                             aflags |= CORINFO_ACCESS_UNWRAP;
13170                         }
13171                     }
13172                 }
13173
13174                 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
13175
13176                 // Figure out the type of the member.  We always call canAccessField, so you always need this
13177                 // handle
13178                 CorInfoType ciType = fieldInfo.fieldType;
13179                 fieldClsHnd        = fieldInfo.structType;
13180
13181                 lclTyp = JITtype2varType(ciType);
13182
13183                 if (compIsForInlining())
13184                 {
13185                     /* Is this a 'special' (COM) field, a TLS ref static field, a field stored in the GC heap, or a
13186                      * per-inst static? */
13187
13188                     switch (fieldInfo.fieldAccessor)
13189                     {
13190                         case CORINFO_FIELD_INSTANCE_HELPER:
13191                         case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13192                         case CORINFO_FIELD_STATIC_ADDR_HELPER:
13193                         case CORINFO_FIELD_STATIC_TLS:
13194
13195                             compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER);
13196                             return;
13197
13198                         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13199 #if COR_JIT_EE_VERSION > 460
13200                         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13201 #endif
13202
13203                             /* We may be able to inline the field accessors in specific instantiations of generic
13204                              * methods */
13205                             compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER);
13206                             return;
13207
13208                         default:
13209                             break;
13210                     }
13211                 }
13212
13213                 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13214
13215                 if (tiVerificationNeeded)
13216                 {
13217                     verVerifyField(&resolvedToken, fieldInfo, tiObj, TRUE);
13218                     typeInfo fieldType = verMakeTypeInfo(ciType, fieldClsHnd);
13219                     Verify(tiCompatibleWith(tiVal, fieldType.NormaliseForStack(), true), "type mismatch");
13220                 }
13221                 else
13222                 {
13223                     // tiVerificationNeeded is false.
13224                     // Raise InvalidProgramException if static store accesses non-static field
13225                     if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
13226                     {
13227                         BADCODE("static access on an instance field");
13228                     }
13229                 }
13230
13231                 // We are using stfld on a static field.
13232                 // We allow it, but need to evaluate any side effects of obj.
13233                 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
13234                 {
13235                     if (obj->gtFlags & GTF_SIDE_EFFECT)
13236                     {
13237                         obj = gtUnusedValNode(obj);
13238                         impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13239                     }
13240                     obj = nullptr;
13241                 }
13242
13243                 /* Preserve 'small' int types */
13244                 if (lclTyp > TYP_INT)
13245                 {
13246                     lclTyp = genActualType(lclTyp);
13247                 }
13248
13249                 switch (fieldInfo.fieldAccessor)
13250                 {
13251                     case CORINFO_FIELD_INSTANCE:
13252 #ifdef FEATURE_READYTORUN_COMPILER
13253                     case CORINFO_FIELD_INSTANCE_WITH_BASE:
13254 #endif
13255                     {
13256                         obj = impCheckForNullPointer(obj);
13257
13258                         /* Create the data member node */
13259                         op1             = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
13260                         DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13261                         if (StructHasOverlappingFields(typeFlags))
13262                         {
13263                             op1->gtField.gtFldMayOverlap = true;
13264                         }
13265
13266 #ifdef FEATURE_READYTORUN_COMPILER
13267                         if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
13268                         {
13269                             op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
13270                         }
13271 #endif
13272
13273                         op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
13274
13275                         if (fgAddrCouldBeNull(obj))
13276                         {
13277                             op1->gtFlags |= GTF_EXCEPT;
13278                         }
13279
13280                         // If gtFldObj is a BYREF then our target is a value class and
13281                         // it could point anywhere, for example a boxed class static int
13282                         if (obj->gtType == TYP_BYREF)
13283                         {
13284                             op1->gtFlags |= GTF_IND_TGTANYWHERE;
13285                         }
13286
13287                         if (compIsForInlining() &&
13288                             impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, obj, impInlineInfo->inlArgInfo))
13289                         {
13290                             impInlineInfo->thisDereferencedFirst = true;
13291                         }
13292                     }
13293                     break;
13294
13295                     case CORINFO_FIELD_STATIC_TLS:
13296 #ifdef _TARGET_X86_
13297                         // Legacy TLS access is implemented as an intrinsic on x86 only
13298
13299                         /* Create the data member node */
13300                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, nullptr, fieldInfo.offset);
13301                         op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13302
13303                         break;
13304 #else
13305                         fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13306
13307                         __fallthrough;
13308 #endif
13309
13310                     case CORINFO_FIELD_STATIC_ADDR_HELPER:
13311                     case CORINFO_FIELD_INSTANCE_HELPER:
13312                     case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13313                         op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13314                                                clsHnd, op2);
13315                         goto SPILL_APPEND;
13316
13317                     case CORINFO_FIELD_STATIC_ADDRESS:
13318                     case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13319                     case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13320                     case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13321 #if COR_JIT_EE_VERSION > 460
13322                     case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13323 #endif
13324                         op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13325                                                          lclTyp);
13326                         break;
13327
13328                     default:
13329                         assert(!"Unexpected fieldAccessor");
13330                 }
13331
13332                 // Create the member assignment, unless we have a struct.
13333                 // TODO-1stClassStructs: This could be limited to TYP_STRUCT, to avoid extra copies.
13334                 bool deferStructAssign = varTypeIsStruct(lclTyp);
13335
13336                 if (!deferStructAssign)
13337                 {
13338                     if (prefixFlags & PREFIX_VOLATILE)
13339                     {
13340                         assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
13341                         op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
13342                         op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13343                         op1->gtFlags |= GTF_IND_VOLATILE;
13344                     }
13345                     if (prefixFlags & PREFIX_UNALIGNED)
13346                     {
13347                         assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
13348                         op1->gtFlags |= GTF_IND_UNALIGNED;
13349                     }
13350
13351                     /* V4.0 allows assignment of i4 constant values to i8 type vars when the IL verifier is
13352                        bypassed (full trust apps). The reason this works is that the JIT stores an i4 constant
13353                        in the GenTree union during importation and reads from the union as if it were a long
13354                        during code generation. Though this can potentially read garbage, one can get lucky and
13355                        have it work correctly.
13356
13357                        This code pattern is generated by the Dev10 MC++ compiler when storing to fields in code
13358                        compiled with the /O2 switch (the default when compiling retail configs in Dev10), and a
13359                        customer app has taken a dependency on it. To be backward compatible, we explicitly add
13360                        an upward cast here so that it always works correctly.
13361
13362                        Note that this is limited to x86 alone, as there is no back-compat to be addressed for
13363                        the ARM JIT for V4.0.
13364                     */
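                    // Illustrative example of the pattern described above (not taken from any particular
                    // binary): IL such as
                    //     ldc.i4   0x10
                    //     stfld    int64 SomeClass::m_longField   // hypothetical field name
                    // stores an i4 constant into an i8 field; the x86-only cast added below widens the
                    // constant so the store always behaves correctly.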
13369                     CLANG_FORMAT_COMMENT_ANCHOR;
13370
13371 #ifdef _TARGET_X86_
13372                     if (op1->TypeGet() != op2->TypeGet() && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) &&
13373                         varTypeIsLong(op1->TypeGet()))
13374                     {
13375                         op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
13376                     }
13377 #endif
13378
13379 #ifdef _TARGET_64BIT_
13380                     // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
13381                     if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
13382                     {
13383                         op2->gtType = TYP_I_IMPL;
13384                     }
13385                     else
13386                     {
13387                         // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
13388                         //
13389                         if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
13390                         {
13391                             op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
13392                         }
13393                         // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
13394                         //
13395                         if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
13396                         {
13397                             op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
13398                         }
13399                     }
13400 #endif
13401
13402 #if !FEATURE_X87_DOUBLES
13403                     // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
13404                     // We insert a cast to the dest 'op1' type
13405                     //
13406                     if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
13407                         varTypeIsFloating(op2->gtType))
13408                     {
13409                         op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
13410                     }
13411 #endif // !FEATURE_X87_DOUBLES
13412
13413                     op1 = gtNewAssignNode(op1, op2);
13414
13415                     /* Mark the expression as containing an assignment */
13416
13417                     op1->gtFlags |= GTF_ASG;
13418                 }
13419
13420                 /* Check if the class needs explicit initialization */
13421
13422                 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13423                 {
13424                     GenTreePtr helperNode = impInitClass(&resolvedToken);
13425                     if (compDonotInline())
13426                     {
13427                         return;
13428                     }
13429                     if (helperNode != nullptr)
13430                     {
13431                         op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13432                     }
13433                 }
13434
13435                 /* stfld can interfere with value classes (consider the sequence
13436                    ldloc, ldloca, ..., stfld, stloc).  We will be conservative and
13437                    spill all value class references from the stack. */
13438
13439                 if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL)))
13440                 {
13441                     assert(tiObj);
13442
13443                     if (impIsValueType(tiObj))
13444                     {
13445                         impSpillEvalStack();
13446                     }
13447                     else
13448                     {
13449                         impSpillValueClasses();
13450                     }
13451                 }
13452
13453                 /* Spill any refs to the same member from the stack */
13454
13455                 impSpillLclRefs((ssize_t)resolvedToken.hField);
13456
13457                 /* stsfld also interferes with indirect accesses (for aliased
13458                    statics) and calls. But we don't need to spill other statics
13459                    as we have explicitly spilled this particular static field. */
13460
13461                 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD"));
13462
13463                 if (deferStructAssign)
13464                 {
13465                     op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL);
13466                 }
13467             }
13468                 goto APPEND;
13469
13470             case CEE_NEWARR:
13471             {
13472
13473                 /* Get the class type index operand */
13474
13475                 _impResolveToken(CORINFO_TOKENKIND_Newarr);
13476
13477                 JITDUMP(" %08X", resolvedToken.token);
13478
13479                 if (!opts.IsReadyToRun())
13480                 {
13481                     // Need to restore array classes before creating array objects on the heap
13482                     op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
13483                     if (op1 == nullptr)
13484                     { // compDonotInline()
13485                         return;
13486                     }
13487                 }
13488
13489                 if (tiVerificationNeeded)
13490                 {
13491                     // As per ECMA, the 'numElems' specified can be either an int32 or a native int.
13492                     Verify(impStackTop().seTypeInfo.IsIntOrNativeIntType(), "bad bound");
13493
13494                     CORINFO_CLASS_HANDLE elemTypeHnd;
13495                     info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
13496                     Verify(elemTypeHnd == nullptr ||
13497                                !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
13498                            "array of byref-like type");
13499                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13500                 }
13501
13502                 accessAllowedResult =
13503                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13504                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13505
13506                 /* Form the arglist: array class handle, size */
13507                 op2 = impPopStack().val;
13508                 assertImp(genActualTypeIsIntOrI(op2->gtType));
13509
13510 #ifdef FEATURE_READYTORUN_COMPILER
13511                 if (opts.IsReadyToRun())
13512                 {
13513                     op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF,
13514                                                     gtNewArgList(op2));
13515                     usingReadyToRunHelper = (op1 != nullptr);
13516
13517                     if (!usingReadyToRunHelper)
13518                     {
13519                         // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13520                         // and the newarr call with a single call to a dynamic R2R cell that will:
13521                         //      1) Load the context
13522                         //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
13523                         //      3) Allocate the new array
13524                         // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13525
13526                         // Need to restore array classes before creating array objects on the heap
13527                         op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
13528                         if (op1 == nullptr)
13529                         { // compDonotInline()
13530                             return;
13531                         }
13532                     }
13533                 }
13534
13535                 if (!usingReadyToRunHelper)
13536 #endif
13537                 {
13538                     args = gtNewArgList(op1, op2);
13539
13540                     /* Create a call to 'new' */
13541
13542                     // Note that this only works for shared generic code because the same helper is used for all
13543                     // reference array types
13544                     op1 =
13545                         gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, 0, args);
13546                 }
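                // In the non-ReadyToRun path above, the resulting tree is roughly:
                //     op1 = CALL newArrHelper(arrayClsHnd, numElems)   ->  TYP_REF
                // where the EE picks the concrete helper for the element type via getNewArrHelper.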
13547
13548                 op1->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass;
13549
13550                 /* Remember that this basic block contains 'new' of a single-dimensional (SD) array */
13551
13552                 block->bbFlags |= BBF_HAS_NEWARRAY;
13553                 optMethodFlags |= OMF_HAS_NEWARRAY;
13554
13555                 /* Push the result of the call on the stack */
13556
13557                 impPushOnStack(op1, tiRetVal);
13558
13559                 callTyp = TYP_REF;
13560             }
13561             break;
13562
13563             case CEE_LOCALLOC:
13564                 assert(!compIsForInlining());
13565
13566                 if (tiVerificationNeeded)
13567                 {
13568                     Verify(false, "bad opcode");
13569                 }
13570
13571                 // We don't allow locallocs inside handlers
13572                 if (block->hasHndIndex())
13573                 {
13574                     BADCODE("Localloc can't be inside handler");
13575                 }
13576
13577                 /* The FP register may not be back to the original value at the end
13578                    of the method, even if the frame size is 0, as localloc may
13579                    have modified it. So we will HAVE to reset it */
13580
13581                 compLocallocUsed = true;
13582                 setNeedsGSSecurityCookie();
13583
13584                 // Get the size to allocate
13585
13586                 op2 = impPopStack().val;
13587                 assertImp(genActualTypeIsIntOrI(op2->gtType));
13588
13589                 if (verCurrentState.esStackDepth != 0)
13590                 {
13591                     BADCODE("Localloc can only be used when the stack is empty");
13592                 }
13593
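                // The node created below is, roughly:
                //     GT_LCLHEAP(numBytes)   ->  TYP_I_IMPL pointer to the newly allocated stack memory
                // (this is what C# 'stackalloc' ultimately compiles down to via the localloc opcode).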
13594                 op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2);
13595
13596                 // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd.
13597
13598                 op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE);
13599
13600                 impPushOnStack(op1, tiRetVal);
13601                 break;
13602
13603             case CEE_ISINST:
13604
13605                 /* Get the type token */
13606                 assertImp(sz == sizeof(unsigned));
13607
13608                 _impResolveToken(CORINFO_TOKENKIND_Casting);
13609
13610                 JITDUMP(" %08X", resolvedToken.token);
13611
13612                 if (!opts.IsReadyToRun())
13613                 {
13614                     op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
13615                     if (op2 == nullptr)
13616                     { // compDonotInline()
13617                         return;
13618                     }
13619                 }
13620
13621                 if (tiVerificationNeeded)
13622                 {
13623                     Verify(impStackTop().seTypeInfo.IsObjRef(), "obj reference needed");
13624                     // Even if this is a value class, we know it is boxed.
13625                     tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
13626                 }
13627                 accessAllowedResult =
13628                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13629                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13630
13631                 op1 = impPopStack().val;
13632
13633 #ifdef FEATURE_READYTORUN_COMPILER
13634                 if (opts.IsReadyToRun())
13635                 {
13636                     GenTreePtr opLookup =
13637                         impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF,
13638                                                   gtNewArgList(op1));
13639                     usingReadyToRunHelper = (opLookup != nullptr);
13640                     op1                   = (usingReadyToRunHelper ? opLookup : op1);
13641
13642                     if (!usingReadyToRunHelper)
13643                     {
13644                         // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13645                         // and the isinstanceof_any call with a single call to a dynamic R2R cell that will:
13646                         //      1) Load the context
13647                         //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
13648                         //      3) Perform the 'is instance' check on the input object
13649                         // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13650
13651                         op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
13652                         if (op2 == nullptr)
13653                         { // compDonotInline()
13654                             return;
13655                         }
13656                     }
13657                 }
13658
13659                 if (!usingReadyToRunHelper)
13660 #endif
13661                 {
13662                     op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false);
13663                 }
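                // In the non-ReadyToRun case the tree built above is, roughly, a call to an 'isinst'
                // JIT helper over (class handle, object) that returns either the same object reference
                // or null, which matches the MSIL 'isinst' contract (e.g. C# 'obj as Foo').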
13664                 if (compDonotInline())
13665                 {
13666                     return;
13667                 }
13668
13669                 impPushOnStack(op1, tiRetVal);
13670
13671                 break;
13672
13673             case CEE_REFANYVAL:
13674
13675                 // get the class handle and make a ICON node out of it
13676
13677                 _impResolveToken(CORINFO_TOKENKIND_Class);
13678
13679                 JITDUMP(" %08X", resolvedToken.token);
13680
13681                 op2 = impTokenToHandle(&resolvedToken);
13682                 if (op2 == nullptr)
13683                 { // compDonotInline()
13684                     return;
13685                 }
13686
13687                 if (tiVerificationNeeded)
13688                 {
13689                     Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
13690                            "need refany");
13691                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass).MakeByRef();
13692                 }
13693
13694                 op1 = impPopStack().val;
13695                 // Make certain it is normalized.
13696                 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
13697
13698                 // Call helper GETREFANY(classHandle, op1);
13699                 args = gtNewArgList(op2, op1);
13700                 op1  = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, 0, args);
13701
13702                 impPushOnStack(op1, tiRetVal);
13703                 break;
13704
13705             case CEE_REFANYTYPE:
13706
13707                 if (tiVerificationNeeded)
13708                 {
13709                     Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
13710                            "need refany");
13711                 }
13712
13713                 op1 = impPopStack().val;
13714
13715                 // Make certain it is normalized.
13716                 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
13717
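                // A 'refany' (System.TypedReference / CORINFO_RefAny) is a two-slot struct holding a
                // data pointer and a type handle. CEE_REFANYTYPE wants the 'type' slot, which is why
                // the GT_OBJ case below indexes with offsetof(CORINFO_RefAny, type).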
13718                 if (op1->gtOper == GT_OBJ)
13719                 {
13720                     // Get the address of the refany
13721                     op1 = op1->gtOp.gtOp1;
13722
13723                     // Fetch the type from the correct slot
13724                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
13725                                         gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL));
13726                     op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1);
13727                 }
13728                 else
13729                 {
13730                     assertImp(op1->gtOper == GT_MKREFANY);
13731
13732                     // The pointer may have side-effects
13733                     if (op1->gtOp.gtOp1->gtFlags & GTF_SIDE_EFFECT)
13734                     {
13735                         impAppendTree(op1->gtOp.gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13736 #ifdef DEBUG
13737                         impNoteLastILoffs();
13738 #endif
13739                     }
13740
13741                     // We already have the class handle
13742                     op1 = op1->gtOp.gtOp2;
13743                 }
13744
13745                 // convert native TypeHandle to RuntimeTypeHandle
13746                 {
13747                     GenTreeArgList* helperArgs = gtNewArgList(op1);
13748
13749                     op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL, TYP_STRUCT, GTF_EXCEPT,
13750                                               helperArgs);
13751
13752                     // The handle struct is returned in a register
13753                     op1->gtCall.gtReturnType = TYP_REF;
13754
13755                     tiRetVal = typeInfo(TI_STRUCT, impGetTypeHandleClass());
13756                 }
13757
13758                 impPushOnStack(op1, tiRetVal);
13759                 break;
13760
13761             case CEE_LDTOKEN:
13762             {
13763                 /* Get the Class index */
13764                 assertImp(sz == sizeof(unsigned));
13765                 lastLoadToken = codeAddr;
13766                 _impResolveToken(CORINFO_TOKENKIND_Ldtoken);
13767
13768                 tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken);
13769
13770                 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
13771                 if (op1 == nullptr)
13772                 { // compDonotInline()
13773                     return;
13774                 }
13775
13776                 helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE;
13777                 assert(resolvedToken.hClass != nullptr);
13778
13779                 if (resolvedToken.hMethod != nullptr)
13780                 {
13781                     helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD;
13782                 }
13783                 else if (resolvedToken.hField != nullptr)
13784                 {
13785                     helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD;
13786                 }
13787
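                // Sketch of what is built below: ldtoken pushes the raw handle (op1), which we wrap in
                // a helper call that produces the corresponding runtime reflection object, e.g. for a
                // type token roughly:
                //     CALL CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE(typeHandle)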
13788                 GenTreeArgList* helperArgs = gtNewArgList(op1);
13789
13790                 op1 = gtNewHelperCallNode(helper, TYP_STRUCT, GTF_EXCEPT, helperArgs);
13791
13792                 // The handle struct is returned in a register
13793                 op1->gtCall.gtReturnType = TYP_REF;
13794
13795                 tiRetVal = verMakeTypeInfo(tokenType);
13796                 impPushOnStack(op1, tiRetVal);
13797             }
13798             break;
13799
13800             case CEE_UNBOX:
13801             case CEE_UNBOX_ANY:
13802             {
13803                 /* Get the Class index */
13804                 assertImp(sz == sizeof(unsigned));
13805
13806                 _impResolveToken(CORINFO_TOKENKIND_Class);
13807
13808                 JITDUMP(" %08X", resolvedToken.token);
13809
13810                 BOOL runtimeLookup;
13811                 op2 = impTokenToHandle(&resolvedToken, &runtimeLookup);
13812                 if (op2 == nullptr)
13813                 { // compDonotInline()
13814                     return;
13815                 }
13816
13817                 // Run this always so we can get access exceptions even with SkipVerification.
13818                 accessAllowedResult =
13819                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13820                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13821
13822                 if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass))
13823                 {
13824                     if (tiVerificationNeeded)
13825                     {
13826                         typeInfo tiUnbox = impStackTop().seTypeInfo;
13827                         Verify(tiUnbox.IsObjRef(), "bad unbox.any arg");
13828                         tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13829                         tiRetVal.NormaliseForStack();
13830                     }
13831                     op1 = impPopStack().val;
13832                     goto CASTCLASS;
13833                 }
13834
13835                 /* Pop the object and create the unbox helper call */
13836                 /* You might think that for UNBOX_ANY we need to push a different */
13837                 /* (non-byref) type, but here we're making the tiRetVal that is used */
13838                 /* for the intermediate pointer which we then transfer onto the OBJ */
13839                 /* instruction.  OBJ then creates the appropriate tiRetVal. */
13840                 if (tiVerificationNeeded)
13841                 {
13842                     typeInfo tiUnbox = impStackTop().seTypeInfo;
13843                     Verify(tiUnbox.IsObjRef(), "Bad unbox arg");
13844
13845                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13846                     Verify(tiRetVal.IsValueClass(), "not value class");
13847                     tiRetVal.MakeByRef();
13848
13849                     // We always come from an objref, so this is safe byref
13850                     tiRetVal.SetIsPermanentHomeByRef();
13851                     tiRetVal.SetIsReadonlyByRef();
13852                 }
13853
13854                 op1 = impPopStack().val;
13855                 assertImp(op1->gtType == TYP_REF);
13856
13857                 helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass);
13858                 assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE);
13859
13860                 // We only want to expand inline the normal UNBOX helper;
13861                 expandInline = (helper == CORINFO_HELP_UNBOX);
13862
13863                 if (expandInline)
13864                 {
13865                     if (compCurBB->isRunRarely())
13866                     {
13867                         expandInline = false; // not worth the code expansion
13868                     }
13869                 }
13870
13871                 if (expandInline)
13872                 {
13873                     // we are doing normal unboxing
13874                     // inline the common case of the unbox helper
13875                     // UNBOX(exp) morphs into
13876                     // clone = pop(exp);
13877                     // ((*clone == typeToken) ? nop : helper(clone, typeToken));
13878                     // push(clone + sizeof(void*))
13879                     //
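                    // Source-level illustration (not from any particular test): for C# 'int i = (int)obj;'
                    // on an object holding a boxed int, the expansion compares obj's method table pointer
                    // against the int32 type handle and, on a match, yields a byref to the boxed payload
                    // just past the method table pointer, with no helper call on the fast path.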
13880                     GenTreePtr cloneOperand;
13881                     op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
13882                                        nullptr DEBUGARG("inline UNBOX clone1"));
13883                     op1 = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
13884
13885                     GenTreePtr condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2);
13886
13887                     op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
13888                                        nullptr DEBUGARG("inline UNBOX clone2"));
13889                     op2 = impTokenToHandle(&resolvedToken);
13890                     if (op2 == nullptr)
13891                     { // compDonotInline()
13892                         return;
13893                     }
13894                     args = gtNewArgList(op2, op1);
13895                     op1  = gtNewHelperCallNode(helper, TYP_VOID, 0, args);
13896
13897                     op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1);
13898                     op1 = gtNewQmarkNode(TYP_VOID, condBox, op1);
13899                     condBox->gtFlags |= GTF_RELOP_QMARK;
13900
13901                     // QMARK nodes cannot reside on the evaluation stack. Because there
13902                     // may be other trees on the evaluation stack that side-effect the
13903                     // sources of the UNBOX operation we must spill the stack.
13904
13905                     impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13906
13907                     // Create the address-expression to reference past the object header
13908                     // to the beginning of the value-type. Today this means adjusting
13909                     // past the base of the object's vtable field, which is pointer-sized.
13910
13911                     op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
13912                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2);
13913                 }
13914                 else
13915                 {
13916                     unsigned callFlags = (helper == CORINFO_HELP_UNBOX) ? 0 : GTF_EXCEPT;
13917
13918                     // Don't optimize, just call the helper and be done with it
13919                     args = gtNewArgList(op2, op1);
13920                     op1  = gtNewHelperCallNode(helper,
13921                                               (var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT),
13922                                               callFlags, args);
13923                 }
13924
13925                 assert(helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF || // Unbox helper returns a byref.
13926                        helper == CORINFO_HELP_UNBOX_NULLABLE &&
13927                            varTypeIsStruct(op1) // UnboxNullable helper returns a struct.
13928                        );
13929
13930                 /*
13931                   ----------------------------------------------------------------------
13932                   | \ helper  |                         |                              |
13933                   |   \       |                         |                              |
13934                   |     \     | CORINFO_HELP_UNBOX      | CORINFO_HELP_UNBOX_NULLABLE  |
13935                   |       \   | (which returns a BYREF) | (which returns a STRUCT)     |
13936                   | opcode  \ |                         |                              |
13937                   |---------------------------------------------------------------------
13938                   | UNBOX     | push the BYREF          | spill the STRUCT to a local, |
13939                   |           |                         | push the BYREF to this local |
13940                   |---------------------------------------------------------------------
13941                   | UNBOX_ANY | push a GT_OBJ of        | push the STRUCT              |
13942                   |           | the BYREF               | For Linux when the           |
13943                   |           |                         |  struct is returned in two   |
13944                   |           |                         |  registers create a temp     |
13945                   |           |                         |  which address is passed to  |
13946                   |           |                         |  the unbox_nullable helper.  |
13947                   |---------------------------------------------------------------------
13948                 */
13949
13950                 if (opcode == CEE_UNBOX)
13951                 {
13952                     if (helper == CORINFO_HELP_UNBOX_NULLABLE)
13953                     {
13954                         // Unbox nullable helper returns a struct type.
13955                         // We need to spill it to a temp so that we can take the address of it.
13956                         // Here we need an unsafe value cls check, since the address of the struct is taken to be
13957                         // used further along and could potentially be exploitable.
13958
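                        // Rough sketch of the tree built below:
                        //     COMMA( ASG(tmp, call-to-UNBOX_NULLABLE), ADDR(tmp) )   ->  TYP_BYREF
                        // i.e. assign the returned struct to a temp, then push the temp's address so
                        // that CEE_UNBOX still produces a byref as required.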
13959                         unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable"));
13960                         lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
13961
13962                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
13963                         op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
13964                         assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
13965
13966                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
13967                         op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
13968                         op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
13969                     }
13970
13971                     assert(op1->gtType == TYP_BYREF);
13972                     assert(!tiVerificationNeeded || tiRetVal.IsByRef());
13973                 }
13974                 else
13975                 {
13976                     assert(opcode == CEE_UNBOX_ANY);
13977
13978                     if (helper == CORINFO_HELP_UNBOX)
13979                     {
13980                         // Normal unbox helper returns a TYP_BYREF.
13981                         impPushOnStack(op1, tiRetVal);
13982                         oper = GT_OBJ;
13983                         goto OBJ;
13984                     }
13985
13986                     assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!");
13987
13988 #if FEATURE_MULTIREG_RET
13989
13990                     if (varTypeIsStruct(op1) && IsMultiRegReturnedType(resolvedToken.hClass))
13991                     {
13992                         // Unbox nullable helper returns a TYP_STRUCT.
13993                         // For the multi-reg case we need to spill it to a temp so that
13994                         // we can pass the address to the unbox_nullable jit helper.
13995
13996                         unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable"));
13997                         lvaTable[tmp].lvIsMultiRegArg = true;
13998                         lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
13999
14000                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14001                         op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14002                         assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14003
14004                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14005                         op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14006                         op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14007
14008                         // In this case the return value of the unbox helper is TYP_BYREF.
14009                         // Make sure the right type is placed on the operand type stack.
14010                         impPushOnStack(op1, tiRetVal);
14011
14012                         // Load the struct.
14013                         oper = GT_OBJ;
14014
14015                         assert(op1->gtType == TYP_BYREF);
14016                         assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14017
14018                         goto OBJ;
14019                     }
14020                     else
14021
14022 #endif // FEATURE_MULTIREG_RET
14023
14024                     {
14025                         // If the struct is not returned in registers, it is materialized in the RetBuf.
14026                         assert(op1->gtType == TYP_STRUCT);
14027                         tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14028                         assert(tiRetVal.IsValueClass());
14029                     }
14030                 }
14031
14032                 impPushOnStack(op1, tiRetVal);
14033             }
14034             break;
14035
14036             case CEE_BOX:
14037             {
14038                 /* Get the Class index */
14039                 assertImp(sz == sizeof(unsigned));
14040
14041                 _impResolveToken(CORINFO_TOKENKIND_Box);
14042
14043                 JITDUMP(" %08X", resolvedToken.token);
14044
14045                 if (tiVerificationNeeded)
14046                 {
14047                     typeInfo tiActual = impStackTop().seTypeInfo;
14048                     typeInfo tiBox    = verMakeTypeInfo(resolvedToken.hClass);
14049
14050                     Verify(verIsBoxable(tiBox), "boxable type expected");
14051
14052                     // check the class constraints of the boxed type in case we are boxing an uninitialized value
14053                     Verify(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
14054                            "boxed type has unsatisfied class constraints");
14055
14056                     Verify(tiCompatibleWith(tiActual, tiBox.NormaliseForStack(), true), "type mismatch");
14057
14058                     // Observation: the following code introduces a boxed value class on the stack, but,
14059                     // according to the ECMA spec, one would simply expect: tiRetVal =
14060                     // typeInfo(TI_REF,impGetObjectClass());
14061
14062                     // Push the result back on the stack;
14063                     // even if clsHnd is a value class we want the TI_REF.
14064                     // We call back to the EE to find out what type we should push (for Nullable<T> we push T).
14065                     tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(resolvedToken.hClass));
14066                 }
14067
14068                 accessAllowedResult =
14069                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14070                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14071
14072                 // Note BOX can be used on things that are not value classes, in which
14073                 // case we get a NOP.  However the verifier's view of the type on the
14074                 // stack changes (in generic code a 'T' becomes a 'boxed T')
14075                 if (!eeIsValueClass(resolvedToken.hClass))
14076                 {
14077                     verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal;
14078                     break;
14079                 }
14080
14081                 // Look ahead for unbox.any
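                // (For example, generic code such as 'return (T)(object)value;' can produce the IL pair
                //  'box !T; unbox.any !T'. When both tokens resolve to the same, non-shared class we can
                //  elide both instructions, which is what the look-ahead below does.)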
14082                 if (codeAddr + (sz + 1 + sizeof(mdToken)) <= codeEndp && codeAddr[sz] == CEE_UNBOX_ANY)
14083                 {
14084                     DWORD classAttribs = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
14085                     if (!(classAttribs & CORINFO_FLG_SHAREDINST))
14086                     {
14087                         CORINFO_RESOLVED_TOKEN unboxResolvedToken;
14088
14089                         impResolveToken(codeAddr + (sz + 1), &unboxResolvedToken, CORINFO_TOKENKIND_Class);
14090
14091                         if (unboxResolvedToken.hClass == resolvedToken.hClass)
14092                         {
14093                             // Skip the next unbox.any instruction
14094                             sz += sizeof(mdToken) + 1;
14095                             break;
14096                         }
14097                     }
14098                 }
14099
14100                 impImportAndPushBox(&resolvedToken);
14101                 if (compDonotInline())
14102                 {
14103                     return;
14104                 }
14105             }
14106             break;
14107
14108             case CEE_SIZEOF:
14109
14110                 /* Get the Class index */
14111                 assertImp(sz == sizeof(unsigned));
14112
14113                 _impResolveToken(CORINFO_TOKENKIND_Class);
14114
14115                 JITDUMP(" %08X", resolvedToken.token);
14116
14117                 if (tiVerificationNeeded)
14118                 {
14119                     tiRetVal = typeInfo(TI_INT);
14120                 }
14121
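                // 'sizeof' imports as a compile-time integer constant: e.g. an unsafe C# 'sizeof(MyStruct)'
                // (hypothetical struct) becomes GT_CNS_INT(getClassSize(MyStruct)) below, with no runtime work.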
14122                 op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass));
14123                 impPushOnStack(op1, tiRetVal);
14124                 break;
14125
14126             case CEE_CASTCLASS:
14127
14128                 /* Get the Class index */
14129
14130                 assertImp(sz == sizeof(unsigned));
14131
14132                 _impResolveToken(CORINFO_TOKENKIND_Casting);
14133
14134                 JITDUMP(" %08X", resolvedToken.token);
14135
14136                 if (!opts.IsReadyToRun())
14137                 {
14138                     op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14139                     if (op2 == nullptr)
14140                     { // compDonotInline()
14141                         return;
14142                     }
14143                 }
14144
14145                 if (tiVerificationNeeded)
14146                 {
14147                     Verify(impStackTop().seTypeInfo.IsObjRef(), "object ref expected");
14148                     // box it
14149                     tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
14150                 }
14151
14152                 accessAllowedResult =
14153                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14154                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14155
14156                 op1 = impPopStack().val;
14157
14158             /* Pop the address and create the 'checked cast' helper call */
14159
14160             // At this point we expect typeRef to contain the token, op1 to contain the value being cast,
14161             // and op2 to contain code that creates the type handle corresponding to typeRef
14162             CASTCLASS:
14163
14164 #ifdef FEATURE_READYTORUN_COMPILER
14165                 if (opts.IsReadyToRun())
14166                 {
14167                     GenTreePtr opLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST,
14168                                                                     TYP_REF, gtNewArgList(op1));
14169                     usingReadyToRunHelper = (opLookup != nullptr);
14170                     op1                   = (usingReadyToRunHelper ? opLookup : op1);
14171
14172                     if (!usingReadyToRunHelper)
14173                     {
14174                         // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14175                         // and the chkcastany call with a single call to a dynamic R2R cell that will:
14176                         //      1) Load the context
14177                         //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
14178                         //      3) Check the object on the stack for the type-cast
14179                         // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14180
14181                         op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14182                         if (op2 == nullptr)
14183                         { // compDonotInline()
14184                             return;
14185                         }
14186                     }
14187                 }
14188
14189                 if (!usingReadyToRunHelper)
14190 #endif
14191                 {
14192                     op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true);
14193                 }
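                // As with CEE_ISINST above, the non-ReadyToRun tree is roughly a call to a 'chkcast'
                // JIT helper over (class handle, object); unlike isinst, a failed cast throws
                // InvalidCastException instead of producing null.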
14194                 if (compDonotInline())
14195                 {
14196                     return;
14197                 }
14198
14199                 /* Push the result back on the stack */
14200                 impPushOnStack(op1, tiRetVal);
14201                 break;
14202
14203             case CEE_THROW:
14204
14205                 if (compIsForInlining())
14206                 {
14207                     // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
14208                     // TODO: Will this be too strict, given that we will inline many basic blocks?
14209                     // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
14210
14211                     /* Do we have just the exception on the stack ?*/
14212
14213                     if (verCurrentState.esStackDepth != 1)
14214                     {
14215                         /* if not, just don't inline the method */
14216
14217                         compInlineResult->NoteFatal(InlineObservation::CALLEE_THROW_WITH_INVALID_STACK);
14218                         return;
14219                     }
14220                 }
14221
14222                 if (tiVerificationNeeded)
14223                 {
14224                     tiRetVal = impStackTop().seTypeInfo;
14225                     Verify(tiRetVal.IsObjRef(), "object ref expected");
14226                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
14227                     {
14228                         Verify(!tiRetVal.IsThisPtr(), "throw uninitialized this");
14229                     }
14230                 }
14231
14232                 block->bbSetRunRarely(); // any block with a throw is rare
14233                 /* Pop the exception object and create the 'throw' helper call */
14234
14235                 op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, GTF_EXCEPT, gtNewArgList(impPopStack().val));
14236
14237             EVAL_APPEND:
14238                 if (verCurrentState.esStackDepth > 0)
14239                 {
14240                     impEvalSideEffects();
14241                 }
14242
14243                 assert(verCurrentState.esStackDepth == 0);
14244
14245                 goto APPEND;
14246
14247             case CEE_RETHROW:
14248
14249                 assert(!compIsForInlining());
14250
14251                 if (info.compXcptnsCount == 0)
14252                 {
14253                     BADCODE("rethrow outside catch");
14254                 }
14255
14256                 if (tiVerificationNeeded)
14257                 {
14258                     Verify(block->hasHndIndex(), "rethrow outside catch");
14259                     if (block->hasHndIndex())
14260                     {
14261                         EHblkDsc* HBtab = ehGetDsc(block->getHndIndex());
14262                         Verify(!HBtab->HasFinallyOrFaultHandler(), "rethrow in finally or fault");
14263                         if (HBtab->HasFilter())
14264                         {
14265                             // We had better be in the handler clause part, not the filter part
14266                             Verify(jitIsBetween(compCurBB->bbCodeOffs, HBtab->ebdHndBegOffs(), HBtab->ebdHndEndOffs()),
14267                                    "rethrow in filter");
14268                         }
14269                     }
14270                 }
14271
14272                 /* Create the 'rethrow' helper call */
14273
14274                 op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID, GTF_EXCEPT);
14275
14276                 goto EVAL_APPEND;
14277
14278             case CEE_INITOBJ:
14279
14280                 assertImp(sz == sizeof(unsigned));
14281
14282                 _impResolveToken(CORINFO_TOKENKIND_Class);
14283
14284                 JITDUMP(" %08X", resolvedToken.token);
14285
14286                 if (tiVerificationNeeded)
14287                 {
14288                     typeInfo tiTo    = impStackTop().seTypeInfo;
14289                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14290
14291                     Verify(tiTo.IsByRef(), "byref expected");
14292                     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
14293
14294                     Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
14295                            "type operand incompatible with type of address");
14296                 }
14297
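                // Minimal sketch of what the lines below build for 'initobj S' on a value type S:
                //     BLK<sizeof(S)>(destAddr) = 0
                // i.e. a block store of zeros over the full size of S at the popped destination address,
                // honoring any 'volatile.' prefix.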
14298                 size = info.compCompHnd->getClassSize(resolvedToken.hClass); // Size
14299                 op2  = gtNewIconNode(0);                                     // Value
14300                 op1  = impPopStack().val;                                    // Dest
14301                 op1  = gtNewBlockVal(op1, size);
14302                 op1  = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
14303                 goto SPILL_APPEND;
14304
14305             case CEE_INITBLK:
14306
14307                 if (tiVerificationNeeded)
14308                 {
14309                     Verify(false, "bad opcode");
14310                 }
14311
14312                 op3 = impPopStack().val; // Size
14313                 op2 = impPopStack().val; // Value
14314                 op1 = impPopStack().val; // Dest
14315
14316                 if (op3->IsCnsIntOrI())
14317                 {
14318                     size = (unsigned)op3->AsIntConCommon()->IconValue();
14319                     op1  = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
14320                 }
14321                 else
14322                 {
14323                     op1  = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
14324                     size = 0;
14325                 }
14326                 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
14327
14328                 goto SPILL_APPEND;
14329
14330             case CEE_CPBLK:
14331
14332                 if (tiVerificationNeeded)
14333                 {
14334                     Verify(false, "bad opcode");
14335                 }
14336                 op3 = impPopStack().val; // Size
14337                 op2 = impPopStack().val; // Src
14338                 op1 = impPopStack().val; // Dest
14339
14340                 if (op3->IsCnsIntOrI())
14341                 {
14342                     size = (unsigned)op3->AsIntConCommon()->IconValue();
14343                     op1  = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
14344                 }
14345                 else
14346                 {
14347                     op1  = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
14348                     size = 0;
14349                 }
14350                 if (op2->OperGet() == GT_ADDR)
14351                 {
14352                     op2 = op2->gtOp.gtOp1;
14353                 }
14354                 else
14355                 {
14356                     op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2);
14357                 }
14358
14359                 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, true);
14360                 goto SPILL_APPEND;
14361
14362             case CEE_CPOBJ:
14363
14364                 assertImp(sz == sizeof(unsigned));
14365
14366                 _impResolveToken(CORINFO_TOKENKIND_Class);
14367
14368                 JITDUMP(" %08X", resolvedToken.token);
14369
14370                 if (tiVerificationNeeded)
14371                 {
14372                     typeInfo tiFrom  = impStackTop().seTypeInfo;
14373                     typeInfo tiTo    = impStackTop(1).seTypeInfo;
14374                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14375
14376                     Verify(tiFrom.IsByRef(), "expected byref source");
14377                     Verify(tiTo.IsByRef(), "expected byref destination");
14378
14379                     Verify(tiCompatibleWith(tiFrom.DereferenceByRef(), tiInstr, false),
14380                            "type of source address incompatible with type operand");
14381                     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
14382                     Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
14383                            "type operand incompatible with type of destination address");
14384                 }
14385
14386                 if (!eeIsValueClass(resolvedToken.hClass))
14387                 {
14388                     op1 = impPopStack().val; // address to load from
14389
14390                     impBashVarAddrsToI(op1);
14391
14392                     assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
14393
14394                     op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
14395                     op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
14396
14397                     impPushOnStackNoType(op1);
14398                     opcode = CEE_STIND_REF;
14399                     lclTyp = TYP_REF;
14400                     goto STIND_POST_VERIFY;
14401                 }
14402
14403                 op2 = impPopStack().val; // Src
14404                 op1 = impPopStack().val; // Dest
14405                 op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0));
14406                 goto SPILL_APPEND;
14407
14408             case CEE_STOBJ:
14409             {
14410                 assertImp(sz == sizeof(unsigned));
14411
14412                 _impResolveToken(CORINFO_TOKENKIND_Class);
14413
14414                 JITDUMP(" %08X", resolvedToken.token);
14415
14416                 if (eeIsValueClass(resolvedToken.hClass))
14417                 {
14418                     lclTyp = TYP_STRUCT;
14419                 }
14420                 else
14421                 {
14422                     lclTyp = TYP_REF;
14423                 }
14424
14425                 if (tiVerificationNeeded)
14426                 {
14427
14428                     typeInfo tiPtr = impStackTop(1).seTypeInfo;
14429
14430                     // Make sure we have a good looking byref
14431                     Verify(tiPtr.IsByRef(), "pointer not byref");
14432                     Verify(!tiPtr.IsReadonlyByRef(), "write to readonly byref");
14433                     if (!tiPtr.IsByRef() || tiPtr.IsReadonlyByRef())
14434                     {
14435                         compUnsafeCastUsed = true;
14436                     }
14437
14438                     typeInfo ptrVal = DereferenceByRef(tiPtr);
14439                     typeInfo argVal = verMakeTypeInfo(resolvedToken.hClass);
14440
14441                     if (!tiCompatibleWith(impStackTop(0).seTypeInfo, NormaliseForStack(argVal), true))
14442                     {
14443                         Verify(false, "type of value incompatible with type operand");
14444                         compUnsafeCastUsed = true;
14445                     }
14446
14447                     if (!tiCompatibleWith(argVal, ptrVal, false))
14448                     {
14449                         Verify(false, "type operand incompatible with type of address");
14450                         compUnsafeCastUsed = true;
14451                     }
14452                 }
14453                 else
14454                 {
14455                     compUnsafeCastUsed = true;
14456                 }
14457
14458                 if (lclTyp == TYP_REF)
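                // Object references and primitive value classes can reuse the ordinary indirect
                // store path below; only genuine struct types need impAssignStructPtr.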
14459                 {
14460                     opcode = CEE_STIND_REF;
14461                     goto STIND_POST_VERIFY;
14462                 }
14463
14464                 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
14465                 if (impIsPrimitive(jitTyp))
14466                 {
14467                     lclTyp = JITtype2varType(jitTyp);
14468                     goto STIND_POST_VERIFY;
14469                 }
14470
14471                 op2 = impPopStack().val; // Value
14472                 op1 = impPopStack().val; // Ptr
14473
14474                 assertImp(varTypeIsStruct(op2));
14475
14476                 op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14477                 goto SPILL_APPEND;
14478             }
14479
14480             case CEE_MKREFANY:
14481
14482                 assert(!compIsForInlining());
14483
14484                 // Being lazy here. Refanys are tricky in terms of gc tracking.
14485                 // Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany.
14486
14487                 JITDUMP("disabling struct promotion because of mkrefany\n");
14488                 fgNoStructPromotion = true;
14489
14490                 oper = GT_MKREFANY;
14491                 assertImp(sz == sizeof(unsigned));
14492
14493                 _impResolveToken(CORINFO_TOKENKIND_Class);
14494
14495                 JITDUMP(" %08X", resolvedToken.token);
14496
14497                 op2 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
14498                 if (op2 == nullptr)
14499                 { // compDonotInline()
14500                     return;
14501                 }
14502
14503                 if (tiVerificationNeeded)
14504                 {
14505                     typeInfo tiPtr   = impStackTop().seTypeInfo;
14506                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14507
14508                     Verify(!verIsByRefLike(tiInstr), "mkrefany of byref-like class");
14509                     Verify(!tiPtr.IsReadonlyByRef(), "readonly byref used with mkrefany");
14510                     Verify(typeInfo::AreEquivalent(tiPtr.DereferenceByRef(), tiInstr), "type mismatch");
14511                 }
14512
14513                 accessAllowedResult =
14514                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14515                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14516
14517                 op1 = impPopStack().val;
14518
14519                 // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec.
14520                 // But JIT32 allowed it, so we continue to allow it.
14521                 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT);
14522
14523                 // MKREFANY returns a struct.  op2 is the class token.
14524                 op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2);
14525
14526                 impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass()));
14527                 break;
14528
14529             case CEE_LDOBJ:
14530             {
14531                 oper = GT_OBJ;
14532                 assertImp(sz == sizeof(unsigned));
14533
14534                 _impResolveToken(CORINFO_TOKENKIND_Class);
14535
14536                 JITDUMP(" %08X", resolvedToken.token);
14537
14538             OBJ:
14539
14540                 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14541
14542                 if (tiVerificationNeeded)
14543                 {
14544                     typeInfo tiPtr = impStackTop().seTypeInfo;
14545
14546                     // Make sure we have a byref
14547                     if (!tiPtr.IsByRef())
14548                     {
14549                         Verify(false, "pointer not byref");
14550                         compUnsafeCastUsed = true;
14551                     }
14552                     typeInfo tiPtrVal = DereferenceByRef(tiPtr);
14553
14554                     if (!tiCompatibleWith(tiPtrVal, tiRetVal, false))
14555                     {
14556                         Verify(false, "type of address incompatible with type operand");
14557                         compUnsafeCastUsed = true;
14558                     }
14559                     tiRetVal.NormaliseForStack();
14560                 }
14561                 else
14562                 {
14563                     compUnsafeCastUsed = true;
14564                 }
14565
14566                 if (eeIsValueClass(resolvedToken.hClass))
14567                 {
14568                     lclTyp = TYP_STRUCT;
14569                 }
14570                 else
14571                 {
14572                     lclTyp = TYP_REF;
14573                     opcode = CEE_LDIND_REF;
14574                     goto LDIND_POST_VERIFY;
14575                 }
14576
14577                 op1 = impPopStack().val;
14578
14579                 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL);
14580
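                // A primitive value class loads as a scalar indirection; other value classes
                // produce a GT_OBJ node that carries the class handle.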
14581                 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
14582                 if (impIsPrimitive(jitTyp))
14583                 {
14584                     op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1);
14585
14586                     // Could point anywhere, for example a boxed class static int
14587                     op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF;
14588                     assertImp(varTypeIsArithmetic(op1->gtType));
14589                 }
14590                 else
14591                 {
14592                     // OBJ returns a struct
14593                     // and an inline argument which is the class token of the loaded obj
14594                     op1 = gtNewObjNode(resolvedToken.hClass, op1);
14595                 }
14596                 op1->gtFlags |= GTF_EXCEPT;
14597
14598                 impPushOnStack(op1, tiRetVal);
14599                 break;
14600             }
14601
14602             case CEE_LDLEN:
14603                 if (tiVerificationNeeded)
14604                 {
14605                     typeInfo tiArray = impStackTop().seTypeInfo;
14606                     Verify(verIsSDArray(tiArray), "bad array");
14607                     tiRetVal = typeInfo(TI_INT);
14608                 }
14609
14610                 op1 = impPopStack().val;
14611                 if (!opts.MinOpts() && !opts.compDbgCode)
14612                 {
14613                     /* Use GT_ARR_LENGTH operator so rng check opts see this */
14614                     GenTreeArrLen* arrLen =
14615                         new (this, GT_ARR_LENGTH) GenTreeArrLen(TYP_INT, op1, offsetof(CORINFO_Array, length));
14616
14617                     /* Mark the block as containing a length expression */
14618
14619                     if (op1->gtOper == GT_LCL_VAR)
14620                     {
14621                         block->bbFlags |= BBF_HAS_IDX_LEN;
14622                     }
14623
14624                     op1 = arrLen;
14625                 }
14626                 else
14627                 {
14628                     /* Create the expression "*(array_addr + ArrLenOffs)" */
14629                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
14630                                         gtNewIconNode(offsetof(CORINFO_Array, length), TYP_I_IMPL));
14631                     op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
14632                     op1->gtFlags |= GTF_IND_ARR_LEN;
14633                 }
14634
14635                 /* An indirection will cause a GPF if the address is null */
14636                 op1->gtFlags |= GTF_EXCEPT;
14637
14638                 /* Push the result back on the stack */
14639                 impPushOnStack(op1, tiRetVal);
14640                 break;
14641
14642             case CEE_BREAK:
14643                 op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID);
14644                 goto SPILL_APPEND;
14645
14646             case CEE_NOP:
14647                 if (opts.compDbgCode)
14648                 {
14649                     op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
14650                     goto SPILL_APPEND;
14651                 }
14652                 break;
14653
14654             /******************************** NYI *******************************/
14655
14656             case 0xCC:
14657                 OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n");
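                // Note: fall through and treat the breakpoint as an invalid opcode.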
14658
14659             case CEE_ILLEGAL:
14660             case CEE_MACRO_END:
14661
14662             default:
14663                 BADCODE3("unknown opcode", ": %02X", (int)opcode);
14664         }
14665
14666         codeAddr += sz;
14667         prevOpcode = opcode;
14668
14669         prefixFlags = 0;
14670         assert(!insertLdloc || opcode == CEE_DUP);
14671     }
14672
14673     assert(!insertLdloc);
14674
14675     return;
14676 #undef _impResolveToken
14677 }
14678 #ifdef _PREFAST_
14679 #pragma warning(pop)
14680 #endif
14681
14682 // Push a local/argument tree on the operand stack
14683 void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal)
14684 {
14685     tiRetVal.NormaliseForStack();
14686
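    // If we are tracking constructor 'this' initialization and 'this' has not been initialized
    // yet, push it as an uninitialized object reference so the verifier keeps treating it as such.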
14687     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr())
14688     {
14689         tiRetVal.SetUninitialisedObjRef();
14690     }
14691
14692     impPushOnStack(op, tiRetVal);
14693 }
14694
14695 // Load a local/argument on the operand stack
14696 // lclNum is an index into lvaTable *NOT* the arg/lcl index in the IL
14697 void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, typeInfo tiRetVal)
14698 {
14699     var_types lclTyp;
14700
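    // Locals that are normalized on load are pushed with their exact (small) declared type;
    // all others are pushed with their widened stack type.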
14701     if (lvaTable[lclNum].lvNormalizeOnLoad())
14702     {
14703         lclTyp = lvaGetRealType(lclNum);
14704     }
14705     else
14706     {
14707         lclTyp = lvaGetActualType(lclNum);
14708     }
14709
14710     impPushVar(gtNewLclvNode(lclNum, lclTyp, offset), tiRetVal);
14711 }
14712
14713 // Load an argument on the operand stack
14714 // Shared by the various CEE_LDARG opcodes
14715 // ilArgNum is the argument index as specified in IL.
14716 // It will be mapped to the correct lvaTable index
14717 void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset)
14718 {
14719     Verify(ilArgNum < info.compILargsCount, "bad arg num");
14720
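    // When inlining, the argument comes from the inline candidate's argument info (via
    // impInlineFetchArg) rather than from lvaTable; otherwise map the IL argument number
    // to its lvaTable index.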
14721     if (compIsForInlining())
14722     {
14723         if (ilArgNum >= info.compArgsCount)
14724         {
14725             compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER);
14726             return;
14727         }
14728
14729         impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo),
14730                    impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo);
14731     }
14732     else
14733     {
14734         if (ilArgNum >= info.compArgsCount)
14735         {
14736             BADCODE("Bad IL");
14737         }
14738
14739         unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param
14740
14741         if (lclNum == info.compThisArg)
14742         {
14743             lclNum = lvaArg0Var;
14744         }
14745
14746         impLoadVar(lclNum, offset);
14747     }
14748 }
14749
14750 // Load a local on the operand stack
14751 // Shared by the various CEE_LDLOC opcodes
14752 // ilLclNum is the local index as specified in IL.
14753 // It will be mapped to the correct lvaTable index
14754 void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset)
14755 {
14756     if (tiVerificationNeeded)
14757     {
14758         Verify(ilLclNum < info.compMethodInfo->locals.numArgs, "bad loc num");
14759         Verify(info.compInitMem, "initLocals not set");
14760     }
14761
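    // For an inlinee, the local is fetched (allocating a temp on first use) via
    // impInlineFetchLocal; otherwise locals simply follow the method's arguments in lvaTable.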
14762     if (compIsForInlining())
14763     {
14764         if (ilLclNum >= info.compMethodInfo->locals.numArgs)
14765         {
14766             compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER);
14767             return;
14768         }
14769
14770         // Get the local type
14771         var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo;
14772
14773         typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo;
14774
14775         /* Have we allocated a temp for this local? */
14776
14777         unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp"));
14778
14779         // All vars of inlined methods should be !lvNormalizeOnLoad()
14780
14781         assert(!lvaTable[lclNum].lvNormalizeOnLoad());
14782         lclTyp = genActualType(lclTyp);
14783
14784         impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal);
14785     }
14786     else
14787     {
14788         if (ilLclNum >= info.compMethodInfo->locals.numArgs)
14789         {
14790             BADCODE("Bad IL");
14791         }
14792
14793         unsigned lclNum = info.compArgsCount + ilLclNum;
14794
14795         impLoadVar(lclNum, offset);
14796     }
14797 }
14798
14799 #ifdef _TARGET_ARM_
14800 /**************************************************************************************
14801  *
14802  *  When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the
14803  *  dst struct, because struct promotion will turn it into a float/double variable while
14804  *  the rhs will be an int/long variable. We don't generate code for assigning an int into
14805  *  a float, but nothing prevents such a tree from arising. The tree would look like:
14806  *  (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int))
14807  *
14808  *  tmpNum - the lcl dst variable num that is a struct.
14809  *  src    - the src tree assigned to the dest that is a struct/int (when varargs call.)
14810  *  hClass - the type handle for the struct variable.
14811  *
14812  *  TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play,
14813  *        however, we could do a codegen of transferring from int to float registers
14814  *        (transfer, not a cast.)
14815  *
14816  */
14817 void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTreePtr src, CORINFO_CLASS_HANDLE hClass)
14818 {
14819     if (src->gtOper == GT_CALL && src->gtCall.IsVarargs() && IsHfa(hClass))
14820     {
14821         int       hfaSlots = GetHfaCount(hClass);
14822         var_types hfaType  = GetHfaType(hClass);
14823
14824         // If we have varargs, we morph the method's return type to "int" at import time irrespective of its
14825         // original struct/float type, because the ABI calls for the return to be in integer registers.
14826         // We don't want struct promotion to replace an expression like this:
14827         //   lclFld_int = callvar_int() into lclFld_float = callvar_int();
14828         // This means an int is getting assigned to a float without a cast. Prevent the promotion.
14829         if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) ||
14830             (hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES))
14831         {
14832             // Make sure this struct type stays as struct so we can receive the call in a struct.
14833             lvaTable[tmpNum].lvIsMultiRegRet = true;
14834         }
14835     }
14836 }
14837 #endif // _TARGET_ARM_
14838
14839 #if FEATURE_MULTIREG_RET
14840 GenTreePtr Compiler::impAssignMultiRegTypeToVar(GenTreePtr op, CORINFO_CLASS_HANDLE hClass)
14841 {
14842     unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return."));
14843     impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_NONE);
14844     GenTreePtr ret = gtNewLclvNode(tmpNum, op->gtType);
14845
14846     // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
14847     ret->gtFlags |= GTF_DONT_CSE;
14848
14849     assert(IsMultiRegReturnedType(hClass));
14850
14851     // Mark the var so that fields are not promoted and stay together.
14852     lvaTable[tmpNum].lvIsMultiRegRet = true;
14853
14854     return ret;
14855 }
14856 #endif // FEATURE_MULTIREG_RET
14857
14858 // Do the import for a return instruction.
14859 // Returns false if inlining was aborted.
14860 // The opcode can be ret, or call in the case of a tail.call.
14861 bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE& opcode)
14862 {
14863     if (tiVerificationNeeded)
14864     {
14865         verVerifyThisPtrInitialised();
14866
14867         unsigned expectedStack = 0;
14868         if (info.compRetType != TYP_VOID)
14869         {
14870             typeInfo tiVal = impStackTop().seTypeInfo;
14871             typeInfo tiDeclared =
14872                 verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
14873
14874             Verify(!verIsByRefLike(tiDeclared) || verIsSafeToReturnByRef(tiVal), "byref return");
14875
14876             Verify(tiCompatibleWith(tiVal, tiDeclared.NormaliseForStack(), true), "type mismatch");
14877             expectedStack = 1;
14878         }
14879         Verify(verCurrentState.esStackDepth == expectedStack, "stack non-empty on return");
14880     }
14881
14882     GenTree*             op2       = nullptr;
14883     GenTree*             op1       = nullptr;
14884     CORINFO_CLASS_HANDLE retClsHnd = nullptr;
14885
14886     if (info.compRetType != TYP_VOID)
14887     {
14888         StackEntry se = impPopStack(retClsHnd);
14889         op2           = se.val;
14890
14891         if (!compIsForInlining())
14892         {
14893             impBashVarAddrsToI(op2);
14894             op2 = impImplicitIorI4Cast(op2, info.compRetType);
14895             op2 = impImplicitR4orR8Cast(op2, info.compRetType);
14896             assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) ||
14897                       ((op2->TypeGet() == TYP_I_IMPL) && (info.compRetType == TYP_BYREF)) ||
14898                       ((op2->TypeGet() == TYP_BYREF) && (info.compRetType == TYP_I_IMPL)) ||
14899                       (varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) ||
14900                       (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType)));
14901
14902 #ifdef DEBUG
14903             if (opts.compGcChecks && info.compRetType == TYP_REF)
14904             {
14905                 // DDB 3483  : JIT Stress: early termination of GC ref's life time in exception code path
14906                 // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with
14907                 // one-return BB.
14908
14909                 assert(op2->gtType == TYP_REF);
14910
14911                 // confirm that the argument is a GC pointer (for debugging (GC stress))
14912                 GenTreeArgList* args = gtNewArgList(op2);
14913                 op2                  = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, 0, args);
14914
14915                 if (verbose)
14916                 {
14917                     printf("\ncompGcChecks tree:\n");
14918                     gtDispTree(op2);
14919                 }
14920             }
14921 #endif
14922         }
14923         else
14924         {
14925             // inlinee's stack should be empty now.
14926             assert(verCurrentState.esStackDepth == 0);
14927
14928 #ifdef DEBUG
14929             if (verbose)
14930             {
14931                 printf("\n\n    Inlinee Return expression (before normalization)  =>\n");
14932                 gtDispTree(op2);
14933             }
14934 #endif
14935
14936             // Make sure the type matches the original call.
14937
14938             var_types returnType       = genActualType(op2->gtType);
14939             var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType;
14940             if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT))
14941             {
14942                 originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass);
14943             }
14944
14945             if (returnType != originalCallType)
14946             {
14947                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH);
14948                 return false;
14949             }
14950
14951             // Below, we are going to set impInlineInfo->retExpr to the tree with the return
14952             // expression. At this point, retExpr could already be set if there are multiple
14953             // return blocks (meaning lvaInlineeReturnSpillTemp != BAD_VAR_NUM) and one of
14954             // the other blocks already set it. If there is only a single return block,
14955             // retExpr shouldn't be set. However, this is not true if we reimport a block
14956             // with a return. In that case, retExpr will be set, then the block will be
14957             // reimported, but retExpr won't get cleared as part of setting the block to
14958             // be reimported. The reimported retExpr value should be the same, so even if
14959             // we don't unconditionally overwrite it, it shouldn't matter.
14960             if (info.compRetNativeType != TYP_STRUCT)
14961             {
14962                 // compRetNativeType is not TYP_STRUCT.
14963                 // This implies it could be either a scalar type or SIMD vector type or
14964                 // a struct type that can be normalized to a scalar type.
14965
14966                 if (varTypeIsStruct(info.compRetType))
14967                 {
14968                     noway_assert(info.compRetBuffArg == BAD_VAR_NUM);
14969                     // adjust the type away from struct to integral
14970                     // and no normalizing
14971                     op2 = impFixupStructReturnType(op2, retClsHnd);
14972                 }
14973                 else
14974                 {
14975                     // Do we have to normalize?
14976                     var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType);
14977                     if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) &&
14978                         fgCastNeeded(op2, fncRealRetType))
14979                     {
14980                         // Small-typed return values are normalized by the callee
14981                         op2 = gtNewCastNode(TYP_INT, op2, fncRealRetType);
14982                     }
14983                 }
14984
14985                 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
14986                 {
14987                     assert(info.compRetNativeType != TYP_VOID &&
14988                            (fgMoreThanOneReturnBlock() || impInlineInfo->hasPinnedLocals));
14989
14990                     // This is a bit of a workaround...
14991                     // If we are inlining a call that returns a struct, where the actual "native" return type is
14992                     // not a struct (for example, the struct is composed of exactly one int, and the native
14993                     // return type is thus an int), and the inlinee has multiple return blocks (thus,
14994                     // lvaInlineeReturnSpillTemp is != BAD_VAR_NUM, and is the index of a local var that is set
14995                     // to the *native* return type), and at least one of the return blocks is the result of
14996                     // a call, then we have a problem. The situation is like this (from a failed test case):
14997                     //
14998                     // inliner:
14999                     //      // Note: valuetype plinq_devtests.LazyTests/LIX is a struct with only a single int
15000                     //      call !!0 [mscorlib]System.Threading.LazyInitializer::EnsureInitialized<valuetype
15001                     //      plinq_devtests.LazyTests/LIX>(!!0&, bool&, object&, class [mscorlib]System.Func`1<!!0>)
15002                     //
15003                     // inlinee:
15004                     //      ...
15005                     //      ldobj      !!T                 // this gets bashed to a GT_LCL_FLD, type TYP_INT
15006                     //      ret
15007                     //      ...
15008                     //      call       !!0 System.Threading.LazyInitializer::EnsureInitializedCore<!!0>(!!0&, bool&,
15009                     //      object&, class System.Func`1<!!0>)
15010                     //      ret
15011                     //
15012                     // In the code above, when we call impFixupStructReturnType(), we will change the op2 return type
15013                     // of the inlinee return node, but we don't do that for GT_CALL nodes, which we delay until
15014                     // morphing when we call fgFixupStructReturn(). We do this, apparently, to handle nested
15015                     // inlining properly by leaving the correct type on the GT_CALL node through importing.
15016                     //
15017                     // To fix this, for this case, we temporarily change the GT_CALL node type to the
15018                     // native return type, which is what it will be set to eventually. We generate the
15019                     // assignment to the return temp, using the correct type, and then restore the GT_CALL
15020                     // node type. During morphing, the GT_CALL will get the correct, final, native return type.
15021
15022                     bool restoreType = false;
15023                     if ((op2->OperGet() == GT_CALL) && (info.compRetType == TYP_STRUCT))
15024                     {
15025                         noway_assert(op2->TypeGet() == TYP_STRUCT);
15026                         op2->gtType = info.compRetNativeType;
15027                         restoreType = true;
15028                     }
15029
15030                     impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15031                                      (unsigned)CHECK_SPILL_ALL);
15032
15033                     GenTreePtr tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, op2->TypeGet());
15034
15035                     if (restoreType)
15036                     {
15037                         op2->gtType = TYP_STRUCT; // restore it to what it was
15038                     }
15039
15040                     op2 = tmpOp2;
15041
15042 #ifdef DEBUG
15043                     if (impInlineInfo->retExpr)
15044                     {
15045                         // Some other block(s) have seen the CEE_RET first.
15046                         // Better they spilled to the same temp.
15047                         assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR);
15048                         assert(impInlineInfo->retExpr->gtLclVarCommon.gtLclNum == op2->gtLclVarCommon.gtLclNum);
15049                     }
15050 #endif
15051                 }
15052
15053 #ifdef DEBUG
15054                 if (verbose)
15055                 {
15056                     printf("\n\n    Inlinee Return expression (after normalization) =>\n");
15057                     gtDispTree(op2);
15058                 }
15059 #endif
15060
15061                 // Report the return expression
15062                 impInlineInfo->retExpr = op2;
15063             }
15064             else
15065             {
15066                 // compRetNativeType is TYP_STRUCT.
15067                 // This implies that the struct is returned via a RetBuf arg or as a multi-reg struct return.
15068
15069                 GenTreePtr iciCall = impInlineInfo->iciCall;
15070                 assert(iciCall->gtOper == GT_CALL);
15071
15072                 // Assign the inlinee return into a spill temp.
15073                 // spill temp only exists if there are multiple return points
15074                 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15075                 {
15076                     // in this case we have to insert multiple struct copies to the temp
15077                     // and the retexpr is just the temp.
15078                     assert(info.compRetNativeType != TYP_VOID);
15079                     assert(fgMoreThanOneReturnBlock() || impInlineInfo->hasPinnedLocals);
15080
15081                     impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15082                                      (unsigned)CHECK_SPILL_ALL);
15083                 }
15084
15085 #if defined(_TARGET_ARM_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15086 #if defined(_TARGET_ARM_)
15087                 // TODO-ARM64-NYI: HFA
15088                 // TODO-AMD64-Unix and TODO-ARM: once the ARM64 functionality is implemented, the
15089                 // next ifdefs could be refactored into a single method with the ifdef inside.
15090                 if (IsHfa(retClsHnd))
15091                 {
15092 // Same as !IsHfa but just don't bother with impAssignStructPtr.
15093 #else  // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15094                 ReturnTypeDesc retTypeDesc;
15095                 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15096                 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
15097
15098                 if (retRegCount != 0)
15099                 {
15100                     // If single eightbyte, the return type would have been normalized and there won't be a temp var.
15101                     // This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes -
15102                     // max allowed.)
15103                     assert(retRegCount == MAX_RET_REG_COUNT);
15104                     // Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr.
15105                     CLANG_FORMAT_COMMENT_ANCHOR;
15106 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15107
15108                     if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15109                     {
15110                         if (!impInlineInfo->retExpr)
15111                         {
15112 #if defined(_TARGET_ARM_)
15113                             impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType);
15114 #else  // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15115                             // The inlinee compiler has figured out the type of the temp already. Use it here.
15116                             impInlineInfo->retExpr =
15117                                 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
15118 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15119                         }
15120                     }
15121                     else
15122                     {
15123                         impInlineInfo->retExpr = op2;
15124                     }
15125                 }
15126                 else
15127 #elif defined(_TARGET_ARM64_)
15128                 ReturnTypeDesc retTypeDesc;
15129                 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15130                 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
15131
15132                 if (retRegCount != 0)
15133                 {
15134                     assert(!iciCall->AsCall()->HasRetBufArg());
15135                     assert(retRegCount >= 2);
15136                     if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15137                     {
15138                         if (!impInlineInfo->retExpr)
15139                         {
15140                             // The inlinee compiler has figured out the type of the temp already. Use it here.
15141                             impInlineInfo->retExpr =
15142                                 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
15143                         }
15144                     }
15145                     else
15146                     {
15147                         impInlineInfo->retExpr = op2;
15148                     }
15149                 }
15150                 else
15151 #endif // defined(_TARGET_ARM64_)
15152                 {
15153                     assert(iciCall->AsCall()->HasRetBufArg());
15154                     GenTreePtr dest = gtCloneExpr(iciCall->gtCall.gtCallArgs->gtOp.gtOp1);
15155                     // spill temp only exists if there are multiple return points
15156                     if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15157                     {
15158                         // if this is the first return we have seen set the retExpr
15159                         if (!impInlineInfo->retExpr)
15160                         {
15161                             impInlineInfo->retExpr =
15162                                 impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType),
15163                                                    retClsHnd, (unsigned)CHECK_SPILL_ALL);
15164                         }
15165                     }
15166                     else
15167                     {
15168                         impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
15169                     }
15170                 }
15171             }
15172         }
15173     }
15174
15175     if (compIsForInlining())
15176     {
15177         return true;
15178     }
15179
15180     if (info.compRetType == TYP_VOID)
15181     {
15182         // return void
15183         op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
15184     }
15185     else if (info.compRetBuffArg != BAD_VAR_NUM)
15186     {
15187         // Assign value to return buff (first param)
15188         GenTreePtr retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF, impCurStmtOffs);
15189
15190         op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
15191         impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
15192
15193         // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX).
15194         CLANG_FORMAT_COMMENT_ANCHOR;
15195
15196 #if defined(_TARGET_AMD64_)
15197
15198         // The x64 (System V and Win64) calling conventions require the implicit return
15199         // buffer to be returned explicitly (in RAX).
15200         // Change the return type to be BYREF.
15201         op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
15202 #else  // !defined(_TARGET_AMD64_)
15203         // On non-AMD64 targets the profiler hook requires the implicit RetBuf to be returned explicitly (in RAX).
15204         // In that case the return value of the function is changed to BYREF.
15205         // If the profiler hook is not needed, the return type of the function stays TYP_VOID.
15206         if (compIsProfilerHookNeeded())
15207         {
15208             op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
15209         }
15210         else
15211         {
15212             // return void
15213             op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
15214         }
15215 #endif // !defined(_TARGET_AMD64_)
15216     }
15217     else if (varTypeIsStruct(info.compRetType))
15218     {
15219 #if !FEATURE_MULTIREG_RET
15220         // For both ARM architectures the HFA native types are maintained as structs.
15221         // Also on System V AMD64 multireg struct returns are left as structs.
15222         noway_assert(info.compRetNativeType != TYP_STRUCT);
15223 #endif
15224         op2 = impFixupStructReturnType(op2, retClsHnd);
15225         // return op2
15226         op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetNativeType), op2);
15227     }
15228     else
15229     {
15230         // return op2
15231         op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2);
15232     }
15233
15234     // We must have imported a tailcall and jumped to RET
15235     if (prefixFlags & PREFIX_TAILCALL)
15236     {
15237 #ifndef _TARGET_AMD64_
15238         // Jit64 compat:
15239         // This cannot be asserted on Amd64 since we permit the following IL pattern:
15240         //      tail.call
15241         //      pop
15242         //      ret
15243         assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode));
15244 #endif
15245
15246         opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES
15247
15248         // impImportCall() would have already appended TYP_VOID calls
15249         if (info.compRetType == TYP_VOID)
15250         {
15251             return true;
15252         }
15253     }
15254
15255     impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
15256 #ifdef DEBUG
15257     // Remember at which BC offset the tree was finished
15258     impNoteLastILoffs();
15259 #endif
15260     return true;
15261 }
15262
15263 /*****************************************************************************
15264  *  Mark the block as unimported.
15265  *  Note that the caller is responsible for calling impImportBlockPending(),
15266  *  with the appropriate stack-state
15267  */
15268
15269 inline void Compiler::impReimportMarkBlock(BasicBlock* block)
15270 {
15271 #ifdef DEBUG
15272     if (verbose && (block->bbFlags & BBF_IMPORTED))
15273     {
15274         printf("\nBB%02u will be reimported\n", block->bbNum);
15275     }
15276 #endif
15277
15278     block->bbFlags &= ~BBF_IMPORTED;
15279 }
15280
15281 /*****************************************************************************
15282  *  Mark the successors of the given block as unimported.
15283  *  Note that the caller is responsible for calling impImportBlockPending()
15284  *  for all the successors, with the appropriate stack-state.
15285  */
15286
15287 void Compiler::impReimportMarkSuccessors(BasicBlock* block)
15288 {
15289     for (unsigned i = 0; i < block->NumSucc(); i++)
15290     {
15291         impReimportMarkBlock(block->GetSucc(i));
15292     }
15293 }
15294
15295 /*****************************************************************************
15296  *
15297  *  Exception filter wrapper: handle only the verification exception code that is passed in;
15298  *  all other exceptions continue the search.
15299  */
15300
15301 LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
15302 {
15303     if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION)
15304     {
15305         return EXCEPTION_EXECUTE_HANDLER;
15306     }
15307
15308     return EXCEPTION_CONTINUE_SEARCH;
15309 }
15310
15311 void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
15312 {
15313     assert(block->hasTryIndex());
15314     assert(!compIsForInlining());
15315
15316     unsigned  tryIndex = block->getTryIndex();
15317     EHblkDsc* HBtab    = ehGetDsc(tryIndex);
15318
15319     if (isTryStart)
15320     {
15321         assert(block->bbFlags & BBF_TRY_BEG);
15322
15323         // The Stack must be empty
15324         //
15325         if (block->bbStkDepth != 0)
15326         {
15327             BADCODE("Evaluation stack must be empty on entry into a try block");
15328         }
15329     }
15330
15331     // Save the stack contents, we'll need to restore it later
15332     //
15333     SavedStack blockState;
15334     impSaveStackState(&blockState, false);
15335
15336     while (HBtab != nullptr)
15337     {
15338         if (isTryStart)
15339         {
15340             // Are we verifying that an instance constructor properly initializes its 'this' pointer once?
15341             //  We do not allow the 'this' pointer to be uninitialized when entering most kinds of try regions
15342             //
15343             if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
15344             {
15345                 // We trigger an invalid program exception here unless we have a try/fault region.
15346                 //
15347                 if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter())
15348                 {
15349                     BADCODE(
15350                         "The 'this' pointer of an instance constructor is not initialized upon entry to a try region");
15351                 }
15352                 else
15353                 {
15354                     // Allow a try/fault region to proceed.
15355                     assert(HBtab->HasFaultHandler());
15356                 }
15357             }
15358
15359             /* Recursively process the handler block */
15360             BasicBlock* hndBegBB = HBtab->ebdHndBeg;
15361
15362             //  Construct the proper verification stack state
15363             //   either empty or one that contains just
15364             //   the Exception Object that we are dealing with
15365             //
15366             verCurrentState.esStackDepth = 0;
15367
15368             if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp))
15369             {
15370                 CORINFO_CLASS_HANDLE clsHnd;
15371
15372                 if (HBtab->HasFilter())
15373                 {
15374                     clsHnd = impGetObjectClass();
15375                 }
15376                 else
15377                 {
15378                     CORINFO_RESOLVED_TOKEN resolvedToken;
15379
15380                     resolvedToken.tokenContext = impTokenLookupContextHandle;
15381                     resolvedToken.tokenScope   = info.compScopeHnd;
15382                     resolvedToken.token        = HBtab->ebdTyp;
15383                     resolvedToken.tokenType    = CORINFO_TOKENKIND_Class;
15384                     info.compCompHnd->resolveToken(&resolvedToken);
15385
15386                     clsHnd = resolvedToken.hClass;
15387                 }
15388
15389                 // push catch arg on the stack, spill to a temp if necessary
15390                 // Note: can update HBtab->ebdHndBeg!
15391                 hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd);
15392             }
15393
15394             // Queue up the handler for importing
15395             //
15396             impImportBlockPending(hndBegBB);
15397
15398             if (HBtab->HasFilter())
15399             {
15400                 /* @VERIFICATION : Ideally the end of filter state should get
15401                    propagated to the catch handler, this is an incompleteness,
15402                    but is not a security/compliance issue, since the only
15403                    interesting state is the 'thisInit' state.
15404                    */
15405
15406                 verCurrentState.esStackDepth = 0;
15407
15408                 BasicBlock* filterBB = HBtab->ebdFilter;
15409
15410                 // push catch arg on the stack, spill to a temp if necessary
15411                 // Note: can update HBtab->ebdFilter!
15412                 filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass());
15413
15414                 impImportBlockPending(filterBB);
15415             }
15416         }
15417         else if (verTrackObjCtorInitState && HBtab->HasFaultHandler())
15418         {
15419             /* Recursively process the handler block */
15420
15421             verCurrentState.esStackDepth = 0;
15422
15423             // Queue up the fault handler for importing
15424             //
15425             impImportBlockPending(HBtab->ebdHndBeg);
15426         }
15427
15428         // Now process our enclosing try index (if any)
15429         //
15430         tryIndex = HBtab->ebdEnclosingTryIndex;
15431         if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
15432         {
15433             HBtab = nullptr;
15434         }
15435         else
15436         {
15437             HBtab = ehGetDsc(tryIndex);
15438         }
15439     }
15440
15441     // Restore the stack contents
15442     impRestoreStackState(&blockState);
15443 }
15444
15445 //***************************************************************
15446 // Import the instructions for the given basic block.  Perform
15447 // verification, throwing an exception on failure.  Push any successor blocks that are enabled for the first
15448 // time, or whose verification pre-state is changed.
15449
15450 #ifdef _PREFAST_
15451 #pragma warning(push)
15452 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
15453 #endif
15454 void Compiler::impImportBlock(BasicBlock* block)
15455 {
15456     // BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to
15457     // handle them specially. In particular, there is no IL to import for them, but we do need
15458     // to mark them as imported and put their successors on the pending import list.
15459     if (block->bbFlags & BBF_INTERNAL)
15460     {
15461         JITDUMP("Marking BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", block->bbNum);
15462         block->bbFlags |= BBF_IMPORTED;
15463
15464         for (unsigned i = 0; i < block->NumSucc(); i++)
15465         {
15466             impImportBlockPending(block->GetSucc(i));
15467         }
15468
15469         return;
15470     }
15471
15472     bool markImport;
15473
15474     assert(block);
15475
15476     /* Make the block globally available */
15477
15478     compCurBB = block;
15479
15480 #ifdef DEBUG
15481     /* Initialize the debug variables */
15482     impCurOpcName = "unknown";
15483     impCurOpcOffs = block->bbCodeOffs;
15484 #endif
15485
15486     /* Set the current stack state to the merged result */
15487     verResetCurrentState(block, &verCurrentState);
15488
15489     /* Now walk the code and import the IL into GenTrees */
15490
15491     struct FilterVerificationExceptionsParam
15492     {
15493         Compiler*   pThis;
15494         BasicBlock* block;
15495     };
15496     FilterVerificationExceptionsParam param;
15497
15498     param.pThis = this;
15499     param.block = block;
15500
15501     PAL_TRY(FilterVerificationExceptionsParam*, pParam, &param)
15502     {
15503         /* @VERIFICATION : For now, the only state propagation from try
15504            to its handler is the "thisInit" state (stack is empty at start of try).
15505            In general, for state that we track in verification, we need to
15506            model the possibility that an exception might happen at any IL
15507            instruction, so we really need to merge all states that obtain
15508            between IL instructions in a try block into the start states of
15509            all handlers.
15510
15511            However we do not allow the 'this' pointer to be uninitialized when
15512            entering most kinds of try regions (only try/fault are allowed to have
15513            an uninitialized this pointer on entry to the try)
15514
15515            Fortunately, the stack is thrown away when an exception
15516            leads to a handler, so we don't have to worry about that.
15517            We DO, however, have to worry about the "thisInit" state.
15518            But only for the try/fault case.
15519
15520            The only allowed transition is from TIS_Uninit to TIS_Init.
15521
15522            So for a try/fault region for the fault handler block
15523            we will merge the start state of the try begin
15524            and the post-state of each block that is part of this try region
15525         */
15526
15527         // merge the start state of the try begin
15528         //
15529         if (pParam->block->bbFlags & BBF_TRY_BEG)
15530         {
15531             pParam->pThis->impVerifyEHBlock(pParam->block, true);
15532         }
15533
15534         pParam->pThis->impImportBlockCode(pParam->block);
15535
15536         // As discussed above:
15537         // merge the post-state of each block that is part of this try region
15538         //
15539         if (pParam->block->hasTryIndex())
15540         {
15541             pParam->pThis->impVerifyEHBlock(pParam->block, false);
15542         }
15543     }
15544     PAL_EXCEPT_FILTER(FilterVerificationExceptions)
15545     {
15546         verHandleVerificationFailure(block DEBUGARG(false));
15547     }
15548     PAL_ENDTRY
15549
15550     if (compDonotInline())
15551     {
15552         return;
15553     }
15554
15555     assert(!compDonotInline());
15556
15557     markImport = false;
15558
15559 SPILLSTACK:
15560
15561     unsigned    baseTmp             = NO_BASE_TMP; // input temps assigned to successor blocks
15562     bool        reimportSpillClique = false;
15563     BasicBlock* tgtBlock            = nullptr;
15564
15565     /* If the stack is non-empty, we might have to spill its contents */
15566
15567     if (verCurrentState.esStackDepth != 0)
15568     {
15569         impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something
15570                                   // on the stack, its lifetime is hard to determine, simply
15571                                   // don't reuse such temps.
15572
15573         GenTreePtr addStmt = nullptr;
15574
15575         /* Do the successors of 'block' have any other predecessors ?
15576            We do not want to do some of the optimizations related to multiRef
15577            if we can reimport blocks */
15578
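        // Seeding multRef with all bits set makes every successor look multiply-referenced,
        // which conservatively disables those optimizations whenever reimporting is possible.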
15579         unsigned multRef = impCanReimport ? unsigned(~0) : 0;
15580
15581         switch (block->bbJumpKind)
15582         {
15583             case BBJ_COND:
15584
15585                 /* Temporarily remove the 'jtrue' from the end of the tree list */
15586
15587                 assert(impTreeLast);
15588                 assert(impTreeLast->gtOper == GT_STMT);
15589                 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_JTRUE);
15590
15591                 addStmt     = impTreeLast;
15592                 impTreeLast = impTreeLast->gtPrev;
15593
15594                 /* Note if the next block has more than one ancestor */
15595
15596                 multRef |= block->bbNext->bbRefs;
15597
15598                 /* Does the next block have temps assigned? */
15599
15600                 baseTmp  = block->bbNext->bbStkTempsIn;
15601                 tgtBlock = block->bbNext;
15602
15603                 if (baseTmp != NO_BASE_TMP)
15604                 {
15605                     break;
15606                 }
15607
15608                 /* Try the target of the jump then */
15609
15610                 multRef |= block->bbJumpDest->bbRefs;
15611                 baseTmp  = block->bbJumpDest->bbStkTempsIn;
15612                 tgtBlock = block->bbJumpDest;
15613                 break;
15614
15615             case BBJ_ALWAYS:
15616                 multRef |= block->bbJumpDest->bbRefs;
15617                 baseTmp  = block->bbJumpDest->bbStkTempsIn;
15618                 tgtBlock = block->bbJumpDest;
15619                 break;
15620
15621             case BBJ_NONE:
15622                 multRef |= block->bbNext->bbRefs;
15623                 baseTmp  = block->bbNext->bbStkTempsIn;
15624                 tgtBlock = block->bbNext;
15625                 break;
15626
15627             case BBJ_SWITCH:
15628
15629                 BasicBlock** jmpTab;
15630                 unsigned     jmpCnt;
15631
15632                 /* Temporarily remove the GT_SWITCH from the end of the tree list */
15633
15634                 assert(impTreeLast);
15635                 assert(impTreeLast->gtOper == GT_STMT);
15636                 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_SWITCH);
15637
15638                 addStmt     = impTreeLast;
15639                 impTreeLast = impTreeLast->gtPrev;
15640
15641                 jmpCnt = block->bbJumpSwt->bbsCount;
15642                 jmpTab = block->bbJumpSwt->bbsDstTab;
15643
15644                 do
15645                 {
15646                     tgtBlock = (*jmpTab);
15647
15648                     multRef |= tgtBlock->bbRefs;
15649
15650                     // Thanks to spill cliques, we should have assigned all or none
15651                     assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn));
15652                     baseTmp = tgtBlock->bbStkTempsIn;
15653                     if (multRef > 1)
15654                     {
15655                         break;
15656                     }
15657                 } while (++jmpTab, --jmpCnt);
15658
15659                 break;
15660
15661             case BBJ_CALLFINALLY:
15662             case BBJ_EHCATCHRET:
15663             case BBJ_RETURN:
15664             case BBJ_EHFINALLYRET:
15665             case BBJ_EHFILTERRET:
15666             case BBJ_THROW:
15667                 NO_WAY("can't have 'unreached' end of BB with non-empty stack");
15668                 break;
15669
15670             default:
15671                 noway_assert(!"Unexpected bbJumpKind");
15672                 break;
15673         }
15674
15675         assert(multRef >= 1);
15676
15677         /* Do we have a base temp number? */
15678
15679         bool newTemps = (baseTmp == NO_BASE_TMP);
15680
15681         if (newTemps)
15682         {
15683             /* Grab enough temps for the whole stack */
15684             baseTmp = impGetSpillTmpBase(block);
15685         }
15686
15687         /* Spill all stack entries into temps */
15688         unsigned level, tempNum;
15689
15690         JITDUMP("\nSpilling stack entries into temps\n");
15691         for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++)
15692         {
15693             GenTreePtr tree = verCurrentState.esStack[level].val;
15694
15695             /* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from
15696                the other. This should merge to a byref in unverifiable code.
15697                However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the
15698                successor would be imported assuming there was a TYP_I_IMPL on
15699                the stack. Thus the value would not get GC-tracked. Hence,
15700                change the temp to TYP_BYREF and reimport the successors.
15701                Note: We should only allow this in unverifiable code.
15702             */
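            // An illustrative (hypothetical) IL shape for the situation described above:
            //
            //         brfalse.s IL_other
            //         ldloca.s  0          // this path pushes a byref
            //         br.s      IL_join
            //     IL_other:
            //         ldc.i4.0             // this path pushes an int 0 (a "null" byref)
            //     IL_join:
            //         ...                  // the paths merge with mismatched stack types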
15703             if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL && !verNeedsVerification())
15704             {
15705                 lvaTable[tempNum].lvType = TYP_BYREF;
15706                 impReimportMarkSuccessors(block);
15707                 markImport = true;
15708             }
15709
15710 #ifdef _TARGET_64BIT_
15711             if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT)
15712             {
15713                 if (tiVerificationNeeded && tgtBlock->bbEntryState != nullptr &&
15714                     (tgtBlock->bbFlags & BBF_FAILED_VERIFICATION) == 0)
15715                 {
15716                     // Merge the current state into the entry state of block;
15717                     // the call to verMergeEntryStates must have changed
15718                     // the entry state of the block by merging the int local var
15719                     // and the native-int stack entry.
15720                     bool changed = false;
15721                     if (verMergeEntryStates(tgtBlock, &changed))
15722                     {
15723                         impRetypeEntryStateTemps(tgtBlock);
15724                         impReimportBlockPending(tgtBlock);
15725                         assert(changed);
15726                     }
15727                     else
15728                     {
15729                         tgtBlock->bbFlags |= BBF_FAILED_VERIFICATION;
15730                         break;
15731                     }
15732                 }
15733
15734                 // Some other block in the spill clique set this to "int", but now we have "native int".
15735                 // Change the type and go back to re-import any blocks that used the wrong type.
15736                 lvaTable[tempNum].lvType = TYP_I_IMPL;
15737                 reimportSpillClique      = true;
15738             }
15739             else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL)
15740             {
15741                 // Spill clique has decided this should be "native int", but this block only pushes an "int".
15742                 // Insert a sign-extension to "native int" so we match the clique.
15743                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
15744             }
15745
15746             // Consider the case where one branch left a 'byref' on the stack and the other leaves
15747             // an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same
15748             // size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64
15749             // behavior instead of asserting and then generating bad code (where we save/restore the
15750             // low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been
15751             // imported already, we need to change the type of the local and reimport the spill clique.
15752             // If the 'byref' side has already been imported, we insert a cast from int to 'native int' to match
15753             // the 'byref' size.
15754             if (!tiVerificationNeeded)
15755             {
15756                 if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT)
15757                 {
15758                     // Some other block in the spill clique set this to "int", but now we have "byref".
15759                     // Change the type and go back to re-import any blocks that used the wrong type.
15760                     lvaTable[tempNum].lvType = TYP_BYREF;
15761                     reimportSpillClique      = true;
15762                 }
15763                 else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF)
15764                 {
15765                     // Spill clique has decided this should be "byref", but this block only pushes an "int".
15766                     // Insert a sign-extension to "native int" so we match the clique size.
15767                     verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
15768                 }
15769             }
15770 #endif // _TARGET_64BIT_
15771
15772 #if FEATURE_X87_DOUBLES
15773             // X87 stack doesn't differentiate between float/double
15774             // so promoting is no big deal.
15775             // For everybody else, keep it as float until we have a collision and then promote,
15776             // just like x64's TYP_INT<->TYP_I_IMPL.
15777
15778             if (multRef > 1 && tree->gtType == TYP_FLOAT)
15779             {
15780                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
15781             }
15782
15783 #else // !FEATURE_X87_DOUBLES
15784
15785             if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT)
15786             {
15787                 // Some other block in the spill clique set this to "float", but now we have "double".
15788                 // Change the type and go back to re-import any blocks that used the wrong type.
15789                 lvaTable[tempNum].lvType = TYP_DOUBLE;
15790                 reimportSpillClique      = true;
15791             }
15792             else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE)
15793             {
15794                 // Spill clique has decided this should be "double", but this block only pushes a "float".
15795                 // Insert a cast to "double" so we match the clique.
15796                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
15797             }
15798
15799 #endif // FEATURE_X87_DOUBLES
15800
15801             /* If addStmt has a reference to tempNum (can only happen if we
15802                are spilling to the temps already used by a previous block),
15803                we need to spill addStmt */
15804
15805             if (addStmt && !newTemps && gtHasRef(addStmt->gtStmt.gtStmtExpr, tempNum, false))
15806             {
15807                 GenTreePtr addTree = addStmt->gtStmt.gtStmtExpr;
15808
15809                 if (addTree->gtOper == GT_JTRUE)
15810                 {
15811                     GenTreePtr relOp = addTree->gtOp.gtOp1;
15812                     assert(relOp->OperIsCompare());
15813
15814                     var_types type = genActualType(relOp->gtOp.gtOp1->TypeGet());
15815
15816                     if (gtHasRef(relOp->gtOp.gtOp1, tempNum, false))
15817                     {
15818                         unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1"));
15819                         impAssignTempGen(temp, relOp->gtOp.gtOp1, level);
15820                         type              = genActualType(lvaTable[temp].TypeGet());
15821                         relOp->gtOp.gtOp1 = gtNewLclvNode(temp, type);
15822                     }
15823
15824                     if (gtHasRef(relOp->gtOp.gtOp2, tempNum, false))
15825                     {
15826                         unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2"));
15827                         impAssignTempGen(temp, relOp->gtOp.gtOp2, level);
15828                         type              = genActualType(lvaTable[temp].TypeGet());
15829                         relOp->gtOp.gtOp2 = gtNewLclvNode(temp, type);
15830                     }
15831                 }
15832                 else
15833                 {
15834                     assert(addTree->gtOper == GT_SWITCH && genActualType(addTree->gtOp.gtOp1->gtType) == TYP_I_IMPL);
15835
15836                     unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH"));
15837                     impAssignTempGen(temp, addTree->gtOp.gtOp1, level);
15838                     addTree->gtOp.gtOp1 = gtNewLclvNode(temp, TYP_I_IMPL);
15839                 }
15840             }
15841
15842             /* Spill the stack entry, and replace with the temp */
15843
15844             if (!impSpillStackEntry(level, tempNum
15845 #ifdef DEBUG
15846                                     ,
15847                                     true, "Spill Stack Entry"
15848 #endif
15849                                     ))
15850             {
15851                 if (markImport)
15852                 {
15853                     BADCODE("bad stack state");
15854                 }
15855
15856                 // Oops. Something went wrong when spilling. Bad code.
15857                 verHandleVerificationFailure(block DEBUGARG(true));
15858
15859                 goto SPILLSTACK;
15860             }
15861         }
15862
15863         /* Put back the 'jtrue'/'switch' if we removed it earlier */
15864
15865         if (addStmt)
15866         {
15867             impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE);
15868         }
15869     }
15870
15871     // Some of the append/spill logic works on compCurBB
15872
15873     assert(compCurBB == block);
15874
15875     /* Save the tree list in the block */
15876     impEndTreeList(block);
15877
15878     // impEndTreeList sets BBF_IMPORTED on the block
15879     // We do *NOT* want to set it later than this because
15880     // impReimportSpillClique might clear it if this block is both a
15881     // predecessor and successor in the current spill clique
15882     assert(block->bbFlags & BBF_IMPORTED);
15883
15884     // If we had a int/native int, or float/double collision, we need to re-import
15885     if (reimportSpillClique)
15886     {
15887         // This will re-import all the successors of block (as well as each of their predecessors)
15888         impReimportSpillClique(block);
15889
15890         // For blocks that haven't been imported yet, we still need to mark them as pending import.
15891         for (unsigned i = 0; i < block->NumSucc(); i++)
15892         {
15893             BasicBlock* succ = block->GetSucc(i);
15894             if ((succ->bbFlags & BBF_IMPORTED) == 0)
15895             {
15896                 impImportBlockPending(succ);
15897             }
15898         }
15899     }
15900     else // the normal case
15901     {
15902         // otherwise just import the successors of block
15903
15904         /* Does this block jump to any other blocks? */
15905         for (unsigned i = 0; i < block->NumSucc(); i++)
15906         {
15907             impImportBlockPending(block->GetSucc(i));
15908         }
15909     }
15910 }
15911 #ifdef _PREFAST_
15912 #pragma warning(pop)
15913 #endif
15914
15915 /*****************************************************************************/
15916 //
15917 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
15918 // necessary (and ensures that it is a member of the set of BBs on the list, by setting its byte in
15919 // impPendingBlockMembers).  Merges the current verification state into the verification state of "block"
15920 // (its "pre-state").
15921
15922 void Compiler::impImportBlockPending(BasicBlock* block)
15923 {
15924 #ifdef DEBUG
15925     if (verbose)
15926     {
15927         printf("\nimpImportBlockPending for BB%02u\n", block->bbNum);
15928     }
15929 #endif
15930
15931     // We will add a block to the pending set if it has not already been imported (or needs to be re-imported),
15932     // or if it has, but merging in a predecessor's post-state changes the block's pre-state.
15933     // (When we're doing verification, we always attempt the merge to detect verification errors.)
15934
15935     // If the block has not been imported, add to pending set.
15936     bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0);
15937
15938     // Initialize bbEntryState just the first time we try to add this block to the pending list
15939     // Just because bbEntryState is NULL doesn't mean the pre-state wasn't previously set.
15940     // We use NULL to indicate the 'common' state to avoid memory allocation.
15941     if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) &&
15942         (impGetPendingBlockMember(block) == 0))
15943     {
15944         verInitBBEntryState(block, &verCurrentState);
15945         assert(block->bbStkDepth == 0);
15946         block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth);
15947         assert(addToPending);
15948         assert(impGetPendingBlockMember(block) == 0);
15949     }
15950     else
15951     {
15952         // The stack should have the same height on entry to the block from all its predecessors.
15953         if (block->bbStkDepth != verCurrentState.esStackDepth)
15954         {
15955 #ifdef DEBUG
15956             char buffer[400];
15957             sprintf_s(buffer, sizeof(buffer),
15958                       "Block at offset %4.4x to %4.4x in %s entered with different stack depths.\n"
15959                       "Previous depth was %d, current depth is %d",
15960                       block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth,
15961                       verCurrentState.esStackDepth);
15962             buffer[400 - 1] = 0;
15963             NO_WAY(buffer);
15964 #else
15965             NO_WAY("Block entered with different stack depths");
15966 #endif
15967         }
15968
15969         // Additionally, if we need to verify, merge the verification state.
15970         if (tiVerificationNeeded)
15971         {
15972             // Merge the current state into the entry state of block; if this does not change the entry state
15973             // by merging, do not add the block to the pending-list.
15974             bool changed = false;
15975             if (!verMergeEntryStates(block, &changed))
15976             {
15977                 block->bbFlags |= BBF_FAILED_VERIFICATION;
15978                 addToPending = true; // We will pop it off, and check the flag set above.
15979             }
15980             else if (changed)
15981             {
15982                 addToPending = true;
15983
15984                 JITDUMP("Adding BB%02u to pending set due to new merge result\n", block->bbNum);
15985             }
15986         }
15987
15988         if (!addToPending)
15989         {
15990             return;
15991         }
15992
15993         if (block->bbStkDepth > 0)
15994         {
15995             // We need to fix the types of any spill temps that might have changed:
15996             //   int->native int, float->double, int->byref, etc.
15997             impRetypeEntryStateTemps(block);
15998         }
15999
16000         // OK, we must add to the pending list, if it's not already in it.
16001         if (impGetPendingBlockMember(block) != 0)
16002         {
16003             return;
16004         }
16005     }
16006
16007     // Get an entry to add to the pending list
16008
16009     PendingDsc* dsc;
16010
16011     if (impPendingFree)
16012     {
16013         // We can reuse one of the freed up dscs.
16014         dsc            = impPendingFree;
16015         impPendingFree = dsc->pdNext;
16016     }
16017     else
16018     {
16019         // We have to create a new dsc
16020         dsc = new (this, CMK_Unknown) PendingDsc;
16021     }
16022
16023     dsc->pdBB                 = block;
16024     dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth;
16025     dsc->pdThisPtrInit        = verCurrentState.thisInitialized;
16026
16027     // Save the stack trees for later
16028
16029     if (verCurrentState.esStackDepth)
16030     {
16031         impSaveStackState(&dsc->pdSavedStack, false);
16032     }
16033
16034     // Add the entry to the pending list
16035
16036     dsc->pdNext    = impPendingList;
16037     impPendingList = dsc;
16038     impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16039
16040     // Various assertions require us now to consider the block as not imported (at least for
16041     // the final time...)
16042     block->bbFlags &= ~BBF_IMPORTED;
16043
16044 #ifdef DEBUG
16045     if (verbose && 0)
16046     {
16047         printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16048     }
16049 #endif
16050 }
16051
16052 /*****************************************************************************/
16053 //
16054 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16055 // necessary (and ensures that it is a member of the set of BBs on the list, by setting its byte in
16056 // impPendingBlockMembers).  Does *NOT* change the existing "pre-state" of the block.
16057
16058 void Compiler::impReimportBlockPending(BasicBlock* block)
16059 {
16060     JITDUMP("\nimpReimportBlockPending for BB%02u", block->bbNum);
16061
16062     assert(block->bbFlags & BBF_IMPORTED);
16063
16064     // OK, we must add to the pending list, if it's not already in it.
16065     if (impGetPendingBlockMember(block) != 0)
16066     {
16067         return;
16068     }
16069
16070     // Get an entry to add to the pending list
16071
16072     PendingDsc* dsc;
16073
16074     if (impPendingFree)
16075     {
16076         // We can reuse one of the freed up dscs.
16077         dsc            = impPendingFree;
16078         impPendingFree = dsc->pdNext;
16079     }
16080     else
16081     {
16082         // We have to create a new dsc
16083         dsc = new (this, CMK_ImpStack) PendingDsc;
16084     }
16085
16086     dsc->pdBB = block;
16087
16088     if (block->bbEntryState)
16089     {
16090         dsc->pdThisPtrInit        = block->bbEntryState->thisInitialized;
16091         dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth;
16092         dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack;
16093     }
16094     else
16095     {
16096         dsc->pdThisPtrInit        = TIS_Bottom;
16097         dsc->pdSavedStack.ssDepth = 0;
16098         dsc->pdSavedStack.ssTrees = nullptr;
16099     }
16100
16101     // Add the entry to the pending list
16102
16103     dsc->pdNext    = impPendingList;
16104     impPendingList = dsc;
16105     impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16106
16107     // Various assertions require us now to consider the block as not imported (at least for
16108     // the final time...)
16109     block->bbFlags &= ~BBF_IMPORTED;
16110
16111 #ifdef DEBUG
16112     if (verbose && 0)
16113     {
16114         printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16115     }
16116 #endif
16117 }
16118
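// Allocate a BlockListNode for the spill-clique walk, reusing a node from the per-compiler
// free list when one is available rather than going back to the allocator.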
16119 void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp)
16120 {
16121     if (comp->impBlockListNodeFreeList == nullptr)
16122     {
16123         return (BlockListNode*)comp->compGetMem(sizeof(BlockListNode), CMK_BasicBlock);
16124     }
16125     else
16126     {
16127         BlockListNode* res             = comp->impBlockListNodeFreeList;
16128         comp->impBlockListNodeFreeList = res->m_next;
16129         return res;
16130     }
16131 }
16132
16133 void Compiler::FreeBlockListNode(Compiler::BlockListNode* node)
16134 {
16135     node->m_next             = impBlockListNodeFreeList;
16136     impBlockListNodeFreeList = node;
16137 }
16138
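// Walk the spill clique that contains "block" (treated as a predecessor), invoking "callback"
// on each member exactly once. The walk alternates between expanding the successor set
// (successors of known predecessor members) and the predecessor set (cheap preds of known
// successor members) until neither set grows.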
16139 void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback)
16140 {
16141     bool toDo = true;
16142
16143     noway_assert(!fgComputePredsDone);
16144     if (!fgCheapPredsValid)
16145     {
16146         fgComputeCheapPreds();
16147     }
16148
16149     BlockListNode* succCliqueToDo = nullptr;
16150     BlockListNode* predCliqueToDo = new (this) BlockListNode(block);
16151     while (toDo)
16152     {
16153         toDo = false;
16154         // Look at the successors of every member of the predecessor to-do list.
16155         while (predCliqueToDo != nullptr)
16156         {
16157             BlockListNode* node = predCliqueToDo;
16158             predCliqueToDo      = node->m_next;
16159             BasicBlock* blk     = node->m_blk;
16160             FreeBlockListNode(node);
16161
16162             for (unsigned succNum = 0; succNum < blk->NumSucc(); succNum++)
16163             {
16164                 BasicBlock* succ = blk->GetSucc(succNum);
16165                 // If it's not already in the clique, add it, and also add it
16166                 // as a member of the successor "toDo" set.
16167                 if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0)
16168                 {
16169                     callback->Visit(SpillCliqueSucc, succ);
16170                     impSpillCliqueSetMember(SpillCliqueSucc, succ, 1);
16171                     succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo);
16172                     toDo           = true;
16173                 }
16174             }
16175         }
16176         // Look at the predecessors of every member of the successor to-do list.
16177         while (succCliqueToDo != nullptr)
16178         {
16179             BlockListNode* node = succCliqueToDo;
16180             succCliqueToDo      = node->m_next;
16181             BasicBlock* blk     = node->m_blk;
16182             FreeBlockListNode(node);
16183
16184             for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next)
16185             {
16186                 BasicBlock* predBlock = pred->block;
16187                 // If it's not already in the clique, add it, and also add it
16188                 // as a member of the predecessor "toDo" set.
16189                 if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0)
16190                 {
16191                     callback->Visit(SpillCliquePred, predBlock);
16192                     impSpillCliqueSetMember(SpillCliquePred, predBlock, 1);
16193                     predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo);
16194                     toDo           = true;
16195                 }
16196             }
16197         }
16198     }
16199
16200     // If this fails, it means we didn't walk the spill clique properly and somehow managed
16201     // to miss walking back to include the predecessor we started from.
16202     // The most likely cause is missing or out-of-date bbPreds.
16203     assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0);
16204 }
16205
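// Record the chosen spill temp base on a clique member: successors receive it as their incoming
// stack temp base (bbStkTempsIn), predecessors as their outgoing base (bbStkTempsOut).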
16206 void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
16207 {
16208     if (predOrSucc == SpillCliqueSucc)
16209     {
16210         assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor.
16211         blk->bbStkTempsIn = m_baseTmp;
16212     }
16213     else
16214     {
16215         assert(predOrSucc == SpillCliquePred);
16216         assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor.
16217         blk->bbStkTempsOut = m_baseTmp;
16218     }
16219 }
16220
16221 void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
16222 {
16223     // For Preds we could be a little smarter and just find the existing store
16224     // and re-type it/add a cast, but that is complicated and hopefully very rare, so
16225     // just re-import the whole block (just like we do for successors)
16226
16227     if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0))
16228     {
16229         // If we haven't imported this block and we're not going to (because it isn't on
16230         // the pending list) then just ignore it for now.
16231
16232         // This block has either never been imported (EntryState == NULL) or it failed
16233         // verification. Neither state requires us to force it to be imported now.
16234         assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION));
16235         return;
16236     }
16237
16238     // For successors we have a valid verCurrentState, so just mark them for reimport
16239     // the 'normal' way
16240     // Unlike predecessors, we *DO* need to reimport the current block because the
16241     // initial import had the wrong entry state types.
16242     // Similarly, blocks that are currently on the pending list still need to call
16243     // impImportBlockPending to fix up their entry state.
16244     if (predOrSucc == SpillCliqueSucc)
16245     {
16246         m_pComp->impReimportMarkBlock(blk);
16247
16248         // Set the current stack state to that of the blk->bbEntryState
16249         m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState);
16250         assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry());
16251
16252         m_pComp->impImportBlockPending(blk);
16253     }
16254     else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0))
16255     {
16256         // As described above, we are only visiting predecessors so they can
16257         // add the appropriate casts, since we have already done that for the current
16258         // block, it does not need to be reimported.
16259         // Nor do we need to reimport blocks that are still pending, but not yet
16260         // imported.
16261         //
16262         // For predecessors, we have no state to seed the EntryState, so we just have
16263         // to assume the existing one is correct.
16264         // If the block is also a successor, it will get the EntryState properly
16265         // updated when it is visited as a successor in the above "if" block.
16266         assert(predOrSucc == SpillCliquePred);
16267         m_pComp->impReimportBlockPending(blk);
16268     }
16269 }
16270
16271 // Re-type the incoming lclVar nodes to match the varDsc.
16272 void Compiler::impRetypeEntryStateTemps(BasicBlock* blk)
16273 {
16274     if (blk->bbEntryState != nullptr)
16275     {
16276         EntryState* es = blk->bbEntryState;
16277         for (unsigned level = 0; level < es->esStackDepth; level++)
16278         {
16279             GenTreePtr tree = es->esStack[level].val;
16280             if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD))
16281             {
16282                 unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
16283                 noway_assert(lclNum < lvaCount);
16284                 LclVarDsc* varDsc              = lvaTable + lclNum;
16285                 es->esStack[level].val->gtType = varDsc->TypeGet();
16286             }
16287         }
16288     }
16289 }
16290
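// Return the base temp number used to spill the stack at the end of "block". If the block's
// spill clique has not been assigned one yet, grab enough temps for the current stack depth
// and propagate that base to every member of the clique.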
16291 unsigned Compiler::impGetSpillTmpBase(BasicBlock* block)
16292 {
16293     if (block->bbStkTempsOut != NO_BASE_TMP)
16294     {
16295         return block->bbStkTempsOut;
16296     }
16297
16298 #ifdef DEBUG
16299     if (verbose)
16300     {
16301         printf("\n*************** In impGetSpillTmpBase(BB%02u)\n", block->bbNum);
16302     }
16303 #endif // DEBUG
16304
16305     // Otherwise, choose one, and propagate to all members of the spill clique.
16306     // Grab enough temps for the whole stack.
16307     unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries"));
16308     SetSpillTempsBase callback(baseTmp);
16309
16310     // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor
16311     // to one spill clique, and similarly can only be the successor to one spill clique.
16312     impWalkSpillCliqueFromPred(block, &callback);
16313
16314     return baseTmp;
16315 }
16316
16317 void Compiler::impReimportSpillClique(BasicBlock* block)
16318 {
16319 #ifdef DEBUG
16320     if (verbose)
16321     {
16322         printf("\n*************** In impReimportSpillClique(BB%02u)\n", block->bbNum);
16323     }
16324 #endif // DEBUG
16325
16326     // If we get here, it is because this block is already part of a spill clique
16327     // and one predecessor had an outgoing live stack slot of type int, and this
16328     // block has an outgoing live stack slot of type native int.
16329     // We need to reset these before traversal because they have already been set
16330     // by the previous walk to determine all the members of the spill clique.
16331     impInlineRoot()->impSpillCliquePredMembers.Reset();
16332     impInlineRoot()->impSpillCliqueSuccMembers.Reset();
16333
16334     ReimportSpillClique callback(this);
16335
16336     impWalkSpillCliqueFromPred(block, &callback);
16337 }
16338
16339 // Set the pre-state of "block" (which should not have a pre-state allocated) to
16340 // a copy of "srcState", cloning tree pointers as required.
16341 void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState)
16342 {
16343     if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom)
16344     {
16345         block->bbEntryState = nullptr;
16346         return;
16347     }
16348
16349     block->bbEntryState = (EntryState*)compGetMemA(sizeof(EntryState));
16350
16351     // block->bbEntryState.esRefcount = 1;
16352
16353     block->bbEntryState->esStackDepth    = srcState->esStackDepth;
16354     block->bbEntryState->thisInitialized = TIS_Bottom;
16355
16356     if (srcState->esStackDepth > 0)
16357     {
16358         block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]);
16359         unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry);
16360
16361         memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize);
16362         for (unsigned level = 0; level < srcState->esStackDepth; level++)
16363         {
16364             GenTreePtr tree                         = srcState->esStack[level].val;
16365             block->bbEntryState->esStack[level].val = gtCloneExpr(tree);
16366         }
16367     }
16368
16369     if (verTrackObjCtorInitState)
16370     {
16371         verSetThisInit(block, srcState->thisInitialized);
16372     }
16373
16374     return;
16375 }
16376
16377 void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis)
16378 {
16379     assert(tis != TIS_Bottom); // Precondition.
16380     if (block->bbEntryState == nullptr)
16381     {
16382         block->bbEntryState = new (this, CMK_Unknown) EntryState();
16383     }
16384
16385     block->bbEntryState->thisInitialized = tis;
16386 }
16387
16388 /*
16389  * Resets the current state to the state at the start of the basic block
16390  */
16391 void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState)
16392 {
16393
16394     if (block->bbEntryState == nullptr)
16395     {
16396         destState->esStackDepth    = 0;
16397         destState->thisInitialized = TIS_Bottom;
16398         return;
16399     }
16400
16401     destState->esStackDepth = block->bbEntryState->esStackDepth;
16402
16403     if (destState->esStackDepth > 0)
16404     {
16405         unsigned stackSize = destState->esStackDepth * sizeof(StackEntry);
16406
16407         memcpy(destState->esStack, block->bbStackOnEntry(), stackSize);
16408     }
16409
16410     destState->thisInitialized = block->bbThisOnEntry();
16411
16412     return;
16413 }
16414
16415 ThisInitState BasicBlock::bbThisOnEntry()
16416 {
16417     return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom;
16418 }
16419
16420 unsigned BasicBlock::bbStackDepthOnEntry()
16421 {
16422     return (bbEntryState ? bbEntryState->esStackDepth : 0);
16423 }
16424
16425 void BasicBlock::bbSetStack(void* stackBuffer)
16426 {
16427     assert(bbEntryState);
16428     assert(stackBuffer);
16429     bbEntryState->esStack = (StackEntry*)stackBuffer;
16430 }
16431
16432 StackEntry* BasicBlock::bbStackOnEntry()
16433 {
16434     assert(bbEntryState);
16435     return bbEntryState->esStack;
16436 }
16437
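// Initialize the importer's current verification/stack state for the start of the method:
// enable 'this'-initialization tracking for constructors when verifying, start with an empty
// evaluation stack, and record this state as the entry state of the first basic block.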
16438 void Compiler::verInitCurrentState()
16439 {
16440     verTrackObjCtorInitState        = FALSE;
16441     verCurrentState.thisInitialized = TIS_Bottom;
16442
16443     if (tiVerificationNeeded)
16444     {
16445         // Track this ptr initialization
16446         if (!info.compIsStatic && (info.compFlags & CORINFO_FLG_CONSTRUCTOR) && lvaTable[0].lvVerTypeInfo.IsObjRef())
16447         {
16448             verTrackObjCtorInitState        = TRUE;
16449             verCurrentState.thisInitialized = TIS_Uninit;
16450         }
16451     }
16452
16453     // initialize stack info
16454
16455     verCurrentState.esStackDepth = 0;
16456     assert(verCurrentState.esStack != nullptr);
16457
16458     // copy current state to entry state of first BB
16459     verInitBBEntryState(fgFirstBB, &verCurrentState);
16460 }
16461
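// Return the root compiler instance of the inlining tree: 'this' when not inlining,
// otherwise the compiler for the outermost caller.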
16462 Compiler* Compiler::impInlineRoot()
16463 {
16464     if (impInlineInfo == nullptr)
16465     {
16466         return this;
16467     }
16468     else
16469     {
16470         return impInlineInfo->InlineRoot;
16471     }
16472 }
16473
16474 BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk)
16475 {
16476     if (predOrSucc == SpillCliquePred)
16477     {
16478         return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd());
16479     }
16480     else
16481     {
16482         assert(predOrSucc == SpillCliqueSucc);
16483         return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd());
16484     }
16485 }
16486
16487 void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val)
16488 {
16489     if (predOrSucc == SpillCliquePred)
16490     {
16491         impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val);
16492     }
16493     else
16494     {
16495         assert(predOrSucc == SpillCliqueSucc);
16496         impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val);
16497     }
16498 }
16499
16500 /*****************************************************************************
16501  *
16502  *  Convert the instrs ("import") into our internal format (trees). The
16503  *  basic flowgraph has already been constructed and is passed in.
16504  */
16505
16506 void Compiler::impImport(BasicBlock* method)
16507 {
16508 #ifdef DEBUG
16509     if (verbose)
16510     {
16511         printf("*************** In impImport() for %s\n", info.compFullName);
16512     }
16513 #endif
16514
16515     /* Allocate the stack contents */
16516
16517     if (info.compMaxStack <= sizeof(impSmallStack) / sizeof(impSmallStack[0]))
16518     {
16519         /* Use local variable, don't waste time allocating on the heap */
16520
16521         impStkSize              = sizeof(impSmallStack) / sizeof(impSmallStack[0]);
16522         verCurrentState.esStack = impSmallStack;
16523     }
16524     else
16525     {
16526         impStkSize              = info.compMaxStack;
16527         verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
16528     }
16529
16530     // initialize the entry state at start of method
16531     verInitCurrentState();
16532
16533     // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase).
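    // Note: these membership sets are maintained on the inline root (see impSpillCliqueGetMember),
    // so only the root's copies are initialized and reset here.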
16534     Compiler* inlineRoot = impInlineRoot();
16535     if (this == inlineRoot) // These are only used on the root of the inlining tree.
16536     {
16537         // We have initialized these previously, but to size 0.  Make them larger.
16538         impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2);
16539         impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2);
16540         impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2);
16541     }
16542     inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2);
16543     inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2);
16544     inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2);
16545     impBlockListNodeFreeList = nullptr;
16546
16547 #ifdef DEBUG
16548     impLastILoffsStmt   = nullptr;
16549     impNestedStackSpill = false;
16550 #endif
16551     impBoxTemp = BAD_VAR_NUM;
16552
16553     impPendingList = impPendingFree = nullptr;
16554
16555     /* Add the entry-point to the worker-list */
16556
16557     // Skip leading internal blocks. There can be one as a leading scratch BB, and more
16558     // from EH normalization.
16559     // NOTE: It might be possible to always just put fgFirstBB on the pending list, and let everything else just fall
16560     // out.
16561     for (; method->bbFlags & BBF_INTERNAL; method = method->bbNext)
16562     {
16563         // Treat these as imported.
16564         assert(method->bbJumpKind == BBJ_NONE); // We assume all the leading ones are fallthrough.
16565         JITDUMP("Marking leading BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", method->bbNum);
16566         method->bbFlags |= BBF_IMPORTED;
16567     }
16568
16569     impImportBlockPending(method);
16570
16571     /* Import blocks in the worker-list until there are no more */
16572
16573     while (impPendingList)
16574     {
16575         /* Remove the entry at the front of the list */
16576
16577         PendingDsc* dsc = impPendingList;
16578         impPendingList  = impPendingList->pdNext;
16579         impSetPendingBlockMember(dsc->pdBB, 0);
16580
16581         /* Restore the stack state */
16582
16583         verCurrentState.thisInitialized = dsc->pdThisPtrInit;
16584         verCurrentState.esStackDepth    = dsc->pdSavedStack.ssDepth;
16585         if (verCurrentState.esStackDepth)
16586         {
16587             impRestoreStackState(&dsc->pdSavedStack);
16588         }
16589
16590         /* Add the entry to the free list for reuse */
16591
16592         dsc->pdNext    = impPendingFree;
16593         impPendingFree = dsc;
16594
16595         /* Now import the block */
16596
16597         if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION)
16598         {
16599
16600 #ifdef _TARGET_64BIT_
16601             // On AMD64, during verification we have to match JIT64 behavior since the VM is very tightly
16602             // coupled with the JIT64 IL verification logic.  Look inside the verHandleVerificationFailure
16603             // method for further explanation on why we raise this exception instead of making the jitted
16604             // code throw the verification exception during execution.
16605             if (tiVerificationNeeded && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
16606             {
16607                 BADCODE("Basic block marked as not verifiable");
16608             }
16609             else
16610 #endif // _TARGET_64BIT_
16611             {
16612                 verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true));
16613                 impEndTreeList(dsc->pdBB);
16614             }
16615         }
16616         else
16617         {
16618             impImportBlock(dsc->pdBB);
16619
16620             if (compDonotInline())
16621             {
16622                 return;
16623             }
16624             if (compIsForImportOnly() && !tiVerificationNeeded)
16625             {
16626                 return;
16627             }
16628         }
16629     }
16630
16631 #ifdef DEBUG
16632     if (verbose && info.compXcptnsCount)
16633     {
16634         printf("\nAfter impImport() added block for try,catch,finally");
16635         fgDispBasicBlocks();
16636         printf("\n");
16637     }
16638
16639     // Used in impImportBlockPending() for STRESS_CHK_REIMPORT
16640     for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
16641     {
16642         block->bbFlags &= ~BBF_VISITED;
16643     }
16644 #endif
16645
16646     assert(!compIsForInlining() || !tiVerificationNeeded);
16647 }
16648
16649 // Checks if a typeinfo (usually stored in the type stack) is a struct.
16650 // The invariant here is that if it's not a ref or a method and has a class handle
16651 // The invariant here is that if it's not a ref or a method and has a class handle,
16652 bool Compiler::impIsValueType(typeInfo* pTypeInfo)
16653 {
16654     if (pTypeInfo && pTypeInfo->IsValueClassWithClsHnd())
16655     {
16656         return true;
16657     }
16658     else
16659     {
16660         return false;
16661     }
16662 }
16663
16664 /*****************************************************************************
16665  *  Check to see if the tree is the address of a local or
16666     the address of a field in a local.
16667
16668     *lclVarTreeOut will contain the GT_LCL_VAR tree when it returns TRUE.
16669
16670  */
16671
16672 BOOL Compiler::impIsAddressInLocal(GenTreePtr tree, GenTreePtr* lclVarTreeOut)
16673 {
16674     if (tree->gtOper != GT_ADDR)
16675     {
16676         return FALSE;
16677     }
16678
16679     GenTreePtr op = tree->gtOp.gtOp1;
16680     while (op->gtOper == GT_FIELD)
16681     {
16682         op = op->gtField.gtFldObj;
16683         if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL.
16684         {
16685             op = op->gtOp.gtOp1;
16686         }
16687         else
16688         {
16689             return false;
16690         }
16691     }
16692
16693     if (op->gtOper == GT_LCL_VAR)
16694     {
16695         *lclVarTreeOut = op;
16696         return TRUE;
16697     }
16698     else
16699     {
16700         return FALSE;
16701     }
16702 }
16703
16704 //------------------------------------------------------------------------
16705 // impMakeDiscretionaryInlineObservations: make observations that help
16706 // determine the profitability of a discretionary inline
16707 //
16708 // Arguments:
16709 //    pInlineInfo -- InlineInfo for the inline, or null for the prejit root
16710 //    inlineResult -- InlineResult accumulating information about this inline
16711 //
16712 // Notes:
16713 //    If inlining or prejitting the root, this method also makes
16714 //    various observations about the method that factor into inline
16715 //    decisions. It sets `compNativeSizeEstimate` as a side effect.
16716
16717 void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult)
16718 {
16719     assert(pInlineInfo != nullptr && compIsForInlining() || // Perform the actual inlining.
16720            pInlineInfo == nullptr && !compIsForInlining()   // Calculate the static inlining hint for ngen.
16721            );
16722
16723     // If we're really inlining, we should just have one result in play.
16724     assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult));
16725
16726     // If this is a "forceinline" method, the JIT probably shouldn't have gone
16727     // to the trouble of estimating the native code size. Even if it did, it
16728     // shouldn't be relying on the result of this method.
16729     assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE);
16730
16731     // Note if the caller contains NEWOBJ or NEWARR.
16732     Compiler* rootCompiler = impInlineRoot();
16733
16734     if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0)
16735     {
16736         inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY);
16737     }
16738
16739     if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0)
16740     {
16741         inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ);
16742     }
16743
16744     bool calleeIsStatic  = (info.compFlags & CORINFO_FLG_STATIC) != 0;
16745     bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0;
16746
16747     if (isSpecialMethod)
16748     {
16749         if (calleeIsStatic)
16750         {
16751             inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR);
16752         }
16753         else
16754         {
16755             inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR);
16756         }
16757     }
16758     else if (!calleeIsStatic)
16759     {
16760         // Callee is an instance method.
16761         //
16762         // Check if the callee has the same 'this' as the root.
16763         if (pInlineInfo != nullptr)
16764         {
16765             GenTreePtr thisArg = pInlineInfo->iciCall->gtCall.gtCallObjp;
16766             assert(thisArg);
16767             bool isSameThis = impIsThis(thisArg);
16768             inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis);
16769         }
16770     }
16771
16772     // Note if the callee's class is a promotable struct
16773     if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0)
16774     {
16775         lvaStructPromotionInfo structPromotionInfo;
16776         lvaCanPromoteStructType(info.compClassHnd, &structPromotionInfo, false);
16777         if (structPromotionInfo.canPromote)
16778         {
16779             inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE);
16780         }
16781     }
16782
16783 #ifdef FEATURE_SIMD
16784
16785     // Note if this method has SIMD args or a SIMD return value
16786     if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn)
16787     {
16788         inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD);
16789     }
16790
16791 #endif // FEATURE_SIMD
16792
16793     // Roughly classify callsite frequency.
16794     InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED;
16795
16796     // If this is a prejit root, or a maximally hot block...
16797     if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->bbWeight >= BB_MAX_WEIGHT))
16798     {
16799         frequency = InlineCallsiteFrequency::HOT;
16800     }
16801     // No training data.  Look for loop-like things.
16802     // We consider a recursive call loop-like.  Do not give the inlining boost to the method itself.
16803     // However, give it to things nearby.
16804     else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) &&
16805              (pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle))
16806     {
16807         frequency = InlineCallsiteFrequency::LOOP;
16808     }
16809     else if ((pInlineInfo->iciBlock->bbFlags & BBF_PROF_WEIGHT) && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT))
16810     {
16811         frequency = InlineCallsiteFrequency::WARM;
16812     }
16813     // Now modify the multiplier based on where we're called from.
16814     else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR))
16815     {
16816         frequency = InlineCallsiteFrequency::RARE;
16817     }
16818     else
16819     {
16820         frequency = InlineCallsiteFrequency::BORING;
16821     }
16822
16823     // Also capture the block weight of the call site.  In the prejit
16824     // root case, assume there's some hot call site for this method.
16825     unsigned weight = 0;
16826
16827     if (pInlineInfo != nullptr)
16828     {
16829         weight = pInlineInfo->iciBlock->bbWeight;
16830     }
16831     else
16832     {
16833         weight = BB_MAX_WEIGHT;
16834     }
16835
16836     inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency));
16837     inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, static_cast<int>(weight));
16838 }
16839
16840 /*****************************************************************************
16841  This method makes a STATIC inlining decision based on the IL code.
16842  It should not make any inlining decision based on the context.
16843  If forceInline is true, then the inlining decision should not depend on
16844  performance heuristics (code size, etc.).
16845  */
16846
16847 void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
16848                               CORINFO_METHOD_INFO*  methInfo,
16849                               bool                  forceInline,
16850                               InlineResult*         inlineResult)
16851 {
16852     unsigned codeSize = methInfo->ILCodeSize;
16853
16854     // We shouldn't have made up our minds yet...
16855     assert(!inlineResult->IsDecided());
16856
16857     if (methInfo->EHcount)
16858     {
16859         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH);
16860         return;
16861     }
16862
16863     if ((methInfo->ILCode == nullptr) || (codeSize == 0))
16864     {
16865         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY);
16866         return;
16867     }
16868
16869     // For now we don't inline varargs (import code can't handle it)
16870
16871     if (methInfo->args.isVarArg())
16872     {
16873         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
16874         return;
16875     }
16876
16877     // Reject if it has too many locals.
16878     // This is currently an implementation limit due to fixed-size arrays in the
16879     // inline info, rather than a performance heuristic.
16880
16881     inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs);
16882
16883     if (methInfo->locals.numArgs > MAX_INL_LCLS)
16884     {
16885         inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS);
16886         return;
16887     }
16888
16889     // Make sure there aren't too many arguments.
16890     // This is currently an implementation limit due to fixed-size arrays in the
16891     // inline info, rather than a performance heuristic.
16892
16893     inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs);
16894
16895     if (methInfo->args.numArgs > MAX_INL_ARGS)
16896     {
16897         inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS);
16898         return;
16899     }
16900
16901     // Note force inline state
16902
16903     inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline);
16904
16905     // Note IL code size
16906
16907     inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize);
16908
16909     if (inlineResult->IsFailure())
16910     {
16911         return;
16912     }
16913
16914     // Make sure maxstack is not too big
16915
16916     inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack);
16917
16918     if (inlineResult->IsFailure())
16919     {
16920         return;
16921     }
16922 }
16923
16924 /*****************************************************************************
16925  */
16926
16927 void Compiler::impCheckCanInline(GenTreePtr             call,
16928                                  CORINFO_METHOD_HANDLE  fncHandle,
16929                                  unsigned               methAttr,
16930                                  CORINFO_CONTEXT_HANDLE exactContextHnd,
16931                                  InlineCandidateInfo**  ppInlineCandidateInfo,
16932                                  InlineResult*          inlineResult)
16933 {
16934     // Either EE or JIT might throw exceptions below.
16935     // If that happens, just don't inline the method.
16936
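    // The real work happens in a captureless lambda run under eeRunWithErrorTrap below, so all
    // inputs and outputs are passed through this Param struct rather than being captured.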
16937     struct Param
16938     {
16939         Compiler*              pThis;
16940         GenTreePtr             call;
16941         CORINFO_METHOD_HANDLE  fncHandle;
16942         unsigned               methAttr;
16943         CORINFO_CONTEXT_HANDLE exactContextHnd;
16944         InlineResult*          result;
16945         InlineCandidateInfo**  ppInlineCandidateInfo;
16946     } param = {nullptr};
16947
16948     param.pThis                 = this;
16949     param.call                  = call;
16950     param.fncHandle             = fncHandle;
16951     param.methAttr              = methAttr;
16952     param.exactContextHnd       = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle);
16953     param.result                = inlineResult;
16954     param.ppInlineCandidateInfo = ppInlineCandidateInfo;
16955
16956     bool success = eeRunWithErrorTrap<Param>(
16957         [](Param* pParam) {
16958             DWORD                  dwRestrictions = 0;
16959             CorInfoInitClassResult initClassResult;
16960
16961 #ifdef DEBUG
16962             const char* methodName;
16963             const char* className;
16964             methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className);
16965
16966             if (JitConfig.JitNoInline())
16967             {
16968                 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE);
16969                 goto _exit;
16970             }
16971 #endif
16972
16973             /* Try to get the code address/size for the method */
16974
16975             CORINFO_METHOD_INFO methInfo;
16976             if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo))
16977             {
16978                 pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO);
16979                 goto _exit;
16980             }
16981
16982             bool forceInline;
16983             forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE);
16984
16985             pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result);
16986
16987             if (pParam->result->IsFailure())
16988             {
16989                 assert(pParam->result->IsNever());
16990                 goto _exit;
16991             }
16992
16993             // Speculatively check if initClass() can be done.
16994             // If it can be done, we will try to inline the method. If inlining
16995             // succeeds, then we will do the non-speculative initClass() and commit it.
16996             // If this speculative call to initClass() fails, there is no point
16997             // trying to inline this method.
16998             initClassResult =
16999                 pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */,
17000                                                            pParam->exactContextHnd /* context */,
17001                                                            TRUE /* speculative */);
17002
17003             if (initClassResult & CORINFO_INITCLASS_DONT_INLINE)
17004             {
17005                 pParam->result->NoteFatal(InlineObservation::CALLSITE_CLASS_INIT_FAILURE_SPEC);
17006                 goto _exit;
17007             }
17008
17009             // Give the EE the final say on whether to inline or not.
17010             // This check should be done last since, for verifiable code, it can be expensive.
17011
17012             /* VM Inline check also ensures that the method is verifiable if needed */
17013             CorInfoInline vmResult;
17014             vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle,
17015                                                                   &dwRestrictions);
17016
17017             if (vmResult == INLINE_FAIL)
17018             {
17019                 pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE);
17020             }
17021             else if (vmResult == INLINE_NEVER)
17022             {
17023                 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE);
17024             }
17025
17026             if (pParam->result->IsFailure())
17027             {
17028                 // Make sure not to report this one.  It was already reported by the VM.
17029                 pParam->result->SetReported();
17030                 goto _exit;
17031             }
17032
17033             // check for unsupported inlining restrictions
17034             assert((dwRestrictions & ~(INLINE_RESPECT_BOUNDARY | INLINE_NO_CALLEE_LDSTR | INLINE_SAME_THIS)) == 0);
17035
17036             if (dwRestrictions & INLINE_SAME_THIS)
17037             {
17038                 GenTreePtr thisArg = pParam->call->gtCall.gtCallObjp;
17039                 assert(thisArg);
17040
17041                 if (!pParam->pThis->impIsThis(thisArg))
17042                 {
17043                     pParam->result->NoteFatal(InlineObservation::CALLSITE_REQUIRES_SAME_THIS);
17044                     goto _exit;
17045                 }
17046             }
17047
17048             /* Get the method properties */
17049
17050             CORINFO_CLASS_HANDLE clsHandle;
17051             clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle);
17052             unsigned clsAttr;
17053             clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle);
17054
17055             /* Get the return type */
17056
17057             var_types fncRetType;
17058             fncRetType = pParam->call->TypeGet();
17059
17060 #ifdef DEBUG
17061             var_types fncRealRetType;
17062             fncRealRetType = JITtype2varType(methInfo.args.retType);
17063
17064             assert((genActualType(fncRealRetType) == genActualType(fncRetType)) ||
17065                    // <BUGNUM> VSW 288602 </BUGNUM>
17066                    // In case of IJW, we allow to assign a native pointer to a BYREF.
17067                    (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) ||
17068                    (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT)));
17069 #endif
17070
17071             //
17072             // Allocate an InlineCandidateInfo structure
17073             //
17074             InlineCandidateInfo* pInfo;
17075             pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo;
17076
17077             pInfo->dwRestrictions  = dwRestrictions;
17078             pInfo->methInfo        = methInfo;
17079             pInfo->methAttr        = pParam->methAttr;
17080             pInfo->clsHandle       = clsHandle;
17081             pInfo->clsAttr         = clsAttr;
17082             pInfo->fncRetType      = fncRetType;
17083             pInfo->exactContextHnd = pParam->exactContextHnd;
17084             pInfo->ilCallerHandle  = pParam->pThis->info.compMethodHnd;
17085             pInfo->initClassResult = initClassResult;
17086
17087             *(pParam->ppInlineCandidateInfo) = pInfo;
17088
17089         _exit:;
17090         },
17091         &param);
17092     if (!success)
17093     {
17094         param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR);
17095     }
17096 }
17097
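//------------------------------------------------------------------------
// impInlineRecordArgInfo: record information about one argument to an
//    inline candidate call site.
//
// Arguments:
//    pInlineInfo  - inline info for the inline candidate
//    curArgVal    - tree for the caller-supplied value of this argument
//    argNum       - index of this argument ('this', if present, is argument 0)
//    inlineResult - result object updated with any fatal observations
//
// Notes:
//    Notes whether the argument is invariant, a local var, a byref to a
//    struct local, or has side effects / global references, and aborts the
//    inline attempt for mkrefany arguments and for a constant null 'this'.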
17098 void Compiler::impInlineRecordArgInfo(InlineInfo*   pInlineInfo,
17099                                       GenTreePtr    curArgVal,
17100                                       unsigned      argNum,
17101                                       InlineResult* inlineResult)
17102 {
17103     InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum];
17104
17105     if (curArgVal->gtOper == GT_MKREFANY)
17106     {
17107         inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY);
17108         return;
17109     }
17110
17111     inlCurArgInfo->argNode = curArgVal;
17112
17113     GenTreePtr lclVarTree;
17114     if (impIsAddressInLocal(curArgVal, &lclVarTree) && varTypeIsStruct(lclVarTree))
17115     {
17116         inlCurArgInfo->argIsByRefToStructLocal = true;
17117 #ifdef FEATURE_SIMD
17118         if (lvaTable[lclVarTree->AsLclVarCommon()->gtLclNum].lvSIMDType)
17119         {
17120             pInlineInfo->hasSIMDTypeArgLocalOrReturn = true;
17121         }
17122 #endif // FEATURE_SIMD
17123     }
17124
17125     if (curArgVal->gtFlags & GTF_ALL_EFFECT)
17126     {
17127         inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0;
17128         inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0;
17129     }
17130
17131     if (curArgVal->gtOper == GT_LCL_VAR)
17132     {
17133         inlCurArgInfo->argIsLclVar = true;
17134
17135         /* Remember the "original" argument number */
17136         curArgVal->gtLclVar.gtLclILoffs = argNum;
17137     }
17138
17139     if ((curArgVal->OperKind() & GTK_CONST) ||
17140         ((curArgVal->gtOper == GT_ADDR) && (curArgVal->gtOp.gtOp1->gtOper == GT_LCL_VAR)))
17141     {
17142         inlCurArgInfo->argIsInvariant = true;
17143         if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->gtIntCon.gtIconVal == 0))
17144         {
17145             /* Abort, but do not mark as not inlinable */
17146             inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS);
17147             return;
17148         }
17149     }
17150
17151     if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal))
17152     {
17153         inlCurArgInfo->argHasLdargaOp = true;
17154     }
17155
17156 #ifdef DEBUG
17157     if (verbose)
17158     {
17159         if (inlCurArgInfo->argIsThis)
17160         {
17161             printf("thisArg:");
17162         }
17163         else
17164         {
17165             printf("\nArgument #%u:", argNum);
17166         }
17167         if (inlCurArgInfo->argIsLclVar)
17168         {
17169             printf(" is a local var");
17170         }
17171         if (inlCurArgInfo->argIsInvariant)
17172         {
17173             printf(" is a constant");
17174         }
17175         if (inlCurArgInfo->argHasGlobRef)
17176         {
17177             printf(" has global refs");
17178         }
17179         if (inlCurArgInfo->argHasSideEff)
17180         {
17181             printf(" has side effects");
17182         }
17183         if (inlCurArgInfo->argHasLdargaOp)
17184         {
17185             printf(" has ldarga effect");
17186         }
17187         if (inlCurArgInfo->argHasStargOp)
17188         {
17189             printf(" has starg effect");
17190         }
17191         if (inlCurArgInfo->argIsByRefToStructLocal)
17192         {
17193             printf(" is byref to a struct local");
17194         }
17195
17196         printf("\n");
17197         gtDispTree(curArgVal);
17198         printf("\n");
17199     }
17200 #endif
17201 }
17202
17203 /*****************************************************************************
17204  *
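 *  impInlineInitVars -- set up the InlArgInfo and InlLclVarInfo tables for
 *  an inline candidate: record each actual argument, derive argument and
 *  local variable types from the callee signature, and note observations
 *  (pinned locals, GC structs, SIMD types) that may affect the inline.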
17205  */
17206
17207 void Compiler::impInlineInitVars(InlineInfo* pInlineInfo)
17208 {
17209     assert(!compIsForInlining());
17210
17211     GenTreePtr           call         = pInlineInfo->iciCall;
17212     CORINFO_METHOD_INFO* methInfo     = &pInlineInfo->inlineCandidateInfo->methInfo;
17213     unsigned             clsAttr      = pInlineInfo->inlineCandidateInfo->clsAttr;
17214     InlArgInfo*          inlArgInfo   = pInlineInfo->inlArgInfo;
17215     InlLclVarInfo*       lclVarInfo   = pInlineInfo->lclVarInfo;
17216     InlineResult*        inlineResult = pInlineInfo->inlineResult;
17217
17218     const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo);
17219
17220     /* Init the argument struct */
17221
17222     memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0]));
17223
17224     /* Get hold of the 'this' pointer and the argument list proper */
17225
17226     GenTreePtr thisArg = call->gtCall.gtCallObjp;
17227     GenTreePtr argList = call->gtCall.gtCallArgs;
17228     unsigned   argCnt  = 0; // Count of the arguments
17229
17230     assert((methInfo->args.hasThis()) == (thisArg != nullptr));
17231
17232     if (thisArg)
17233     {
17234         inlArgInfo[0].argIsThis = true;
17235
17236         impInlineRecordArgInfo(pInlineInfo, thisArg, argCnt, inlineResult);
17237
17238         if (inlineResult->IsFailure())
17239         {
17240             return;
17241         }
17242
17243         /* Increment the argument count */
17244         argCnt++;
17245     }
17246
17247     /* Record some information about each of the arguments */
17248     bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0;
17249
17250 #if USER_ARGS_COME_LAST
17251     unsigned typeCtxtArg = thisArg ? 1 : 0;
17252 #else  // USER_ARGS_COME_LAST
17253     unsigned typeCtxtArg = methInfo->args.totalILArgs();
17254 #endif // USER_ARGS_COME_LAST
17255
17256     for (GenTreePtr argTmp = argList; argTmp; argTmp = argTmp->gtOp.gtOp2)
17257     {
17258         if (argTmp == argList && hasRetBuffArg)
17259         {
17260             continue;
17261         }
17262
17263         // Ignore the type context argument
17264         if (hasTypeCtxtArg && (argCnt == typeCtxtArg))
17265         {
17266             typeCtxtArg = 0xFFFFFFFF;
17267             continue;
17268         }
17269
17270         assert(argTmp->gtOper == GT_LIST);
17271         GenTreePtr argVal = argTmp->gtOp.gtOp1;
17272
17273         impInlineRecordArgInfo(pInlineInfo, argVal, argCnt, inlineResult);
17274
17275         if (inlineResult->IsFailure())
17276         {
17277             return;
17278         }
17279
17280         /* Increment the argument count */
17281         argCnt++;
17282     }
17283
17284     /* Make sure we got the arg number right */
17285     assert(argCnt == methInfo->args.totalILArgs());
17286
17287 #ifdef FEATURE_SIMD
17288     bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn;
17289 #endif // FEATURE_SIMD
17290
17291     /* We have typeless opcodes, get type information from the signature */
17292
17293     if (thisArg)
17294     {
17295         var_types sigType;
17296
17297         if (clsAttr & CORINFO_FLG_VALUECLASS)
17298         {
17299             sigType = TYP_BYREF;
17300         }
17301         else
17302         {
17303             sigType = TYP_REF;
17304         }
17305
17306         lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle);
17307         lclVarInfo[0].lclHasLdlocaOp = false;
17308
17309 #ifdef FEATURE_SIMD
17310         // We always want to check isSIMDClass, since we want to set foundSIMDType (to increase
17311         // the inlining multiplier) for anything in that assembly.
17312         // But we only need to normalize it if it is a TYP_STRUCT
17313         // (which we need to do even if we have already set foundSIMDType).
17314         if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
17315         {
17316             if (sigType == TYP_STRUCT)
17317             {
17318                 sigType = impNormStructType(lclVarInfo[0].lclVerTypeInfo.GetClassHandle());
17319             }
17320             foundSIMDType = true;
17321         }
17322 #endif // FEATURE_SIMD
17323         lclVarInfo[0].lclTypeInfo = sigType;
17324
17325         assert(varTypeIsGC(thisArg->gtType) ||   // "this" is managed
17326                (thisArg->gtType == TYP_I_IMPL && // "this" is unmanaged but the method's class doesn't care
17327                 (clsAttr & CORINFO_FLG_VALUECLASS)));
17328
17329         if (genActualType(thisArg->gtType) != genActualType(sigType))
17330         {
17331             if (sigType == TYP_REF)
17332             {
17333                 /* The argument cannot be bashed into a ref (see bug 750871) */
17334                 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF);
17335                 return;
17336             }
17337
17338             /* This can only happen with byrefs <-> ints/shorts */
17339
17340             assert(genActualType(sigType) == TYP_I_IMPL || sigType == TYP_BYREF);
17341             assert(genActualType(thisArg->gtType) == TYP_I_IMPL || thisArg->gtType == TYP_BYREF);
17342
17343             if (sigType == TYP_BYREF)
17344             {
17345                 lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17346             }
17347             else if (thisArg->gtType == TYP_BYREF)
17348             {
17349                 assert(sigType == TYP_I_IMPL);
17350
17351                 /* If possible change the BYREF to an int */
17352                 if (thisArg->IsVarAddr())
17353                 {
17354                     thisArg->gtType              = TYP_I_IMPL;
17355                     lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17356                 }
17357                 else
17358                 {
17359                     /* Arguments 'int <- byref' cannot be bashed */
17360                     inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
17361                     return;
17362                 }
17363             }
17364         }
17365     }
17366
17367     /* Init the types of the arguments and make sure the types
17368      * from the trees match the types in the signature */
17369
17370     CORINFO_ARG_LIST_HANDLE argLst;
17371     argLst = methInfo->args.args;
17372
17373     unsigned i;
17374     for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst))
17375     {
17376         var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args);
17377
17378         lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst);
17379
17380 #ifdef FEATURE_SIMD
17381         if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
17382         {
17383             // If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've
17384             // found a SIMD type, even if this may not be a type we recognize (the assumption is that
17385             // it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier).
17386             foundSIMDType = true;
17387             if (sigType == TYP_STRUCT)
17388             {
17389                 var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle());
17390                 sigType              = structType;
17391             }
17392         }
17393 #endif // FEATURE_SIMD
17394
17395         lclVarInfo[i].lclTypeInfo    = sigType;
17396         lclVarInfo[i].lclHasLdlocaOp = false;
17397
17398         /* Does the tree type match the signature type? */
17399
17400         GenTreePtr inlArgNode = inlArgInfo[i].argNode;
17401
17402         if (sigType != inlArgNode->gtType)
17403         {
17404             /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints,
17405                but in bad IL cases with caller-callee signature mismatches we can see other types.
17406                Intentionally reject such mismatched cases so that the jit remains robust when
17407                encountering bad IL. */
17408
17409             bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) ||
17410                                         (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) ||
17411                                         (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType));
17412
17413             if (!isPlausibleTypeMatch)
17414             {
17415                 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE);
17416                 return;
17417             }
17418
17419             /* Is it a narrowing or widening cast?
17420              * Widening casts are ok since the value computed is already
17421              * normalized to an int (on the IL stack) */
17422
17423             if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType))
17424             {
17425                 if (sigType == TYP_BYREF)
17426                 {
17427                     lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17428                 }
17429                 else if (inlArgNode->gtType == TYP_BYREF)
17430                 {
17431                     assert(varTypeIsIntOrI(sigType));
17432
17433                     /* If possible bash the BYREF to an int */
17434                     if (inlArgNode->IsVarAddr())
17435                     {
17436                         inlArgNode->gtType           = TYP_I_IMPL;
17437                         lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17438                     }
17439                     else
17440                     {
17441                         /* Arguments 'int <- byref' cannot be changed */
17442                         inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
17443                         return;
17444                     }
17445                 }
17446                 else if (genTypeSize(sigType) < EA_PTRSIZE)
17447                 {
17448                     /* Narrowing cast */
17449
17450                     if (inlArgNode->gtOper == GT_LCL_VAR &&
17451                         !lvaTable[inlArgNode->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad() &&
17452                         sigType == lvaGetRealType(inlArgNode->gtLclVarCommon.gtLclNum))
17453                     {
17454                         /* We don't need to insert a cast here as the variable
17455                            was assigned a normalized value of the right type */
17456
17457                         continue;
17458                     }
17459
17460                     inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(TYP_INT, inlArgNode, sigType);
17461
17462                     inlArgInfo[i].argIsLclVar = false;
17463
17464                     /* Try to fold the node in case we have constant arguments */
17465
17466                     if (inlArgInfo[i].argIsInvariant)
17467                     {
17468                         inlArgNode            = gtFoldExprConst(inlArgNode);
17469                         inlArgInfo[i].argNode = inlArgNode;
17470                         assert(inlArgNode->OperIsConst());
17471                     }
17472                 }
17473 #ifdef _TARGET_64BIT_
17474                 else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType))
17475                 {
17476                     // This should only happen for int -> native int widening
17477                     inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(genActualType(sigType), inlArgNode, sigType);
17478
17479                     inlArgInfo[i].argIsLclVar = false;
17480
17481                     /* Try to fold the node in case we have constant arguments */
17482
17483                     if (inlArgInfo[i].argIsInvariant)
17484                     {
17485                         inlArgNode            = gtFoldExprConst(inlArgNode);
17486                         inlArgInfo[i].argNode = inlArgNode;
17487                         assert(inlArgNode->OperIsConst());
17488                     }
17489                 }
17490 #endif // _TARGET_64BIT_
17491             }
17492         }
17493     }
17494
17495     /* Init the types of the local variables */
17496
17497     CORINFO_ARG_LIST_HANDLE localsSig;
17498     localsSig = methInfo->locals.args;
17499
17500     for (i = 0; i < methInfo->locals.numArgs; i++)
17501     {
17502         bool      isPinned;
17503         var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned);
17504
17505         lclVarInfo[i + argCnt].lclHasLdlocaOp = false;
17506         lclVarInfo[i + argCnt].lclIsPinned    = isPinned;
17507         lclVarInfo[i + argCnt].lclTypeInfo    = type;
17508
17509         if (isPinned)
17510         {
17511             // Pinned locals may cause inlines to fail.
17512             inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS);
17513             if (inlineResult->IsFailure())
17514             {
17515                 return;
17516             }
17517         }
17518
17519         lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig);
17520
17521         // If this local is a struct type with GC fields, inform the inliner. It may choose to bail
17522         // out on the inline.
17523         if (type == TYP_STRUCT)
17524         {
17525             CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle();
17526             DWORD                typeFlags = info.compCompHnd->getClassAttribs(lclHandle);
17527             if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
17528             {
17529                 inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
17530                 if (inlineResult->IsFailure())
17531                 {
17532                     return;
17533                 }
17534
17535                 // Do further notification in the case where the call site is rare; some policies do
17536                 // not track the relative hotness of call sites for "always" inline cases.
17537                 if (pInlineInfo->iciBlock->isRunRarely())
17538                 {
17539                     inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
17540                     if (inlineResult->IsFailure())
17541                     {
17542
17543                         return;
17544                     }
17545                 }
17546             }
17547         }
17548
17549         localsSig = info.compCompHnd->getArgNext(localsSig);
17550
17551 #ifdef FEATURE_SIMD
17552         if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo)))
17553         {
17554             foundSIMDType = true;
17555             if (featureSIMD && type == TYP_STRUCT)
17556             {
17557                 var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle());
17558                 lclVarInfo[i + argCnt].lclTypeInfo = structType;
17559             }
17560         }
17561 #endif // FEATURE_SIMD
17562     }
17563
17564 #ifdef FEATURE_SIMD
17565     if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDClass(call->AsCall()->gtRetClsHnd))
17566     {
17567         foundSIMDType = true;
17568     }
17569     pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType;
17570 #endif // FEATURE_SIMD
17571 }
17572
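//------------------------------------------------------------------------
// impInlineFetchLocal: map an inlinee local variable to an inliner temp,
//    allocating the temp lazily on first use.
//
// Arguments:
//    lclNum - the inlinee's local variable number
//    reason - debug-only description of why the temp is being created
//
// Return Value:
//    The inliner's local number for the temp that stands in for the
//    inlinee local.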
17573 unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason))
17574 {
17575     assert(compIsForInlining());
17576
17577     unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum];
17578
17579     if (tmpNum == BAD_VAR_NUM)
17580     {
17581         var_types lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
17582
17583         // The lifetime of this local might span multiple BBs.
17584         // So it is a long lifetime local.
17585         impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason));
17586
17587         lvaTable[tmpNum].lvType = lclTyp;
17588         if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclHasLdlocaOp)
17589         {
17590             lvaTable[tmpNum].lvHasLdAddrOp = 1;
17591         }
17592
17593         if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclIsPinned)
17594         {
17595             lvaTable[tmpNum].lvPinned = 1;
17596
17597             if (!impInlineInfo->hasPinnedLocals)
17598             {
17599                 // If the inlinee returns a value, use a spill temp
17600                 // for the return value to ensure that even in case
17601                 // where the return expression refers to one of the
17602                 // pinned locals, we can unpin the local right after
17603                 // the inlined method body.
17604                 if ((info.compRetNativeType != TYP_VOID) && (lvaInlineeReturnSpillTemp == BAD_VAR_NUM))
17605                 {
17606                     lvaInlineeReturnSpillTemp =
17607                         lvaGrabTemp(false DEBUGARG("Inline candidate pinned local return spill temp"));
17608                     lvaTable[lvaInlineeReturnSpillTemp].lvType = info.compRetNativeType;
17609                 }
17610             }
17611
17612             impInlineInfo->hasPinnedLocals = true;
17613         }
17614
17615         if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo.IsStruct())
17616         {
17617             if (varTypeIsStruct(lclTyp))
17618             {
17619                 lvaSetStruct(tmpNum,
17620                              impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo.GetClassHandle(),
17621                              true /* unsafe value cls check */);
17622             }
17623             else
17624             {
17625                 // This is a wrapped primitive.  Make sure the verstate knows that
17626                 lvaTable[tmpNum].lvVerTypeInfo =
17627                     impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo;
17628             }
17629         }
17630     }
17631
17632     return tmpNum;
17633 }
17634
17635 // Returns the GenTree (usually a GT_LCL_VAR) representing an argument of the inlined method.
17636 // Only use this method for the arguments of the inlinee method.
17637 // !!! Do not use it for the locals of the inlinee method. !!!!
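//
// Arguments:
//    lclNum     - the inlinee's argument number
//    inlArgInfo - table of argument info for the inline candidate
//    lclVarInfo - table of argument/local type info for the inline candidate
//
// Return Value:
//    A tree to use in the inlinee body in place of the argument: a clone of
//    an invariant argument or of a byref to a struct local, a reference to
//    the caller's local variable, or a reference to a temp that the argument
//    expression is spilled into.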
17638
17639 GenTreePtr Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo)
17640 {
17641     /* Get the argument type */
17642     var_types lclTyp = lclVarInfo[lclNum].lclTypeInfo;
17643
17644     GenTreePtr op1 = nullptr;
17645
17646     // constant or address of local
17647     if (inlArgInfo[lclNum].argIsInvariant && !inlArgInfo[lclNum].argHasLdargaOp && !inlArgInfo[lclNum].argHasStargOp)
17648     {
17649         /* Clone the constant. Note that we cannot directly use argNode
17650         in the trees even if inlArgInfo[lclNum].argIsUsed==false as this
17651         would introduce aliasing between inlArgInfo[].argNode and
17652         impInlineExpr. Then gtFoldExpr() could change it, causing further
17653         references to the argument to work off of the bashed copy. */
17654
17655         op1 = gtCloneExpr(inlArgInfo[lclNum].argNode);
17656         PREFIX_ASSUME(op1 != nullptr);
17657         inlArgInfo[lclNum].argTmpNum = (unsigned)-1; // illegal temp
17658     }
17659     else if (inlArgInfo[lclNum].argIsLclVar && !inlArgInfo[lclNum].argHasLdargaOp && !inlArgInfo[lclNum].argHasStargOp)
17660     {
17661         /* Argument is a local variable (of the caller)
17662          * Can we re-use the passed argument node? */
17663
17664         op1                          = inlArgInfo[lclNum].argNode;
17665         inlArgInfo[lclNum].argTmpNum = op1->gtLclVarCommon.gtLclNum;
17666
17667         if (inlArgInfo[lclNum].argIsUsed)
17668         {
17669             assert(op1->gtOper == GT_LCL_VAR);
17670             assert(lclNum == op1->gtLclVar.gtLclILoffs);
17671
17672             if (!lvaTable[op1->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad())
17673             {
17674                 lclTyp = genActualType(lclTyp);
17675             }
17676
17677             /* Create a new lcl var node - remember the argument lclNum */
17678             op1 = gtNewLclvNode(op1->gtLclVarCommon.gtLclNum, lclTyp, op1->gtLclVar.gtLclILoffs);
17679         }
17680     }
17681     else if (inlArgInfo[lclNum].argIsByRefToStructLocal && !inlArgInfo[lclNum].argHasStargOp)
17682     {
17683         /* Argument is a by-ref address to a struct, a normed struct, or its field.
17684            In these cases, don't spill the byref to a local, simply clone the tree and use it.
17685            This way we will increase the chance for this byref to be optimized away by
17686            a subsequent "dereference" operation.
17687
17688            From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree
17689            (in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal.
17690            For example, if the caller is:
17691                 ldloca.s   V_1  // V_1 is a local struct
17692                 call       void Test.ILPart::RunLdargaOnPointerArg(int32*)
17693            and the callee being inlined has:
17694                 .method public static void  RunLdargaOnPointerArg(int32* ptrToInts) cil managed
17695                     ldarga.s   ptrToInts
17696                     call       void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**)
17697            then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll
17698            soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR.
17699         */
17700         assert(inlArgInfo[lclNum].argNode->TypeGet() == TYP_BYREF ||
17701                inlArgInfo[lclNum].argNode->TypeGet() == TYP_I_IMPL);
17702         op1 = gtCloneExpr(inlArgInfo[lclNum].argNode);
17703     }
17704     else
17705     {
17706         /* Argument is a complex expression - it must be evaluated into a temp */
17707
17708         if (inlArgInfo[lclNum].argHasTmp)
17709         {
17710             assert(inlArgInfo[lclNum].argIsUsed);
17711             assert(inlArgInfo[lclNum].argTmpNum < lvaCount);
17712
17713             /* Create a new lcl var node - remember the argument lclNum */
17714             op1 = gtNewLclvNode(inlArgInfo[lclNum].argTmpNum, genActualType(lclTyp));
17715
17716             /* This is the second or later use of this argument,
17717             so we have to use the temp (instead of the actual arg) */
17718             inlArgInfo[lclNum].argBashTmpNode = nullptr;
17719         }
17720         else
17721         {
17722             /* First time use */
17723             assert(inlArgInfo[lclNum].argIsUsed == false);
17724
17725             /* Reserve a temp for the expression.
17726             * Use a large size node as we may change it later */
17727
17728             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg"));
17729
17730             lvaTable[tmpNum].lvType = lclTyp;
17731             assert(lvaTable[tmpNum].lvAddrExposed == 0);
17732             if (inlArgInfo[lclNum].argHasLdargaOp)
17733             {
17734                 lvaTable[tmpNum].lvHasLdAddrOp = 1;
17735             }
17736
17737             if (lclVarInfo[lclNum].lclVerTypeInfo.IsStruct())
17738             {
17739                 if (varTypeIsStruct(lclTyp))
17740                 {
17741                     lvaSetStruct(tmpNum, impInlineInfo->lclVarInfo[lclNum].lclVerTypeInfo.GetClassHandle(),
17742                                  true /* unsafe value cls check */);
17743                 }
17744                 else
17745                 {
17746                     // This is a wrapped primitive.  Make sure the verstate knows that
17747                     lvaTable[tmpNum].lvVerTypeInfo = impInlineInfo->lclVarInfo[lclNum].lclVerTypeInfo;
17748                 }
17749             }
17750
17751             inlArgInfo[lclNum].argHasTmp = true;
17752             inlArgInfo[lclNum].argTmpNum = tmpNum;
17753
17754             // If we require strict exception order, then arguments must
17755             // be evaluated in sequence before the body of the inlined method.
17756             // So we need to evaluate them to a temp.
17757             // Also, if arguments have global references, we need to
17758             // evaluate them to a temp before the inlined body as the
17759             // inlined body may be modifying the global ref.
17760             // TODO-1stClassStructs: We currently do not reuse an existing lclVar
17761             // if it is a struct, because it requires some additional handling.
17762
17763             if (!varTypeIsStruct(lclTyp) && (!inlArgInfo[lclNum].argHasSideEff) && (!inlArgInfo[lclNum].argHasGlobRef))
17764             {
17765                 /* Get a *LARGE* LCL_VAR node */
17766                 op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp), lclNum);
17767
17768                 /* Record op1 as the very first use of this argument.
17769                 If there are no further uses of the arg, we may be
17770                 able to use the actual arg node instead of the temp.
17771                 If we do see any further uses, we will clear this. */
17772                 inlArgInfo[lclNum].argBashTmpNode = op1;
17773             }
17774             else
17775             {
17776                 /* Get a small LCL_VAR node */
17777                 op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp));
17778                 /* No bashing of this argument */
17779                 inlArgInfo[lclNum].argBashTmpNode = nullptr;
17780             }
17781         }
17782     }
17783
17784     /* Mark the argument as used */
17785
17786     inlArgInfo[lclNum].argIsUsed = true;
17787
17788     return op1;
17789 }
17790
17791 /******************************************************************************
17792  Is this the original "this" argument to the call being inlined?
17793
17794  Note that we do not inline methods with "starg 0", and so we do not need to
17795  worry about it.
17796 */
17797
17798 BOOL Compiler::impInlineIsThis(GenTreePtr tree, InlArgInfo* inlArgInfo)
17799 {
17800     assert(compIsForInlining());
17801     return (tree->gtOper == GT_LCL_VAR && tree->gtLclVarCommon.gtLclNum == inlArgInfo[0].argTmpNum);
17802 }
17803
17804 //-----------------------------------------------------------------------------
17805 // This function checks if a dereference in the inlinee can guarantee that
17806 // the "this" is non-NULL.
17807 // If we haven't hit a branch or a side effect, and we are dereferencing
17808 // from 'this' to access a field or make a GTF_CALL_NULLCHECK call,
17809 // then we can avoid a separate null pointer check.
17810 //
17811 // "additionalTreesToBeEvaluatedBefore"
17812 // is the set of pending trees that have not yet been added to the statement list,
17813 // and which have been removed from verCurrentState.esStack[]
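//
// Returns TRUE only if the dereference is guaranteed to happen before any
// side effect becomes globally visible: the current block must be the first
// block, the variable being dereferenced must be the inlinee's 'this'
// argument, and neither the pending trees, the statement list, nor the
// evaluation stack may contain globally visible side effects.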
17814
17815 BOOL Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTreePtr  additionalTreesToBeEvaluatedBefore,
17816                                                                   GenTreePtr  variableBeingDereferenced,
17817                                                                   InlArgInfo* inlArgInfo)
17818 {
17819     assert(compIsForInlining());
17820     assert(opts.OptEnabled(CLFLG_INLINING));
17821
17822     BasicBlock* block = compCurBB;
17823
17824     GenTreePtr stmt;
17825     GenTreePtr expr;
17826
17827     if (block != fgFirstBB)
17828     {
17829         return FALSE;
17830     }
17831
17832     if (!impInlineIsThis(variableBeingDereferenced, inlArgInfo))
17833     {
17834         return FALSE;
17835     }
17836
17837     if (additionalTreesToBeEvaluatedBefore &&
17838         GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTreesToBeEvaluatedBefore->gtFlags))
17839     {
17840         return FALSE;
17841     }
17842
17843     for (stmt = impTreeList->gtNext; stmt; stmt = stmt->gtNext)
17844     {
17845         expr = stmt->gtStmt.gtStmtExpr;
17846
17847         if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags))
17848         {
17849             return FALSE;
17850         }
17851     }
17852
17853     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
17854     {
17855         unsigned stackTreeFlags = verCurrentState.esStack[level].val->gtFlags;
17856         if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags))
17857         {
17858             return FALSE;
17859         }
17860     }
17861
17862     return TRUE;
17863 }
17864
17865 /******************************************************************************/
17866 // Check the inlining eligibility of this GT_CALL node.
17867 // Mark GTF_CALL_INLINE_CANDIDATE on the GT_CALL node
17868
17869 // Todo: find a way to record the failure reasons in the IR (or
17870 // otherwise build tree context) so when we do the inlining pass we
17871 // can capture these reasons
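//
// Arguments:
//    callNode        - the call to evaluate as an inline candidate
//    exactContextHnd - exact context handle for the call
//    callInfo        - call info for the call site from the runtime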
17872
17873 void Compiler::impMarkInlineCandidate(GenTreePtr             callNode,
17874                                       CORINFO_CONTEXT_HANDLE exactContextHnd,
17875                                       CORINFO_CALL_INFO*     callInfo)
17876 {
17877     // Let the strategy know there's another call
17878     impInlineRoot()->m_inlineStrategy->NoteCall();
17879
17880     if (!opts.OptEnabled(CLFLG_INLINING))
17881     {
17882         /* XXX Mon 8/18/2008
17883          * This assert is misleading.  The caller does not ensure that we have CLFLG_INLINING set before
17884          * calling impMarkInlineCandidate.  However, if this assert trips it means that we're an inlinee and
17885          * CLFLG_MINOPT is set.  That doesn't make a lot of sense.  If you hit this assert, work back and
17886          * figure out why we did not set MAXOPT for this compile.
17887          */
17888         assert(!compIsForInlining());
17889         return;
17890     }
17891
17892     if (compIsForImportOnly())
17893     {
17894         // Don't bother creating the inline candidate during verification.
17895         // Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification
17896         // that leads to the creation of multiple instances of Compiler.
17897         return;
17898     }
17899
17900     GenTreeCall* call = callNode->AsCall();
17901     InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate");
17902
17903     // Don't inline if not optimizing root method
17904     if (opts.compDbgCode)
17905     {
17906         inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN);
17907         return;
17908     }
17909
17910     // Don't inline if inlining into root method is disabled.
17911     if (InlineStrategy::IsNoInline(info.compCompHnd, info.compMethodHnd))
17912     {
17913         inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE);
17914         return;
17915     }
17916
17917     // Inlining candidate determination needs to honor only IL tail prefix.
17918     // Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive).
17919     if (call->IsTailPrefixedCall())
17920     {
17921         inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX);
17922         return;
17923     }
17924
17925     // Tail recursion elimination takes precedence over inlining.
17926     // TODO: We may want to do some of the additional checks from fgMorphCall
17927     // here to reduce the chance we don't inline a call that won't be optimized
17928     // as a fast tail call or turned into a loop.
17929     if (gtIsRecursiveCall(call) && call->IsImplicitTailCall())
17930     {
17931         inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL);
17932         return;
17933     }
17934
17935     if ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT)
17936     {
17937         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT);
17938         return;
17939     }
17940
17941     /* Ignore helper calls */
17942
17943     if (call->gtCallType == CT_HELPER)
17944     {
17945         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER);
17946         return;
17947     }
17948
17949     /* Ignore indirect calls */
17950     if (call->gtCallType == CT_INDIRECT)
17951     {
17952         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED);
17953         return;
17954     }
17955
17956     /* I removed the check for BBJ_THROW.  BBJ_THROW is usually marked as rarely run.  This more or less
17957      * restricts the inliner to non-expanding inlines.  I removed the check to allow for non-expanding
17958      * inlining in throw blocks.  I should consider the same thing for catch and filter regions. */
17959
17960     CORINFO_METHOD_HANDLE fncHandle = call->gtCallMethHnd;
17961     unsigned              methAttr;
17962
17963     // Reuse method flags from the original callInfo if possible
17964     if (fncHandle == callInfo->hMethod)
17965     {
17966         methAttr = callInfo->methodFlags;
17967     }
17968     else
17969     {
17970         methAttr = info.compCompHnd->getMethodAttribs(fncHandle);
17971     }
17972
17973 #ifdef DEBUG
17974     if (compStressCompile(STRESS_FORCE_INLINE, 0))
17975     {
17976         methAttr |= CORINFO_FLG_FORCEINLINE;
17977     }
17978 #endif
17979
17980     // Check for COMPlus_AggressiveInlining
17981     if (compDoAggressiveInlining)
17982     {
17983         methAttr |= CORINFO_FLG_FORCEINLINE;
17984     }
17985
17986     if (!(methAttr & CORINFO_FLG_FORCEINLINE))
17987     {
17988         /* Don't bother inlining call sites that are in a catch handler or filter region */
17989         if (bbInCatchHandlerILRange(compCurBB))
17990         {
17991 #ifdef DEBUG
17992             if (verbose)
17993             {
17994                 printf("\nWill not inline blocks that are in the catch handler region\n");
17995             }
17996
17997 #endif
17998
17999             inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH);
18000             return;
18001         }
18002
18003         if (bbInFilterILRange(compCurBB))
18004         {
18005 #ifdef DEBUG
18006             if (verbose)
18007             {
18008                 printf("\nWill not inline blocks that are in the filter region\n");
18009             }
18010 #endif
18011
18012             inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER);
18013             return;
18014         }
18015     }
18016
18017     /* If the caller's stack frame is marked, then we can't do any inlining. Period. */
18018
18019     if (opts.compNeedSecurityCheck)
18020     {
18021         inlineResult.NoteFatal(InlineObservation::CALLER_NEEDS_SECURITY_CHECK);
18022         return;
18023     }
18024
18025     /* Check if we tried to inline this method before */
18026
18027     if (methAttr & CORINFO_FLG_DONT_INLINE)
18028     {
18029         inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE);
18030         return;
18031     }
18032
18033     /* Cannot inline synchronized methods */
18034
18035     if (methAttr & CORINFO_FLG_SYNCH)
18036     {
18037         inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED);
18038         return;
18039     }
18040
18041     /* Do not inline if callee needs security checks (since they would then mark the wrong frame) */
18042
18043     if (methAttr & CORINFO_FLG_SECURITYCHECK)
18044     {
18045         inlineResult.NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
18046         return;
18047     }
18048
18049     InlineCandidateInfo* inlineCandidateInfo = nullptr;
18050     impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult);
18051
18052     if (inlineResult.IsFailure())
18053     {
18054         return;
18055     }
18056
18057     // The old value should be NULL
18058     assert(call->gtInlineCandidateInfo == nullptr);
18059
18060     call->gtInlineCandidateInfo = inlineCandidateInfo;
18061
18062     // Mark the call node as inline candidate.
18063     call->gtFlags |= GTF_CALL_INLINE_CANDIDATE;
18064
18065     // Let the strategy know there's another candidate.
18066     impInlineRoot()->m_inlineStrategy->NoteCandidate();
18067
18068     // Since we're not actually inlining yet, and this call site is
18069     // still just an inline candidate, there's nothing to report.
18070     inlineResult.SetReported();
18071 }
18072
18073 /******************************************************************************/
18074 // Returns true if the given intrinsic will be implemented by target-specific
18075 // instructions
18076
18077 bool Compiler::IsTargetIntrinsic(CorInfoIntrinsics intrinsicId)
18078 {
18079 #if defined(_TARGET_AMD64_) || (defined(_TARGET_X86_) && !defined(LEGACY_BACKEND))
18080     switch (intrinsicId)
18081     {
18082         // Amd64 has only SSE2 instructions to directly compute sqrt/abs.
18083         //
18084         // TODO: Because the x86 backend only targets SSE for floating-point code,
18085         //       it does not treat Sine, Cosine, or Round as intrinsics (JIT32
18086         //       implemented those intrinsics as x87 instructions). If this poses
18087         //       a CQ problem, it may be necessary to change the implementation of
18088         //       the helper calls to decrease call overhead or switch back to the
18089         //       x87 instructions. This is tracked by #7097.
18090         case CORINFO_INTRINSIC_Sqrt:
18091         case CORINFO_INTRINSIC_Abs:
18092             return true;
18093
18094         default:
18095             return false;
18096     }
18097 #elif defined(_TARGET_ARM64_)
18098     switch (intrinsicId)
18099     {
18100         case CORINFO_INTRINSIC_Sqrt:
18101         case CORINFO_INTRINSIC_Abs:
18102         case CORINFO_INTRINSIC_Round:
18103             return true;
18104
18105         default:
18106             return false;
18107     }
18108 #elif defined(_TARGET_ARM_)
18109     switch (intrinsicId)
18110     {
18111         case CORINFO_INTRINSIC_Sqrt:
18112         case CORINFO_INTRINSIC_Abs:
18113         case CORINFO_INTRINSIC_Round:
18114             return true;
18115
18116         default:
18117             return false;
18118     }
18119 #elif defined(_TARGET_X86_)
18120     switch (intrinsicId)
18121     {
18122         case CORINFO_INTRINSIC_Sin:
18123         case CORINFO_INTRINSIC_Cos:
18124         case CORINFO_INTRINSIC_Sqrt:
18125         case CORINFO_INTRINSIC_Abs:
18126         case CORINFO_INTRINSIC_Round:
18127             return true;
18128
18129         default:
18130             return false;
18131     }
18132 #else
18133     // TODO: This portion of logic is not implemented for other architectures.
18134     // The reason for returning true is that on all other architectures the only
18135     // intrinsics enabled are target intrinsics.
18136     return true;
18137 #endif //_TARGET_AMD64_
18138 }
18139
18140 /******************************************************************************/
18141 // Returns true if the given intrinsic will be implemented by calling System.Math
18142 // methods.
18143
18144 bool Compiler::IsIntrinsicImplementedByUserCall(CorInfoIntrinsics intrinsicId)
18145 {
18146     // Currently, if a math intrinsic is not implemented by target-specific
18147     // instructions, it will be implemented by a System.Math call. In the
18148     // future, if we turn to implementing some of them with helper calls,
18149     // this predicate needs to be revisited.
18150     return !IsTargetIntrinsic(intrinsicId);
18151 }
18152
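//------------------------------------------------------------------------
// IsMathIntrinsic: check whether an intrinsic id corresponds to one of the
//    System.Math methods the jit recognizes as math intrinsics.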
18153 bool Compiler::IsMathIntrinsic(CorInfoIntrinsics intrinsicId)
18154 {
18155     switch (intrinsicId)
18156     {
18157         case CORINFO_INTRINSIC_Sin:
18158         case CORINFO_INTRINSIC_Sqrt:
18159         case CORINFO_INTRINSIC_Abs:
18160         case CORINFO_INTRINSIC_Cos:
18161         case CORINFO_INTRINSIC_Round:
18162         case CORINFO_INTRINSIC_Cosh:
18163         case CORINFO_INTRINSIC_Sinh:
18164         case CORINFO_INTRINSIC_Tan:
18165         case CORINFO_INTRINSIC_Tanh:
18166         case CORINFO_INTRINSIC_Asin:
18167         case CORINFO_INTRINSIC_Acos:
18168         case CORINFO_INTRINSIC_Atan:
18169         case CORINFO_INTRINSIC_Atan2:
18170         case CORINFO_INTRINSIC_Log10:
18171         case CORINFO_INTRINSIC_Pow:
18172         case CORINFO_INTRINSIC_Exp:
18173         case CORINFO_INTRINSIC_Ceiling:
18174         case CORINFO_INTRINSIC_Floor:
18175             return true;
18176         default:
18177             return false;
18178     }
18179 }
18180
18181 bool Compiler::IsMathIntrinsic(GenTreePtr tree)
18182 {
18183     return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->gtIntrinsic.gtIntrinsicId);
18184 }
18185 /*****************************************************************************/