1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
4
5 /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
6 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
7 XX                                                                           XX
8 XX                           Importer                                        XX
9 XX                                                                           XX
10 XX   Imports the given method and converts it to semantic trees              XX
11 XX                                                                           XX
12 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
13 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
14 */
15
16 #include "jitpch.h"
17 #ifdef _MSC_VER
18 #pragma hdrstop
19 #endif
20
21 #include "corexcep.h"
22
23 #define Verify(cond, msg)                                                                                              \
24     do                                                                                                                 \
25     {                                                                                                                  \
26         if (!(cond))                                                                                                   \
27         {                                                                                                              \
28             verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
29         }                                                                                                              \
30     } while (0)
31
32 #define VerifyOrReturn(cond, msg)                                                                                      \
33     do                                                                                                                 \
34     {                                                                                                                  \
35         if (!(cond))                                                                                                   \
36         {                                                                                                              \
37             verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
38             return;                                                                                                    \
39         }                                                                                                              \
40     } while (0)
41
42 #define VerifyOrReturnSpeculative(cond, msg, speculative)                                                              \
43     do                                                                                                                 \
44     {                                                                                                                  \
45         if (speculative)                                                                                               \
46         {                                                                                                              \
47             if (!(cond))                                                                                               \
48             {                                                                                                          \
49                 return false;                                                                                          \
50             }                                                                                                          \
51         }                                                                                                              \
52         else                                                                                                           \
53         {                                                                                                              \
54             if (!(cond))                                                                                               \
55             {                                                                                                          \
56                 verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                   \
57                 return false;                                                                                          \
58             }                                                                                                          \
59         }                                                                                                              \
60     } while (0)
61
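// For example, impResolveToken below reports a failed (verification-mode) token
// lookup simply as:
//
//     Verify(eeTryResolveToken(pResolvedToken), "Token resolution failed");
//
// leaving it to verRaiseVerifyExceptionIfNeeded to record the unverifiable-code
// condition and decide whether a verification exception must actually be raised.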
62 /*****************************************************************************/
63
64 void Compiler::impInit()
65 {
66
67 #ifdef DEBUG
68     impTreeList        = nullptr;
69     impTreeLast        = nullptr;
70     impInlinedCodeSize = 0;
71 #endif
72 }
73
74 /*****************************************************************************
75  *
76  *  Pushes the given tree on the stack.
77  */
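//
// For example, impPushNullObjRefOnStack (below) pushes a null object reference as:
//
//     impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL));
//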
78
79 void Compiler::impPushOnStack(GenTree* tree, typeInfo ti)
80 {
81     /* Check for overflow. If inlining, we may be using a bigger stack */
82
83     if ((verCurrentState.esStackDepth >= info.compMaxStack) &&
84         (verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0)))
85     {
86         BADCODE("stack overflow");
87     }
88
89 #ifdef DEBUG
90     // If we are pushing a struct, make certain we know the precise type!
91     if (tree->TypeGet() == TYP_STRUCT)
92     {
93         assert(ti.IsType(TI_STRUCT));
94         CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle();
95         assert(clsHnd != NO_CLASS_HANDLE);
96     }
97
98     if (tiVerificationNeeded && !ti.IsDead())
99     {
100         assert(typeInfo::AreEquivalent(NormaliseForStack(ti), ti)); // types are normalized
101
102         // The ti type is consistent with the tree type.
103         //
104
105         // On 64-bit systems, nodes whose "proper" type is "native int" get labeled TYP_LONG.
106         // In the verification type system, we always transform "native int" to "TI_INT".
107         // Ideally, we would keep track of which nodes labeled "TYP_LONG" are really "native int", but
108         // attempts to do that have proved too difficult.  Instead, we'll assume that in checks like this,
109         // when there's a mismatch, it's because of this reason -- the typeInfo::AreEquivalentModuloNativeInt
110         // method used in the last disjunct allows exactly this mismatch.
111         assert(ti.IsDead() || ti.IsByRef() && (tree->TypeGet() == TYP_I_IMPL || tree->TypeGet() == TYP_BYREF) ||
112                ti.IsUnboxedGenericTypeVar() && tree->TypeGet() == TYP_REF ||
113                ti.IsObjRef() && tree->TypeGet() == TYP_REF || ti.IsMethod() && tree->TypeGet() == TYP_I_IMPL ||
114                ti.IsType(TI_STRUCT) && tree->TypeGet() != TYP_REF ||
115                typeInfo::AreEquivalentModuloNativeInt(NormaliseForStack(ti),
116                                                       NormaliseForStack(typeInfo(tree->TypeGet()))));
117
118         // If it is a struct type, make certain we normalized the primitive types
119         assert(!ti.IsType(TI_STRUCT) ||
120                info.compCompHnd->getTypeForPrimitiveValueClass(ti.GetClassHandle()) == CORINFO_TYPE_UNDEF);
121     }
122
123 #if VERBOSE_VERIFY
124     if (VERBOSE && tiVerificationNeeded)
125     {
126         printf("\n");
127         printf(TI_DUMP_PADDING);
128         printf("About to push to stack: ");
129         ti.Dump();
130     }
131 #endif // VERBOSE_VERIFY
132
133 #endif // DEBUG
134
135     verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti;
136     verCurrentState.esStack[verCurrentState.esStackDepth++].val      = tree;
137
138     if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
139     {
140         compLongUsed = true;
141     }
142     else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
143     {
144         compFloatingPointUsed = true;
145     }
146 }
147
148 inline void Compiler::impPushNullObjRefOnStack()
149 {
150     impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL));
151 }
152
153 // This method gets called when we run into unverifiable code
154 // (and we are verifying the method)
155
156 inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file)
157                                                           DEBUGARG(unsigned line))
158 {
159     // Remember that the code is not verifiable
160     // Note that the method may yet pass canSkipMethodVerification(),
161     // and so the presence of unverifiable code may not be an issue.
162     tiIsVerifiableCode = FALSE;
163
164 #ifdef DEBUG
165     const char* tail = strrchr(file, '\\');
166     if (tail)
167     {
168         file = tail + 1;
169     }
170
171     if (JitConfig.JitBreakOnUnsafeCode())
172     {
173         assert(!"Unsafe code detected");
174     }
175 #endif
176
177     JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
178             msg, info.compFullName, impCurOpcName, impCurOpcOffs));
179
180     if (verNeedsVerification() || compIsForImportOnly())
181     {
182         JITLOG((LL_ERROR, "Verification failure:  %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
183                 msg, info.compFullName, impCurOpcName, impCurOpcOffs));
184         verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line));
185     }
186 }
187
188 inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file)
189                                                                     DEBUGARG(unsigned line))
190 {
191     JITLOG((LL_ERROR, "Verification failure:  %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
192             msg, info.compFullName, impCurOpcName, impCurOpcOffs));
193
194 #ifdef DEBUG
195     //    BreakIfDebuggerPresent();
196     if (getBreakOnBadCode())
197     {
198         assert(!"Typechecking error");
199     }
200 #endif
201
202     RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr);
203     UNREACHABLE();
204 }
205
206 // Helper function that tells us whether the IL instruction at the given code
207 // address consumes an address from the top of the stack. We use it to avoid
208 // marking locals as address-taken (lvAddrTaken) unnecessarily.
209 bool Compiler::impILConsumesAddr(const BYTE* codeAddr, CORINFO_METHOD_HANDLE fncHandle, CORINFO_MODULE_HANDLE scpHandle)
210 {
211     assert(!compIsForInlining());
212
213     OPCODE opcode;
214
215     opcode = (OPCODE)getU1LittleEndian(codeAddr);
216
217     switch (opcode)
218     {
219         // case CEE_LDFLDA: We're leaving this one out because if you have a sequence
220         // like
221         //
222         //          ldloca.0
223         //          ldflda whatever
224         //
225         // on a primitive-like struct, you end up after morphing with the address of a
226         // local that's not marked as address-taken, which is wrong. Also, ldflda is
227         // usually used for structs that contain other structs, which isn't a case we
228         // handle very well right now for other reasons.
229
230         case CEE_LDFLD:
231         {
232             // We won't collapse small fields. This is probably not the right place to have this
233             // check, but we're only using the function for this purpose, and it is easy to factor
234             // out if we need to do so.
235
236             CORINFO_RESOLVED_TOKEN resolvedToken;
237             impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field);
238
239             CORINFO_CLASS_HANDLE clsHnd;
240             var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField, &clsHnd));
241
242             // Preserve 'small' int types
243             if (!varTypeIsSmall(lclTyp))
244             {
245                 lclTyp = genActualType(lclTyp);
246             }
247
248             if (varTypeIsSmall(lclTyp))
249             {
250                 return false;
251             }
252
253             return true;
254         }
255         default:
256             break;
257     }
258
259     return false;
260 }
261
262 void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind)
263 {
264     pResolvedToken->tokenContext = impTokenLookupContextHandle;
265     pResolvedToken->tokenScope   = info.compScopeHnd;
266     pResolvedToken->token        = getU4LittleEndian(addr);
267     pResolvedToken->tokenType    = kind;
268
269     if (!tiVerificationNeeded)
270     {
271         info.compCompHnd->resolveToken(pResolvedToken);
272     }
273     else
274     {
275         Verify(eeTryResolveToken(pResolvedToken), "Token resolution failed");
276     }
277 }
278
279 /*****************************************************************************
280  *
281  *  Pop one tree from the stack.
282  */
283
284 StackEntry Compiler::impPopStack()
285 {
286     if (verCurrentState.esStackDepth == 0)
287     {
288         BADCODE("stack underflow");
289     }
290
291 #ifdef DEBUG
292 #if VERBOSE_VERIFY
293     if (VERBOSE && tiVerificationNeeded)
294     {
295         JITDUMP("\n");
296         printf(TI_DUMP_PADDING);
297         printf("About to pop from the stack: ");
298         const typeInfo& ti = verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo;
299         ti.Dump();
300     }
301 #endif // VERBOSE_VERIFY
302 #endif // DEBUG
303
304     return verCurrentState.esStack[--verCurrentState.esStackDepth];
305 }
306
307 /*****************************************************************************
308  *
309  *  Peek at the n'th (0-based) tree from the top of the stack.
310  */
311
312 StackEntry& Compiler::impStackTop(unsigned n)
313 {
314     if (verCurrentState.esStackDepth <= n)
315     {
316         BADCODE("stack underflow");
317     }
318
319     return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1];
320 }
321
322 unsigned Compiler::impStackHeight()
323 {
324     return verCurrentState.esStackDepth;
325 }
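//
// A rough sketch (simplified, not actual importer code) of how these helpers are
// typically combined when importing a binary opcode: pop the two operands, build
// the tree, and push the result back, e.g.
//
//     GenTree* op2 = impPopStack().val;
//     GenTree* op1 = impPopStack().val;
//     impPushOnStack(gtNewOperNode(GT_ADD, TYP_INT, op1, op2), typeInfo(TI_INT));
//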
326
327 /*****************************************************************************
328  *  Some trees are spilled in a special way. When unspilling them, or when
329  *  making a copy, they need special handling. This function enumerates the
330  *  operators that are possible after spilling.
331  */
332
333 #ifdef DEBUG // only used in asserts
334 static bool impValidSpilledStackEntry(GenTree* tree)
335 {
336     if (tree->gtOper == GT_LCL_VAR)
337     {
338         return true;
339     }
340
341     if (tree->OperIsConst())
342     {
343         return true;
344     }
345
346     return false;
347 }
348 #endif
349
350 /*****************************************************************************
351  *
352  *  The following logic is used to save/restore stack contents.
353  *  If 'copy' is true, then we make a copy of the trees on the stack. These
354  *  have to all be cloneable/spilled values.
355  */
356
357 void Compiler::impSaveStackState(SavedStack* savePtr, bool copy)
358 {
359     savePtr->ssDepth = verCurrentState.esStackDepth;
360
361     if (verCurrentState.esStackDepth)
362     {
363         savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth];
364         size_t saveSize  = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees);
365
366         if (copy)
367         {
368             StackEntry* table = savePtr->ssTrees;
369
370             /* Make a fresh copy of all the stack entries */
371
372             for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++)
373             {
374                 table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo;
375                 GenTree* tree     = verCurrentState.esStack[level].val;
376
377                 assert(impValidSpilledStackEntry(tree));
378
379                 switch (tree->gtOper)
380                 {
381                     case GT_CNS_INT:
382                     case GT_CNS_LNG:
383                     case GT_CNS_DBL:
384                     case GT_CNS_STR:
385                     case GT_LCL_VAR:
386                         table->val = gtCloneExpr(tree);
387                         break;
388
389                     default:
390                         assert(!"Bad oper - Not covered by impValidSpilledStackEntry()");
391                         break;
392                 }
393             }
394         }
395         else
396         {
397             memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize);
398         }
399     }
400 }
401
402 void Compiler::impRestoreStackState(SavedStack* savePtr)
403 {
404     verCurrentState.esStackDepth = savePtr->ssDepth;
405
406     if (verCurrentState.esStackDepth)
407     {
408         memcpy(verCurrentState.esStack, savePtr->ssTrees,
409                verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack));
410     }
411 }
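//
// A minimal usage sketch (hypothetical caller): save the evaluation stack, do some
// work that may push or pop entries, then restore the saved state, e.g.
//
//     SavedStack blockState;
//     impSaveStackState(&blockState, false /* copy */);
//     // ... importation that disturbs the stack ...
//     impRestoreStackState(&blockState);
//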
412
413 /*****************************************************************************
414  *
415  *  Get the tree list started for a new basic block.
416  */
417 inline void Compiler::impBeginTreeList()
418 {
419     assert(impTreeList == nullptr && impTreeLast == nullptr);
420
421     impTreeList = impTreeLast = new (this, GT_BEG_STMTS) GenTree(GT_BEG_STMTS, TYP_VOID);
422 }
423
424 /*****************************************************************************
425  *
426  *  Store the given start and end stmt in the given basic block. This is
427  *  mostly called by impEndTreeList(BasicBlock *block). It is called
428  *  directly only for handling CEE_LEAVEs out of finally-protected try regions.
429  */
430
431 inline void Compiler::impEndTreeList(BasicBlock* block, GenTree* firstStmt, GenTree* lastStmt)
432 {
433     assert(firstStmt->gtOper == GT_STMT);
434     assert(lastStmt->gtOper == GT_STMT);
435
436     /* Make the list circular, so that we can easily walk it backwards */
437
438     firstStmt->gtPrev = lastStmt;
439
440     /* Store the tree list in the basic block */
441
442     block->bbTreeList = firstStmt;
443
444     /* The block should not already be marked as imported */
445     assert((block->bbFlags & BBF_IMPORTED) == 0);
446
447     block->bbFlags |= BBF_IMPORTED;
448 }
449
450 /*****************************************************************************
451  *
452  *  Store the current tree list in the given basic block.
453  */
454
455 inline void Compiler::impEndTreeList(BasicBlock* block)
456 {
457     assert(impTreeList->gtOper == GT_BEG_STMTS);
458
459     GenTree* firstTree = impTreeList->gtNext;
460
461     if (!firstTree)
462     {
463         /* The block should not already be marked as imported */
464         assert((block->bbFlags & BBF_IMPORTED) == 0);
465
466         // Empty block. Just mark it as imported
467         block->bbFlags |= BBF_IMPORTED;
468     }
469     else
470     {
471         // Ignore the GT_BEG_STMTS
472         assert(firstTree->gtPrev == impTreeList);
473
474         impEndTreeList(block, firstTree, impTreeLast);
475     }
476
477 #ifdef DEBUG
478     if (impLastILoffsStmt != nullptr)
479     {
480         impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
481         impLastILoffsStmt                          = nullptr;
482     }
483
484     impTreeList = impTreeLast = nullptr;
485 #endif
486 }
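//
// Sketch of the per-block lifecycle these helpers implement (hypothetical driver code):
//
//     impBeginTreeList();        // start an empty statement list
//     // ... impAppendStmt / impAppendTree for each imported statement ...
//     impEndTreeList(block);     // hand the list to 'block' and mark it BBF_IMPORTED
//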
487
488 /*****************************************************************************
489  *
490  *  Check that appending the given tree doesn't mess up the semantic order. Note
491  *  that this has only limited value as we can only check [0..chkLevel).
492  */
493
494 inline void Compiler::impAppendStmtCheck(GenTree* stmt, unsigned chkLevel)
495 {
496 #ifndef DEBUG
497     return;
498 #else
499     assert(stmt->gtOper == GT_STMT);
500
501     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
502     {
503         chkLevel = verCurrentState.esStackDepth;
504     }
505
506     if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE)
507     {
508         return;
509     }
510
511     GenTree* tree = stmt->gtStmt.gtStmtExpr;
512
513     // Calls can only be appended if there are no trees with GTF_GLOB_EFFECT on the stack
514
515     if (tree->gtFlags & GTF_CALL)
516     {
517         for (unsigned level = 0; level < chkLevel; level++)
518         {
519             assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0);
520         }
521     }
522
523     if (tree->gtOper == GT_ASG)
524     {
525         // For an assignment to a local variable, all references of that
526         // variable have to be spilled. If it is aliased, all calls and
527         // indirect accesses have to be spilled
528
529         if (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR)
530         {
531             unsigned lclNum = tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
532             for (unsigned level = 0; level < chkLevel; level++)
533             {
534                 assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum, false));
535                 assert(!lvaTable[lclNum].lvAddrExposed ||
536                        (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0);
537             }
538         }
539
540         // If the access may be to global memory, all side effects have to be spilled.
541
542         else if (tree->gtOp.gtOp1->gtFlags & GTF_GLOB_REF)
543         {
544             for (unsigned level = 0; level < chkLevel; level++)
545             {
546                 assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0);
547             }
548         }
549     }
550 #endif
551 }
552
553 /*****************************************************************************
554  *
555  *  Append the given GT_STMT node to the current block's tree list.
556  *  [0..chkLevel) is the portion of the stack which we will check for
557  *    interference with stmt and spill if needed.
558  */
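//
// The chkLevel argument is interpreted as elsewhere in this file: an explicit stack
// depth, or the special values CHECK_SPILL_ALL (check, and if needed spill, the whole
// evaluation stack) and CHECK_SPILL_NONE (the caller asserts there is no interference).
// For example, a common append pattern in this file looks roughly like:
//
//     impAppendTree(asg, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
//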
559
560 inline void Compiler::impAppendStmt(GenTree* stmt, unsigned chkLevel)
561 {
562     assert(stmt->gtOper == GT_STMT);
563     noway_assert(impTreeLast != nullptr);
564
565     /* If the statement being appended has any side-effects, check the stack
566        to see if anything needs to be spilled to preserve correct ordering. */
567
568     GenTree* expr  = stmt->gtStmt.gtStmtExpr;
569     unsigned flags = expr->gtFlags & GTF_GLOB_EFFECT;
570
571     // Assignments to (unaliased) locals don't count as side-effects, as
572     // we handle them specially using impSpillLclRefs(). Temp locals should
573     // be fine too.
574
575     if ((expr->gtOper == GT_ASG) && (expr->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
576         !(expr->gtOp.gtOp1->gtFlags & GTF_GLOB_REF) && !gtHasLocalsWithAddrOp(expr->gtOp.gtOp2))
577     {
578         unsigned op2Flags = expr->gtOp.gtOp2->gtFlags & GTF_GLOB_EFFECT;
579         assert(flags == (op2Flags | GTF_ASG));
580         flags = op2Flags;
581     }
582
583     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
584     {
585         chkLevel = verCurrentState.esStackDepth;
586     }
587
588     if (chkLevel && chkLevel != (unsigned)CHECK_SPILL_NONE)
589     {
590         assert(chkLevel <= verCurrentState.esStackDepth);
591
592         if (flags)
593         {
594             // If there is a call, we have to spill global refs
595             bool spillGlobEffects = (flags & GTF_CALL) ? true : false;
596
597             if (expr->gtOper == GT_ASG)
598             {
599                 GenTree* lhs = expr->gtGetOp1();
600                 // If we are assigning to a global ref, we have to spill global refs on stack.
601                 // TODO-1stClassStructs: Previously, spillGlobEffects was set to true for
602                 // GT_INITBLK and GT_COPYBLK, but this is overly conservative, and should be
603                 // revisited. (Note that it was NOT set to true for GT_COPYOBJ.)
604                 if (!expr->OperIsBlkOp())
605                 {
606                     // If we are assigning to a global ref, we have to spill global refs on stack
607                     if ((lhs->gtFlags & GTF_GLOB_REF) != 0)
608                     {
609                         spillGlobEffects = true;
610                     }
611                 }
612                 else if ((lhs->OperIsBlk() && !lhs->AsBlk()->HasGCPtr()) ||
613                          ((lhs->OperGet() == GT_LCL_VAR) &&
614                           (lvaTable[lhs->AsLclVarCommon()->gtLclNum].lvStructGcCount == 0)))
615                 {
616                     spillGlobEffects = true;
617                 }
618             }
619
620             impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt"));
621         }
622         else
623         {
624             impSpillSpecialSideEff();
625         }
626     }
627
628     impAppendStmtCheck(stmt, chkLevel);
629
630     /* Point 'prev' at the previous node, so that we can walk backwards */
631
632     stmt->gtPrev = impTreeLast;
633
634     /* Append the expression statement to the list */
635
636     impTreeLast->gtNext = stmt;
637     impTreeLast         = stmt;
638
639 #ifdef FEATURE_SIMD
640     impMarkContiguousSIMDFieldAssignments(stmt);
641 #endif
642
643     /* Once we set impCurStmtOffs in an appended tree, we are ready to
644        report the following offsets. So reset impCurStmtOffs */
645
646     if (impTreeLast->gtStmt.gtStmtILoffsx == impCurStmtOffs)
647     {
648         impCurStmtOffsSet(BAD_IL_OFFSET);
649     }
650
651 #ifdef DEBUG
652     if (impLastILoffsStmt == nullptr)
653     {
654         impLastILoffsStmt = stmt;
655     }
656
657     if (verbose)
658     {
659         printf("\n\n");
660         gtDispTree(stmt);
661     }
662 #endif
663 }
664
665 /*****************************************************************************
666  *
667  *  Insert the given GT_STMT "stmt" before GT_STMT "stmtBefore"
668  */
669
670 inline void Compiler::impInsertStmtBefore(GenTree* stmt, GenTree* stmtBefore)
671 {
672     assert(stmt->gtOper == GT_STMT);
673     assert(stmtBefore->gtOper == GT_STMT);
674
675     GenTree* stmtPrev  = stmtBefore->gtPrev;
676     stmt->gtPrev       = stmtPrev;
677     stmt->gtNext       = stmtBefore;
678     stmtPrev->gtNext   = stmt;
679     stmtBefore->gtPrev = stmt;
680 }
681
682 /*****************************************************************************
683  *
684  *  Append the given expression tree to the current block's tree list.
685  *  Return the newly created statement.
686  */
687
688 GenTree* Compiler::impAppendTree(GenTree* tree, unsigned chkLevel, IL_OFFSETX offset)
689 {
690     assert(tree);
691
692     /* Allocate an 'expression statement' node */
693
694     GenTree* expr = gtNewStmt(tree, offset);
695
696     /* Append the statement to the current block's stmt list */
697
698     impAppendStmt(expr, chkLevel);
699
700     return expr;
701 }
702
703 /*****************************************************************************
704  *
705  *  Insert the given expression tree before GT_STMT "stmtBefore"
706  */
707
708 void Compiler::impInsertTreeBefore(GenTree* tree, IL_OFFSETX offset, GenTree* stmtBefore)
709 {
710     assert(stmtBefore->gtOper == GT_STMT);
711
712     /* Allocate an 'expression statement' node */
713
714     GenTree* expr = gtNewStmt(tree, offset);
715
716     /* Insert the statement before 'stmtBefore' in the current block's stmt list */
717
718     impInsertStmtBefore(expr, stmtBefore);
719 }
720
721 /*****************************************************************************
722  *
723  *  Append an assignment of the given value to a temp to the current tree list.
724  *  curLevel is the stack level for which the spill to the temp is being done.
725  */
726
727 void Compiler::impAssignTempGen(unsigned    tmp,
728                                 GenTree*    val,
729                                 unsigned    curLevel,
730                                 GenTree**   pAfterStmt, /* = NULL */
731                                 IL_OFFSETX  ilOffset,   /* = BAD_IL_OFFSET */
732                                 BasicBlock* block       /* = NULL */
733                                 )
734 {
735     GenTree* asg = gtNewTempAssign(tmp, val);
736
737     if (!asg->IsNothingNode())
738     {
739         if (pAfterStmt)
740         {
741             GenTree* asgStmt = gtNewStmt(asg, ilOffset);
742             *pAfterStmt      = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
743         }
744         else
745         {
746             impAppendTree(asg, curLevel, impCurStmtOffs);
747         }
748     }
749 }
750
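// A typical (hypothetical) use of impAssignTempGen is spilling a value into a fresh
// temp so it can be referenced more than once, roughly:
//
//     unsigned tmpNum = lvaGrabTemp(true DEBUGARG("spill"));
//     impAssignTempGen(tmpNum, val, (unsigned)CHECK_SPILL_ALL);
//     GenTree* use = gtNewLclvNode(tmpNum, genActualType(val->TypeGet()));
//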
751 /*****************************************************************************
752  * same as above, but handle the valueclass case too
753  */
754
755 void Compiler::impAssignTempGen(unsigned             tmpNum,
756                                 GenTree*             val,
757                                 CORINFO_CLASS_HANDLE structType,
758                                 unsigned             curLevel,
759                                 GenTree**            pAfterStmt, /* = NULL */
760                                 IL_OFFSETX           ilOffset,   /* = BAD_IL_OFFSET */
761                                 BasicBlock*          block       /* = NULL */
762                                 )
763 {
764     GenTree* asg;
765
766     if (varTypeIsStruct(val))
767     {
768         assert(tmpNum < lvaCount);
769         assert(structType != NO_CLASS_HANDLE);
770
771         // If the method is non-verifiable, the assert below does not hold,
772         // so only enforce it when verification is turned on, since any block
773         // that tries to use the temp would have failed verification anyway.
774         var_types varType = lvaTable[tmpNum].lvType;
775         assert(tiVerificationNeeded || varType == TYP_UNDEF || varTypeIsStruct(varType));
776         lvaSetStruct(tmpNum, structType, false);
777
778         // Now, set the type of the struct value. Note that lvaSetStruct may modify the type
779         // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType)
780         // that has been passed in for the value being assigned to the temp, in which case we
781         // need to set 'val' to that same type.
782         // Note also that if we always normalized the types of any node that might be a struct
783         // type, this would not be necessary - but that requires additional JIT/EE interface
784         // calls that may not actually be required - e.g. if we only access a field of a struct.
785
786         val->gtType = lvaTable[tmpNum].lvType;
787
788         GenTree* dst = gtNewLclvNode(tmpNum, val->gtType);
789         asg          = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, block);
790     }
791     else
792     {
793         asg = gtNewTempAssign(tmpNum, val);
794     }
795
796     if (!asg->IsNothingNode())
797     {
798         if (pAfterStmt)
799         {
800             GenTree* asgStmt = gtNewStmt(asg, ilOffset);
801             *pAfterStmt      = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
802         }
803         else
804         {
805             impAppendTree(asg, curLevel, impCurStmtOffs);
806         }
807     }
808 }
809
810 /*****************************************************************************
811  *
812  *  Pop the given number of values from the stack and return a list node with
813  *  their values.
814  *  The 'prefixTree' argument may optionally contain an argument
815  *  list that is prepended to the list returned from this function.
816  *
817  *  The notion of prepended is a bit misleading in that the list is backwards
818  *  from the way I would expect: The first element popped is at the end of
819  *  the returned list, and prefixTree is 'before' that, meaning closer to
820  *  the end of the list.  To get to prefixTree, you have to walk to the
821  *  end of the list.
822  *
823  *  For ARG_ORDER_R2L prefixTree is only used to insert extra arguments, as
824  *  such we reverse its meaning such that returnValue has a reversed
825  *  prefixTree at the head of the list.
826  */
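//
// For example, impPopRevList below simply calls:
//
//     GenTreeArgList* list = impPopList(count, sig);
//
// and then reverses (part of) the resulting list.
//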
827
828 GenTreeArgList* Compiler::impPopList(unsigned count, CORINFO_SIG_INFO* sig, GenTreeArgList* prefixTree)
829 {
830     assert(sig == nullptr || count == sig->numArgs);
831
832     CORINFO_CLASS_HANDLE structType;
833     GenTreeArgList*      treeList;
834
835     if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
836     {
837         treeList = nullptr;
838     }
839     else
840     { // ARG_ORDER_L2R
841         treeList = prefixTree;
842     }
843
844     while (count--)
845     {
846         StackEntry se   = impPopStack();
847         typeInfo   ti   = se.seTypeInfo;
848         GenTree*   temp = se.val;
849
850         if (varTypeIsStruct(temp))
851         {
852             // Morph trees that aren't already OBJs or MKREFANY to be OBJs
853             assert(ti.IsType(TI_STRUCT));
854             structType = ti.GetClassHandleForValueClass();
855 #ifdef DEBUG
856             if (verbose)
857             {
858                 printf("Calling impNormStructVal on:\n");
859                 gtDispTree(temp);
860             }
861 #endif
862             temp = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL);
863 #ifdef DEBUG
864             if (verbose)
865             {
866                 printf("resulting tree:\n");
867                 gtDispTree(temp);
868             }
869 #endif
870         }
871
872         /* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */
873         treeList = gtNewListNode(temp, treeList);
874     }
875
876     if (sig != nullptr)
877     {
878         if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
879             sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR)
880         {
881             // Make sure that all valuetypes (including enums) that we push are loaded.
882             // This is to guarantee that if a GC is triggered from the prestub of this method,
883             // all valuetypes in the method signature are already loaded.
884             // We need to be able to find the size of the valuetypes, but we cannot
885             // do a class-load from within GC.
886             info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass);
887         }
888
889         CORINFO_ARG_LIST_HANDLE argLst = sig->args;
890         CORINFO_CLASS_HANDLE    argClass;
891         CORINFO_CLASS_HANDLE    argRealClass;
892         GenTreeArgList*         args;
893
894         for (args = treeList, count = sig->numArgs; count > 0; args = args->Rest(), count--)
895         {
896             PREFIX_ASSUME(args != nullptr);
897
898             CorInfoType corType = strip(info.compCompHnd->getArgType(sig, argLst, &argClass));
899
900             // insert implied casts (from float to double or double to float)
901
902             if (corType == CORINFO_TYPE_DOUBLE && args->Current()->TypeGet() == TYP_FLOAT)
903             {
904                 args->Current() = gtNewCastNode(TYP_DOUBLE, args->Current(), TYP_DOUBLE);
905             }
906             else if (corType == CORINFO_TYPE_FLOAT && args->Current()->TypeGet() == TYP_DOUBLE)
907             {
908                 args->Current() = gtNewCastNode(TYP_FLOAT, args->Current(), TYP_FLOAT);
909             }
910
911             // insert any widening or narrowing casts for backwards compatibility
912
913             args->Current() = impImplicitIorI4Cast(args->Current(), JITtype2varType(corType));
914
915             if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR &&
916                 corType != CORINFO_TYPE_VAR && (argRealClass = info.compCompHnd->getArgClass(sig, argLst)) != nullptr)
917             {
918                 // Everett MC++ could generate IL with mismatched valuetypes. It used to work with the Everett JIT,
919                 // but it stopped working in Whidbey when we started passing simple valuetypes as their underlying
920                 // primitive types.
921                 // We will try to adjust for this case here to avoid breaking customers' code (see VSW 485789 for
922                 // details).
923                 if (corType == CORINFO_TYPE_VALUECLASS && !varTypeIsStruct(args->Current()))
924                 {
925                     args->Current() = impNormStructVal(args->Current(), argRealClass, (unsigned)CHECK_SPILL_ALL, true);
926                 }
927
928                 // Make sure that all valuetypes (including enums) that we push are loaded.
929                 // This is to guarantee that if a GC is triggered from the prestub of this method,
930                 // all valuetypes in the method signature are already loaded.
931                 // We need to be able to find the size of the valuetypes, but we cannot
932                 // do a class-load from within GC.
933                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass);
934             }
935
936             argLst = info.compCompHnd->getArgNext(argLst);
937         }
938     }
939
940     if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
941     {
942         // Prepend the prefixTree
943
944         // Simple in-place reversal to place treeList
945         // at the end of a reversed prefixTree
946         while (prefixTree != nullptr)
947         {
948             GenTreeArgList* next = prefixTree->Rest();
949             prefixTree->Rest()   = treeList;
950             treeList             = prefixTree;
951             prefixTree           = next;
952         }
953     }
954     return treeList;
955 }
956
957 /*****************************************************************************
958  *
959  *  Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.)
960  *  The first "skipReverseCount" items are not reversed.
961  */
962
963 GenTreeArgList* Compiler::impPopRevList(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount)
964
965 {
966     assert(skipReverseCount <= count);
967
968     GenTreeArgList* list = impPopList(count, sig);
969
970     // reverse the list
971     if (list == nullptr || skipReverseCount == count)
972     {
973         return list;
974     }
975
976     GenTreeArgList* ptr          = nullptr; // Initialized to the first node that needs to be reversed
977     GenTreeArgList* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed
978
979     if (skipReverseCount == 0)
980     {
981         ptr = list;
982     }
983     else
984     {
985         lastSkipNode = list;
986         // Get to the first node that needs to be reversed
987         for (unsigned i = 0; i < skipReverseCount - 1; i++)
988         {
989             lastSkipNode = lastSkipNode->Rest();
990         }
991
992         PREFIX_ASSUME(lastSkipNode != nullptr);
993         ptr = lastSkipNode->Rest();
994     }
995
996     GenTreeArgList* reversedList = nullptr;
997
998     do
999     {
1000         GenTreeArgList* tmp = ptr->Rest();
1001         ptr->Rest()         = reversedList;
1002         reversedList        = ptr;
1003         ptr                 = tmp;
1004     } while (ptr != nullptr);
1005
1006     if (skipReverseCount)
1007     {
1008         lastSkipNode->Rest() = reversedList;
1009         return list;
1010     }
1011     else
1012     {
1013         return reversedList;
1014     }
1015 }
1016
1017 /*****************************************************************************
1018    Assign (copy) the structure from 'src' to 'dest'.  The structure is a value
1019    class of type 'structHnd'.  It returns the tree that should be appended to the
1020    statement list that represents the assignment.
1021    Temp assignments may be appended to impTreeList if spilling is necessary.
1022    curLevel is the stack level for which a spill may be being done.
1023  */
1024
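// For example, impAssignTempGen (above) builds the destination lclVar node and then
// appends the returned assignment, along the lines of:
//
//     GenTree* dst = gtNewLclvNode(tmpNum, val->gtType);
//     GenTree* asg = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, block);
//     // ... asg is then appended via impAppendTree or fgInsertStmtAfter ...
//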
1025 GenTree* Compiler::impAssignStruct(GenTree*             dest,
1026                                    GenTree*             src,
1027                                    CORINFO_CLASS_HANDLE structHnd,
1028                                    unsigned             curLevel,
1029                                    GenTree**            pAfterStmt, /* = NULL */
1030                                    BasicBlock*          block       /* = NULL */
1031                                    )
1032 {
1033     assert(varTypeIsStruct(dest));
1034
1035     while (dest->gtOper == GT_COMMA)
1036     {
1037         assert(varTypeIsStruct(dest->gtOp.gtOp2)); // Second thing is the struct
1038
1039         // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree.
1040         if (pAfterStmt)
1041         {
1042             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(dest->gtOp.gtOp1, impCurStmtOffs));
1043         }
1044         else
1045         {
1046             impAppendTree(dest->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1047         }
1048
1049         // set dest to the second thing
1050         dest = dest->gtOp.gtOp2;
1051     }
1052
1053     assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD ||
1054            dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX);
1055
1056     if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR &&
1057         src->gtLclVarCommon.gtLclNum == dest->gtLclVarCommon.gtLclNum)
1058     {
1059         // Make this a NOP
1060         return gtNewNothingNode();
1061     }
1062
1063     // TODO-1stClassStructs: Avoid creating an address if it is not needed,
1064     // or re-creating a Blk node if it is.
1065     GenTree* destAddr;
1066
1067     if (dest->gtOper == GT_IND || dest->OperIsBlk())
1068     {
1069         destAddr = dest->gtOp.gtOp1;
1070     }
1071     else
1072     {
1073         destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
1074     }
1075
1076     return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, block));
1077 }
1078
1079 /*****************************************************************************/
1080
1081 GenTree* Compiler::impAssignStructPtr(GenTree*             destAddr,
1082                                       GenTree*             src,
1083                                       CORINFO_CLASS_HANDLE structHnd,
1084                                       unsigned             curLevel,
1085                                       GenTree**            pAfterStmt, /* = NULL */
1086                                       BasicBlock*          block       /* = NULL */
1087                                       )
1088 {
1089     var_types destType;
1090     GenTree*  dest      = nullptr;
1091     unsigned  destFlags = 0;
1092
1093 #if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1094     assert(varTypeIsStruct(src) || (src->gtOper == GT_ADDR && src->TypeGet() == TYP_BYREF));
1095     // TODO-ARM-BUG: Does ARM need this?
1096     // TODO-ARM64-BUG: Does ARM64 need this?
1097     assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1098            src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1099            src->gtOper == GT_COMMA || src->gtOper == GT_ADDR ||
1100            (src->TypeGet() != TYP_STRUCT &&
1101             (GenTree::OperIsSIMD(src->gtOper) || src->OperIsSimdHWIntrinsic() || src->gtOper == GT_LCL_FLD)));
1102 #else  // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1103     assert(varTypeIsStruct(src));
1104
1105     assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1106            src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1107            src->gtOper == GT_COMMA ||
1108            (src->TypeGet() != TYP_STRUCT &&
1109             (GenTree::OperIsSIMD(src->gtOper) || src->OperIsSimdHWIntrinsic() || src->gtOper == GT_LCL_FLD)));
1110 #endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1111     if (destAddr->OperGet() == GT_ADDR)
1112     {
1113         GenTree* destNode = destAddr->gtGetOp1();
1114         // If the actual destination is a local (for non-LEGACY_BACKEND), or already a block node, or is a node that
1115         // will be morphed, don't insert an OBJ(ADDR).
1116         if (destNode->gtOper == GT_INDEX || destNode->OperIsBlk()
1117 #ifndef LEGACY_BACKEND
1118             || ((destNode->OperGet() == GT_LCL_VAR) && (destNode->TypeGet() == src->TypeGet()))
1119 #endif // !LEGACY_BACKEND
1120                 )
1121         {
1122             dest = destNode;
1123         }
1124         destType = destNode->TypeGet();
1125     }
1126     else
1127     {
1128         destType = src->TypeGet();
1129     }
1130
1131     var_types asgType = src->TypeGet();
1132
1133     if (src->gtOper == GT_CALL)
1134     {
1135         if (src->AsCall()->TreatAsHasRetBufArg(this))
1136         {
1137             // Case of call returning a struct via hidden retbuf arg
1138
1139             // insert the return value buffer into the argument list as first byref parameter
1140             src->gtCall.gtCallArgs = gtNewListNode(destAddr, src->gtCall.gtCallArgs);
1141
1142             // now returns void, not a struct
1143             src->gtType = TYP_VOID;
1144
1145             // return the morphed call node
1146             return src;
1147         }
1148         else
1149         {
1150             // Case of call returning a struct in one or more registers.
1151
1152             var_types returnType = (var_types)src->gtCall.gtReturnType;
1153
1154             // We won't use a return buffer, so change the type of src->gtType to 'returnType'
1155             src->gtType = genActualType(returnType);
1156
1157             // First we try to change this to "LclVar/LclFld = call"
1158             //
1159             if ((destAddr->gtOper == GT_ADDR) && (destAddr->gtOp.gtOp1->gtOper == GT_LCL_VAR))
1160             {
1161                 // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD.
1162                 // That is, the IR will be of the form lclVar = call for multi-reg return
1163                 //
1164                 GenTree* lcl = destAddr->gtOp.gtOp1;
1165                 if (src->AsCall()->HasMultiRegRetVal())
1166                 {
1167                     // Mark the struct LclVar as used in a MultiReg return context
1168                     //  which currently makes it non promotable.
1169                     // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1170                     // handle multireg returns.
1171                     lcl->gtFlags |= GTF_DONT_CSE;
1172                     lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1173                 }
1174                 else // The call result is not a multireg return
1175                 {
1176                     // We change this to a GT_LCL_FLD (from a GT_ADDR of a GT_LCL_VAR)
1177                     lcl->ChangeOper(GT_LCL_FLD);
1178                     fgLclFldAssign(lcl->gtLclVarCommon.gtLclNum);
1179                     lcl->gtType = src->gtType;
1180                     asgType     = src->gtType;
1181                 }
1182
1183                 dest = lcl;
1184
1185 #if defined(_TARGET_ARM_)
1186                 // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case,
1187                 // but that method has not been updated to include ARM.
1188                 impMarkLclDstNotPromotable(lcl->gtLclVarCommon.gtLclNum, src, structHnd);
1189                 lcl->gtFlags |= GTF_DONT_CSE;
1190 #elif defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1191                 // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
1192                 assert(!src->gtCall.IsVarargs() && "varargs not allowed for System V OSs.");
1193
1194                 // Make the struct non promotable. The eightbytes could contain multiple fields.
1195                 // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1196                 // handle multireg returns.
1197                 // TODO-Cleanup: Why is this needed here? It seems that this will be set even for
1198                 // non-multireg returns.
1199                 lcl->gtFlags |= GTF_DONT_CSE;
1200                 lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1201 #endif
1202             }
1203             else // we don't have a GT_ADDR of a GT_LCL_VAR
1204             {
1205                 // !!! The destination could be on stack. !!!
1206                 // This flag will let us choose the correct write barrier.
1207                 asgType   = returnType;
1208                 destFlags = GTF_IND_TGTANYWHERE;
1209             }
1210         }
1211     }
1212     else if (src->gtOper == GT_RET_EXPR)
1213     {
1214         GenTreeCall* call = src->gtRetExpr.gtInlineCandidate->AsCall();
1215         noway_assert(call->gtOper == GT_CALL);
1216
1217         if (call->HasRetBufArg())
1218         {
1219             // insert the return value buffer into the argument list as first byref parameter
1220             call->gtCallArgs = gtNewListNode(destAddr, call->gtCallArgs);
1221
1222             // now returns void, not a struct
1223             src->gtType  = TYP_VOID;
1224             call->gtType = TYP_VOID;
1225
1226             // We already have appended the write to 'dest' GT_CALL's args
1227             // So now we just return an empty node (pruning the GT_RET_EXPR)
1228             return src;
1229         }
1230         else
1231         {
1232             // Case of inline method returning a struct in one or more registers.
1233             //
1234             var_types returnType = (var_types)call->gtReturnType;
1235
1236             // We won't need a return buffer
1237             asgType      = returnType;
1238             src->gtType  = genActualType(returnType);
1239             call->gtType = src->gtType;
1240
1241             // If we've changed the type, and it no longer matches a local destination,
1242             // we must use an indirection.
1243             if ((dest != nullptr) && (dest->OperGet() == GT_LCL_VAR) && (dest->TypeGet() != asgType))
1244             {
1245                 dest = nullptr;
1246             }
1247
1248             // !!! The destination could be on stack. !!!
1249             // This flag will let us choose the correct write barrier.
1250             destFlags = GTF_IND_TGTANYWHERE;
1251         }
1252     }
1253     else if (src->OperIsBlk())
1254     {
1255         asgType = impNormStructType(structHnd);
1256         if (src->gtOper == GT_OBJ)
1257         {
1258             assert(src->gtObj.gtClass == structHnd);
1259         }
1260     }
1261     else if (src->gtOper == GT_INDEX)
1262     {
1263         asgType = impNormStructType(structHnd);
1264         assert(src->gtIndex.gtStructElemClass == structHnd);
1265     }
1266     else if (src->gtOper == GT_MKREFANY)
1267     {
1268         // Since we are assigning the result of a GT_MKREFANY,
1269         // "destAddr" must point to a refany.
1270
1271         GenTree* destAddrClone;
1272         destAddr =
1273             impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment"));
1274
1275         assert(offsetof(CORINFO_RefAny, dataPtr) == 0);
1276         assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF);
1277         GetZeroOffsetFieldMap()->Set(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));
1278         GenTree*       ptrSlot         = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr);
1279         GenTreeIntCon* typeFieldOffset = gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL);
1280         typeFieldOffset->gtFieldSeq    = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField());
1281         GenTree* typeSlot =
1282             gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset));
1283
1284         // append the assign of the pointer value
1285         GenTree* asg = gtNewAssignNode(ptrSlot, src->gtOp.gtOp1);
1286         if (pAfterStmt)
1287         {
1288             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(asg, impCurStmtOffs));
1289         }
1290         else
1291         {
1292             impAppendTree(asg, curLevel, impCurStmtOffs);
1293         }
1294
1295         // return the assign of the type value, to be appended
1296         return gtNewAssignNode(typeSlot, src->gtOp.gtOp2);
1297     }
1298     else if (src->gtOper == GT_COMMA)
1299     {
1300         // The second thing is the struct or its address.
1301         assert(varTypeIsStruct(src->gtOp.gtOp2) || src->gtOp.gtOp2->gtType == TYP_BYREF);
1302         if (pAfterStmt)
1303         {
1304             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(src->gtOp.gtOp1, impCurStmtOffs));
1305         }
1306         else
1307         {
1308             impAppendTree(src->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1309         }
1310
1311         // Evaluate the second thing using recursion.
1312         return impAssignStructPtr(destAddr, src->gtOp.gtOp2, structHnd, curLevel, pAfterStmt, block);
1313     }
1314     else if (src->IsLocal())
1315     {
1316         asgType = src->TypeGet();
1317     }
1318     else if (asgType == TYP_STRUCT)
1319     {
1320         asgType     = impNormStructType(structHnd);
1321         src->gtType = asgType;
1322 #ifdef LEGACY_BACKEND
1323         if (asgType == TYP_STRUCT)
1324         {
1325             GenTree* srcAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, src);
1326             src              = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
1327         }
1328 #endif
1329     }
1330     if (dest == nullptr)
1331     {
1332         // TODO-1stClassStructs: We shouldn't really need a block node as the destination
1333         // if this is a known struct type.
1334         if (asgType == TYP_STRUCT)
1335         {
1336             dest = gtNewObjNode(structHnd, destAddr);
1337             gtSetObjGcInfo(dest->AsObj());
1338             // Although an obj as a call argument was always assumed to be a globRef
1339             // (which is itself overly conservative), that is not true of the operands
1340             // of a block assignment.
1341             dest->gtFlags &= ~GTF_GLOB_REF;
1342             dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF);
1343         }
1344         else if (varTypeIsStruct(asgType))
1345         {
1346             dest = new (this, GT_BLK) GenTreeBlk(GT_BLK, asgType, destAddr, genTypeSize(asgType));
1347         }
1348         else
1349         {
1350             dest = gtNewOperNode(GT_IND, asgType, destAddr);
1351         }
1352     }
1353     else
1354     {
1355         dest->gtType = asgType;
1356     }
1357
1358     dest->gtFlags |= destFlags;
1359     destFlags = dest->gtFlags;
1360
1361     // return an assignment node, to be appended
1362     GenTree* asgNode = gtNewAssignNode(dest, src);
1363     gtBlockOpInit(asgNode, dest, src, false);
1364
1365     // TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs
1366     // of assignments.
1367     if ((destFlags & GTF_DONT_CSE) == 0)
1368     {
1369         dest->gtFlags &= ~(GTF_DONT_CSE);
1370     }
1371     return asgNode;
1372 }
1373
1374 /*****************************************************************************
1375    Given a struct value, and the class handle for that structure, return
1376    the expression for the address for that structure value.
1377
1378    willDeref - whether the caller guarantees that it will dereference the pointer.
1379 */
1380
1381 GenTree* Compiler::impGetStructAddr(GenTree*             structVal,
1382                                     CORINFO_CLASS_HANDLE structHnd,
1383                                     unsigned             curLevel,
1384                                     bool                 willDeref)
1385 {
1386     assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd));
1387
1388     var_types type = structVal->TypeGet();
1389
1390     genTreeOps oper = structVal->gtOper;
1391
1392     if (oper == GT_OBJ && willDeref)
1393     {
1394         assert(structVal->gtObj.gtClass == structHnd);
1395         return (structVal->gtObj.Addr());
1396     }
1397     else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY ||
1398              structVal->OperIsSimdHWIntrinsic())
1399     {
1400         unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1401
1402         impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1403
1404         // The 'return value' is now the temp itself
1405
1406         type          = genActualType(lvaTable[tmpNum].TypeGet());
1407         GenTree* temp = gtNewLclvNode(tmpNum, type);
1408         temp          = gtNewOperNode(GT_ADDR, TYP_BYREF, temp);
1409         return temp;
1410     }
1411     else if (oper == GT_COMMA)
1412     {
1413         assert(structVal->gtOp.gtOp2->gtType == type); // Second thing is the struct
1414
1415         GenTree* oldTreeLast  = impTreeLast;
1416         structVal->gtOp.gtOp2 = impGetStructAddr(structVal->gtOp.gtOp2, structHnd, curLevel, willDeref);
1417         structVal->gtType     = TYP_BYREF;
1418
1419         if (oldTreeLast != impTreeLast)
1420         {
1421             // Some temp assignment statement was placed on the statement list
1422             // for Op2, but that would be out of order with op1, so we need to
1423             // spill op1 onto the statement list after whatever was last
1424             // before we recursed on Op2 (i.e. before whatever Op2 appended).
1425             impInsertTreeBefore(structVal->gtOp.gtOp1, impCurStmtOffs, oldTreeLast->gtNext);
1426             structVal->gtOp.gtOp1 = gtNewNothingNode();
1427         }
1428
1429         return (structVal);
1430     }
1431
1432     return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1433 }
1434
1435 //------------------------------------------------------------------------
1436 // impNormStructType: Given a (known to be) struct class handle structHnd, normalize its type,
1437 //                    and optionally determine the GC layout of the struct.
1438 //
1439 // Arguments:
1440 //    structHnd       - The class handle for the struct type of interest.
1441 //    gcLayout        - (optional, default nullptr) - a BYTE pointer, allocated by the caller,
1442 //                      into which the gcLayout will be written.
1443 //    pNumGCVars      - (optional, default nullptr) - if non-null, a pointer to an unsigned,
1444 //                      which will be set to the number of GC fields in the struct.
1445 //    pSimdBaseType   - (optional, default nullptr) - if non-null, and the struct is a SIMD
1446 //                      type, set to the SIMD base type
1447 //
1448 // Return Value:
1449 //    The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*).
1450 //    The gcLayout will be returned using the pointers provided by the caller, if non-null.
1451 //    It may also modify the compFloatingPointUsed flag if the type is a SIMD type.
1452 //
1453 // Assumptions:
1454 //    The caller must set gcLayout to nullptr OR ensure that it is large enough
1455 //    (see ICorStaticInfo::getClassGClayout in corinfo.h).
1456 //
1457 // Notes:
1458 //    Normalizing the type involves examining the struct type to determine if it should
1459 //    be modified to one that is handled specially by the JIT, possibly being a candidate
1460 //    for full enregistration, e.g. TYP_SIMD16.
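//    For example, with FEATURE_SIMD enabled, a 16-byte struct of floats (such as
//    System.Numerics.Vector4) would be normalized from TYP_STRUCT to TYP_SIMD16.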
1461
1462 var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
1463                                       BYTE*                gcLayout,
1464                                       unsigned*            pNumGCVars,
1465                                       var_types*           pSimdBaseType)
1466 {
1467     assert(structHnd != NO_CLASS_HANDLE);
1468
1469     const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd);
1470     var_types   structType  = TYP_STRUCT;
1471
1472     // On CoreCLR the check for GC pointers is a "may contain" check, to account for the
1473     // special ByRef-like structs such as Span<T>.  CORINFO_FLG_CONTAINS_STACK_PTR is the relevant bit:
1474     // when it is set, the struct contains a ByRef that could be either a GC pointer or a native
1475     // pointer.
1476     const bool mayContainGCPtrs =
1477         ((structFlags & CORINFO_FLG_CONTAINS_STACK_PTR) != 0 || ((structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0));
1478
1479 #ifdef FEATURE_SIMD
1480     // Check to see if this is a SIMD type.
1481     if (featureSIMD && !mayContainGCPtrs)
1482     {
1483         unsigned originalSize = info.compCompHnd->getClassSize(structHnd);
1484
1485         if ((originalSize >= minSIMDStructBytes()) && (originalSize <= maxSIMDStructBytes()))
1486         {
1487             unsigned int sizeBytes;
1488             var_types    simdBaseType = getBaseTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
1489             if (simdBaseType != TYP_UNKNOWN)
1490             {
1491                 assert(sizeBytes == originalSize);
1492                 structType = getSIMDTypeForSize(sizeBytes);
1493                 if (pSimdBaseType != nullptr)
1494                 {
1495                     *pSimdBaseType = simdBaseType;
1496                 }
1497                 // Also indicate that we use floating point registers.
1498                 compFloatingPointUsed = true;
1499             }
1500         }
1501     }
1502 #endif // FEATURE_SIMD
1503
1504     // Fetch GC layout info if requested
1505     if (gcLayout != nullptr)
1506     {
1507         unsigned numGCVars = info.compCompHnd->getClassGClayout(structHnd, gcLayout);
1508
1509         // Verify that the quick test up above via the class attributes gave a
1510         // safe view of the type's GCness.
1511         //
1512         // Note there are cases where mayContainGCPtrs is true but getClassGClayout
1513         // does not report any gc fields.
1514
1515         assert(mayContainGCPtrs || (numGCVars == 0));
1516
1517         if (pNumGCVars != nullptr)
1518         {
1519             *pNumGCVars = numGCVars;
1520         }
1521     }
1522     else
1523     {
1524         // Can't safely ask for number of GC pointers without also
1525         // asking for layout.
1526         assert(pNumGCVars == nullptr);
1527     }
1528
1529     return structType;
1530 }
1531
1532 //****************************************************************************
1533 //  Given a TYP_STRUCT value 'structVal', make sure it is 'canonical'; that is, it is either
1534 //  an OBJ or a MKREFANY node, or a node (e.g. GT_INDEX) that will be morphed into one.
1535 //
1536 GenTree* Compiler::impNormStructVal(GenTree*             structVal,
1537                                     CORINFO_CLASS_HANDLE structHnd,
1538                                     unsigned             curLevel,
1539                                     bool                 forceNormalization /*=false*/)
1540 {
1541     assert(forceNormalization || varTypeIsStruct(structVal));
1542     assert(structHnd != NO_CLASS_HANDLE);
1543     var_types structType = structVal->TypeGet();
1544     bool      makeTemp   = false;
1545     if (structType == TYP_STRUCT)
1546     {
1547         structType = impNormStructType(structHnd);
1548     }
1549     bool                 alreadyNormalized = false;
1550     GenTreeLclVarCommon* structLcl         = nullptr;
1551
1552     genTreeOps oper = structVal->OperGet();
1553     switch (oper)
1554     {
1555         // GT_RETURN and GT_MKREFANY don't capture the handle.
1556         case GT_RETURN:
1557             break;
1558         case GT_MKREFANY:
1559             alreadyNormalized = true;
1560             break;
1561
1562         case GT_CALL:
1563             structVal->gtCall.gtRetClsHnd = structHnd;
1564             makeTemp                      = true;
1565             break;
1566
1567         case GT_RET_EXPR:
1568             structVal->gtRetExpr.gtRetClsHnd = structHnd;
1569             makeTemp                         = true;
1570             break;
1571
1572         case GT_ARGPLACE:
1573             structVal->gtArgPlace.gtArgPlaceClsHnd = structHnd;
1574             break;
1575
1576         case GT_INDEX:
1577             // This will be transformed to an OBJ later.
1578             alreadyNormalized                    = true;
1579             structVal->gtIndex.gtStructElemClass = structHnd;
1580             structVal->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(structHnd);
1581             break;
1582
1583         case GT_FIELD:
1584             // Wrap it in a GT_OBJ.
1585             structVal->gtType = structType;
1586             structVal         = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1587             break;
1588
1589         case GT_LCL_VAR:
1590         case GT_LCL_FLD:
1591             structLcl = structVal->AsLclVarCommon();
1592             // Wrap it in a GT_OBJ.
1593             structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1594             __fallthrough;
1595
1596         case GT_OBJ:
1597         case GT_BLK:
1598         case GT_DYN_BLK:
1599         case GT_ASG:
1600             // These should already have the appropriate type.
1601             assert(structVal->gtType == structType);
1602             alreadyNormalized = true;
1603             break;
1604
1605         case GT_IND:
1606             assert(structVal->gtType == structType);
1607             structVal         = gtNewObjNode(structHnd, structVal->gtGetOp1());
1608             alreadyNormalized = true;
1609             break;
1610
1611 #ifdef FEATURE_SIMD
1612         case GT_SIMD:
1613             assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
1614             break;
1615 #endif // FEATURE_SIMD
1616 #ifdef FEATURE_HW_INTRINSICS
1617         case GT_HWIntrinsic:
1618             assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
1619             break;
1620 #endif
1621
1622         case GT_COMMA:
1623         {
1624             // The second operand could be a block node, a GT_FIELD, a GT_SIMD, or a GT_COMMA node.
1625             GenTree* blockNode = structVal->gtOp.gtOp2;
1626             assert(blockNode->gtType == structType);
1627
1628             // Is this GT_COMMA(op1, GT_COMMA())?
1629             GenTree* parent = structVal;
1630             if (blockNode->OperGet() == GT_COMMA)
1631             {
1632                 // Find the last node in the comma chain.
1633                 do
1634                 {
1635                     assert(blockNode->gtType == structType);
1636                     parent    = blockNode;
1637                     blockNode = blockNode->gtOp.gtOp2;
1638                 } while (blockNode->OperGet() == GT_COMMA);
1639             }
1640
1641             if (blockNode->OperGet() == GT_FIELD)
1642             {
1643                 // If we have a GT_FIELD then wrap it in a GT_OBJ.
1644                 blockNode = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, blockNode));
1645             }
1646
1647 #ifdef FEATURE_SIMD
1648             if (blockNode->OperIsSIMDorSimdHWintrinsic())
1649             {
1650                 parent->gtOp.gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization);
1651                 alreadyNormalized  = true;
1652             }
1653             else
1654 #endif
1655             {
1656                 noway_assert(blockNode->OperIsBlk());
1657
1658                 // Sink the GT_COMMA below the blockNode addr.
1659                 // That is, GT_COMMA(op1, op2=blockNode) is transformed into
1660                 // blockNode(GT_COMMA(TYP_BYREF, op1, op2's op1)).
1661                 //
1662                 // In the case of a chained GT_COMMA, we sink the last
1663                 // GT_COMMA below the blockNode addr.
1664                 GenTree* blockNodeAddr = blockNode->gtOp.gtOp1;
1665                 assert(blockNodeAddr->gtType == TYP_BYREF);
1666                 GenTree* commaNode    = parent;
1667                 commaNode->gtType     = TYP_BYREF;
1668                 commaNode->gtOp.gtOp2 = blockNodeAddr;
1669                 blockNode->gtOp.gtOp1 = commaNode;
1670                 if (parent == structVal)
1671                 {
1672                     structVal = blockNode;
1673                 }
1674                 alreadyNormalized = true;
1675             }
1676         }
1677         break;
1678
1679         default:
1680             noway_assert(!"Unexpected node in impNormStructVal()");
1681             break;
1682     }
1683     structVal->gtType  = structType;
1684     GenTree* structObj = structVal;
1685
1686     if (!alreadyNormalized || forceNormalization)
1687     {
1688         if (makeTemp)
1689         {
1690             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1691
1692             impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1693
1694             // The structVal is now the temp itself
1695
1696             structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon();
1697             // TODO-1stClassStructs: Avoid always wrapping in GT_OBJ.
1698             structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structLcl));
1699         }
1700         else if (varTypeIsStruct(structType) && !structVal->OperIsBlk())
1701         {
1702             // Wrap it in a GT_OBJ
1703             structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1704         }
1705     }
1706
1707     if (structLcl != nullptr)
1708     {
1709         // An OBJ on an ADDR(LCL_VAR) can never raise an exception
1710         // so we don't set GTF_EXCEPT here.
1711         if (!lvaIsImplicitByRefLocal(structLcl->gtLclNum))
1712         {
1713             structObj->gtFlags &= ~GTF_GLOB_REF;
1714         }
1715     }
1716     else
1717     {
1718         // In general an OBJ is an indirection and could raise an exception.
1719         structObj->gtFlags |= GTF_EXCEPT;
1720     }
1721     return (structObj);
1722 }
1723
1724 /******************************************************************************/
1725 // Given a type token, generate code that will evaluate to the correct
1726 // handle representation of that token (type handle, field handle, or method handle)
1727 //
1728 // For most cases, the handle is determined at compile-time, and the code
1729 // generated is simply an embedded handle.
1730 //
1731 // Run-time lookup is required if the enclosing method is shared between instantiations
1732 // and the token refers to formal type parameters whose instantiation is not known
1733 // at compile-time.
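// For example, a 'ldtoken' of a formal type parameter inside code that is shared across
// instantiations cannot be resolved to a single handle at jit time, and needs a dictionary
// lookup through the generic context at run time.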
1734 //
1735 GenTree* Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1736                                     BOOL*                   pRuntimeLookup /* = NULL */,
1737                                     BOOL                    mustRestoreHandle /* = FALSE */,
1738                                     BOOL                    importParent /* = FALSE */)
1739 {
1740     assert(!fgGlobalMorph);
1741
1742     CORINFO_GENERICHANDLE_RESULT embedInfo;
1743     info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo);
1744
1745     if (pRuntimeLookup)
1746     {
1747         *pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup;
1748     }
1749
1750     if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup)
1751     {
1752         switch (embedInfo.handleType)
1753         {
1754             case CORINFO_HANDLETYPE_CLASS:
1755                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle);
1756                 break;
1757
1758             case CORINFO_HANDLETYPE_METHOD:
1759                 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle);
1760                 break;
1761
1762             case CORINFO_HANDLETYPE_FIELD:
1763                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
1764                     info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle));
1765                 break;
1766
1767             default:
1768                 break;
1769         }
1770     }
1771
1772     // Generate the full lookup tree. May be null if we're abandoning an inline attempt.
1773     GenTree* result = impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
1774                                       embedInfo.compileTimeHandle);
1775
1776     // If we have a result and it requires runtime lookup, wrap it in a runtime lookup node.
1777     if ((result != nullptr) && embedInfo.lookup.lookupKind.needsRuntimeLookup)
1778     {
1779         result = gtNewRuntimeLookup(embedInfo.compileTimeHandle, embedInfo.handleType, result);
1780     }
1781
1782     return result;
1783 }
1784
1785 GenTree* Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1786                                    CORINFO_LOOKUP*         pLookup,
1787                                    unsigned                handleFlags,
1788                                    void*                   compileTimeHandle)
1789 {
1790     if (!pLookup->lookupKind.needsRuntimeLookup)
1791     {
1792         // No runtime lookup is required.
1793         // Access is direct or memory-indirect (of a fixed address) reference
1794
1795         CORINFO_GENERIC_HANDLE handle       = nullptr;
1796         void*                  pIndirection = nullptr;
1797         assert(pLookup->constLookup.accessType != IAT_PPVALUE);
1798
1799         if (pLookup->constLookup.accessType == IAT_VALUE)
1800         {
1801             handle = pLookup->constLookup.handle;
1802         }
1803         else if (pLookup->constLookup.accessType == IAT_PVALUE)
1804         {
1805             pIndirection = pLookup->constLookup.addr;
1806         }
1807         return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle);
1808     }
1809     else if (compIsForInlining())
1810     {
1811         // Don't import runtime lookups when inlining
1812         // Inlining has to be aborted in such a case
1813         compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1814         return nullptr;
1815     }
1816     else
1817     {
1818         // Need to use dictionary-based access which depends on the typeContext
1819         // which is only available at runtime, not at compile-time.
1820
1821         return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle);
1822     }
1823 }
1824
1825 #ifdef FEATURE_READYTORUN_COMPILER
1826 GenTree* Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup,
1827                                              unsigned              handleFlags,
1828                                              void*                 compileTimeHandle)
1829 {
1830     CORINFO_GENERIC_HANDLE handle       = nullptr;
1831     void*                  pIndirection = nullptr;
1832     assert(pLookup->accessType != IAT_PPVALUE);
1833
1834     if (pLookup->accessType == IAT_VALUE)
1835     {
1836         handle = pLookup->handle;
1837     }
1838     else if (pLookup->accessType == IAT_PVALUE)
1839     {
1840         pIndirection = pLookup->addr;
1841     }
1842     return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle);
1843 }
1844
1845 GenTreeCall* Compiler::impReadyToRunHelperToTree(
1846     CORINFO_RESOLVED_TOKEN* pResolvedToken,
1847     CorInfoHelpFunc         helper,
1848     var_types               type,
1849     GenTreeArgList*         args /* =NULL*/,
1850     CORINFO_LOOKUP_KIND*    pGenericLookupKind /* =NULL. Only used with generics */)
1851 {
1852     CORINFO_CONST_LOOKUP lookup;
1853     if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup))
1854     {
1855         return nullptr;
1856     }
1857
1858     GenTreeCall* op1 = gtNewHelperCallNode(helper, type, args);
1859
1860     op1->setEntryPoint(lookup);
1861
1862     return op1;
1863 }
1864 #endif
1865
1866 GenTree* Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
1867 {
1868     GenTree* op1 = nullptr;
1869
1870     switch (pCallInfo->kind)
1871     {
1872         case CORINFO_CALL:
1873             op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod);
1874
1875 #ifdef FEATURE_READYTORUN_COMPILER
1876             if (opts.IsReadyToRun())
1877             {
1878                 op1->gtFptrVal.gtEntryPoint = pCallInfo->codePointerLookup.constLookup;
1879             }
1880             else
1881             {
1882                 op1->gtFptrVal.gtEntryPoint.addr       = nullptr;
1883                 op1->gtFptrVal.gtEntryPoint.accessType = IAT_VALUE;
1884             }
1885 #endif
1886             break;
1887
1888         case CORINFO_CALL_CODE_POINTER:
1889             if (compIsForInlining())
1890             {
1891                 // Don't import runtime lookups when inlining
1892                 // Inlining has to be aborted in such a case
1893                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1894                 return nullptr;
1895             }
1896
1897             op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod);
1898             break;
1899
1900         default:
1901             noway_assert(!"unknown call kind");
1902             break;
1903     }
1904
1905     return op1;
1906 }
1907
1908 //------------------------------------------------------------------------
1909 // getRuntimeContextTree: find pointer to context for runtime lookup.
1910 //
1911 // Arguments:
1912 //    kind - lookup kind.
1913 //
1914 // Return Value:
1915 //    Return GenTree pointer to generic shared context.
1916 //
1917 // Notes:
1918 //    Reports that the generic context is used (see lvaGenericsContextUseCount).
1919
1920 GenTree* Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind)
1921 {
1922     GenTree* ctxTree = nullptr;
1923
1924     // Collectible types require that, for shared generic code, any use of the generic context
1925     // parameter be reported. (This is a conservative approach; we could detect some cases, particularly
1926     // when the context parameter is 'this', where we don't need the eager reporting logic.)
1927     lvaGenericsContextUseCount++;
1928
1929     if (kind == CORINFO_LOOKUP_THISOBJ)
1930     {
1931         // this Object
1932         ctxTree = gtNewLclvNode(info.compThisArg, TYP_REF);
1933
1934         // Vtable pointer of this object
1935         ctxTree = gtNewOperNode(GT_IND, TYP_I_IMPL, ctxTree);
1936         ctxTree->gtFlags |= GTF_EXCEPT; // Null-pointer exception
1937         ctxTree->gtFlags |= GTF_IND_INVARIANT;
1938     }
1939     else
1940     {
1941         assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM);
1942
1943         ctxTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); // Exact method descriptor as passed in as last arg
1944     }
1945     return ctxTree;
1946 }
1947
1948 /*****************************************************************************/
1949 /* Import a dictionary lookup to access a handle in code shared between
1950    generic instantiations.
1951    The lookup depends on the typeContext which is only available at
1952    runtime, and not at compile-time.
1953    pLookup->token1 and pLookup->token2 specify the handle that is needed.
1954    The cases are:
1955
1956    1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the
1957       instantiation-specific handle, and the tokens to lookup the handle.
1958    2. pLookup->indirections != CORINFO_USEHELPER :
1959       2a. pLookup->testForNull == false : Dereference the instantiation-specific handle
1960           to get the handle.
1961       2b. pLookup->testForNull == true : Dereference the instantiation-specific handle.
1962           If it is non-NULL, it is the handle required. Else, call a helper
1963           to lookup the handle.
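
   As a rough sketch, case 2b is materialized below (via a QMARK/COLON tree) as the
   equivalent of:

       handle = *slotPtr;
       if (handle == nullptr)
           handle = helper(genericContext, signature);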
1964  */
1965
1966 GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1967                                           CORINFO_LOOKUP*         pLookup,
1968                                           void*                   compileTimeHandle)
1969 {
1970
1971     // This method can only be called from the importer instance of the Compiler.
1972     // In other words, it cannot be called by the instance of the Compiler for the inlinee.
1973     assert(!compIsForInlining());
1974
1975     GenTree* ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);
1976
1977     CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;
1978     // It's available only via the run-time helper function
1979     if (pRuntimeLookup->indirections == CORINFO_USEHELPER)
1980     {
1981 #ifdef FEATURE_READYTORUN_COMPILER
1982         if (opts.IsReadyToRun())
1983         {
1984             return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
1985                                              gtNewArgList(ctxTree), &pLookup->lookupKind);
1986         }
1987 #endif
1988         GenTree* argNode =
1989             gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, compileTimeHandle);
1990         GenTreeArgList* helperArgs = gtNewArgList(ctxTree, argNode);
1991
1992         return gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs);
1993     }
1994
1995     // Slot pointer
1996     GenTree* slotPtrTree = ctxTree;
1997
1998     if (pRuntimeLookup->testForNull)
1999     {
2000         slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2001                                    nullptr DEBUGARG("impRuntimeLookup slot"));
2002     }
2003
2004     GenTree* indOffTree = nullptr;
2005
2006     // Apply the repeated indirections
2007     for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
2008     {
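        // When indirectFirstOffset/indirectSecondOffset applies at this level, the value that is
        // loaded here is an offset relative to the current pointer, so keep a copy of the current
        // pointer (indOffTree) and add it back in after the indirection below.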
2009         if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
2010         {
2011             indOffTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2012                                       nullptr DEBUGARG("impRuntimeLookup indirectOffset"));
2013         }
2014
2015         if (i != 0)
2016         {
2017             slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2018             slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2019             slotPtrTree->gtFlags |= GTF_IND_INVARIANT;
2020         }
2021
2022         if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
2023         {
2024             slotPtrTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, indOffTree, slotPtrTree);
2025         }
2026
2027         if (pRuntimeLookup->offsets[i] != 0)
2028         {
2029             slotPtrTree =
2030                 gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
2031         }
2032     }
2033
2034     // No null test required
2035     if (!pRuntimeLookup->testForNull)
2036     {
2037         if (pRuntimeLookup->indirections == 0)
2038         {
2039             return slotPtrTree;
2040         }
2041
2042         slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2043         slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2044
2045         if (!pRuntimeLookup->testForFixup)
2046         {
2047             return slotPtrTree;
2048         }
2049
2050         impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0"));
2051
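        // The slot may still hold an unresolved fixup: if its low bit is set, the real handle is
        // found by one more indirection at (slot - 1). Build a QMARK that performs that extra
        // indirection only when the low bit is set.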
2052         unsigned slotLclNum = lvaGrabTemp(true DEBUGARG("impRuntimeLookup test"));
2053         impAssignTempGen(slotLclNum, slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr, impCurStmtOffs);
2054
2055         GenTree* slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2056         // downcast the pointer to a TYP_INT on 64-bit targets
2057         slot = impImplicitIorI4Cast(slot, TYP_INT);
2058         // Use a GT_AND to check for the lowest bit and indirect if it is set
2059         GenTree* test  = gtNewOperNode(GT_AND, TYP_INT, slot, gtNewIconNode(1));
2060         GenTree* relop = gtNewOperNode(GT_EQ, TYP_INT, test, gtNewIconNode(0));
2061         relop->gtFlags |= GTF_RELOP_QMARK;
2062
2063         // slot = GT_IND(slot - 1)
2064         slot           = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2065         GenTree* add   = gtNewOperNode(GT_ADD, TYP_I_IMPL, slot, gtNewIconNode(-1, TYP_I_IMPL));
2066         GenTree* indir = gtNewOperNode(GT_IND, TYP_I_IMPL, add);
2067         indir->gtFlags |= GTF_IND_NONFAULTING;
2068         indir->gtFlags |= GTF_IND_INVARIANT;
2069
2070         slot           = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2071         GenTree* asg   = gtNewAssignNode(slot, indir);
2072         GenTree* colon = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), asg);
2073         GenTree* qmark = gtNewQmarkNode(TYP_VOID, relop, colon);
2074         impAppendTree(qmark, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
2075
2076         return gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2077     }
2078
2079     assert(pRuntimeLookup->indirections != 0);
2080
2081     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1"));
2082
2083     // Extract the handle
2084     GenTree* handle = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2085     handle->gtFlags |= GTF_IND_NONFAULTING;
2086
2087     GenTree* handleCopy = impCloneExpr(handle, &handle, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2088                                        nullptr DEBUGARG("impRuntimeLookup typehandle"));
2089
2090     // Call to helper
2091     GenTree* argNode = gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, compileTimeHandle);
2092
2093     GenTreeArgList* helperArgs = gtNewArgList(ctxTree, argNode);
2094     GenTree*        helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs);
2095
2096     // Check for null and possibly call helper
2097     GenTree* relop = gtNewOperNode(GT_NE, TYP_INT, handle, gtNewIconNode(0, TYP_I_IMPL));
2098     relop->gtFlags |= GTF_RELOP_QMARK;
2099
2100     GenTree* colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL,
2101                                                        gtNewNothingNode(), // do nothing if nonnull
2102                                                        helperCall);
2103
2104     GenTree* qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2105
2106     unsigned tmp;
2107     if (handleCopy->IsLocal())
2108     {
2109         tmp = handleCopy->gtLclVarCommon.gtLclNum;
2110     }
2111     else
2112     {
2113         tmp = lvaGrabTemp(true DEBUGARG("spilling QMark1"));
2114     }
2115
2116     impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2117     return gtNewLclvNode(tmp, TYP_I_IMPL);
2118 }
2119
2120 /******************************************************************************
2121  *  Spills the stack at verCurrentState.esStack[level] and replaces it with a temp.
2122  *  If tnum!=BAD_VAR_NUM, the temp var used to replace the tree is tnum,
2123  *     else, grab a new temp.
2124  *  For structs (which can be pushed on the stack using obj, etc),
2125  *  special handling is needed
2126  */
2127
2128 struct RecursiveGuard
2129 {
2130 public:
2131     RecursiveGuard()
2132     {
2133         m_pAddress = nullptr;
2134     }
2135
2136     ~RecursiveGuard()
2137     {
2138         if (m_pAddress)
2139         {
2140             *m_pAddress = false;
2141         }
2142     }
2143
2144     void Init(bool* pAddress, bool bInitialize)
2145     {
2146         assert(pAddress && *pAddress == false && "Recursive guard violation");
2147         m_pAddress = pAddress;
2148
2149         if (bInitialize)
2150         {
2151             *m_pAddress = true;
2152         }
2153     }
2154
2155 protected:
2156     bool* m_pAddress;
2157 };
2158
2159 bool Compiler::impSpillStackEntry(unsigned level,
2160                                   unsigned tnum
2161 #ifdef DEBUG
2162                                   ,
2163                                   bool        bAssertOnRecursion,
2164                                   const char* reason
2165 #endif
2166                                   )
2167 {
2168
2169 #ifdef DEBUG
2170     RecursiveGuard guard;
2171     guard.Init(&impNestedStackSpill, bAssertOnRecursion);
2172 #endif
2173
2174     GenTree* tree = verCurrentState.esStack[level].val;
2175
2176     /* Allocate a temp if we haven't been asked to use a particular one */
2177
2178     if (tiVerificationNeeded)
2179     {
2180         // Ignore bad temp requests (they will happen with bad code and will be
2181         // caught when importing the dest block)
2182         if ((tnum != BAD_VAR_NUM && tnum >= lvaCount) && verNeedsVerification())
2183         {
2184             return false;
2185         }
2186     }
2187     else
2188     {
2189         if (tnum != BAD_VAR_NUM && (tnum >= lvaCount))
2190         {
2191             return false;
2192         }
2193     }
2194
2195     bool isNewTemp = false;
2196
2197     if (tnum == BAD_VAR_NUM)
2198     {
2199         tnum      = lvaGrabTemp(true DEBUGARG(reason));
2200         isNewTemp = true;
2201     }
2202     else if (tiVerificationNeeded && lvaTable[tnum].TypeGet() != TYP_UNDEF)
2203     {
2204         // if verification is needed and tnum's type is incompatible with
2205         // type on that stack, we grab a new temp. This is safe since
2206         // we will throw a verification exception in the dest block.
2207
2208         var_types valTyp = tree->TypeGet();
2209         var_types dstTyp = lvaTable[tnum].TypeGet();
2210
2211         // If the two types are different, we return. This will only happen with bad code and will
2212         // be caught when importing the dest block. We still allow int/byref and float/double differences.
2213         if ((genActualType(valTyp) != genActualType(dstTyp)) &&
2214             !(
2215 #ifndef _TARGET_64BIT_
2216                 (valTyp == TYP_I_IMPL && dstTyp == TYP_BYREF) || (valTyp == TYP_BYREF && dstTyp == TYP_I_IMPL) ||
2217 #endif // !_TARGET_64BIT_
2218                 (varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp))))
2219         {
2220             if (verNeedsVerification())
2221             {
2222                 return false;
2223             }
2224         }
2225     }
2226
2227     /* Assign the spilled entry to the temp */
2228     impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level);
2229
2230     // If temp is newly introduced and a ref type, grab what type info we can.
2231     if (isNewTemp && (lvaTable[tnum].lvType == TYP_REF))
2232     {
2233         CORINFO_CLASS_HANDLE stkHnd = verCurrentState.esStack[level].seTypeInfo.GetClassHandle();
2234         lvaSetClass(tnum, tree, stkHnd);
2235     }
2236
2237     // The tree type may be modified by impAssignTempGen, so use the type of the lclVar.
2238     var_types type                     = genActualType(lvaTable[tnum].TypeGet());
2239     GenTree*  temp                     = gtNewLclvNode(tnum, type);
2240     verCurrentState.esStack[level].val = temp;
2241
2242     return true;
2243 }
2244
2245 /*****************************************************************************
2246  *
2247  *  Ensure that the stack has only spilled values
2248  */
2249
2250 void Compiler::impSpillStackEnsure(bool spillLeaves)
2251 {
2252     assert(!spillLeaves || opts.compDbgCode);
2253
2254     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2255     {
2256         GenTree* tree = verCurrentState.esStack[level].val;
2257
2258         if (!spillLeaves && tree->OperIsLeaf())
2259         {
2260             continue;
2261         }
2262
2263         // Temps introduced by the importer itself don't need to be spilled
2264
2265         bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) && (tree->gtLclVarCommon.gtLclNum >= info.compLocalsCount);
2266
2267         if (isTempLcl)
2268         {
2269             continue;
2270         }
2271
2272         impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure"));
2273     }
2274 }
2275
2276 void Compiler::impSpillEvalStack()
2277 {
2278     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2279     {
2280         impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack"));
2281     }
2282 }
2283
2284 /*****************************************************************************
2285  *
2286  *  If the stack contains any trees with side effects in them, assign those
2287  *  trees to temps and append the assignments to the statement list.
2288  *  On return the stack is guaranteed to be empty.
2289  */
2290
2291 inline void Compiler::impEvalSideEffects()
2292 {
2293     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects"));
2294     verCurrentState.esStackDepth = 0;
2295 }
2296
2297 /*****************************************************************************
2298  *
2299  *  If the stack contains any trees with side effects in them, assign those
2300  *  trees to temps and replace them on the stack with refs to their temps.
2301  *  [0..chkLevel) is the portion of the stack which will be checked and spilled.
2302  */
2303
2304 inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason))
2305 {
2306     assert(chkLevel != (unsigned)CHECK_SPILL_NONE);
2307
2308     /* Before we make any appends to the tree list we must spill the
2309      * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */
2310
2311     impSpillSpecialSideEff();
2312
2313     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
2314     {
2315         chkLevel = verCurrentState.esStackDepth;
2316     }
2317
2318     assert(chkLevel <= verCurrentState.esStackDepth);
2319
2320     unsigned spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT;
2321
2322     for (unsigned i = 0; i < chkLevel; i++)
2323     {
2324         GenTree* tree = verCurrentState.esStack[i].val;
2325
2326         GenTree* lclVarTree;
2327
2328         if ((tree->gtFlags & spillFlags) != 0 ||
2329             (spillGlobEffects &&                        // Only consider the following when  spillGlobEffects == TRUE
2330              !impIsAddressInLocal(tree, &lclVarTree) && // No need to spill the GT_ADDR node on a local.
2331              gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or
2332                                            // lvAddrTaken flag.
2333         {
2334             impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason));
2335         }
2336     }
2337 }
2338
2339 /*****************************************************************************
2340  *
2341  *  If the stack contains any trees with special side effects in them, assign
2342  *  those trees to temps and replace them on the stack with refs to their temps.
2343  */
2344
2345 inline void Compiler::impSpillSpecialSideEff()
2346 {
2347     // Only exception objects need to be carefully handled
2348
2349     if (!compCurBB->bbCatchTyp)
2350     {
2351         return;
2352     }
2353
2354     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2355     {
2356         GenTree* tree = verCurrentState.esStack[level].val;
2357         // Make sure if we have an exception object in the sub tree we spill ourselves.
2358         if (gtHasCatchArg(tree))
2359         {
2360             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff"));
2361         }
2362     }
2363 }
2364
2365 /*****************************************************************************
2366  *
2367  *  Spill all stack references to value classes (TYP_STRUCT nodes)
2368  */
2369
2370 void Compiler::impSpillValueClasses()
2371 {
2372     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2373     {
2374         GenTree* tree = verCurrentState.esStack[level].val;
2375
2376         if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT)
2377         {
2378             // Tree walk was aborted, which means that we found a
2379             // value class on the stack.  Need to spill that
2380             // stack entry.
2381
2382             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses"));
2383         }
2384     }
2385 }
2386
2387 /*****************************************************************************
2388  *
2389  *  Callback that checks if a tree node is TYP_STRUCT
2390  */
2391
2392 Compiler::fgWalkResult Compiler::impFindValueClasses(GenTree** pTree, fgWalkData* data)
2393 {
2394     fgWalkResult walkResult = WALK_CONTINUE;
2395
2396     if ((*pTree)->gtType == TYP_STRUCT)
2397     {
2398         // Abort the walk and indicate that we found a value class
2399
2400         walkResult = WALK_ABORT;
2401     }
2402
2403     return walkResult;
2404 }
2405
2406 /*****************************************************************************
2407  *
2408  *  If the stack contains any trees with references to local #lclNum, assign
2409  *  those trees to temps and replace them on the stack with refs to
2410  *  their temps.
2411  */
2412
2413 void Compiler::impSpillLclRefs(ssize_t lclNum)
2414 {
2415     /* Before we make any appends to the tree list we must spill the
2416      * "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */
2417
2418     impSpillSpecialSideEff();
2419
2420     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2421     {
2422         GenTree* tree = verCurrentState.esStack[level].val;
2423
2424         /* If the tree may throw an exception, and the block has a handler,
2425            then we need to spill assignments to the local if the local is
2426            live on entry to the handler.
2427            Just spill 'em all without considering liveness */
2428
2429         bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT));
2430
2431         /* Skip the tree if it doesn't have an affected reference,
2432            unless xcptnCaught */
2433
2434         if (xcptnCaught || gtHasRef(tree, lclNum, false))
2435         {
2436             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs"));
2437         }
2438     }
2439 }
2440
2441 /*****************************************************************************
2442  *
2443  *  Push catch arg onto the stack.
2444  *  If there are jumps to the beginning of the handler, insert basic block
2445  *  and spill catch arg to a temp. Update the handler block if necessary.
2446  *
2447  *  Returns the basic block of the actual handler.
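 *
 *  (When a spill is needed, a new block is inserted before 'hndBlk' that does, roughly,
 *  'tmp = CATCH_ARG', and a use of 'tmp' is what gets pushed on the stack instead.)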
2448  */
2449
2450 BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter)
2451 {
2452     // Do not inject the basic block twice on reimport. This should be
2453     // hit only under JIT stress. See if the block is the one we injected.
2454     // Note that EH canonicalization can inject internal blocks here. We might
2455     // be able to re-use such a block (but we don't, right now).
2456     if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET)) ==
2457         (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET))
2458     {
2459         GenTree* tree = hndBlk->bbTreeList;
2460
2461         if (tree != nullptr && tree->gtOper == GT_STMT)
2462         {
2463             tree = tree->gtStmt.gtStmtExpr;
2464             assert(tree != nullptr);
2465
2466             if ((tree->gtOper == GT_ASG) && (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
2467                 (tree->gtOp.gtOp2->gtOper == GT_CATCH_ARG))
2468             {
2469                 tree = gtNewLclvNode(tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum, TYP_REF);
2470
2471                 impPushOnStack(tree, typeInfo(TI_REF, clsHnd));
2472
2473                 return hndBlk->bbNext;
2474             }
2475         }
2476
2477         // If we get here, it must have been some other kind of internal block. It's possible that
2478         // someone prepended something to our injected block, but that's unlikely.
2479     }
2480
2481     /* Push the exception address value on the stack */
2482     GenTree* arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF);
2483
2484     /* Mark the node as having a side-effect - i.e. cannot be
2485      * moved around since it is tied to a fixed location (EAX) */
2486     arg->gtFlags |= GTF_ORDER_SIDEEFF;
2487
2488 #if defined(JIT32_GCENCODER)
2489     const bool forceInsertNewBlock = isSingleBlockFilter || compStressCompile(STRESS_CATCH_ARG, 5);
2490 #else
2491     const bool forceInsertNewBlock                                     = compStressCompile(STRESS_CATCH_ARG, 5);
2492 #endif // defined(JIT32_GCENCODER)
2493
2494     /* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */
2495     if (hndBlk->bbRefs > 1 || forceInsertNewBlock)
2496     {
2497         if (hndBlk->bbRefs == 1)
2498         {
2499             hndBlk->bbRefs++;
2500         }
2501
2502         /* Create extra basic block for the spill */
2503         BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true);
2504         newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET;
2505         newBlk->setBBWeight(hndBlk->bbWeight);
2506         newBlk->bbCodeOffs = hndBlk->bbCodeOffs;
2507
2508         /* Account for the new link we are about to create */
2509         hndBlk->bbRefs++;
2510
2511         /* Spill into a temp */
2512         unsigned tempNum         = lvaGrabTemp(false DEBUGARG("SpillCatchArg"));
2513         lvaTable[tempNum].lvType = TYP_REF;
2514         arg                      = gtNewTempAssign(tempNum, arg);
2515
2516         hndBlk->bbStkTempsIn = tempNum;
2517
2518         /* Report the debug info. impImportBlockCode won't treat
2519          * the actual handler as an exception block and thus won't do it for us. */
2520         if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
2521         {
2522             impCurStmtOffs = newBlk->bbCodeOffs | IL_OFFSETX_STKBIT;
2523             arg            = gtNewStmt(arg, impCurStmtOffs);
2524         }
2525
2526         fgInsertStmtAtEnd(newBlk, arg);
2527
2528         arg = gtNewLclvNode(tempNum, TYP_REF);
2529     }
2530
2531     impPushOnStack(arg, typeInfo(TI_REF, clsHnd));
2532
2533     return hndBlk;
2534 }
2535
2536 /*****************************************************************************
2537  *
2538  *  Given a tree, clone it. *pClone is set to the cloned tree.
2539  *  Returns the original tree if the cloning was easy,
2540  *   else returns the temp to which the tree had to be spilled.
2541  *  If the tree has side-effects, it will be spilled to a temp.
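 *  (Callers typically write 'copyOfExpr = impCloneExpr(expr, &expr, ...)' so that both trees
 *  can then be used without evaluating the side effects twice.)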
2542  */
2543
2544 GenTree* Compiler::impCloneExpr(GenTree*             tree,
2545                                 GenTree**            pClone,
2546                                 CORINFO_CLASS_HANDLE structHnd,
2547                                 unsigned             curLevel,
2548                                 GenTree** pAfterStmt DEBUGARG(const char* reason))
2549 {
2550     if (!(tree->gtFlags & GTF_GLOB_EFFECT))
2551     {
2552         GenTree* clone = gtClone(tree, true);
2553
2554         if (clone)
2555         {
2556             *pClone = clone;
2557             return tree;
2558         }
2559     }
2560
2561     /* Store the operand in a temp and return the temp */
2562
2563     unsigned temp = lvaGrabTemp(true DEBUGARG(reason));
2564
2565     // impAssignTempGen() may change tree->gtType to TYP_VOID for calls which
2566     // return a struct type. It also may modify the struct type to a more
2567     // specialized type (e.g. a SIMD type).  So we will get the type from
2568     // the lclVar AFTER calling impAssignTempGen().
2569
2570     impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtOffs);
2571     var_types type = genActualType(lvaTable[temp].TypeGet());
2572
2573     *pClone = gtNewLclvNode(temp, type);
2574     return gtNewLclvNode(temp, type);
2575 }
2576
2577 /*****************************************************************************
2578  * Remember the IL offset (including stack-empty info) for the trees we will
2579  * generate now.
2580  */
2581
2582 inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs)
2583 {
2584     if (compIsForInlining())
2585     {
2586         GenTree* callStmt = impInlineInfo->iciStmt;
2587         assert(callStmt->gtOper == GT_STMT);
2588         impCurStmtOffs = callStmt->gtStmt.gtStmtILoffsx;
2589     }
2590     else
2591     {
2592         assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2593         IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2594         impCurStmtOffs    = offs | stkBit;
2595     }
2596 }
2597
2598 /*****************************************************************************
2599  * Returns current IL offset with stack-empty and call-instruction info incorporated
2600  */
2601 inline IL_OFFSETX Compiler::impCurILOffset(IL_OFFSET offs, bool callInstruction)
2602 {
2603     if (compIsForInlining())
2604     {
2605         return BAD_IL_OFFSET;
2606     }
2607     else
2608     {
2609         assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2610         IL_OFFSETX stkBit             = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2611         IL_OFFSETX callInstructionBit = callInstruction ? IL_OFFSETX_CALLINSTRUCTIONBIT : 0;
2612         return offs | stkBit | callInstructionBit;
2613     }
2614 }
2615
2616 //------------------------------------------------------------------------
2617 // impCanSpillNow: check whether it is possible to spill all values from eeStack to local variables.
2618 //
2619 // Arguments:
2620 //    prevOpcode - last importer opcode
2621 //
2622 // Return Value:
2623 //    true if it is legal to spill now; false if spilling could break up a sequence that we do not want to divide.
2624 bool Compiler::impCanSpillNow(OPCODE prevOpcode)
2625 {
2626     // Don't spill after ldtoken, newarr and newobj, because it could be a part of the InitializeArray sequence.
2627     // Avoid breaking up to guarantee that impInitializeArrayIntrinsic can succeed.
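    // For example, the C# compiler emits 'newarr; dup; ldtoken <field>; call InitializeArray' for
    // array initializers, and that sequence must stay intact for the intrinsic to be recognized.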
2628     return (prevOpcode != CEE_LDTOKEN) && (prevOpcode != CEE_NEWARR) && (prevOpcode != CEE_NEWOBJ);
2629 }
2630
2631 /*****************************************************************************
2632  *
2633  *  Remember the instr offset for the statements
2634  *
2635  *  When we do impAppendTree(tree), we can't set tree->gtStmtLastILoffs to
2636  *  impCurOpcOffs, if the append was done because of a partial stack spill,
2637  *  as some of the trees corresponding to code up to impCurOpcOffs might
2638  *  still be sitting on the stack.
2639  *  So we delay marking of gtStmtLastILoffs until impNoteLastILoffs().
2640  *  This should be called when an opcode finally/explicitly causes
2641  *  impAppendTree(tree) to be called (as opposed to being called because of
2642  *  a spill caused by the opcode)
2643  */
2644
2645 #ifdef DEBUG
2646
2647 void Compiler::impNoteLastILoffs()
2648 {
2649     if (impLastILoffsStmt == nullptr)
2650     {
2651         // We should have added a statement for the current basic block
2652         // Is this assert correct?
2653
2654         assert(impTreeLast);
2655         assert(impTreeLast->gtOper == GT_STMT);
2656
2657         impTreeLast->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2658     }
2659     else
2660     {
2661         impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2662         impLastILoffsStmt                          = nullptr;
2663     }
2664 }
2665
2666 #endif // DEBUG
2667
2668 /*****************************************************************************
2669  * We don't create any GenTree (excluding spills) for a branch.
2670  * For debugging info, we need a placeholder so that we can note
2671  * the IL offset in gtStmt.gtStmtOffs. So append an empty statement.
2672  */
2673
2674 void Compiler::impNoteBranchOffs()
2675 {
2676     if (opts.compDbgCode)
2677     {
2678         impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
2679     }
2680 }
2681
2682 /*****************************************************************************
2683  * Locate the next stmt boundary for which we need to record info.
2684  * We will have to spill the stack at such boundaries if it is not
2685  * already empty.
2686  * Returns the next stmt boundary (after the start of the block)
2687  */
2688
2689 unsigned Compiler::impInitBlockLineInfo()
2690 {
2691     /* Assume the block does not correspond with any IL offset. This prevents
2692        us from reporting extra offsets. Extra mappings can cause confusing
2693        stepping, especially if the extra mapping is a jump-target, and the
2694        debugger does not ignore extra mappings, but instead rewinds to the
2695        nearest known offset */
2696
2697     impCurStmtOffsSet(BAD_IL_OFFSET);
2698
2699     if (compIsForInlining())
2700     {
2701         return ~0;
2702     }
2703
2704     IL_OFFSET blockOffs = compCurBB->bbCodeOffs;
2705
2706     if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES))
2707     {
2708         impCurStmtOffsSet(blockOffs);
2709     }
2710
2711     if (false && (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES))
2712     {
2713         impCurStmtOffsSet(blockOffs);
2714     }
2715
2716     /* Always report IL offset 0 or some tests get confused.
2717        Probably a good idea anyway */
2718
2719     if (blockOffs == 0)
2720     {
2721         impCurStmtOffsSet(blockOffs);
2722     }
2723
2724     if (!info.compStmtOffsetsCount)
2725     {
2726         return ~0;
2727     }
2728
2729     /* Find the lowest explicit stmt boundary within the block */
2730
2731     /* Start looking at an entry that is based on our instr offset */
2732
2733     unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize;
2734
2735     if (index >= info.compStmtOffsetsCount)
2736     {
2737         index = info.compStmtOffsetsCount - 1;
2738     }
2739
2740     /* If we've guessed too far, back up */
2741
2742     while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs)
2743     {
2744         index--;
2745     }
2746
2747     /* If we guessed short, advance ahead */
2748
2749     while (info.compStmtOffsets[index] < blockOffs)
2750     {
2751         index++;
2752
2753         if (index == info.compStmtOffsetsCount)
2754         {
2755             return info.compStmtOffsetsCount;
2756         }
2757     }
2758
2759     assert(index < info.compStmtOffsetsCount);
2760
2761     if (info.compStmtOffsets[index] == blockOffs)
2762     {
2763         /* There is an explicit boundary for the start of this basic block.
2764            So we will start with bbCodeOffs. Else we will wait until we
2765            get to the next explicit boundary */
2766
2767         impCurStmtOffsSet(blockOffs);
2768
2769         index++;
2770     }
2771
2772     return index;
2773 }
2774
2775 /*****************************************************************************/
2776
2777 static inline bool impOpcodeIsCallOpcode(OPCODE opcode)
2778 {
2779     switch (opcode)
2780     {
2781         case CEE_CALL:
2782         case CEE_CALLI:
2783         case CEE_CALLVIRT:
2784             return true;
2785
2786         default:
2787             return false;
2788     }
2789 }
2790
2791 /*****************************************************************************/
2792
2793 static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode)
2794 {
2795     switch (opcode)
2796     {
2797         case CEE_CALL:
2798         case CEE_CALLI:
2799         case CEE_CALLVIRT:
2800         case CEE_JMP:
2801         case CEE_NEWOBJ:
2802         case CEE_NEWARR:
2803             return true;
2804
2805         default:
2806             return false;
2807     }
2808 }
2809
2810 /*****************************************************************************/
2811
2812 // One might think it is worth caching these values, but results indicate
2813 // that it isn't.
2814 // In addition, caching them causes SuperPMI to be unable to completely
2815 // encapsulate an individual method context.
2816 CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass()
2817 {
2818     CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
2819     assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr);
2820     return refAnyClass;
2821 }
2822
2823 CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass()
2824 {
2825     CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE);
2826     assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr);
2827     return typeHandleClass;
2828 }
2829
2830 CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle()
2831 {
2832     CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE);
2833     assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr);
2834     return argIteratorClass;
2835 }
2836
2837 CORINFO_CLASS_HANDLE Compiler::impGetStringClass()
2838 {
2839     CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING);
2840     assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr);
2841     return stringClass;
2842 }
2843
2844 CORINFO_CLASS_HANDLE Compiler::impGetObjectClass()
2845 {
2846     CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT);
2847     assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr);
2848     return objectClass;
2849 }
2850
2851 /*****************************************************************************
2852  *  "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we
2853  *  set its type to TYP_BYREF when we create it. We only know whether it
2854  *  can be changed to TYP_I_IMPL at the point where we use it
2855  */
2856
2857 /* static */
2858 void Compiler::impBashVarAddrsToI(GenTree* tree1, GenTree* tree2)
2859 {
2860     if (tree1->IsVarAddr())
2861     {
2862         tree1->gtType = TYP_I_IMPL;
2863     }
2864
2865     if (tree2 && tree2->IsVarAddr())
2866     {
2867         tree2->gtType = TYP_I_IMPL;
2868     }
2869 }
2870
2871 /*****************************************************************************
2872  *  TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want
2873  *  to make that an explicit cast in our trees, so any implicit casts that
2874  *  exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are
2875  *  turned into explicit casts here.
2876  *  We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0)
2877  */
2878
2879 GenTree* Compiler::impImplicitIorI4Cast(GenTree* tree, var_types dstTyp)
2880 {
2881     var_types currType   = genActualType(tree->gtType);
2882     var_types wantedType = genActualType(dstTyp);
2883
2884     if (wantedType != currType)
2885     {
2886         // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
2887         if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp))
2888         {
2889             if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->gtIntCon.gtIconVal == 0)))
2890             {
2891                 tree->gtType = TYP_I_IMPL;
2892             }
2893         }
2894 #ifdef _TARGET_64BIT_
2895         else if (varTypeIsI(wantedType) && (currType == TYP_INT))
2896         {
2897             // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF
2898             tree = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
2899         }
2900         else if ((wantedType == TYP_INT) && varTypeIsI(currType))
2901         {
2902             // Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT
2903             tree = gtNewCastNode(TYP_INT, tree, TYP_INT);
2904         }
2905 #endif // _TARGET_64BIT_
2906     }
2907
2908     return tree;
2909 }
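
// For intuition (illustrative only, not JIT code): on 64-bit targets the importer turns
// the IL's implicit int32 <-> native int conversions into explicit GT_CAST nodes, much as
// one would write explicit casts in C++. The helper below is a hypothetical analogy only:
#if 0 // illustrative sketch, never compiled
#include <cstdint>
static void implicitToExplicitCastAnalogy()
{
    int32_t  small  = -1;
    intptr_t native = static_cast<intptr_t>(small); // TYP_INT -> TYP_I_IMPL: GT_CAST inserted
    int32_t  narrow = static_cast<int32_t>(native); // TYP_I_IMPL -> TYP_INT: GT_CAST inserted
    (void)narrow;
}
#endif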
2910
2911 /*****************************************************************************
2912  *  TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases,
2913  *  but we want to make that an explicit cast in our trees, so any implicit casts
2914  *  that exist in the IL are turned into explicit casts here.
2915  */
2916
2917 GenTree* Compiler::impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp)
2918 {
2919 #ifndef LEGACY_BACKEND
2920     if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType))
2921     {
2922         tree = gtNewCastNode(dstTyp, tree, dstTyp);
2923     }
2924 #endif // !LEGACY_BACKEND
2925
2926     return tree;
2927 }
2928
2929 //------------------------------------------------------------------------
2930 // impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray
2931 //    with a GT_COPYBLK node.
2932 //
2933 // Arguments:
2934 //    sig - The InitializeArray signature.
2935 //
2936 // Return Value:
2937 //    A pointer to the newly created GT_COPYBLK node if the replacement succeeds or
2938 //    nullptr otherwise.
2939 //
2940 // Notes:
2941 //    The function recognizes the following IL pattern:
2942 //      ldc <length> or a list of ldc <lower bound>/<length>
2943 //      newarr or newobj
2944 //      dup
2945 //      ldtoken <field handle>
2946 //      call InitializeArray
2947 //    The lower bounds need not be constant except when the array rank is 1.
2948 //    The function recognizes all kinds of arrays thus enabling a small runtime
2949 //    such as CoreRT to skip providing an implementation for InitializeArray.
2950
2951 GenTree* Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
2952 {
2953     assert(sig->numArgs == 2);
2954
2955     GenTree* fieldTokenNode = impStackTop(0).val;
2956     GenTree* arrayLocalNode = impStackTop(1).val;
2957
2958     //
2959     // Verify that the field token is known and valid.  Note that it's also
2960     // possible for the token to come from reflection, in which case we cannot do
2961     // the optimization and must therefore revert to calling the helper.  You can
2962     // see an example of this in bvt\DynIL\initarray2.exe (in Main).
2963     //
2964
2965     // Check to see if the ldtoken helper call is what we see here.
2966     if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->gtCall.gtCallType != CT_HELPER) ||
2967         (fieldTokenNode->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
2968     {
2969         return nullptr;
2970     }
2971
2972     // Strip helper call away
2973     fieldTokenNode = fieldTokenNode->gtCall.gtCallArgs->Current();
2974
2975     if (fieldTokenNode->gtOper == GT_IND)
2976     {
2977         fieldTokenNode = fieldTokenNode->gtOp.gtOp1;
2978     }
2979
2980     // Check for constant
2981     if (fieldTokenNode->gtOper != GT_CNS_INT)
2982     {
2983         return nullptr;
2984     }
2985
2986     CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->gtIntCon.gtCompileTimeHandle;
2987     if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
2988     {
2989         return nullptr;
2990     }
2991
2992     //
2993     // We need to get the number of elements in the array and the size of each element.
2994     // We verify that the newarr statement is exactly what we expect it to be.
2995     // If it's not, then we just return nullptr and don't optimize this call
2996     //
2997
2998     //
2999     // It is possible that we don't have any statements in the block yet
3000     //
3001     if (impTreeLast->gtOper != GT_STMT)
3002     {
3003         assert(impTreeLast->gtOper == GT_BEG_STMTS);
3004         return nullptr;
3005     }
3006
3007     //
3008     // We start by looking at the last statement, making sure it's an assignment, and
3009     // that the target of the assignment is the array passed to InitializeArray.
3010     //
3011     GenTree* arrayAssignment = impTreeLast->gtStmt.gtStmtExpr;
3012     if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->gtOp.gtOp1->gtOper != GT_LCL_VAR) ||
3013         (arrayLocalNode->gtOper != GT_LCL_VAR) ||
3014         (arrayAssignment->gtOp.gtOp1->gtLclVarCommon.gtLclNum != arrayLocalNode->gtLclVarCommon.gtLclNum))
3015     {
3016         return nullptr;
3017     }
3018
3019     //
3020     // Make sure that the object being assigned is a helper call.
3021     //
3022
3023     GenTree* newArrayCall = arrayAssignment->gtOp.gtOp2;
3024     if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->gtCall.gtCallType != CT_HELPER))
3025     {
3026         return nullptr;
3027     }
3028
3029     //
3030     // Verify that it is one of the new array helpers.
3031     //
3032
3033     bool isMDArray = false;
3034
3035     if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) &&
3036         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) &&
3037         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) &&
3038         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8)
3039 #ifdef FEATURE_READYTORUN_COMPILER
3040         && newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_R2R_DIRECT) &&
3041         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)
3042 #endif
3043             )
3044     {
3045         if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR_NONVARARG))
3046         {
3047             return nullptr;
3048         }
3049
3050         isMDArray = true;
3051     }
3052
3053     CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->gtCall.compileTimeHelperArgumentHandle;
3054
3055     //
3056     // Make sure we found a compile time handle to the array
3057     //
3058
3059     if (!arrayClsHnd)
3060     {
3061         return nullptr;
3062     }
3063
3064     unsigned rank = 0;
3065     S_UINT32 numElements;
3066
3067     if (isMDArray)
3068     {
3069         rank = info.compCompHnd->getArrayRank(arrayClsHnd);
3070
3071         if (rank == 0)
3072         {
3073             return nullptr;
3074         }
3075
3076         GenTreeArgList* tokenArg = newArrayCall->gtCall.gtCallArgs;
3077         assert(tokenArg != nullptr);
3078         GenTreeArgList* numArgsArg = tokenArg->Rest();
3079         assert(numArgsArg != nullptr);
3080         GenTreeArgList* argsArg = numArgsArg->Rest();
3081         assert(argsArg != nullptr);
3082
3083         //
3084         // The number of arguments should be a constant between 1 and 64. The rank can't be 0
3085         // so at least one length must be present and the rank can't exceed 32 so there can
3086         // be at most 64 arguments - 32 lengths and 32 lower bounds.
3087         //
3088
3089         if ((!numArgsArg->Current()->IsCnsIntOrI()) || (numArgsArg->Current()->AsIntCon()->IconValue() < 1) ||
3090             (numArgsArg->Current()->AsIntCon()->IconValue() > 64))
3091         {
3092             return nullptr;
3093         }
3094
3095         unsigned numArgs = static_cast<unsigned>(numArgsArg->Current()->AsIntCon()->IconValue());
3096         bool     lowerBoundsSpecified;
3097
3098         if (numArgs == rank * 2)
3099         {
3100             lowerBoundsSpecified = true;
3101         }
3102         else if (numArgs == rank)
3103         {
3104             lowerBoundsSpecified = false;
3105
3106             //
3107             // If the rank is 1 and a lower bound isn't specified then the runtime creates
3108             // a SDArray. Note that even if a lower bound is specified it can be 0 and then
3109             // we get a SDArray as well, see the for loop below.
3110             //
3111
3112             if (rank == 1)
3113             {
3114                 isMDArray = false;
3115             }
3116         }
3117         else
3118         {
3119             return nullptr;
3120         }
3121
3122         //
3123         // The rank is known to be at least 1 so we can start with numElements being 1
3124         // to avoid the need to special case the first dimension.
3125         //
3126
3127         numElements = S_UINT32(1);
3128
3129         struct Match
3130         {
3131             static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3132             {
3133                 return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) &&
3134                        IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3135             }
3136
3137             static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3138             {
3139                 return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) &&
3140                        (tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) &&
3141                        IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3142             }
3143
3144             static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs)
3145             {
3146                 return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) &&
3147                        (tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs);
3148             }
3149
3150             static bool IsComma(GenTree* tree)
3151             {
3152                 return (tree != nullptr) && (tree->OperGet() == GT_COMMA);
3153             }
3154         };
3155
3156         unsigned argIndex = 0;
3157         GenTree* comma;
3158
3159         for (comma = argsArg->Current(); Match::IsComma(comma); comma = comma->gtGetOp2())
3160         {
3161             if (lowerBoundsSpecified)
3162             {
3163                 //
3164                 // In general lower bounds can be ignored because they're not needed to
3165                 // calculate the total number of elements. But for single dimensional arrays
3166                 // we need to know if the lower bound is 0 because in this case the runtime
3167                 // creates an SDArray and this affects the way the array data offset is calculated.
3168                 //
3169
3170                 if (rank == 1)
3171                 {
3172                     GenTree* lowerBoundAssign = comma->gtGetOp1();
3173                     assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs));
3174                     GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2();
3175
3176                     if (lowerBoundNode->IsIntegralConst(0))
3177                     {
3178                         isMDArray = false;
3179                     }
3180                 }
3181
3182                 comma = comma->gtGetOp2();
3183                 argIndex++;
3184             }
3185
3186             GenTree* lengthNodeAssign = comma->gtGetOp1();
3187             assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs));
3188             GenTree* lengthNode = lengthNodeAssign->gtGetOp2();
3189
3190             if (!lengthNode->IsCnsIntOrI())
3191             {
3192                 return nullptr;
3193             }
3194
3195             numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue());
3196             argIndex++;
3197         }
3198
3199         assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs));
3200
3201         if (argIndex != numArgs)
3202         {
3203             return nullptr;
3204         }
3205     }
3206     else
3207     {
3208         //
3209         // Make sure there are exactly two arguments:  the array class and
3210         // the number of elements.
3211         //
3212
3213         GenTree* arrayLengthNode;
3214
3215         GenTreeArgList* args = newArrayCall->gtCall.gtCallArgs;
3216 #ifdef FEATURE_READYTORUN_COMPILER
3217         if (newArrayCall->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1))
3218         {
3219             // Array length is 1st argument for readytorun helper
3220             arrayLengthNode = args->Current();
3221         }
3222         else
3223 #endif
3224         {
3225             // Array length is 2nd argument for regular helper
3226             arrayLengthNode = args->Rest()->Current();
3227         }
3228
3229         //
3230         // Make sure that the number of elements looks valid.
3231         //
3232         if (arrayLengthNode->gtOper != GT_CNS_INT)
3233         {
3234             return nullptr;
3235         }
3236
3237         numElements = S_SIZE_T(arrayLengthNode->gtIntCon.gtIconVal);
3238
3239         if (!info.compCompHnd->isSDArray(arrayClsHnd))
3240         {
3241             return nullptr;
3242         }
3243     }
3244
3245     CORINFO_CLASS_HANDLE elemClsHnd;
3246     var_types            elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd));
3247
3248     //
3249     // Note that genTypeSize will return zero for non primitive types, which is exactly
3250     // what we want (size will then be 0, and we will catch this in the conditional below).
3251     // Note that we don't expect this to fail for valid binaries, so we assert in the
3252     // non-verification case (the verification case should not assert but rather correctly
3253     // handle bad binaries).  This assert is not guarding any specific invariant, but rather
3254     // saying that we don't expect this to happen, and if it is hit, we need to investigate
3255     // why.
3256     //
3257
3258     S_UINT32 elemSize(genTypeSize(elementType));
3259     S_UINT32 size = elemSize * S_UINT32(numElements);
3260
3261     if (size.IsOverflow())
3262     {
3263         return nullptr;
3264     }
3265
3266     if ((size.Value() == 0) || (varTypeIsGC(elementType)))
3267     {
3268         assert(verNeedsVerification());
3269         return nullptr;
3270     }
3271
3272     void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value());
3273     if (!initData)
3274     {
3275         return nullptr;
3276     }
3277
3278     //
3279     // At this point we are ready to commit to implementing the InitializeArray
3280     // intrinsic using a struct assignment.  Pop the arguments from the stack and
3281     // return the struct assignment node.
3282     //
3283
3284     impPopStack();
3285     impPopStack();
3286
3287     const unsigned blkSize = size.Value();
3288     unsigned       dataOffset;
3289
3290     if (isMDArray)
3291     {
3292         dataOffset = eeGetMDArrayDataOffset(elementType, rank);
3293     }
3294     else
3295     {
3296         dataOffset = eeGetArrayDataOffset(elementType);
3297     }
3298
3299     GenTree* dst = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
3300     GenTree* blk = gtNewBlockVal(dst, blkSize);
3301     GenTree* src = gtNewIndOfIconHandleNode(TYP_STRUCT, (size_t)initData, GTF_ICON_STATIC_HDL, false);
3302
3303     return gtNewBlkOpNode(blk,     // dst
3304                           src,     // src
3305                           blkSize, // size
3306                           false,   // volatil
3307                           true);   // copyBlock
3308 }
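
// The net effect of the replacement built above is a single block copy of the field's
// static initialization data over the array's element storage. A rough semantic sketch
// (illustrative only, not JIT code; the names below are hypothetical):
#if 0 // illustrative sketch, never compiled
#include <cstring>
static void initializeArraySemantics(unsigned char* arrayObj,   // the newly allocated array object
                                     unsigned       dataOffset, // eeGetArrayDataOffset / eeGetMDArrayDataOffset
                                     const void*    initData,   // blob from getArrayInitializationData
                                     unsigned       blkSize)    // elemSize * numElements
{
    // Equivalent of the copy-block node: dst = the array's data section, src = the static blob.
    memcpy(arrayObj + dataOffset, initData, blkSize);
}
#endif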
3309
3310 //------------------------------------------------------------------------
3311 // impIntrinsic: possibly expand intrinsic call into alternate IR sequence
3312 //
3313 // Arguments:
3314 //    newobjThis - for constructor calls, the tree for the newly allocated object
3315 //    clsHnd - handle for the intrinsic method's class
3316 //    method - handle for the intrinsic method
3317 //    sig    - signature of the intrinsic method
3318 //    methodFlags - CORINFO_FLG_XXX flags of the intrinsic method
3319 //    memberRef - the token for the intrinsic method
3320 //    readonlyCall - true if call has a readonly prefix
3321 //    tailCall - true if call is in tail position
3322 //    pConstrainedResolvedToken -- resolved token for constrained call, or nullptr
3323 //       if call is not constrained
3324 //    constraintCallThisTransform -- this transform to apply for a constrained call
3325 //    pIntrinsicID [OUT] -- intrinsic ID (see enumeration in corinfo.h)
3326 //       for "traditional" jit intrinsics
3327 //    isSpecialIntrinsic [OUT] -- set true if intrinsic expansion is a call
3328 //       that is amenable to special downstream optimization opportunities
3329 //
3330 // Returns:
3331 //    IR tree to use in place of the call, or nullptr if the jit should treat
3332 //    the intrinsic call like a normal call.
3333 //
3334 //    pIntrinsicID is set to a non-illegal value if the call is recognized as a
3335 //    traditional jit intrinsic, even if the intrinsic is not expanded.
3336 //
3337 //    isSpecial is set true if the expansion is subject to special
3338 //    optimizations later in the jit processing
3339 //
3340 // Notes:
3341 //    On success the IR tree may be a call to a different method or an inline
3342 //    sequence. If it is a call, then the intrinsic processing here is responsible
3343 //    for handling all the special cases, as upon return to impImportCall
3344 //    expanded intrinsics bypass most of the normal call processing.
3345 //
3346 //    Intrinsics are generally not recognized in minopts and debug codegen.
3347 //
3348 //    However, certain traditional intrinsics are identified as "must expand"
3349 //    if there is no fallback implementation to invoke; these must be handled
3350 //    in all codegen modes.
3351 //
3352 //    New style intrinsics (where the fallback implementation is in IL) are
3353 //    identified as "must expand" if they are invoked from within their
3354 //    own method bodies.
3355 //
3356
3357 GenTree* Compiler::impIntrinsic(GenTree*                newobjThis,
3358                                 CORINFO_CLASS_HANDLE    clsHnd,
3359                                 CORINFO_METHOD_HANDLE   method,
3360                                 CORINFO_SIG_INFO*       sig,
3361                                 unsigned                methodFlags,
3362                                 int                     memberRef,
3363                                 bool                    readonlyCall,
3364                                 bool                    tailCall,
3365                                 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
3366                                 CORINFO_THIS_TRANSFORM  constraintCallThisTransform,
3367                                 CorInfoIntrinsics*      pIntrinsicID,
3368                                 bool*                   isSpecialIntrinsic)
3369 {
3370     assert((methodFlags & (CORINFO_FLG_INTRINSIC | CORINFO_FLG_JIT_INTRINSIC)) != 0);
3371
3372     bool              mustExpand  = false;
3373     bool              isSpecial   = false;
3374     CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Illegal;
3375     NamedIntrinsic    ni          = NI_Illegal;
3376
3377     if ((methodFlags & CORINFO_FLG_INTRINSIC) != 0)
3378     {
3379         intrinsicID = info.compCompHnd->getIntrinsicID(method, &mustExpand);
3380     }
3381
3382     if ((methodFlags & CORINFO_FLG_JIT_INTRINSIC) != 0)
3383     {
3384         // Recursive calls to JIT intrinsics are must-expand by convention.
3385         mustExpand = mustExpand || gtIsRecursiveCall(method);
3386
3387         if (intrinsicID == CORINFO_INTRINSIC_Illegal)
3388         {
3389             ni = lookupNamedIntrinsic(method);
3390
3391 #ifdef FEATURE_HW_INTRINSICS
3392             if (ni > NI_HW_INTRINSIC_START && ni < NI_HW_INTRINSIC_END)
3393             {
3394                 return impHWIntrinsic(ni, method, sig, mustExpand);
3395             }
3396 #endif // FEATURE_HW_INTRINSICS
3397         }
3398     }
3399
3400     *pIntrinsicID = intrinsicID;
3401
3402 #ifndef _TARGET_ARM_
3403     genTreeOps interlockedOperator;
3404 #endif
3405
3406     if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContext)
3407     {
3408         // must be done regardless of DbgCode and MinOpts
3409         return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL);
3410     }
3411 #ifdef _TARGET_64BIT_
3412     if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr)
3413     {
3414         // must be done regardless of DbgCode and MinOpts
3415         return gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL));
3416     }
3417 #else
3418     assert(intrinsicID != CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr);
3419 #endif
3420
3421     GenTree* retNode = nullptr;
3422
3423     // Under debug and minopts, only expand what is required.
3424     if (!mustExpand && (opts.compDbgCode || opts.MinOpts()))
3425     {
3426         *pIntrinsicID = CORINFO_INTRINSIC_Illegal;
3427         return retNode;
3428     }
3429
3430     var_types callType = JITtype2varType(sig->retType);
3431
3432     /* First do the intrinsics which are always smaller than a call */
3433
3434     switch (intrinsicID)
3435     {
3436         GenTree* op1;
3437         GenTree* op2;
3438
3439         case CORINFO_INTRINSIC_Sin:
3440         case CORINFO_INTRINSIC_Cbrt:
3441         case CORINFO_INTRINSIC_Sqrt:
3442         case CORINFO_INTRINSIC_Abs:
3443         case CORINFO_INTRINSIC_Cos:
3444         case CORINFO_INTRINSIC_Round:
3445         case CORINFO_INTRINSIC_Cosh:
3446         case CORINFO_INTRINSIC_Sinh:
3447         case CORINFO_INTRINSIC_Tan:
3448         case CORINFO_INTRINSIC_Tanh:
3449         case CORINFO_INTRINSIC_Asin:
3450         case CORINFO_INTRINSIC_Asinh:
3451         case CORINFO_INTRINSIC_Acos:
3452         case CORINFO_INTRINSIC_Acosh:
3453         case CORINFO_INTRINSIC_Atan:
3454         case CORINFO_INTRINSIC_Atan2:
3455         case CORINFO_INTRINSIC_Atanh:
3456         case CORINFO_INTRINSIC_Log10:
3457         case CORINFO_INTRINSIC_Pow:
3458         case CORINFO_INTRINSIC_Exp:
3459         case CORINFO_INTRINSIC_Ceiling:
3460         case CORINFO_INTRINSIC_Floor:
3461             retNode = impMathIntrinsic(method, sig, callType, intrinsicID, tailCall);
3462             break;
3463
3464 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3465         // TODO-ARM-CQ: reenable treating Interlocked operation as intrinsic
3466         case CORINFO_INTRINSIC_InterlockedAdd32:
3467             interlockedOperator = GT_LOCKADD;
3468             goto InterlockedBinOpCommon;
3469         case CORINFO_INTRINSIC_InterlockedXAdd32:
3470             interlockedOperator = GT_XADD;
3471             goto InterlockedBinOpCommon;
3472         case CORINFO_INTRINSIC_InterlockedXchg32:
3473             interlockedOperator = GT_XCHG;
3474             goto InterlockedBinOpCommon;
3475
3476 #ifdef _TARGET_64BIT_
3477         case CORINFO_INTRINSIC_InterlockedAdd64:
3478             interlockedOperator = GT_LOCKADD;
3479             goto InterlockedBinOpCommon;
3480         case CORINFO_INTRINSIC_InterlockedXAdd64:
3481             interlockedOperator = GT_XADD;
3482             goto InterlockedBinOpCommon;
3483         case CORINFO_INTRINSIC_InterlockedXchg64:
3484             interlockedOperator = GT_XCHG;
3485             goto InterlockedBinOpCommon;
3486 #endif // _TARGET_64BIT_
3487
3488         InterlockedBinOpCommon:
3489             assert(callType != TYP_STRUCT);
3490             assert(sig->numArgs == 2);
3491
3492             op2 = impPopStack().val;
3493             op1 = impPopStack().val;
3494
3495             // This creates:
3496             //   val
3497             // XAdd
3498             //   addr
3499             //     field (for example)
3500             //
3501             // In the case where the first argument is the address of a local, we might
3502             // want to make this *not* make the var address-taken -- but atomic instructions
3503             // on a local are probably pretty useless anyway, so we probably don't care.
3504
3505             op1 = gtNewOperNode(interlockedOperator, genActualType(callType), op1, op2);
3506             op1->gtFlags |= GTF_GLOB_REF | GTF_ASG;
3507             retNode = op1;
3508             break;
3509 #endif // defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3510
3511         case CORINFO_INTRINSIC_MemoryBarrier:
3512
3513             assert(sig->numArgs == 0);
3514
3515             op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
3516             op1->gtFlags |= GTF_GLOB_REF | GTF_ASG;
3517             retNode = op1;
3518             break;
3519
3520 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3521         // TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic
3522         case CORINFO_INTRINSIC_InterlockedCmpXchg32:
3523 #ifdef _TARGET_64BIT_
3524         case CORINFO_INTRINSIC_InterlockedCmpXchg64:
3525 #endif
3526         {
3527             assert(callType != TYP_STRUCT);
3528             assert(sig->numArgs == 3);
3529             GenTree* op3;
3530
3531             op3 = impPopStack().val; // comparand
3532             op2 = impPopStack().val; // value
3533             op1 = impPopStack().val; // location
3534
3535             GenTree* node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3);
3536
3537             node->gtCmpXchg.gtOpLocation->gtFlags |= GTF_DONT_CSE;
3538             retNode = node;
3539             break;
3540         }
3541 #endif // defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3542
3543         case CORINFO_INTRINSIC_StringLength:
3544             op1 = impPopStack().val;
3545             if (!opts.MinOpts() && !opts.compDbgCode)
3546             {
3547                 GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, offsetof(CORINFO_String, stringLen));
3548                 op1                   = arrLen;
3549             }
3550             else
3551             {
3552                 /* Create the expression "*(str_addr + stringLengthOffset)" */
3553                 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
3554                                     gtNewIconNode(offsetof(CORINFO_String, stringLen), TYP_I_IMPL));
3555                 op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
3556             }
3557
3558             // Getting the length of a null string should throw
3559             op1->gtFlags |= GTF_EXCEPT;
3560
3561             retNode = op1;
3562             break;
3563
3564         case CORINFO_INTRINSIC_StringGetChar:
3565             op2 = impPopStack().val;
3566             op1 = impPopStack().val;
3567             op1 = gtNewIndexRef(TYP_USHORT, op1, op2);
3568             op1->gtFlags |= GTF_INX_STRING_LAYOUT;
3569             retNode = op1;
3570             break;
3571
3572         case CORINFO_INTRINSIC_InitializeArray:
3573             retNode = impInitializeArrayIntrinsic(sig);
3574             break;
3575
3576         case CORINFO_INTRINSIC_Array_Address:
3577         case CORINFO_INTRINSIC_Array_Get:
3578         case CORINFO_INTRINSIC_Array_Set:
3579             retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, intrinsicID);
3580             break;
3581
3582         case CORINFO_INTRINSIC_GetTypeFromHandle:
3583             op1 = impStackTop(0).val;
3584             if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3585                 gtIsTypeHandleToRuntimeTypeHelper(op1->AsCall()))
3586             {
3587                 op1 = impPopStack().val;
3588                 // Change call to return RuntimeType directly.
3589                 op1->gtType = TYP_REF;
3590                 retNode     = op1;
3591             }
3592             // Call the regular function.
3593             break;
3594
3595         case CORINFO_INTRINSIC_RTH_GetValueInternal:
3596             op1 = impStackTop(0).val;
3597             if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3598                 gtIsTypeHandleToRuntimeTypeHelper(op1->AsCall()))
3599             {
3600                 // Old tree
3601                 // Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle
3602                 //
3603                 // New tree
3604                 // TreeToGetNativeTypeHandle
3605
3606                 // Remove call to helper and return the native TypeHandle pointer that was the parameter
3607                 // to that helper.
3608
3609                 op1 = impPopStack().val;
3610
3611                 // Get native TypeHandle argument to old helper
3612                 op1 = op1->gtCall.gtCallArgs;
3613                 assert(op1->OperIsList());
3614                 assert(op1->gtOp.gtOp2 == nullptr);
3615                 op1     = op1->gtOp.gtOp1;
3616                 retNode = op1;
3617             }
3618             // Call the regular function.
3619             break;
3620
3621 #ifndef LEGACY_BACKEND
3622         case CORINFO_INTRINSIC_Object_GetType:
3623         {
3624             JITDUMP("\n impIntrinsic: call to Object.GetType\n");
3625             op1 = impStackTop(0).val;
3626
3627             // If we're calling GetType on a boxed value, just get the type directly.
3628             if (op1->IsBoxedValue())
3629             {
3630                 JITDUMP("Attempting to optimize box(...).getType() to direct type construction\n");
3631
3632                 // Try and clean up the box. Obtain the handle we
3633                 // were going to pass to the newobj.
3634                 GenTree* boxTypeHandle = gtTryRemoveBoxUpstreamEffects(op1, BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE);
3635
3636                 if (boxTypeHandle != nullptr)
3637                 {
3638                     // Note we don't need to play the TYP_STRUCT games here like
3639                     // we do for LDTOKEN since the return value of this operator is Type,
3640                     // not RuntimeTypeHandle.
3641                     impPopStack();
3642                     GenTreeArgList* helperArgs = gtNewArgList(boxTypeHandle);
3643                     GenTree*        runtimeType =
3644                         gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs);
3645                     retNode = runtimeType;
3646                 }
3647             }
3648
3649             // If we have a constrained callvirt with a "box this" transform
3650             // we know we have a value class and hence an exact type.
3651             //
3652             // If so, instead of boxing and then extracting the type, just
3653             // construct the type directly.
3654             if ((retNode == nullptr) && (pConstrainedResolvedToken != nullptr) &&
3655                 (constraintCallThisTransform == CORINFO_BOX_THIS))
3656             {
3657                 // Ensure this is one of the simple box cases (in particular, rule out nullables).
3658                 const CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pConstrainedResolvedToken->hClass);
3659                 const bool            isSafeToOptimize = (boxHelper == CORINFO_HELP_BOX);
3660
3661                 if (isSafeToOptimize)
3662                 {
3663                     JITDUMP("Optimizing constrained box-this obj.getType() to direct type construction\n");
3664                     impPopStack();
3665                     GenTree* typeHandleOp =
3666                         impTokenToHandle(pConstrainedResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
3667                     if (typeHandleOp == nullptr)
3668                     {
3669                         assert(compDonotInline());
3670                         return nullptr;
3671                     }
3672                     GenTreeArgList* helperArgs = gtNewArgList(typeHandleOp);
3673                     GenTree*        runtimeType =
3674                         gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs);
3675                     retNode = runtimeType;
3676                 }
3677             }
3678
3679 #ifdef DEBUG
3680             if (retNode != nullptr)
3681             {
3682                 JITDUMP("Optimized result for call to GetType is\n");
3683                 if (verbose)
3684                 {
3685                     gtDispTree(retNode);
3686                 }
3687             }
3688 #endif
3689
3690             // Else expand as an intrinsic, unless the call is constrained,
3691             // in which case we defer expansion to allow impImportCall to do the
3692             // special constraint processing.
3693             if ((retNode == nullptr) && (pConstrainedResolvedToken == nullptr))
3694             {
3695                 JITDUMP("Expanding as special intrinsic\n");
3696                 impPopStack();
3697                 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3698
3699                 // Set the CALL flag to indicate that the operator is implemented by a call.
3700                 // Set also the EXCEPTION flag because the native implementation of
3701                 // CORINFO_INTRINSIC_Object_GetType intrinsic can throw NullReferenceException.
3702                 op1->gtFlags |= (GTF_CALL | GTF_EXCEPT);
3703                 retNode = op1;
3704                 // Might be further optimizable, so arrange to leave a mark behind
3705                 isSpecial = true;
3706             }
3707
3708             if (retNode == nullptr)
3709             {
3710                 JITDUMP("Leaving as normal call\n");
3711                 // Might be further optimizable, so arrange to leave a mark behind
3712                 isSpecial = true;
3713             }
3714
3715             break;
3716         }
3717
3718 #endif
3719         // Implement ByReference Ctor.  This wraps the assignment of the ref into a byref-like field
3720         // in a value type.  The canonical example of this is Span<T>. In effect this is just a
3721         // substitution.  The parameter byref will be assigned into the newly allocated object.
3722         case CORINFO_INTRINSIC_ByReference_Ctor:
3723         {
3724             // Remove call to constructor and directly assign the byref passed
3725             // to the call to the first slot of the ByReference struct.
3726             op1                                    = impPopStack().val;
3727             GenTree*             thisptr           = newobjThis;
3728             CORINFO_FIELD_HANDLE fldHnd            = info.compCompHnd->getFieldInClass(clsHnd, 0);
3729             GenTree*             field             = gtNewFieldRef(TYP_BYREF, fldHnd, thisptr, 0, false);
3730             GenTree*             assign            = gtNewAssignNode(field, op1);
3731             GenTree*             byReferenceStruct = gtCloneExpr(thisptr->gtGetOp1());
3732             assert(byReferenceStruct != nullptr);
3733             impPushOnStack(byReferenceStruct, typeInfo(TI_STRUCT, clsHnd));
3734             retNode = assign;
3735             break;
3736         }
3737         // Implement ptr value getter for ByReference struct.
3738         case CORINFO_INTRINSIC_ByReference_Value:
3739         {
3740             op1                         = impPopStack().val;
3741             CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
3742             GenTree*             field  = gtNewFieldRef(TYP_BYREF, fldHnd, op1, 0, false);
3743             retNode                     = field;
3744             break;
3745         }
3746         case CORINFO_INTRINSIC_Span_GetItem:
3747         case CORINFO_INTRINSIC_ReadOnlySpan_GetItem:
3748         {
3749             // We have the index and a pointer to the Span<T> 's' on the stack. Expand to:
3750             //
3751             // For Span<T>
3752             //   Comma
3753             //     BoundsCheck(index, s->_length)
3754             //     s->_pointer + index * sizeof(T)
3755             //
3756             // For ReadOnlySpan<T> -- same expansion, as it now returns a readonly ref
3757             //
3758             // Signature should show one class type parameter, which
3759             // we need to examine.
3760             assert(sig->sigInst.classInstCount == 1);
3761             CORINFO_CLASS_HANDLE spanElemHnd = sig->sigInst.classInst[0];
3762             const unsigned       elemSize    = info.compCompHnd->getClassSize(spanElemHnd);
3763             assert(elemSize > 0);
3764
3765             const bool isReadOnly = (intrinsicID == CORINFO_INTRINSIC_ReadOnlySpan_GetItem);
3766
3767             JITDUMP("\nimpIntrinsic: Expanding %sSpan<T>.get_Item, T=%s, sizeof(T)=%u\n", isReadOnly ? "ReadOnly" : "",
3768                     info.compCompHnd->getClassName(spanElemHnd), elemSize);
3769
3770             GenTree* index          = impPopStack().val;
3771             GenTree* ptrToSpan      = impPopStack().val;
3772             GenTree* indexClone     = nullptr;
3773             GenTree* ptrToSpanClone = nullptr;
3774
3775 #if defined(DEBUG)
3776             if (verbose)
3777             {
3778                 printf("with ptr-to-span\n");
3779                 gtDispTree(ptrToSpan);
3780                 printf("and index\n");
3781                 gtDispTree(index);
3782             }
3783 #endif // defined(DEBUG)
3784
3785             // We need to use both index and ptr-to-span twice, so clone or spill.
3786             index = impCloneExpr(index, &indexClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
3787                                  nullptr DEBUGARG("Span.get_Item index"));
3788             ptrToSpan = impCloneExpr(ptrToSpan, &ptrToSpanClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
3789                                      nullptr DEBUGARG("Span.get_Item ptrToSpan"));
3790
3791             // Bounds check
3792             CORINFO_FIELD_HANDLE lengthHnd    = info.compCompHnd->getFieldInClass(clsHnd, 1);
3793             const unsigned       lengthOffset = info.compCompHnd->getFieldOffset(lengthHnd);
3794             GenTree*             length       = gtNewFieldRef(TYP_INT, lengthHnd, ptrToSpan, lengthOffset, false);
3795             GenTree*             boundsCheck  = new (this, GT_ARR_BOUNDS_CHECK)
3796                 GenTreeBoundsChk(GT_ARR_BOUNDS_CHECK, TYP_VOID, index, length, SCK_RNGCHK_FAIL);
3797
3798             // Element access
3799             GenTree*             indexIntPtr = impImplicitIorI4Cast(indexClone, TYP_I_IMPL);
3800             GenTree*             sizeofNode  = gtNewIconNode(elemSize);
3801             GenTree*             mulNode     = gtNewOperNode(GT_MUL, TYP_I_IMPL, indexIntPtr, sizeofNode);
3802             CORINFO_FIELD_HANDLE ptrHnd      = info.compCompHnd->getFieldInClass(clsHnd, 0);
3803             const unsigned       ptrOffset   = info.compCompHnd->getFieldOffset(ptrHnd);
3804             GenTree*             data        = gtNewFieldRef(TYP_BYREF, ptrHnd, ptrToSpanClone, ptrOffset, false);
3805             GenTree*             result      = gtNewOperNode(GT_ADD, TYP_BYREF, data, mulNode);
3806
3807             // Prepare result
3808             var_types resultType = JITtype2varType(sig->retType);
3809             assert(resultType == result->TypeGet());
3810             retNode = gtNewOperNode(GT_COMMA, resultType, boundsCheck, result);
3811
3812             break;
3813         }
3814
3815         case CORINFO_INTRINSIC_GetRawHandle:
3816         {
3817             noway_assert(IsTargetAbi(CORINFO_CORERT_ABI)); // Only CoreRT supports it.
3818             CORINFO_RESOLVED_TOKEN resolvedToken;
3819             resolvedToken.tokenContext = MAKE_METHODCONTEXT(info.compMethodHnd);
3820             resolvedToken.tokenScope   = info.compScopeHnd;
3821             resolvedToken.token        = memberRef;
3822             resolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
3823
3824             CORINFO_GENERICHANDLE_RESULT embedInfo;
3825             info.compCompHnd->expandRawHandleIntrinsic(&resolvedToken, &embedInfo);
3826
3827             GenTree* rawHandle = impLookupToTree(&resolvedToken, &embedInfo.lookup, gtTokenToIconFlags(memberRef),
3828                                                  embedInfo.compileTimeHandle);
3829             if (rawHandle == nullptr)
3830             {
3831                 return nullptr;
3832             }
3833
3834             noway_assert(genTypeSize(rawHandle->TypeGet()) == genTypeSize(TYP_I_IMPL));
3835
3836             unsigned rawHandleSlot = lvaGrabTemp(true DEBUGARG("rawHandle"));
3837             impAssignTempGen(rawHandleSlot, rawHandle, clsHnd, (unsigned)CHECK_SPILL_NONE);
3838
3839             GenTree*  lclVar     = gtNewLclvNode(rawHandleSlot, TYP_I_IMPL);
3840             GenTree*  lclVarAddr = gtNewOperNode(GT_ADDR, TYP_I_IMPL, lclVar);
3841             var_types resultType = JITtype2varType(sig->retType);
3842             retNode              = gtNewOperNode(GT_IND, resultType, lclVarAddr);
3843
3844             break;
3845         }
3846
3847         case CORINFO_INTRINSIC_TypeEQ:
3848         case CORINFO_INTRINSIC_TypeNEQ:
3849         {
3850             JITDUMP("Importing Type.op_*Equality intrinsic\n");
3851             op1              = impStackTop(1).val;
3852             op2              = impStackTop(0).val;
3853             GenTree* optTree = gtFoldTypeEqualityCall(intrinsicID, op1, op2);
3854             if (optTree != nullptr)
3855             {
3856                 // Success, clean up the evaluation stack.
3857                 impPopStack();
3858                 impPopStack();
3859
3860                 // See if we can optimize even further, to a handle compare.
3861                 optTree = gtFoldTypeCompare(optTree);
3862
3863                 // See if we can now fold a handle compare to a constant.
3864                 optTree = gtFoldExpr(optTree);
3865
3866                 retNode = optTree;
3867             }
3868             else
3869             {
3870                 // Retry optimizing these later
3871                 isSpecial = true;
3872             }
3873             break;
3874         }
3875
3876         case CORINFO_INTRINSIC_GetCurrentManagedThread:
3877         case CORINFO_INTRINSIC_GetManagedThreadId:
3878         {
3879             // Retry optimizing these during morph
3880             isSpecial = true;
3881             break;
3882         }
3883
3884         default:
3885             /* Unknown intrinsic */
3886             intrinsicID = CORINFO_INTRINSIC_Illegal;
3887             break;
3888     }
3889
3890     // Look for new-style jit intrinsics by name
3891     if (ni != NI_Illegal)
3892     {
3893         assert(retNode == nullptr);
3894         switch (ni)
3895         {
3896             case NI_System_Enum_HasFlag:
3897             {
3898                 GenTree* thisOp  = impStackTop(1).val;
3899                 GenTree* flagOp  = impStackTop(0).val;
3900                 GenTree* optTree = gtOptimizeEnumHasFlag(thisOp, flagOp);
3901
3902                 if (optTree != nullptr)
3903                 {
3904                     // Optimization successful. Pop the stack for real.
3905                     impPopStack();
3906                     impPopStack();
3907                     retNode = optTree;
3908                 }
3909                 else
3910                 {
3911                     // Retry optimizing this during morph.
3912                     isSpecial = true;
3913                 }
3914
3915                 break;
3916             }
3917
3918             case NI_MathF_Round:
3919             case NI_Math_Round:
3920             {
3921                 // Math.Round and MathF.Round used to be traditional JIT intrinsics. In order
3922                 // to simplify the transition, we will just treat it as if it was still the
3923                 // old intrinsic, CORINFO_INTRINSIC_Round. This should end up flowing properly
3924                 // everywhere else.
3925
3926                 retNode = impMathIntrinsic(method, sig, callType, CORINFO_INTRINSIC_Round, tailCall);
3927                 break;
3928             }
3929
3930             case NI_System_Collections_Generic_EqualityComparer_get_Default:
3931             {
3932                 // Flag for later handling during devirtualization.
3933                 isSpecial = true;
3934                 break;
3935             }
3936
3937             default:
3938                 break;
3939         }
3940     }
3941
3942     if (mustExpand)
3943     {
3944         if (retNode == nullptr)
3945         {
3946             NO_WAY("JIT must expand the intrinsic!");
3947         }
3948     }
3949
3950     // Optionally report if this intrinsic is special
3951     // (that is, potentially re-optimizable during morph).
3952     if (isSpecialIntrinsic != nullptr)
3953     {
3954         *isSpecialIntrinsic = isSpecial;
3955     }
3956
3957     return retNode;
3958 }
3959
3960 GenTree* Compiler::impMathIntrinsic(CORINFO_METHOD_HANDLE method,
3961                                     CORINFO_SIG_INFO*     sig,
3962                                     var_types             callType,
3963                                     CorInfoIntrinsics     intrinsicID,
3964                                     bool                  tailCall)
3965 {
3966     GenTree* op1;
3967     GenTree* op2;
3968
3969     assert(callType != TYP_STRUCT);
3970     assert((intrinsicID == CORINFO_INTRINSIC_Sin) || intrinsicID == CORINFO_INTRINSIC_Cbrt ||
3971            (intrinsicID == CORINFO_INTRINSIC_Sqrt) || (intrinsicID == CORINFO_INTRINSIC_Abs) ||
3972            (intrinsicID == CORINFO_INTRINSIC_Cos) || (intrinsicID == CORINFO_INTRINSIC_Round) ||
3973            (intrinsicID == CORINFO_INTRINSIC_Cosh) || (intrinsicID == CORINFO_INTRINSIC_Sinh) ||
3974            (intrinsicID == CORINFO_INTRINSIC_Tan) || (intrinsicID == CORINFO_INTRINSIC_Tanh) ||
3975            (intrinsicID == CORINFO_INTRINSIC_Asin) || (intrinsicID == CORINFO_INTRINSIC_Asinh) ||
3976            (intrinsicID == CORINFO_INTRINSIC_Acos) || (intrinsicID == CORINFO_INTRINSIC_Acosh) ||
3977            (intrinsicID == CORINFO_INTRINSIC_Atan) || (intrinsicID == CORINFO_INTRINSIC_Atan2) ||
3978            (intrinsicID == CORINFO_INTRINSIC_Atanh) || (intrinsicID == CORINFO_INTRINSIC_Log10) ||
3979            (intrinsicID == CORINFO_INTRINSIC_Pow) || (intrinsicID == CORINFO_INTRINSIC_Exp) ||
3980            (intrinsicID == CORINFO_INTRINSIC_Ceiling) || (intrinsicID == CORINFO_INTRINSIC_Floor));
3981
3982     op1 = nullptr;
3983
3984 #if defined(LEGACY_BACKEND)
3985     if (IsTargetIntrinsic(intrinsicID))
3986 #elif !defined(_TARGET_X86_)
3987     // Intrinsics that are not implemented directly by target instructions will
3988     // be re-materialized as user calls in rationalizer. For prefixed tail calls,
3989     // don't do this optimization, because
3990     //  a) For back compatibility reasons on desktop.Net 4.6 / 4.6.1
3991     //  b) It will be a non-trivial task or too late to re-materialize a surviving
3992     //     tail prefixed GT_INTRINSIC as tail call in rationalizer.
3993     if (!IsIntrinsicImplementedByUserCall(intrinsicID) || !tailCall)
3994 #else
3995     // On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation
3996     // of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad
3997     // code generation for certain EH constructs.
3998     if (!IsIntrinsicImplementedByUserCall(intrinsicID))
3999 #endif
4000     {
4001         switch (sig->numArgs)
4002         {
4003             case 1:
4004                 op1 = impPopStack().val;
4005
4006 #if FEATURE_X87_DOUBLES
4007
4008                 // X87 stack doesn't differentiate between float/double
4009                 // so it doesn't need a cast, but everybody else does
4010                 // Just double-check that it is at least an FP type
4011                 noway_assert(varTypeIsFloating(op1));
4012
4013 #else // FEATURE_X87_DOUBLES
4014
4015                 if (op1->TypeGet() != callType)
4016                 {
4017                     op1 = gtNewCastNode(callType, op1, callType);
4018                 }
4019
4020 #endif // FEATURE_X87_DOUBLES
4021
4022                 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
4023                 break;
4024
4025             case 2:
4026                 op2 = impPopStack().val;
4027                 op1 = impPopStack().val;
4028
4029 #if FEATURE_X87_DOUBLES
4030
4031                 // X87 stack doesn't differentiate between float/double
4032                 // so it doesn't need a cast, but everybody else does
4033                 // Just double-check that it is at least an FP type
4034                 noway_assert(varTypeIsFloating(op2));
4035                 noway_assert(varTypeIsFloating(op1));
4036
4037 #else // FEATURE_X87_DOUBLES
4038
4039                 if (op2->TypeGet() != callType)
4040                 {
4041                     op2 = gtNewCastNode(callType, op2, callType);
4042                 }
4043                 if (op1->TypeGet() != callType)
4044                 {
4045                     op1 = gtNewCastNode(callType, op1, callType);
4046                 }
4047
4048 #endif // FEATURE_X87_DOUBLES
4049
4050                 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicID, method);
4051                 break;
4052
4053             default:
4054                 NO_WAY("Unsupported number of args for Math Intrinsic");
4055         }
4056
4057 #ifndef LEGACY_BACKEND
4058         if (IsIntrinsicImplementedByUserCall(intrinsicID))
4059         {
4060             op1->gtFlags |= GTF_CALL;
4061         }
4062 #endif
4063     }
4064
4065     return op1;
4066 }
4067
4068 //------------------------------------------------------------------------
4069 // lookupNamedIntrinsic: map method to jit named intrinsic value
4070 //
4071 // Arguments:
4072 //    method -- method handle for method
4073 //
4074 // Return Value:
4075 //    Id for the named intrinsic, or Illegal if none.
4076 //
4077 // Notes:
4078 //    method should have CORINFO_FLG_JIT_INTRINSIC set in its attributes,
4079 //    otherwise it is not a named jit intrinsic.
4080 //
4081
4082 NamedIntrinsic Compiler::lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method)
4083 {
4084     NamedIntrinsic result = NI_Illegal;
4085
4086     const char* className     = nullptr;
4087     const char* namespaceName = nullptr;
4088     const char* methodName    = info.compCompHnd->getMethodNameFromMetadata(method, &className, &namespaceName);
4089
4090     if ((namespaceName == nullptr) || (className == nullptr) || (methodName == nullptr))
4091     {
4092         return result;
4093     }
4094
4095     if (strcmp(namespaceName, "System") == 0)
4096     {
4097         if ((strcmp(className, "Enum") == 0) && (strcmp(methodName, "HasFlag") == 0))
4098         {
4099             result = NI_System_Enum_HasFlag;
4100         }
4101         else if ((strcmp(className, "MathF") == 0) && (strcmp(methodName, "Round") == 0))
4102         {
4103             result = NI_MathF_Round;
4104         }
4105         else if ((strcmp(className, "Math") == 0) && (strcmp(methodName, "Round") == 0))
4106         {
4107             result = NI_Math_Round;
4108         }
4109     }
4110     else if (strcmp(namespaceName, "System.Collections.Generic") == 0)
4111     {
4112         if ((strcmp(className, "EqualityComparer`1") == 0) && (strcmp(methodName, "get_Default") == 0))
4113         {
4114             result = NI_System_Collections_Generic_EqualityComparer_get_Default;
4115         }
4116     }
4117
4118 #ifdef FEATURE_HW_INTRINSICS
4119 #if defined(_TARGET_XARCH_)
4120     if ((namespaceName != nullptr) && strcmp(namespaceName, "System.Runtime.Intrinsics.X86") == 0)
4121     {
4122         InstructionSet isa = lookupHWIntrinsicISA(className);
4123         result             = lookupHWIntrinsic(methodName, isa);
4124     }
4125 #elif defined(_TARGET_ARM64_)
4126     if ((namespaceName != nullptr) && strcmp(namespaceName, "System.Runtime.Intrinsics.Arm.Arm64") == 0)
4127     {
4128         result = lookupHWIntrinsic(className, methodName);
4129     }
4130 #else // !defined(_TARGET_XARCH_) && !defined(_TARGET_ARM64_)
4131 #error Unsupported platform
4132 #endif // !defined(_TARGET_XARCH_) && !defined(_TARGET_ARM64_)
4133 #endif // FEATURE_HW_INTRINSICS
4134     return result;
4135 }
4136
4137 /*****************************************************************************/
4138
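// impArrayAccessIntrinsic: expand the Array Get/Set/Address intrinsics on multi-dimensional
// arrays into a GT_ARR_ELEM address computation (plus an indirection or an assignment as needed).
// Returns nullptr when the expansion is not performed (SMALL_CODE, rank 1, or an unsuitable
// element type/size).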
4139 GenTree* Compiler::impArrayAccessIntrinsic(
4140     CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, CorInfoIntrinsics intrinsicID)
4141 {
4142     /* If we are generating SMALL_CODE, we don't want to use intrinsics for
4143        the following, as it generates fatter code.
4144     */
4145
4146     if (compCodeOpt() == SMALL_CODE)
4147     {
4148         return nullptr;
4149     }
4150
4151     /* These intrinsics generate fatter (but faster) code and are only
4152        done if we don't need SMALL_CODE */
4153
4154     unsigned rank = (intrinsicID == CORINFO_INTRINSIC_Array_Set) ? (sig->numArgs - 1) : sig->numArgs;
4155
4156     // The rank 1 case is special because it has to handle two array formats;
4157     // we simply don't handle that case here.
4158     if (rank > GT_ARR_MAX_RANK || rank <= 1)
4159     {
4160         return nullptr;
4161     }
4162
4163     CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr;
4164     var_types            elemType      = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd));
4165
4166     // For the ref case, we will only be able to inline if the types match
4167     // (the verifier checks for this; we don't care about the non-verified case) and the
4168     // type is final (so we don't need to do the cast).
4169     if ((intrinsicID != CORINFO_INTRINSIC_Array_Get) && !readonlyCall && varTypeIsGC(elemType))
4170     {
4171         // Get the call site signature
4172         CORINFO_SIG_INFO LocalSig;
4173         eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig);
4174         assert(LocalSig.hasThis());
4175
4176         CORINFO_CLASS_HANDLE actualElemClsHnd;
4177
4178         if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
4179         {
4180             // Fetch the last argument, the one that indicates the type we are setting.
4181             CORINFO_ARG_LIST_HANDLE argType = LocalSig.args;
4182             for (unsigned r = 0; r < rank; r++)
4183             {
4184                 argType = info.compCompHnd->getArgNext(argType);
4185             }
4186
4187             typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType);
4188             actualElemClsHnd = argInfo.GetClassHandle();
4189         }
4190         else
4191         {
4192             assert(intrinsicID == CORINFO_INTRINSIC_Array_Address);
4193
4194             // Fetch the return type
4195             typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass);
4196             assert(retInfo.IsByRef());
4197             actualElemClsHnd = retInfo.GetClassHandle();
4198         }
4199
4200         // if it's not final, we can't do the optimization
4201         if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL))
4202         {
4203             return nullptr;
4204         }
4205     }
4206
4207     unsigned arrayElemSize;
4208     if (elemType == TYP_STRUCT)
4209     {
4210         assert(arrElemClsHnd);
4211
4212         arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd);
4213     }
4214     else
4215     {
4216         arrayElemSize = genTypeSize(elemType);
4217     }
4218
4219     if ((unsigned char)arrayElemSize != arrayElemSize)
4220     {
4221         // arrayElemSize would be truncated as an unsigned char.
4222         // This means the array element is too large. Don't do the optimization.
4223         return nullptr;
4224     }
4225
4226     GenTree* val = nullptr;
4227
4228     if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
4229     {
4230         // Assignment of a struct is more work, and there are more gets than sets.
4231         if (elemType == TYP_STRUCT)
4232         {
4233             return nullptr;
4234         }
4235
4236         val = impPopStack().val;
4237         assert(genActualType(elemType) == genActualType(val->gtType) ||
4238                (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) ||
4239                (elemType == TYP_INT && val->gtType == TYP_BYREF) ||
4240                (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT));
4241     }
4242
4243     noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK);
4244
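    // Pop the index expressions in reverse push order so that inds[] ends up in
    // left-to-right (source) order.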
4245     GenTree* inds[GT_ARR_MAX_RANK];
4246     for (unsigned k = rank; k > 0; k--)
4247     {
4248         inds[k - 1] = impPopStack().val;
4249     }
4250
4251     GenTree* arr = impPopStack().val;
4252     assert(arr->gtType == TYP_REF);
4253
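    // Build a GT_ARR_ELEM node that computes a byref to the selected array element.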
4254     GenTree* arrElem =
4255         new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank),
4256                                                static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]);
4257
4258     if (intrinsicID != CORINFO_INTRINSIC_Array_Address)
4259     {
4260         arrElem = gtNewOperNode(GT_IND, elemType, arrElem);
4261     }
4262
4263     if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
4264     {
4265         assert(val != nullptr);
4266         return gtNewAssignNode(arrElem, val);
4267     }
4268     else
4269     {
4270         return arrElem;
4271     }
4272 }
4273
4274 BOOL Compiler::verMergeEntryStates(BasicBlock* block, bool* changed)
4275 {
4276     unsigned i;
4277
4278     // do some basic checks first
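    // The entry stack depth recorded for the block must match the current stack depth
    // exactly; otherwise the states cannot be merged.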
4279     if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth)
4280     {
4281         return FALSE;
4282     }
4283
4284     if (verCurrentState.esStackDepth > 0)
4285     {
4286         // merge stack types
4287         StackEntry* parentStack = block->bbStackOnEntry();
4288         StackEntry* childStack  = verCurrentState.esStack;
4289
4290         for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++)
4291         {
4292             if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == FALSE)
4293             {
4294                 return FALSE;
4295             }
4296         }
4297     }
4298
4299     // merge initialization status of this ptr
4300
4301     if (verTrackObjCtorInitState)
4302     {
4303         // If we're tracking the CtorInitState, then it must not be unknown in the current state.
4304         assert(verCurrentState.thisInitialized != TIS_Bottom);
4305
4306         // If the successor block's thisInit state is unknown, copy it from the current state.
4307         if (block->bbThisOnEntry() == TIS_Bottom)
4308         {
4309             *changed = true;
4310             verSetThisInit(block, verCurrentState.thisInitialized);
4311         }
4312         else if (verCurrentState.thisInitialized != block->bbThisOnEntry())
4313         {
4314             if (block->bbThisOnEntry() != TIS_Top)
4315             {
4316                 *changed = true;
4317                 verSetThisInit(block, TIS_Top);
4318
4319                 if (block->bbFlags & BBF_FAILED_VERIFICATION)
4320                 {
4321                     // The block is bad. Control can flow through the block to any handler that catches the
4322                     // verification exception, but the importer ignores bad blocks and therefore won't model
4323                     // this flow in the normal way. To complete the merge into the bad block, the new state
4324                     // needs to be manually pushed to the handlers that may be reached after the verification
4325                     // exception occurs.
4326                     //
4327                     // Usually, the new state was already propagated to the relevant handlers while processing
4328                     // the predecessors of the bad block. The exception is when the bad block is at the start
4329                     // of a try region, meaning it is protected by additional handlers that do not protect its
4330                     // predecessors.
4331                     //
4332                     if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0))
4333                     {
4334                         // Push TIS_Top to the handlers that protect the bad block. Note that this can cause
4335                         // recursive calls back into this code path (if successors of the current bad block are
4336                         // also bad blocks).
4337                         //
4338                         ThisInitState origTIS           = verCurrentState.thisInitialized;
4339                         verCurrentState.thisInitialized = TIS_Top;
4340                         impVerifyEHBlock(block, true);
4341                         verCurrentState.thisInitialized = origTIS;
4342                     }
4343                 }
4344             }
4345         }
4346     }
4347     else
4348     {
4349         assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom);
4350     }
4351
4352     return TRUE;
4353 }
4354
4355 /*****************************************************************************
4356  * 'logMsg' is true if a log message needs to be logged. false if the caller has
4357  *   already logged it (presumably in a more detailed fashion than done here)
4358  * 'bVerificationException' is true for a verification exception, false for a
4359  *   "call unauthorized by host" exception.
4360  */
4361
4362 void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg))
4363 {
4364     block->bbJumpKind = BBJ_THROW;
4365     block->bbFlags |= BBF_FAILED_VERIFICATION;
4366
4367     impCurStmtOffsSet(block->bbCodeOffs);
4368
4369 #ifdef DEBUG
4370     // we need this since impBeginTreeList asserts otherwise
4371     impTreeList = impTreeLast = nullptr;
4372     block->bbFlags &= ~BBF_IMPORTED;
4373
4374     if (logMsg)
4375     {
4376         JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName,
4377                 block->bbCodeOffs, block->bbCodeOffsEnd));
4378         if (verbose)
4379         {
4380             printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs);
4381         }
4382     }
4383
4384     if (JitConfig.DebugBreakOnVerificationFailure())
4385     {
4386         DebugBreak();
4387     }
4388 #endif
4389
4390     impBeginTreeList();
4391
4392     // if the stack is non-empty, evaluate all the side-effects
4393     if (verCurrentState.esStackDepth > 0)
4394     {
4395         impEvalSideEffects();
4396     }
4397     assert(verCurrentState.esStackDepth == 0);
4398
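    // Emit a call to the verification-failure throw helper, passing the IL offset of the block.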
4399     GenTree* op1 =
4400         gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, gtNewArgList(gtNewIconNode(block->bbCodeOffs)));
4401     // verCurrentState.esStackDepth = 0;
4402     impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
4403
4404     // The inliner is not able to handle methods that require a throw block, so
4405     // make sure this method never gets inlined.
4406     info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE);
4407 }
4408
4409 /*****************************************************************************
4410  *
4411  */
4412 void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg))
4413
4414 {
4415     // In AMD64, for historical reasons involving design limitations of JIT64, the VM has a
4416     // slightly different mechanism in which it calls the JIT to perform IL verification:
4417     // in the case of transparent methods the VM calls for a predicate IsVerifiable()
4418     // that consists of calling the JIT with the IMPORT_ONLY flag and with the IL verify flag on.
4419     // If the JIT determines the method is not verifiable, it should raise the exception to the VM and let
4420     // it bubble up until reported by the runtime.  Currently in RyuJIT, this method doesn't bubble
4421     // up the exception; instead it embeds a throw inside the offending basic block and lets the
4422     // jitted method fail at run time.
4423     //
4424     // For AMD64 we don't want this behavior when the JIT has been called only for verification (i.e.
4425     // with the IMPORT_ONLY and IL Verification flag set) because this won't actually generate code,
4426     // just try to find out whether to fail this method before even actually jitting it.  So, in case
4427     // we detect these two conditions, instead of generating a throw statement inside the offending
4428     // basic block, we immediately fail to JIT and notify the VM so that the IsVerifiable() predicate
4429     // returns false, making RyuJIT behave the same way JIT64 does.
4430     //
4431     // The rationale behind this workaround is to avoid modifying the VM and maintain compatibility between JIT64 and
4432     // RyuJIT for the time being until we completely replace JIT64.
4433     // TODO-ARM64-Cleanup:  We probably want to actually modify the VM in the future to avoid the unnecessary two passes.
4434
4435     // In AMD64 we must make sure we're behaving the same way as JIT64, meaning we should only raise the verification
4436     // exception if we are only importing and verifying.  The method verNeedsVerification() can also modify the
4437     // tiVerificationNeeded flag in the case it determines it can 'skip verification' during importation and defer it
4438     // to a runtime check. That's why we must assert one or the other (since the flag tiVerificationNeeded can
4439     // be turned off during importation).
4440     CLANG_FORMAT_COMMENT_ANCHOR;
4441
4442 #ifdef _TARGET_64BIT_
4443
4444 #ifdef DEBUG
4445     bool canSkipVerificationResult =
4446         info.compCompHnd->canSkipMethodVerification(info.compMethodHnd) != CORINFO_VERIFICATION_CANNOT_SKIP;
4447     assert(tiVerificationNeeded || canSkipVerificationResult);
4448 #endif // DEBUG
4449
4450     // Add the non-verifiable flag to the compiler
4451     if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
4452     {
4453         tiIsVerifiableCode = FALSE;
4454     }
4455 #endif //_TARGET_64BIT_
4456     verResetCurrentState(block, &verCurrentState);
4457     verConvertBBToThrowVerificationException(block DEBUGARG(logMsg));
4458
4459 #ifdef DEBUG
4460     impNoteLastILoffs(); // Remember at which BC offset the tree was finished
4461 #endif                   // DEBUG
4462 }
4463
4464 /******************************************************************************/
4465 typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd)
4466 {
4467     assert(ciType < CORINFO_TYPE_COUNT);
4468
4469     typeInfo tiResult;
4470     switch (ciType)
4471     {
4472         case CORINFO_TYPE_STRING:
4473         case CORINFO_TYPE_CLASS:
4474             tiResult = verMakeTypeInfo(clsHnd);
4475             if (!tiResult.IsType(TI_REF))
4476             { // type must be consistent with element type
4477                 return typeInfo();
4478             }
4479             break;
4480
4481 #ifdef _TARGET_64BIT_
4482         case CORINFO_TYPE_NATIVEINT:
4483         case CORINFO_TYPE_NATIVEUINT:
4484             if (clsHnd)
4485             {
4486                 // If we have more precise information, use it
4487                 return verMakeTypeInfo(clsHnd);
4488             }
4489             else
4490             {
4491                 return typeInfo::nativeInt();
4492             }
4493             break;
4494 #endif // _TARGET_64BIT_
4495
4496         case CORINFO_TYPE_VALUECLASS:
4497         case CORINFO_TYPE_REFANY:
4498             tiResult = verMakeTypeInfo(clsHnd);
4499             // type must be consistent with element type;
4500             if (!tiResult.IsValueClass())
4501             {
4502                 return typeInfo();
4503             }
4504             break;
4505         case CORINFO_TYPE_VAR:
4506             return verMakeTypeInfo(clsHnd);
4507
4508         case CORINFO_TYPE_PTR: // for now, pointers are treated as an error
4509         case CORINFO_TYPE_VOID:
4510             return typeInfo();
4511             break;
4512
4513         case CORINFO_TYPE_BYREF:
4514         {
4515             CORINFO_CLASS_HANDLE childClassHandle;
4516             CorInfoType          childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle);
4517             return ByRef(verMakeTypeInfo(childType, childClassHandle));
4518         }
4519         break;
4520
4521         default:
4522             if (clsHnd)
4523             { // If we have more precise information, use it
4524                 return typeInfo(TI_STRUCT, clsHnd);
4525             }
4526             else
4527             {
4528                 return typeInfo(JITtype2tiType(ciType));
4529             }
4530     }
4531     return tiResult;
4532 }
4533
4534 /******************************************************************************/
4535
4536 typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */)
4537 {
4538     if (clsHnd == nullptr)
4539     {
4540         return typeInfo();
4541     }
4542
4543     // Byrefs should only occur in method and local signatures, which are accessed
4544     // using ICorClassInfo and ICorClassInfo.getChildType.
4545     // So findClass() and getClassAttribs() should not be called for byrefs
4546
4547     if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF)
4548     {
4549         assert(!"Did findClass() return a Byref?");
4550         return typeInfo();
4551     }
4552
4553     unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd);
4554
4555     if (attribs & CORINFO_FLG_VALUECLASS)
4556     {
4557         CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd);
4558
4559         // Meta-data validation should ensure that CORINFO_TYPE_BYREF does
4560         // not occur here, so we may want to change this to an assert instead.
4561         if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR)
4562         {
4563             return typeInfo();
4564         }
4565
4566 #ifdef _TARGET_64BIT_
4567         if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT)
4568         {
4569             return typeInfo::nativeInt();
4570         }
4571 #endif // _TARGET_64BIT_
4572
4573         if (t != CORINFO_TYPE_UNDEF)
4574         {
4575             return (typeInfo(JITtype2tiType(t)));
4576         }
4577         else if (bashStructToRef)
4578         {
4579             return (typeInfo(TI_REF, clsHnd));
4580         }
4581         else
4582         {
4583             return (typeInfo(TI_STRUCT, clsHnd));
4584         }
4585     }
4586     else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE)
4587     {
4588         // See comment in _typeInfo.h for why we do it this way.
4589         return (typeInfo(TI_REF, clsHnd, true));
4590     }
4591     else
4592     {
4593         return (typeInfo(TI_REF, clsHnd));
4594     }
4595 }
4596
4597 /******************************************************************************/
4598 BOOL Compiler::verIsSDArray(typeInfo ti)
4599 {
4600     if (ti.IsNullObjRef())
4601     { // nulls are SD arrays
4602         return TRUE;
4603     }
4604
4605     if (!ti.IsType(TI_REF))
4606     {
4607         return FALSE;
4608     }
4609
4610     if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef()))
4611     {
4612         return FALSE;
4613     }
4614     return TRUE;
4615 }
4616
4617 /******************************************************************************/
4618 /* Given 'arrayObjectType' which is an array type, fetch the element type. */
4619 /* Returns an error type if anything goes wrong */
4620
4621 typeInfo Compiler::verGetArrayElemType(typeInfo arrayObjectType)
4622 {
4623     assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case
4624
4625     if (!verIsSDArray(arrayObjectType))
4626     {
4627         return typeInfo();
4628     }
4629
4630     CORINFO_CLASS_HANDLE childClassHandle = nullptr;
4631     CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle);
4632
4633     return verMakeTypeInfo(ciType, childClassHandle);
4634 }
4635
4636 /*****************************************************************************
4637  */
4638 typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args)
4639 {
4640     CORINFO_CLASS_HANDLE classHandle;
4641     CorInfoType          ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle));
4642
4643     var_types type = JITtype2varType(ciType);
4644     if (varTypeIsGC(type))
4645     {
4646         // For efficiency, getArgType only returns something in classHandle for
4647         // value types.  For other types that have additional type info, you
4648         // have to call back explicitly
4649         classHandle = info.compCompHnd->getArgClass(sig, args);
4650         if (!classHandle)
4651         {
4652             NO_WAY("Could not figure out Class specified in argument or local signature");
4653         }
4654     }
4655
4656     return verMakeTypeInfo(ciType, classHandle);
4657 }
4658
4659 /*****************************************************************************/
4660
4661 // This does the expensive check to figure out whether the method
4662 // needs to be verified. It is called only when we fail verification,
4663 // just before throwing the verification exception.
4664
4665 BOOL Compiler::verNeedsVerification()
4666 {
4667     // If we have previously determined that verification is NOT needed
4668     // (for example in Compiler::compCompile), that means verification is really not needed.
4669     // Return the same decision we made before.
4670     // (Note: This literally means that tiVerificationNeeded can never go from 0 to 1.)
4671
4672     if (!tiVerificationNeeded)
4673     {
4674         return tiVerificationNeeded;
4675     }
4676
4677     assert(tiVerificationNeeded);
4678
4679     // Ok, we haven't concluded that verification is NOT needed. Consult the EE now to
4680     // obtain the answer.
4681     CorInfoCanSkipVerificationResult canSkipVerificationResult =
4682         info.compCompHnd->canSkipMethodVerification(info.compMethodHnd);
4683
4684     // canSkipVerification will return one of the following three values:
4685     //    CORINFO_VERIFICATION_CANNOT_SKIP = 0,       // Cannot skip verification during jit time.
4686     //    CORINFO_VERIFICATION_CAN_SKIP = 1,          // Can skip verification during jit time.
4687     //    CORINFO_VERIFICATION_RUNTIME_CHECK = 2,     // Skip verification during jit time,
4688     //     but need to insert a callout to the VM to ask during runtime
4689     //     whether to skip verification or not.
4690
4691     // Set tiRuntimeCalloutNeeded if canSkipVerification() instructs us to insert a callout for runtime check
4692     if (canSkipVerificationResult == CORINFO_VERIFICATION_RUNTIME_CHECK)
4693     {
4694         tiRuntimeCalloutNeeded = true;
4695     }
4696
4697     if (canSkipVerificationResult == CORINFO_VERIFICATION_DONT_JIT)
4698     {
4699         // Dev10 706080 - Testers don't like the assert, so just silence it
4700         // by not using the macros that invoke debugAssert.
4701         badCode();
4702     }
4703
4704     // When tiVerificationNeeded is true, JIT will do the verification during JIT time.
4705     // The following line means we will NOT do jit time verification if canSkipVerification
4706     // returns CORINFO_VERIFICATION_CAN_SKIP or CORINFO_VERIFICATION_RUNTIME_CHECK.
4707     tiVerificationNeeded = (canSkipVerificationResult == CORINFO_VERIFICATION_CANNOT_SKIP);
4708     return tiVerificationNeeded;
4709 }
4710
4711 BOOL Compiler::verIsByRefLike(const typeInfo& ti)
4712 {
4713     if (ti.IsByRef())
4714     {
4715         return TRUE;
4716     }
4717     if (!ti.IsType(TI_STRUCT))
4718     {
4719         return FALSE;
4720     }
4721     return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR;
4722 }
4723
4724 BOOL Compiler::verIsSafeToReturnByRef(const typeInfo& ti)
4725 {
4726     if (ti.IsPermanentHomeByRef())
4727     {
4728         return TRUE;
4729     }
4730     else
4731     {
4732         return FALSE;
4733     }
4734 }
4735
4736 BOOL Compiler::verIsBoxable(const typeInfo& ti)
4737 {
4738     return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables
4739             || ti.IsUnboxedGenericTypeVar() ||
4740             (ti.IsType(TI_STRUCT) &&
4741              // exclude byreflike structs
4742              !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR)));
4743 }
4744
4745 // Is it a boxed value type?
4746 bool Compiler::verIsBoxedValueType(typeInfo ti)
4747 {
4748     if (ti.GetType() == TI_REF)
4749     {
4750         CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef();
4751         return !!eeIsValueClass(clsHnd);
4752     }
4753     else
4754     {
4755         return false;
4756     }
4757 }
4758
4759 /*****************************************************************************
4760  *
4761  *  Check if a TailCall is legal.
4762  */
4763
4764 bool Compiler::verCheckTailCallConstraint(
4765     OPCODE                  opcode,
4766     CORINFO_RESOLVED_TOKEN* pResolvedToken,
4767     CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter?
4768     bool                    speculative                // If true, won't throw if verification fails. Instead it will
4769                                                        // return false to the caller.
4770                                                        // If false, it will throw.
4771     )
4772 {
4773     DWORD            mflags;
4774     CORINFO_SIG_INFO sig;
4775     unsigned int     popCount = 0; // we can't pop the stack since impImportCall needs it, so
4776                                    // this counter is used to keep track of how many items have been
4777                                    // virtually popped
4778
4779     CORINFO_METHOD_HANDLE methodHnd       = nullptr;
4780     CORINFO_CLASS_HANDLE  methodClassHnd  = nullptr;
4781     unsigned              methodClassFlgs = 0;
4782
4783     assert(impOpcodeIsCallOpcode(opcode));
4784
4785     if (compIsForInlining())
4786     {
4787         return false;
4788     }
4789
4790     // for calli, we cannot resolve the target method, so work from the call-site signature
4791     if (opcode == CEE_CALLI)
4792     {
4793         /* Get the call sig */
4794         eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4795
4796         // We don't know the target method, so we have to infer the flags, or
4797         // assume the worst-case.
4798         mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
4799     }
4800     else
4801     {
4802         methodHnd = pResolvedToken->hMethod;
4803
4804         mflags = info.compCompHnd->getMethodAttribs(methodHnd);
4805
4806         // When verifying generic code we pair the method handle with its
4807         // owning class to get the exact method signature.
4808         methodClassHnd = pResolvedToken->hClass;
4809         assert(methodClassHnd);
4810
4811         eeGetMethodSig(methodHnd, &sig, methodClassHnd);
4812
4813         // opcode specific check
4814         methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd);
4815     }
4816
4817     // We must have got the methodClassHnd if opcode is not CEE_CALLI
4818     assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI);
4819
4820     if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4821     {
4822         eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4823     }
4824
4825     // check compatibility of the arguments
4826     unsigned int argCount;
4827     argCount = sig.numArgs;
4828     CORINFO_ARG_LIST_HANDLE args;
4829     args = sig.args;
4830     while (argCount--)
4831     {
4832         typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack();
4833
4834         // check that the argument is not a byref for tailcalls
4835         VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative);
4836
4837         // For unsafe code, we might have parameters containing pointer to the stack location.
4838         // Disallow the tailcall for this kind.
4839         CORINFO_CLASS_HANDLE classHandle;
4840         CorInfoType          ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle));
4841         VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative);
4842
4843         args = info.compCompHnd->getArgNext(args);
4844     }
4845
4846     // update popCount
4847     popCount += sig.numArgs;
4848
4849     // check for 'this' which is on non-static methods, not called via NEWOBJ
4850     if (!(mflags & CORINFO_FLG_STATIC))
4851     {
4852         // Always update the popCount.
4853         // This is crucial for the stack calculation to be correct.
4854         typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4855         popCount++;
4856
4857         if (opcode == CEE_CALLI)
4858         {
4859             // For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object
4860             // on the stack.
4861             if (tiThis.IsValueClass())
4862             {
4863                 tiThis.MakeByRef();
4864             }
4865             VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative);
4866         }
4867         else
4868         {
4869             // Check type compatibility of the this argument
4870             typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd);
4871             if (tiDeclaredThis.IsValueClass())
4872             {
4873                 tiDeclaredThis.MakeByRef();
4874             }
4875
4876             VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative);
4877         }
4878     }
4879
4880     // Tail calls on constrained calls should be illegal too:
4881     // when instantiated at a value type, a constrained call may pass the address of a stack allocated value
4882     VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative);
4883
4884     // Get the exact view of the signature for an array method
4885     if (sig.retType != CORINFO_TYPE_VOID)
4886     {
4887         if (methodClassFlgs & CORINFO_FLG_ARRAY)
4888         {
4889             assert(opcode != CEE_CALLI);
4890             eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4891         }
4892     }
4893
4894     typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass);
4895     typeInfo tiCallerRetType =
4896         verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
4897
4898     // void return type gets morphed into the error type, so we have to treat them specially here
4899     if (sig.retType == CORINFO_TYPE_VOID)
4900     {
4901         VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch",
4902                                   speculative);
4903     }
4904     else
4905     {
4906         VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType),
4907                                                    NormaliseForStack(tiCallerRetType), true),
4908                                   "tailcall return mismatch", speculative);
4909     }
4910
4911     // for tailcall, stack must be empty
4912     VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative);
4913
4914     return true; // Yes, tailcall is legal
4915 }
4916
4917 /*****************************************************************************
4918  *
4919  *  Checks the IL verification rules for the call
4920  */
4921
4922 void Compiler::verVerifyCall(OPCODE                  opcode,
4923                              CORINFO_RESOLVED_TOKEN* pResolvedToken,
4924                              CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
4925                              bool                    tailCall,
4926                              bool                    readonlyCall,
4927                              const BYTE*             delegateCreateStart,
4928                              const BYTE*             codeAddr,
4929                              CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName))
4930 {
4931     DWORD             mflags;
4932     CORINFO_SIG_INFO* sig      = nullptr;
4933     unsigned int      popCount = 0; // we can't pop the stack since impImportCall needs it, so
4934                                     // this counter is used to keep track of how many items have been
4935                                     // virtually popped
4936
4937     // for calli, VerifyOrReturn that this is not a virtual method
4938     if (opcode == CEE_CALLI)
4939     {
4940         Verify(false, "Calli not verifiable");
4941         return;
4942     }
4943
4944     //<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item.
4945     mflags = callInfo->verMethodFlags;
4946
4947     sig = &callInfo->verSig;
4948
4949     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4950     {
4951         eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
4952     }
4953
4954     // opcode specific check
4955     unsigned methodClassFlgs = callInfo->classFlags;
4956     switch (opcode)
4957     {
4958         case CEE_CALLVIRT:
4959             // cannot do callvirt on valuetypes
4960             VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class");
4961             VerifyOrReturn(sig->hasThis(), "CallVirt on static method");
4962             break;
4963
4964         case CEE_NEWOBJ:
4965         {
4966             assert(!tailCall); // Importer should not allow this
4967             VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC),
4968                            "newobj must be on instance");
4969
4970             if (methodClassFlgs & CORINFO_FLG_DELEGATE)
4971             {
4972                 VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor");
4973                 typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
4974                 typeInfo tiDeclaredFtn =
4975                     verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack();
4976                 VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type");
4977
4978                 assert(popCount == 0);
4979                 typeInfo tiActualObj = impStackTop(1).seTypeInfo;
4980                 typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
4981
4982                 VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg");
4983                 VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch");
4984                 VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF),
4985                                "delegate object type mismatch");
4986
4987                 CORINFO_CLASS_HANDLE objTypeHandle =
4988                     tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef();
4989
4990                 // the method signature must be compatible with the delegate's invoke method
4991
4992                 // check that for virtual functions, the type of the object used to get the
4993                 // ftn ptr is the same as the type of the object passed to the delegate ctor.
4994                 // since this is a bit of work to determine in general, we pattern match stylized
4995                 // code sequences
4996
4997                 // the delegate creation code check, which used to be done later, is now done here
4998                 // so we can read delegateMethodRef directly
4999                 // from the preceding LDFTN or CEE_LDVIRTFN instruction sequence;
5000                 // we then use it in our call to isCompatibleDelegate().
5001
5002                 mdMemberRef delegateMethodRef = mdMemberRefNil;
5003                 VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef),
5004                                "must create delegates with certain IL");
5005
5006                 CORINFO_RESOLVED_TOKEN delegateResolvedToken;
5007                 delegateResolvedToken.tokenContext = impTokenLookupContextHandle;
5008                 delegateResolvedToken.tokenScope   = info.compScopeHnd;
5009                 delegateResolvedToken.token        = delegateMethodRef;
5010                 delegateResolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
5011                 info.compCompHnd->resolveToken(&delegateResolvedToken);
5012
5013                 CORINFO_CALL_INFO delegateCallInfo;
5014                 eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */,
5015                               addVerifyFlag(CORINFO_CALLINFO_SECURITYCHECKS), &delegateCallInfo);
5016
5017                 BOOL isOpenDelegate = FALSE;
5018                 VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass,
5019                                                                       tiActualFtn.GetMethod(), pResolvedToken->hClass,
5020                                                                       &isOpenDelegate),
5021                                "function incompatible with delegate");
5022
5023                 // check the constraints on the target method
5024                 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass),
5025                                "delegate target has unsatisfied class constraints");
5026                 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass,
5027                                                                             tiActualFtn.GetMethod()),
5028                                "delegate target has unsatisfied method constraints");
5029
5030                 // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch)
5031                 // for additional verification rules for delegates
5032                 CORINFO_METHOD_HANDLE actualMethodHandle  = tiActualFtn.GetMethod();
5033                 DWORD                 actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle);
5034                 if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
5035                 {
5036
5037                     if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0)
5038 #ifdef DEBUG
5039                         && StrictCheckForNonVirtualCallToVirtualMethod()
5040 #endif
5041                             )
5042                     {
5043                         if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
5044                         {
5045                             VerifyOrReturn(tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly() ||
5046                                                verIsBoxedValueType(tiActualObj),
5047                                            "The 'this' parameter to the call must be either the calling method's "
5048                                            "'this' parameter or "
5049                                            "a boxed value type.");
5050                         }
5051                     }
5052                 }
5053
5054                 if (actualMethodAttribs & CORINFO_FLG_PROTECTED)
5055                 {
5056                     BOOL targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC;
5057
5058                     Verify(targetIsStatic || !isOpenDelegate,
5059                            "Unverifiable creation of an open instance delegate for a protected member.");
5060
5061                     CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic)
5062                                                                 ? info.compClassHnd
5063                                                                 : tiActualObj.GetClassHandleForObjRef();
5064
5065                     // In the case of protected methods, it is a requirement that the 'this'
5066                     // pointer be a subclass of the current context.  Perform this check.
5067                     Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
5068                            "Accessing protected method through wrong type.");
5069                 }
5070                 goto DONE_ARGS;
5071             }
5072         }
5073         // fall thru to default checks
5074         default:
5075             VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract");
5076     }
5077     VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)),
5078                    "can only newobj a delegate constructor");
5079
5080     // check compatibility of the arguments
5081     unsigned int argCount;
5082     argCount = sig->numArgs;
5083     CORINFO_ARG_LIST_HANDLE args;
5084     args = sig->args;
5085     while (argCount--)
5086     {
5087         typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo;
5088
5089         typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack();
5090         VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch");
5091
5092         args = info.compCompHnd->getArgNext(args);
5093     }
5094
5095 DONE_ARGS:
5096
5097     // update popCount
5098     popCount += sig->numArgs;
5099
5100     // check for 'this' which is on non-static methods, not called via NEWOBJ
5101     CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
5102     if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ))
5103     {
5104         typeInfo tiThis = impStackTop(popCount).seTypeInfo;
5105         popCount++;
5106
5107         // If it is null, we assume we can access it (since it will AV shortly)
5108         // If it is anything but a reference class, there is no hierarchy, so
5109         // again, we don't need the precise instance class to compute 'protected' access
5110         if (tiThis.IsType(TI_REF))
5111         {
5112             instanceClassHnd = tiThis.GetClassHandleForObjRef();
5113         }
5114
5115         // Check type compatibility of the this argument
5116         typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass);
5117         if (tiDeclaredThis.IsValueClass())
5118         {
5119             tiDeclaredThis.MakeByRef();
5120         }
5121
5122         // If this is a call to the base class .ctor, set thisPtr Init for
5123         // this block.
5124         if (mflags & CORINFO_FLG_CONSTRUCTOR)
5125         {
5126             if (verTrackObjCtorInitState && tiThis.IsThisPtr() &&
5127                 verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass))
5128             {
5129                 assert(verCurrentState.thisInitialized !=
5130                        TIS_Bottom); // This should never be the case just from the logic of the verifier.
5131                 VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit,
5132                                "Call to base class constructor when 'this' is possibly initialized");
5133                 // Otherwise, 'this' is now initialized.
5134                 verCurrentState.thisInitialized = TIS_Init;
5135                 tiThis.SetInitialisedObjRef();
5136             }
5137             else
5138             {
5139                 // We allow direct calls to value type constructors
5140                 // NB: we have to check that the contents of tiThis is a value type, otherwise we could use a
5141                 // constrained callvirt to illegally re-enter a .ctor on a value of reference type.
5142                 VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(),
5143                                "Bad call to a constructor");
5144             }
5145         }
5146
5147         if (pConstrainedResolvedToken != nullptr)
5148         {
5149             VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call");
5150
5151             typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass);
5152
5153             // We just dereference this and test for equality
5154             tiThis.DereferenceByRef();
5155             VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint),
5156                            "this type mismatch with constrained type operand");
5157
5158             // Now pretend the this type is the boxed constrained type, for the sake of subsequent checks
5159             tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass);
5160         }
5161
5162         // To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too
5163         if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef())
5164         {
5165             tiDeclaredThis.SetIsReadonlyByRef();
5166         }
5167
5168         VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch");
5169
5170         if (tiThis.IsByRef())
5171         {
5172             // Find the actual type where the method exists (as opposed to what is declared
5173             // in the metadata). This is to prevent passing a byref as the "this" argument
5174             // while calling methods like System.ValueType.GetHashCode() which expect boxed objects.
5175
5176             CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod);
5177             VerifyOrReturn(eeIsValueClass(actualClassHnd),
5178                            "Call to base type of valuetype (which is never a valuetype)");
5179         }
5180
5181         // Rules for non-virtual call to a non-final virtual method:
5182
5183         // Define:
5184         // The "this" pointer is considered to be "possibly written" if
5185         //   1. Its address have been taken (LDARGA 0) anywhere in the method.
5186         //   (or)
5187         //   2. It has been stored to (STARG.0) anywhere in the method.
5188
5189         // A non-virtual call to a non-final virtual method is only allowed if
5190         //   1. The this pointer passed to the callee is an instance of a boxed value type.
5191         //   (or)
5192         //   2. The this pointer passed to the callee is the current method's this pointer.
5193         //      (and) The current method's this pointer is not "possibly written".
5194
5195         // Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to
5196         // virtual methods.  (Luckily this does not affect .ctors, since they are not virtual).
5197         // This is stronger than is strictly needed, but implementing a laxer rule is significantly
5198         // hard and more error prone.
5199
5200         if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0)
5201 #ifdef DEBUG
5202             && StrictCheckForNonVirtualCallToVirtualMethod()
5203 #endif
5204                 )
5205         {
5206             if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
5207             {
5208                 VerifyOrReturn(
5209                     tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly() || verIsBoxedValueType(tiThis),
5210                     "The 'this' parameter to the call must be either the calling method's 'this' parameter or "
5211                     "a boxed value type.");
5212             }
5213         }
5214     }
5215
5216     // check any constraints on the callee's class and type parameters
5217     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass),
5218                    "method has unsatisfied class constraints");
5219     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod),
5220                    "method has unsatisfied method constraints");
5221
5222     if (mflags & CORINFO_FLG_PROTECTED)
5223     {
5224         VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
5225                        "Can't access protected method");
5226     }
5227
5228     // Get the exact view of the signature for an array method
5229     if (sig->retType != CORINFO_TYPE_VOID)
5230     {
5231         eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass);
5232     }
5233
5234     // "readonly." prefixed calls only allowed for the Address operation on arrays.
5235     // The methods supported by array types are under the control of the EE
5236     // so we can trust that only the Address operation returns a byref.
5237     if (readonlyCall)
5238     {
5239         typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass);
5240         VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(),
5241                        "unexpected use of readonly prefix");
5242     }
5243
5244     // Verify the tailcall
5245     if (tailCall)
5246     {
5247         verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false);
5248     }
5249 }
5250
5251 /*****************************************************************************
5252  *  Checks that a delegate creation is done using the following pattern:
5253  *     dup
5254  *     ldvirtftn targetMemberRef
5255  *  OR
5256  *     ldftn targetMemberRef
5257  *
5258  * 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if
5259  *  not in this basic block)
5260  *
5261  *  targetMemberRef is read from the code sequence.
5262  *  targetMemberRef is validated iff verificationNeeded.
5263  */
5264
5265 BOOL Compiler::verCheckDelegateCreation(const BYTE*  delegateCreateStart,
5266                                         const BYTE*  codeAddr,
5267                                         mdMemberRef& targetMemberRef)
5268 {
5269     if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
5270     {
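        // CEE_LDFTN is a two-byte opcode (FE 06); the 4-byte method token immediately follows it.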
5271         targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]);
5272         return TRUE;
5273     }
5274     else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr))
5275     {
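        // dup (one byte) is followed by CEE_LDVIRTFTN (two-byte opcode FE 07), so the 4-byte
        // method token starts at offset 3.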
5276         targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]);
5277         return TRUE;
5278     }
5279
5280     return FALSE;
5281 }
5282
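// Verify a st*ind style store: the destination must be a writable byref whose pointed-at type
// is consistent with the instruction, and the value on the stack must be assignable to it.
// Returns the pointed-at type.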
5283 typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType)
5284 {
5285     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
5286     typeInfo ptrVal     = verVerifyLDIND(tiTo, instrType);
5287     typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack();
5288     if (!tiCompatibleWith(value, normPtrVal, true))
5289     {
5290         Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch");
5291         compUnsafeCastUsed = true;
5292     }
5293     return ptrVal;
5294 }
5295
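// Verify an ld*ind style load: the pointer must be a byref whose pointed-at type is consistent
// with the type the instruction expects. Returns the pointed-at type.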
5296 typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType)
5297 {
5298     assert(!instrType.IsStruct());
5299
5300     typeInfo ptrVal;
5301     if (ptr.IsByRef())
5302     {
5303         ptrVal = DereferenceByRef(ptr);
5304         if (instrType.IsObjRef() && !ptrVal.IsObjRef())
5305         {
5306             Verify(false, "bad pointer");
5307             compUnsafeCastUsed = true;
5308         }
5309         else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal))
5310         {
5311             Verify(false, "pointer not consistent with instr");
5312             compUnsafeCastUsed = true;
5313         }
5314     }
5315     else
5316     {
5317         Verify(false, "pointer not byref");
5318         compUnsafeCastUsed = true;
5319     }
5320
5321     return ptrVal;
5322 }
5323
5324 // Verify that the field is used properly.  'tiThis' is NULL for statics,
5325 // 'fieldFlags' is the fields attributes, and mutator is TRUE if it is a
5326 // ld*flda or a st*fld.
5327 // 'enclosingClass' is given if we are accessing a field in some specific type.
5328
5329 void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN*   pResolvedToken,
5330                               const CORINFO_FIELD_INFO& fieldInfo,
5331                               const typeInfo*           tiThis,
5332                               BOOL                      mutator,
5333                               BOOL                      allowPlainStructAsThis)
5334 {
5335     CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass;
5336     unsigned             fieldFlags     = fieldInfo.fieldFlags;
5337     CORINFO_CLASS_HANDLE instanceClass =
5338         info.compClassHnd; // for statics, we imagine the instance is the current class.
5339
5340     bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0);
5341     if (mutator)
5342     {
5343         Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA based static");
5344         if ((fieldFlags & CORINFO_FLG_FIELD_FINAL))
5345         {
5346             Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd &&
5347                        info.compIsStatic == isStaticField,
5348                    "bad use of initonly field (set or address taken)");
5349         }
5350     }
5351
5352     if (tiThis == nullptr)
5353     {
5354         Verify(isStaticField, "used static opcode with non-static field");
5355     }
5356     else
5357     {
5358         typeInfo tThis = *tiThis;
5359
5360         if (allowPlainStructAsThis && tThis.IsValueClass())
5361         {
5362             tThis.MakeByRef();
5363         }
5364
5365         // If it is null, we assume we can access it (since it will AV shortly)
5366         // If it is anything but a reference class, there is no hierarchy, so
5367         // again, we don't need the precise instance class to compute 'protected' access
5368         if (tiThis->IsType(TI_REF))
5369         {
5370             instanceClass = tiThis->GetClassHandleForObjRef();
5371         }
5372
5373         // Note that even if the field is static, we require that the this pointer
5374         // satisfy the same constraints as a non-static field.  This happens to
5375         // be simpler and seems reasonable
5376         typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass);
5377         if (tiDeclaredThis.IsValueClass())
5378         {
5379             tiDeclaredThis.MakeByRef();
5380
5381             // we allow read-only tThis, on any field access (even stores!), because if the
5382             // class implementor wants to prohibit stores he should make the field private.
5383             // we do this by setting the read-only bit on the type we compare tThis to.
5384             tiDeclaredThis.SetIsReadonlyByRef();
5385         }
5386         else if (verTrackObjCtorInitState && tThis.IsThisPtr())
5387         {
5388             // Any field access is legal on "uninitialized" this pointers.
5389             // The easiest way to implement this is to simply set the
5390             // initialized bit for the duration of the type check on the
5391             // field access only.  It does not change the state of the "this"
5392             // for the function as a whole. Note that the "tThis" is a copy
5393             // of the original "this" type (*tiThis) passed in.
5394             tThis.SetInitialisedObjRef();
5395         }
5396
5397         Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch");
5398     }
5399
5400     // Presently the JIT does not check that we don't store or take the address of init-only fields
5401     // since we cannot guarantee their immutability and it is not a security issue.
5402
5403     // check any constraints on the field's class --- accessing the field might cause a class constructor to run.
5404     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass),
5405                    "field has unsatisfied class constraints");
5406     if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED)
5407     {
5408         Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass),
5409                "Accessing protected method through wrong type.");
5410     }
5411 }
5412
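// verVerifyCond: verify that the operand types of a comparison or conditional
// branch opcode are compatible with each other and with the opcode.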
5413 void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode)
5414 {
5415     if (tiOp1.IsNumberType())
5416     {
5417 #ifdef _TARGET_64BIT_
5418         Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch");
5419 #else  // _TARGET_64BIT
5420         // [10/17/2013] Consider changing this: to put on my verification lawyer hat,
5421         // this is non-conforming to the ECMA Spec: types don't have to be equivalent,
5422         // but compatible, since we can coalesce native int with int32 (see section III.1.5).
5423         Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch");
5424 #endif // !_TARGET_64BIT_
5425     }
5426     else if (tiOp1.IsObjRef())
5427     {
5428         switch (opcode)
5429         {
5430             case CEE_BEQ_S:
5431             case CEE_BEQ:
5432             case CEE_BNE_UN_S:
5433             case CEE_BNE_UN:
5434             case CEE_CEQ:
5435             case CEE_CGT_UN:
5436                 break;
5437             default:
5438                 Verify(FALSE, "Cond not allowed on object types");
5439         }
5440         Verify(tiOp2.IsObjRef(), "Cond type mismatch");
5441     }
5442     else if (tiOp1.IsByRef())
5443     {
5444         Verify(tiOp2.IsByRef(), "Cond type mismatch");
5445     }
5446     else
5447     {
5448         Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch");
5449     }
5450 }
5451
5452 void Compiler::verVerifyThisPtrInitialised()
5453 {
5454     if (verTrackObjCtorInitState)
5455     {
5456         Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized");
5457     }
5458 }
5459
5460 BOOL Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target)
5461 {
5462     // Either target == context, in which case we are calling an alternate .ctor,
5463     // or target is the immediate parent of context.
5464
5465     return ((target == context) || (target == info.compCompHnd->getParentType(context)));
5466 }
5467
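//------------------------------------------------------------------------
// impImportLdvirtftn: import a CEE_LDVIRTFTN, producing a tree that computes
//   the target address of the virtual method for the given 'this' object.
//
// Notes:
//   The result typically feeds a delegate construction sequence
//   (dup; ldvirtftn <method>; newobj <delegate ctor>). Outside of the
//   CoreRT and ReadyToRun fast paths below, the lookup is performed at
//   runtime via the CORINFO_HELP_VIRTUAL_FUNC_PTR helper.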
5468 GenTree* Compiler::impImportLdvirtftn(GenTree*                thisPtr,
5469                                       CORINFO_RESOLVED_TOKEN* pResolvedToken,
5470                                       CORINFO_CALL_INFO*      pCallInfo)
5471 {
5472     if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE))
5473     {
5474         NO_WAY("Virtual call to a function added via EnC is not supported");
5475     }
5476
5477     // CoreRT generic virtual method
5478     if ((pCallInfo->sig.sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
5479     {
5480         GenTree* runtimeMethodHandle = nullptr;
5481         if (pCallInfo->exactContextNeedsRuntimeLookup)
5482         {
5483             runtimeMethodHandle =
5484                 impRuntimeLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, pCallInfo->hMethod);
5485         }
5486         else
5487         {
5488             runtimeMethodHandle = gtNewIconEmbMethHndNode(pResolvedToken->hMethod);
5489         }
5490         return gtNewHelperCallNode(CORINFO_HELP_GVMLOOKUP_FOR_SLOT, TYP_I_IMPL,
5491                                    gtNewArgList(thisPtr, runtimeMethodHandle));
5492     }
5493
5494 #ifdef FEATURE_READYTORUN_COMPILER
5495     if (opts.IsReadyToRun())
5496     {
5497         if (!pCallInfo->exactContextNeedsRuntimeLookup)
5498         {
5499             GenTreeCall* call =
5500                 gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, gtNewArgList(thisPtr));
5501
5502             call->setEntryPoint(pCallInfo->codePointerLookup.constLookup);
5503
5504             return call;
5505         }
5506
5507         // We need a runtime lookup. CoreRT has a ReadyToRun helper for that too.
5508         if (IsTargetAbi(CORINFO_CORERT_ABI))
5509         {
5510             GenTree* ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind);
5511
5512             return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
5513                                              gtNewArgList(ctxTree), &pCallInfo->codePointerLookup.lookupKind);
5514         }
5515     }
5516 #endif
5517
5518     // Get the exact descriptor for the static callsite
5519     GenTree* exactTypeDesc = impParentClassTokenToHandle(pResolvedToken);
5520     if (exactTypeDesc == nullptr)
5521     { // compDonotInline()
5522         return nullptr;
5523     }
5524
5525     GenTree* exactMethodDesc = impTokenToHandle(pResolvedToken);
5526     if (exactMethodDesc == nullptr)
5527     { // compDonotInline()
5528         return nullptr;
5529     }
5530
5531     GenTreeArgList* helpArgs = gtNewArgList(exactMethodDesc);
5532
5533     helpArgs = gtNewListNode(exactTypeDesc, helpArgs);
5534
5535     helpArgs = gtNewListNode(thisPtr, helpArgs);
5536
5537     // Call helper function.  This gets the target address of the final destination callsite.
5538
5539     return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, helpArgs);
5540 }
5541
5542 //------------------------------------------------------------------------
5543 // impImportAndPushBox: build and import a value-type box
5544 //
5545 // Arguments:
5546 //   pResolvedToken - resolved token from the box operation
5547 //
5548 // Return Value:
5549 //   None.
5550 //
5551 // Side Effects:
5552 //   The value to be boxed is popped from the stack, and a tree for
5553 //   the boxed value is pushed. This method may create upstream
5554 //   statements, spill side effecting trees, and create new temps.
5555 //
5556 //   If importing an inlinee, we may also discover the inline must
5557 //   fail. If so there is no new value pushed on the stack. Callers
5558 //   should use CompDoNotInline after calling this method to see if
5559 //   ongoing importation should be aborted.
5560 //
5561 // Notes:
5562 //   Boxing of ref classes results in the same value as the value on
5563 //   the top of the stack, so is handled inline in impImportBlockCode
5564 //   for the CEE_BOX case. Only value or primitive type boxes make it
5565 //   here.
5566 //
5567 //   Boxing for nullable types is done via a helper call; boxing
5568 //   of other value types is expanded inline or handled via helper
5569 //   call, depending on the jit's codegen mode.
5570 //
5571 //   When the jit is operating in size and time constrained modes,
5572 //   using a helper call here can save jit time and code size. But it
5573 //   also may inhibit cleanup optimizations that could have had an
5574 //   even greater effect on code size and jit time. An optimal
5575 //   strategy may need to peek ahead and see if it is easy to tell how
5576 //   the box is being used. For now, we defer.
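//
//   For reference, the helper expansion emitted below is essentially
//
//      box(expr)  ==>  CORINFO_HELP_BOX(clsHnd, &expr)
//
//   (or CORINFO_HELP_BOX_NULLABLE for nullable types), while the inline
//   expansion allocates the boxed object explicitly and copies the payload
//   into it.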
5577
5578 void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
5579 {
5580     // Spill any special side effects
5581     impSpillSpecialSideEff();
5582
5583     // Get the expression to box from the stack.
5584     GenTree*             op1       = nullptr;
5585     GenTree*             op2       = nullptr;
5586     StackEntry           se        = impPopStack();
5587     CORINFO_CLASS_HANDLE operCls   = se.seTypeInfo.GetClassHandle();
5588     GenTree*             exprToBox = se.val;
5589
5590     // Look at what helper we should use.
5591     CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
5592
5593     // Determine what expansion to prefer.
5594     //
5595     // In size/time/debuggable constrained modes, the helper call
5596     // expansion for box is generally smaller and is preferred, unless
5597     // the value to box is a struct that comes from a call. In that
5598     // case the call can construct its return value directly into the
5599     // box payload, saving possibly some up-front zeroing.
5600     //
5601     // Currently primitive type boxes always get inline expanded. We may
5602     // want to do the same for small structs if they don't come from
5603     // calls and don't have GC pointers, since explicitly copying such
5604     // structs is cheap.
5605     JITDUMP("\nCompiler::impImportAndPushBox -- handling BOX(value class) via");
5606     bool canExpandInline = (boxHelper == CORINFO_HELP_BOX);
5607     bool optForSize      = !exprToBox->IsCall() && (operCls != nullptr) && (opts.compDbgCode || opts.MinOpts());
5608     bool expandInline    = canExpandInline && !optForSize;
5609
5610     if (expandInline)
5611     {
5612         JITDUMP(" inline allocate/copy sequence\n");
5613
5614         // we are doing 'normal' boxing.  This means that we can inline the box operation
5615         // Box(expr) gets morphed into
5616         // temp = new(clsHnd)
5617         // cpobj(temp+4, expr, clsHnd)
5618         // push temp
5619         // The code paths differ slightly below for structs and primitives because
5620         // "cpobj" differs in these cases.  In one case you get
5621         //    impAssignStructPtr(temp+4, expr, clsHnd)
5622         // and the other you get
5623         //    *(temp+4) = expr
5624
5625         if (opts.MinOpts() || opts.compDbgCode)
5626         {
5627             // For minopts/debug code, try and minimize the total number
5628             // of box temps by reusing an existing temp when possible.
5629             if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM)
5630             {
5631                 impBoxTemp = lvaGrabTemp(true DEBUGARG("Reusable Box Helper"));
5632             }
5633         }
5634         else
5635         {
5636             // When optimizing, use a new temp for each box operation
5637             // since we then know the exact class of the box temp.
5638             impBoxTemp                  = lvaGrabTemp(true DEBUGARG("Single-def Box Helper"));
5639             lvaTable[impBoxTemp].lvType = TYP_REF;
5640             const bool isExact          = true;
5641             lvaSetClass(impBoxTemp, pResolvedToken->hClass, isExact);
5642         }
5643
5644         // The box temp needs to stay in use until this box expression is appended to
5645         // some other node.  We approximate this by keeping it alive until
5646         // the opcode stack becomes empty.
5647         impBoxTempInUse = true;
5648
5649 #ifdef FEATURE_READYTORUN_COMPILER
5650         bool usingReadyToRunHelper = false;
5651
5652         if (opts.IsReadyToRun())
5653         {
5654             op1                   = impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
5655             usingReadyToRunHelper = (op1 != nullptr);
5656         }
5657
5658         if (!usingReadyToRunHelper)
5659 #endif
5660         {
5661             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
5662             // and the newfast call with a single call to a dynamic R2R cell that will:
5663             //      1) Load the context
5664             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
5665             //      3) Allocate and return the new object for boxing
5666             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
5667
5668             // Ensure that the value class is restored
5669             op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5670             if (op2 == nullptr)
5671             {
5672                 // We must be backing out of an inline.
5673                 assert(compDonotInline());
5674                 return;
5675             }
5676
5677             op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(pResolvedToken, info.compMethodHnd),
5678                                     pResolvedToken->hClass, TYP_REF, op2);
5679         }
5680
5681         /* Remember that this basic block contains 'new' of an object, and so does this method */
5682         compCurBB->bbFlags |= BBF_HAS_NEWOBJ;
5683         optMethodFlags |= OMF_HAS_NEWOBJ;
5684
5685         GenTree* asg = gtNewTempAssign(impBoxTemp, op1);
5686
5687         GenTree* asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
5688
5689         op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
5690         op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
5691         op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2);
5692
5693         if (varTypeIsStruct(exprToBox))
5694         {
5695             assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls));
5696             op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL);
5697         }
5698         else
5699         {
5700             var_types lclTyp = exprToBox->TypeGet();
5701             if (lclTyp == TYP_BYREF)
5702             {
5703                 lclTyp = TYP_I_IMPL;
5704             }
5705             CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass);
5706             if (impIsPrimitive(jitType))
5707             {
5708                 lclTyp = JITtype2varType(jitType);
5709             }
5710             assert(genActualType(exprToBox->TypeGet()) == genActualType(lclTyp) ||
5711                    varTypeIsFloating(lclTyp) == varTypeIsFloating(exprToBox->TypeGet()));
5712             var_types srcTyp = exprToBox->TypeGet();
5713             var_types dstTyp = lclTyp;
5714
5715             if (srcTyp != dstTyp)
5716             {
5717                 assert((varTypeIsFloating(srcTyp) && varTypeIsFloating(dstTyp)) ||
5718                        (varTypeIsIntegral(srcTyp) && varTypeIsIntegral(dstTyp)));
5719                 exprToBox = gtNewCastNode(dstTyp, exprToBox, dstTyp);
5720             }
5721             op1 = gtNewAssignNode(gtNewOperNode(GT_IND, lclTyp, op1), exprToBox);
5722         }
5723
5724         // Spill eval stack to flush out any pending side effects.
5725         impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportAndPushBox"));
5726
5727         // Set up this copy as a second assignment.
5728         GenTree* copyStmt = impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
5729
5730         op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
5731
5732         // Record that this is a "box" node and keep track of the matching parts.
5733         op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt, copyStmt);
5734
5735         // If it is a value class, mark the "box" node.  We can use this information
5736         // to optimise several cases:
5737         //    "box(x) == null" --> false
5738         //    "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod"
5739         //    "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod"
5740
5741         op1->gtFlags |= GTF_BOX_VALUE;
5742         assert(op1->IsBoxedValue());
5743         assert(asg->gtOper == GT_ASG);
5744     }
5745     else
5746     {
5747         // Don't optimize, just call the helper and be done with it.
5748         JITDUMP(" helper call because: %s\n", canExpandInline ? "optimizing for size" : "nullable");
5749         assert(operCls != nullptr);
5750
5751         // Ensure that the value class is restored
5752         op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5753         if (op2 == nullptr)
5754         {
5755             // We must be backing out of an inline.
5756             assert(compDonotInline());
5757             return;
5758         }
5759
5760         GenTreeArgList* args = gtNewArgList(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true));
5761         op1                  = gtNewHelperCallNode(boxHelper, TYP_REF, args);
5762     }
5763
5764     /* Push the result back on the stack, */
5765     /* even if clsHnd is a value class we want the TI_REF */
5766     typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass));
5767     impPushOnStack(op1, tiRetVal);
5768 }
5769
5770 //------------------------------------------------------------------------
5771 // impImportNewObjArray: Build and import `new` of multi-dimensional array
5772 //
5773 // Arguments:
5774 //    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5775 //                     by a call to CEEInfo::resolveToken().
5776 //    pCallInfo - The CORINFO_CALL_INFO that has been initialized
5777 //                by a call to CEEInfo::getCallInfo().
5778 //
5779 // Assumptions:
5780 //    The multi-dimensional array constructor arguments (array dimensions) are
5781 //    pushed on the IL stack on entry to this method.
5782 //
5783 // Notes:
5784 //    Multi-dimensional array constructors are imported as calls to a JIT
5785 //    helper, not as regular calls.
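//
//    For example, a C# "new int[2,3]" pops the two dimension values pushed by
//    the IL and, on the non-varargs path below, becomes roughly
//
//        CORINFO_HELP_NEW_MDARR_NONVARARG(arrayClsHnd, 2, &dims)
//
//    where 'dims' is the shared lvaNewObjArrayArgs block holding the two
//    int32 dimensions.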
5786
5787 void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
5788 {
5789     GenTree* classHandle = impParentClassTokenToHandle(pResolvedToken);
5790     if (classHandle == nullptr)
5791     { // compDonotInline()
5792         return;
5793     }
5794
5795     assert(pCallInfo->sig.numArgs);
5796
5797     GenTree*        node;
5798     GenTreeArgList* args;
5799
5800     //
5801     // There are two different JIT helpers that can be used to allocate
5802     // multi-dimensional arrays:
5803     //
5804     // - CORINFO_HELP_NEW_MDARR - takes the array dimensions as varargs.
5805     //      This variant is deprecated. It should be eventually removed.
5806     //
5807     // - CORINFO_HELP_NEW_MDARR_NONVARARG - takes the array dimensions as
5808     //      pointer to block of int32s. This variant is more portable.
5809     //
5810     // The non-varargs helper is enabled for CoreRT only for now. Enabling this
5811     // unconditionally would require ReadyToRun version bump.
5812     //
5813     CLANG_FORMAT_COMMENT_ANCHOR;
5814
5815     if (!opts.IsReadyToRun() || IsTargetAbi(CORINFO_CORERT_ABI))
5816     {
5817
5818         // Reuse the temp used to pass the array dimensions to avoid bloating
5819         // the stack frame in case there are multiple calls to multi-dim array
5820         // constructors within a single method.
5821         if (lvaNewObjArrayArgs == BAD_VAR_NUM)
5822         {
5823             lvaNewObjArrayArgs                       = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs"));
5824             lvaTable[lvaNewObjArrayArgs].lvType      = TYP_BLK;
5825             lvaTable[lvaNewObjArrayArgs].lvExactSize = 0;
5826         }
5827
5828         // Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers
5829         // for our call to CORINFO_HELP_NEW_MDARR_NONVARARG.
5830         lvaTable[lvaNewObjArrayArgs].lvExactSize =
5831             max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32));
5832
5833         // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects
5834         // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments
5835         // to one allocation at a time.
5836         impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray"));
5837
5838         //
5839         // The arguments of the CORINFO_HELP_NEW_MDARR_NONVARARG helper are:
5840         //  - Array class handle
5841         //  - Number of dimension arguments
5842         //  - Pointer to block of int32 dimensions - address  of lvaNewObjArrayArgs temp.
5843         //
5844
5845         node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5846         node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node);
5847
5848         // Pop the dimension arguments from the stack one at a time and store them
5849         // into the lvaNewObjArrayArgs temp.
5850         for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--)
5851         {
5852             GenTree* arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT);
5853
5854             GenTree* dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5855             dest          = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest);
5856             dest          = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest,
5857                                  new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i));
5858             dest = gtNewOperNode(GT_IND, TYP_INT, dest);
5859
5860             node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node);
5861         }
5862
5863         args = gtNewArgList(node);
5864
5865         // pass number of arguments to the helper
5866         args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5867
5868         args = gtNewListNode(classHandle, args);
5869
5870         node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR_NONVARARG, TYP_REF, args);
5871     }
5872     else
5873     {
5874         //
5875         // The varargs helper needs the type and method handles as last
5876         // and  last-1 param (this is a cdecl call, so args will be
5877         // pushed in reverse order on the CPU stack)
5878         //
5879
5880         args = gtNewArgList(classHandle);
5881
5882         // pass number of arguments to the helper
5883         args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5884
5885         unsigned argFlags = 0;
5886         args              = impPopList(pCallInfo->sig.numArgs, &pCallInfo->sig, args);
5887
5888         node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, args);
5889
5890         // varargs, so we pop the arguments
5891         node->gtFlags |= GTF_CALL_POP_ARGS;
5892
5893 #ifdef DEBUG
5894         // At the present time we don't track Caller pop arguments
5895         // that have GC references in them
5896         for (GenTreeArgList* temp = args; temp; temp = temp->Rest())
5897         {
5898             assert(temp->Current()->gtType != TYP_REF);
5899         }
5900 #endif
5901     }
5902
5903     node->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5904     node->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass;
5905
5906     // Remember that this basic block contains 'new' of a md array
5907     compCurBB->bbFlags |= BBF_HAS_NEWARRAY;
5908
5909     impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass));
5910 }
5911
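//------------------------------------------------------------------------
// impTransformThis: transform the 'this' pointer of a constrained call
//   according to the transform requested by the EE.
//
// Notes:
//   CORINFO_DEREF_THIS dereferences the byref to yield the object reference,
//   CORINFO_BOX_THIS boxes the value type the byref points to (needed when
//   the only available implementation is on System.Object or
//   System.ValueType), and CORINFO_NO_THIS_TRANSFORM leaves the pointer
//   unchanged.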
5912 GenTree* Compiler::impTransformThis(GenTree*                thisPtr,
5913                                     CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
5914                                     CORINFO_THIS_TRANSFORM  transform)
5915 {
5916     switch (transform)
5917     {
5918         case CORINFO_DEREF_THIS:
5919         {
5920             GenTree* obj = thisPtr;
5921
5922             // This does an LDIND on the obj, which should be a byref pointing to a ref.
5923             impBashVarAddrsToI(obj);
5924             assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF);
5925             CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5926
5927             obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj);
5928             // ldind could point anywhere, for example a boxed class static int
5929             obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
5930
5931             return obj;
5932         }
5933
5934         case CORINFO_BOX_THIS:
5935         {
5936             // Constraint calls where there might be no
5937             // unboxed entry point require us to implement the call via helper.
5938             // These only occur when a possible target of the call
5939             // may have inherited an implementation of an interface
5940             // method from System.Object or System.ValueType.  The EE does not provide us with
5941             // "unboxed" versions of these methods.
5942
5943             GenTree* obj = thisPtr;
5944
5945             assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL);
5946             obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj);
5947             obj->gtFlags |= GTF_EXCEPT;
5948
5949             CorInfoType jitTyp  = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5950             var_types   objType = JITtype2varType(jitTyp);
5951             if (impIsPrimitive(jitTyp))
5952             {
5953                 if (obj->OperIsBlk())
5954                 {
5955                     obj->ChangeOperUnchecked(GT_IND);
5956
5957                     // Obj could point anywhere, for example a boxed class static int
5958                     obj->gtFlags |= GTF_IND_TGTANYWHERE;
5959                     obj->gtOp.gtOp2 = nullptr; // must be zero for tree walkers
5960                 }
5961
5962                 obj->gtType = JITtype2varType(jitTyp);
5963                 assert(varTypeIsArithmetic(obj->gtType));
5964             }
5965
5966             // This pushes on the dereferenced byref
5967             // This is then used immediately to box.
5968             impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack());
5969
5970             // This pops off the byref-to-a-value-type remaining on the stack and
5971             // replaces it with a boxed object.
5972             // This is then used as the object to the virtual call immediately below.
5973             impImportAndPushBox(pConstrainedResolvedToken);
5974             if (compDonotInline())
5975             {
5976                 return nullptr;
5977             }
5978
5979             obj = impPopStack().val;
5980             return obj;
5981         }
5982         case CORINFO_NO_THIS_TRANSFORM:
5983         default:
5984             return thisPtr;
5985     }
5986 }
5987
5988 //------------------------------------------------------------------------
5989 // impCanPInvokeInline: check whether PInvoke inlining should be enabled in the current method.
5990 //
5991 // Return Value:
5992 //    true if PInvoke inlining should be enabled in current method, false otherwise
5993 //
5994 // Notes:
5995 //    Checks a number of ambient conditions where we could pinvoke but choose not to
5996
5997 bool Compiler::impCanPInvokeInline()
5998 {
5999     return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (compCodeOpt() != SMALL_CODE) &&
6000            (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
6001         ;
6002 }
6003
6004 //------------------------------------------------------------------------
6005 // impCanPInvokeInlineCallSite: basic legality checks using information
6006 // from a call to see if the call qualifies as an inline pinvoke.
6007 //
6008 // Arguments:
6009 //    block      - block containing the call, or for inlinees, block
6010 //                 containing the call being inlined
6011 //
6012 // Return Value:
6013 //    true if this call can legally qualify as an inline pinvoke, false otherwise
6014 //
6015 // Notes:
6016 //    For runtimes that support exception handling interop there are
6017 //    restrictions on using inline pinvoke in handler regions.
6018 //
6019 //    * We have to disable pinvoke inlining inside of filters because
6020 //    in case the main execution (i.e. in the try block) is inside
6021 //    unmanaged code, we cannot reuse the inlined stub (we still need
6022 //    the original state until we are in the catch handler)
6023 //
6024 //    * We disable pinvoke inlining inside handlers since the GSCookie
6025 //    is in the inlined Frame (see
6026 //    CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but
6027 //    this would not protect framelets/return-address of handlers.
6028 //
6029 //    These restrictions are currently also in place for CoreCLR but
6030 //    can be relaxed when coreclr/#8459 is addressed.
6031
6032 bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block)
6033 {
6034     if (block->hasHndIndex())
6035     {
6036         return false;
6037     }
6038
6039     // The remaining limitations do not apply to CoreRT
6040     if (IsTargetAbi(CORINFO_CORERT_ABI))
6041     {
6042         return true;
6043     }
6044
6045 #ifdef _TARGET_AMD64_
6046     // On x64, we disable pinvoke inlining inside of try regions.
6047     // Here is the comment from JIT64 explaining why:
6048     //
6049     //   [VSWhidbey: 611015] - because the jitted code links in the
6050     //   Frame (instead of the stub) we rely on the Frame not being
6051     //   'active' until inside the stub.  This normally happens by the
6052     //   stub setting the return address pointer in the Frame object
6053     //   inside the stub.  On a normal return, the return address
6054     //   pointer is zeroed out so the Frame can be safely re-used, but
6055     //   if an exception occurs, nobody zeros out the return address
6056     //   pointer.  Thus if we re-used the Frame object, it would go
6057     //   'active' as soon as we link it into the Frame chain.
6058     //
6059     //   Technically we only need to disable PInvoke inlining if we're
6060     //   in a handler or if we're in a try body with a catch or
6061     //   filter/except where other non-handler code in this method
6062     //   might run and try to re-use the dirty Frame object.
6063     //
6064     //   A desktop test case where this seems to matter is
6065     //   jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe
6066     if (block->hasTryIndex())
6067     {
6068         return false;
6069     }
6070 #endif // _TARGET_AMD64_
6071
6072     return true;
6073 }
6074
6075 //------------------------------------------------------------------------
6076 // impCheckForPInvokeCall: examine a call to see if it is a pinvoke and, if so,
6077 // whether it can be expressed as an inline pinvoke.
6078 //
6079 // Arguments:
6080 //    call       - tree for the call
6081 //    methHnd    - handle for the method being called (may be null)
6082 //    sig        - signature of the method being called
6083 //    mflags     - method flags for the method being called
6084 //    block      - block containing the call, or for inlinees, block
6085 //                 containing the call being inlined
6086 //
6087 // Notes:
6088 //   Sets GTF_CALL_M_PINVOKE on the call for pinvokes.
6089 //
6090 //   Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the
6091 //   call passes a combination of legality and profitability checks.
6092 //
6093 //   If GTF_CALL_UNMANAGED is set, increments info.compCallUnmanaged
6094
6095 void Compiler::impCheckForPInvokeCall(
6096     GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block)
6097 {
6098     CorInfoUnmanagedCallConv unmanagedCallConv;
6099
6100     // If VM flagged it as Pinvoke, flag the call node accordingly
6101     if ((mflags & CORINFO_FLG_PINVOKE) != 0)
6102     {
6103         call->gtCallMoreFlags |= GTF_CALL_M_PINVOKE;
6104     }
6105
6106     if (methHnd)
6107     {
6108         if ((mflags & CORINFO_FLG_PINVOKE) == 0 || (mflags & CORINFO_FLG_NOSECURITYWRAP) == 0)
6109         {
6110             return;
6111         }
6112
6113         unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd);
6114     }
6115     else
6116     {
6117         CorInfoCallConv callConv = CorInfoCallConv(sig->callConv & CORINFO_CALLCONV_MASK);
6118         if (callConv == CORINFO_CALLCONV_NATIVEVARARG)
6119         {
6120             // Used by the IL Stubs.
6121             callConv = CORINFO_CALLCONV_C;
6122         }
6123         static_assert_no_msg((unsigned)CORINFO_CALLCONV_C == (unsigned)CORINFO_UNMANAGED_CALLCONV_C);
6124         static_assert_no_msg((unsigned)CORINFO_CALLCONV_STDCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_STDCALL);
6125         static_assert_no_msg((unsigned)CORINFO_CALLCONV_THISCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_THISCALL);
6126         unmanagedCallConv = CorInfoUnmanagedCallConv(callConv);
6127
6128         assert(!call->gtCallCookie);
6129     }
6130
6131     if (unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_C && unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_STDCALL &&
6132         unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_THISCALL)
6133     {
6134         return;
6135     }
6136     optNativeCallCount++;
6137
6138     if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && methHnd == nullptr)
6139     {
6140         // PInvoke CALLI in IL stubs must be inlined
6141     }
6142     else
6143     {
6144         // Check legality
6145         if (!impCanPInvokeInlineCallSite(block))
6146         {
6147             return;
6148         }
6149
6150         // PInvoke CALL in IL stubs must be inlined on CoreRT. Skip the ambient conditions checks and
6151         // profitability checks
6152         if (!(opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && IsTargetAbi(CORINFO_CORERT_ABI)))
6153         {
6154             if (!impCanPInvokeInline())
6155             {
6156                 return;
6157             }
6158
6159             // Size-speed tradeoff: don't use inline pinvoke at rarely
6160             // executed call sites.  The non-inline version is more
6161             // compact.
6162             if (block->isRunRarely())
6163             {
6164                 return;
6165             }
6166         }
6167
6168         // The expensive check should be last
6169         if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig))
6170         {
6171             return;
6172         }
6173     }
6174
6175     JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s", info.compFullName));
6176
6177     call->gtFlags |= GTF_CALL_UNMANAGED;
6178     info.compCallUnmanaged++;
6179
6180     // AMD64 convention is same for native and managed
6181     if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_C)
6182     {
6183         call->gtFlags |= GTF_CALL_POP_ARGS;
6184     }
6185
6186     if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_THISCALL)
6187     {
6188         call->gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL;
6189     }
6190 }
6191
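//------------------------------------------------------------------------
// impImportIndirectCall: create the call node for a CALLI, using the function
//   pointer popped from the top of the stack as the call target. The pointer
//   is spilled to a temp first unless it is already a local, since it is
//   evaluated after the arguments.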
6192 GenTreeCall* Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, IL_OFFSETX ilOffset)
6193 {
6194     var_types callRetTyp = JITtype2varType(sig->retType);
6195
6196     /* The function pointer is on top of the stack - It may be a
6197      * complex expression. As it is evaluated after the args,
6198      * it may cause registered args to be spilled. Simply spill it.
6199      */
6200
6201     // Ignore this trivial case.
6202     if (impStackTop().val->gtOper != GT_LCL_VAR)
6203     {
6204         impSpillStackEntry(verCurrentState.esStackDepth - 1,
6205                            BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall"));
6206     }
6207
6208     /* Get the function pointer */
6209
6210     GenTree* fptr = impPopStack().val;
6211
6212     // The function pointer is typically sized to match the target pointer size.
6213     // However, stubgen IL optimization can change LDC.I8 to LDC.I4
6214     // See ILCodeStream::LowerOpcode
6215     assert(genActualType(fptr->gtType) == TYP_I_IMPL || genActualType(fptr->gtType) == TYP_INT);
6216
6217 #ifdef DEBUG
6218     // This temporary must never be converted to a double in stress mode,
6219     // because that can introduce a call to the cast helper after the
6220     // arguments have already been evaluated.
6221
6222     if (fptr->OperGet() == GT_LCL_VAR)
6223     {
6224         lvaTable[fptr->gtLclVarCommon.gtLclNum].lvKeepType = 1;
6225     }
6226 #endif
6227
6228     /* Create the call node */
6229
6230     GenTreeCall* call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
6231
6232     call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6233
6234     return call;
6235 }
6236
6237 /*****************************************************************************/
6238
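// impPopArgsForUnmanagedCall: pop the arguments of an unmanaged (pinvoke) call
// off the IL stack and attach them to the call node, spilling side effects as
// needed so they still execute in the original IL order even though the
// arguments are popped in reverse order for the native calling convention.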
6239 void Compiler::impPopArgsForUnmanagedCall(GenTree* call, CORINFO_SIG_INFO* sig)
6240 {
6241     assert(call->gtFlags & GTF_CALL_UNMANAGED);
6242
6243     /* Since we push the arguments in reverse order (i.e. right -> left)
6244      * spill any side effects from the stack
6245      *
6246      * OBS: If there is only one side effect we do not need to spill it
6247      *      thus we have to spill all side-effects except last one
6248      */
6249
6250     unsigned lastLevelWithSideEffects = UINT_MAX;
6251
6252     unsigned argsToReverse = sig->numArgs;
6253
6254     // For "thiscall", the first argument goes in a register. Since its
6255     // order does not need to be changed, we do not need to spill it
6256
6257     if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
6258     {
6259         assert(argsToReverse);
6260         argsToReverse--;
6261     }
6262
6263 #ifndef _TARGET_X86_
6264     // Don't reverse args on ARM or x64 - first four args always placed in regs in order
6265     argsToReverse = 0;
6266 #endif
6267
6268     for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++)
6269     {
6270         if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF)
6271         {
6272             assert(lastLevelWithSideEffects == UINT_MAX);
6273
6274             impSpillStackEntry(level,
6275                                BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect"));
6276         }
6277         else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT)
6278         {
6279             if (lastLevelWithSideEffects != UINT_MAX)
6280             {
6281                 /* We had a previous side effect - must spill it */
6282                 impSpillStackEntry(lastLevelWithSideEffects,
6283                                    BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect"));
6284
6285                 /* Record the level for the current side effect in case we will spill it */
6286                 lastLevelWithSideEffects = level;
6287             }
6288             else
6289             {
6290                 /* This is the first side effect encountered - record its level */
6291
6292                 lastLevelWithSideEffects = level;
6293             }
6294         }
6295     }
6296
6297     /* The argument list is now "clean" - no out-of-order side effects
6298      * Pop the argument list in reverse order */
6299
6300     GenTree* args = call->gtCall.gtCallArgs = impPopRevList(sig->numArgs, sig, sig->numArgs - argsToReverse);
6301
6302     if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
6303     {
6304         GenTree* thisPtr = args->Current();
6305         impBashVarAddrsToI(thisPtr);
6306         assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF);
6307     }
6308
6309     if (args)
6310     {
6311         call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
6312     }
6313 }
6314
6315 //------------------------------------------------------------------------
6316 // impInitClass: Build a node to initialize the class before accessing the
6317 //               field if necessary
6318 //
6319 // Arguments:
6320 //    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
6321 //                     by a call to CEEInfo::resolveToken().
6322 //
6323 // Return Value: If needed, a pointer to the node that will perform the class
6324 //               initialization.  Otherwise, nullptr.
6325 //
6326
6327 GenTree* Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken)
6328 {
6329     CorInfoInitClassResult initClassResult =
6330         info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle);
6331
6332     if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0)
6333     {
6334         return nullptr;
6335     }
6336     BOOL runtimeLookup;
6337
6338     GenTree* node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup);
6339
6340     if (node == nullptr)
6341     {
6342         assert(compDonotInline());
6343         return nullptr;
6344     }
6345
6346     if (runtimeLookup)
6347     {
6348         node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, gtNewArgList(node));
6349     }
6350     else
6351     {
6352         // Call the shared non-GC static helper, as it's the fastest.
6353         node = fgGetSharedCCtor(pResolvedToken->hClass);
6354     }
6355
6356     return node;
6357 }
6358
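// impImportStaticReadOnlyField: materialize the value of a static read-only
// field whose address is known at jit time as a constant node of type
// 'lclTyp', read directly from 'fldAddr'.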
6359 GenTree* Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp)
6360 {
6361     GenTree* op1 = nullptr;
6362
6363     switch (lclTyp)
6364     {
6365         int     ival;
6366         __int64 lval;
6367         double  dval;
6368
6369         case TYP_BOOL:
6370             ival = *((bool*)fldAddr);
6371             goto IVAL_COMMON;
6372
6373         case TYP_BYTE:
6374             ival = *((signed char*)fldAddr);
6375             goto IVAL_COMMON;
6376
6377         case TYP_UBYTE:
6378             ival = *((unsigned char*)fldAddr);
6379             goto IVAL_COMMON;
6380
6381         case TYP_SHORT:
6382             ival = *((short*)fldAddr);
6383             goto IVAL_COMMON;
6384
6385         case TYP_USHORT:
6386             ival = *((unsigned short*)fldAddr);
6387             goto IVAL_COMMON;
6388
6389         case TYP_UINT:
6390         case TYP_INT:
6391             ival = *((int*)fldAddr);
6392         IVAL_COMMON:
6393             op1 = gtNewIconNode(ival);
6394             break;
6395
6396         case TYP_LONG:
6397         case TYP_ULONG:
6398             lval = *((__int64*)fldAddr);
6399             op1  = gtNewLconNode(lval);
6400             break;
6401
6402         case TYP_FLOAT:
6403             dval = *((float*)fldAddr);
6404             op1  = gtNewDconNode(dval);
6405 #if !FEATURE_X87_DOUBLES
6406             // X87 stack doesn't differentiate between float/double
6407             // so R4 is treated as R8, but everybody else does
6408             op1->gtType = TYP_FLOAT;
6409 #endif // FEATURE_X87_DOUBLES
6410             break;
6411
6412         case TYP_DOUBLE:
6413             dval = *((double*)fldAddr);
6414             op1  = gtNewDconNode(dval);
6415             break;
6416
6417         default:
6418             assert(!"Unexpected lclTyp");
6419             break;
6420     }
6421
6422     return op1;
6423 }
6424
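// impImportStaticFieldAccess: build the tree for a static field access (the
// field's value, or its address when CORINFO_ACCESS_ADDRESS is requested),
// selecting between the statics access helpers and direct address modes based
// on pFieldInfo->fieldAccessor.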
6425 GenTree* Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
6426                                               CORINFO_ACCESS_FLAGS    access,
6427                                               CORINFO_FIELD_INFO*     pFieldInfo,
6428                                               var_types               lclTyp)
6429 {
6430     GenTree* op1;
6431
6432     switch (pFieldInfo->fieldAccessor)
6433     {
6434         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
6435         {
6436             assert(!compIsForInlining());
6437
6438             // We first call a special helper to get the statics base pointer
6439             op1 = impParentClassTokenToHandle(pResolvedToken);
6440
6441             // compIsForInlining() is false so we should never get NULL here
6442             assert(op1 != nullptr);
6443
6444             var_types type = TYP_BYREF;
6445
6446             switch (pFieldInfo->helper)
6447             {
6448                 case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
6449                     type = TYP_I_IMPL;
6450                     break;
6451                 case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
6452                 case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
6453                 case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
6454                     break;
6455                 default:
6456                     assert(!"unknown generic statics helper");
6457                     break;
6458             }
6459
6460             op1 = gtNewHelperCallNode(pFieldInfo->helper, type, gtNewArgList(op1));
6461
6462             FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6463             op1              = gtNewOperNode(GT_ADD, type, op1,
6464                                 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
6465         }
6466         break;
6467
6468         case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
6469         {
6470 #ifdef FEATURE_READYTORUN_COMPILER
6471             if (opts.IsReadyToRun())
6472             {
6473                 unsigned callFlags = 0;
6474
6475                 if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
6476                 {
6477                     callFlags |= GTF_CALL_HOISTABLE;
6478                 }
6479
6480                 op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF);
6481                 op1->gtFlags |= callFlags;
6482
6483                 op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
6484             }
6485             else
6486 #endif
6487             {
6488                 op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper);
6489             }
6490
6491             {
6492                 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6493                 op1              = gtNewOperNode(GT_ADD, op1->TypeGet(), op1,
6494                                     new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, pFieldInfo->offset, fs));
6495             }
6496             break;
6497         }
6498
6499         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
6500         {
6501 #ifdef FEATURE_READYTORUN_COMPILER
6502             noway_assert(opts.IsReadyToRun());
6503             CORINFO_LOOKUP_KIND kind = info.compCompHnd->getLocationOfThisType(info.compMethodHnd);
6504             assert(kind.needsRuntimeLookup);
6505
6506             GenTree*        ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);
6507             GenTreeArgList* args    = gtNewArgList(ctxTree);
6508
6509             unsigned callFlags = 0;
6510
6511             if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
6512             {
6513                 callFlags |= GTF_CALL_HOISTABLE;
6514             }
6515             var_types type = TYP_BYREF;
6516             op1            = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, args);
6517             op1->gtFlags |= callFlags;
6518
6519             op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
6520             FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6521             op1              = gtNewOperNode(GT_ADD, type, op1,
6522                                 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
6523 #else
6524             unreached();
6525 #endif // FEATURE_READYTORUN_COMPILER
6526         }
6527         break;
6528
6529         default:
6530         {
6531             if (!(access & CORINFO_ACCESS_ADDRESS))
6532             {
6533                 // In future, it may be better to just create the right tree here instead of folding it later.
6534                 op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField);
6535
6536                 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
6537                 {
6538                     op1->gtFlags |= GTF_FLD_INITCLASS;
6539                 }
6540
6541                 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
6542                 {
6543                     op1->gtType = TYP_REF; // points at boxed object
6544                     FieldSeqNode* firstElemFldSeq =
6545                         GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
6546                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
6547                                         new (this, GT_CNS_INT)
6548                                             GenTreeIntCon(TYP_I_IMPL, TARGET_POINTER_SIZE, firstElemFldSeq));
6549
6550                     if (varTypeIsStruct(lclTyp))
6551                     {
6552                         // Constructor adds GTF_GLOB_REF.  Note that this is *not* GTF_EXCEPT.
6553                         op1 = gtNewObjNode(pFieldInfo->structType, op1);
6554                     }
6555                     else
6556                     {
6557                         op1 = gtNewOperNode(GT_IND, lclTyp, op1);
6558                         op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING;
6559                     }
6560                 }
6561
6562                 return op1;
6563             }
6564             else
6565             {
6566                 void** pFldAddr = nullptr;
6567                 void*  fldAddr  = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr);
6568
6569                 FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6570
6571                 /* Create the data member node */
6572                 op1 = gtNewIconHandleNode(pFldAddr == nullptr ? (size_t)fldAddr : (size_t)pFldAddr, GTF_ICON_STATIC_HDL,
6573                                           fldSeq);
6574
6575                 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
6576                 {
6577                     op1->gtFlags |= GTF_ICON_INITCLASS;
6578                 }
6579
6580                 if (pFldAddr != nullptr)
6581                 {
6582                     // There are two cases here, either the static is RVA based,
6583                     // in which case the type of the FIELD node is not a GC type
6584                     // and the handle to the RVA is a TYP_I_IMPL.  Or the FIELD node is
6585                     // a GC type and the handle to it is a TYP_BYREF in the GC heap
6586                     // because handles to statics now go into the large object heap
6587
6588                     var_types handleTyp = (var_types)(varTypeIsGC(lclTyp) ? TYP_BYREF : TYP_I_IMPL);
6589                     op1                 = gtNewOperNode(GT_IND, handleTyp, op1);
6590                     op1->gtFlags |= GTF_IND_INVARIANT | GTF_IND_NONFAULTING;
6591                 }
6592             }
6593             break;
6594         }
6595     }
6596
6597     if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
6598     {
6599         op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
6600
6601         FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
6602
6603         op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
6604                             new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, TARGET_POINTER_SIZE, fldSeq));
6605     }
6606
6607     if (!(access & CORINFO_ACCESS_ADDRESS))
6608     {
6609         op1 = gtNewOperNode(GT_IND, lclTyp, op1);
6610         op1->gtFlags |= GTF_GLOB_REF;
6611     }
6612
6613     return op1;
6614 }
6615
6616 // In general, try to call this before most of the verification work.  Most callers expect the access
6617 // exceptions before the verification exceptions.  If you do this after, that usually doesn't happen.  It turns
6618 // out that if you can't access something, we also consider you unverifiable for other reasons.
6619 void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6620 {
6621     if (result != CORINFO_ACCESS_ALLOWED)
6622     {
6623         impHandleAccessAllowedInternal(result, helperCall);
6624     }
6625 }
6626
6627 void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6628 {
6629     switch (result)
6630     {
6631         case CORINFO_ACCESS_ALLOWED:
6632             break;
6633         case CORINFO_ACCESS_ILLEGAL:
6634             // if we're verifying, then we need to reject the illegal access to ensure that we don't think the
6635             // method is verifiable.  Otherwise, delay the exception to runtime.
6636             if (compIsForImportOnly())
6637             {
6638                 info.compCompHnd->ThrowExceptionForHelper(helperCall);
6639             }
6640             else
6641             {
6642                 impInsertHelperCall(helperCall);
6643             }
6644             break;
6645         case CORINFO_ACCESS_RUNTIME_CHECK:
6646             impInsertHelperCall(helperCall);
6647             break;
6648     }
6649 }
6650
6651 void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo)
6652 {
6653     // Construct the argument list
6654     GenTreeArgList* args = nullptr;
6655     assert(helperInfo->helperNum != CORINFO_HELP_UNDEF);
6656     for (unsigned i = helperInfo->numArgs; i > 0; --i)
6657     {
6658         const CORINFO_HELPER_ARG& helperArg  = helperInfo->args[i - 1];
6659         GenTree*                  currentArg = nullptr;
6660         switch (helperArg.argType)
6661         {
6662             case CORINFO_HELPER_ARG_TYPE_Field:
6663                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
6664                     info.compCompHnd->getFieldClass(helperArg.fieldHandle));
6665                 currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle);
6666                 break;
6667             case CORINFO_HELPER_ARG_TYPE_Method:
6668                 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle);
6669                 currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle);
6670                 break;
6671             case CORINFO_HELPER_ARG_TYPE_Class:
6672                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle);
6673                 currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle);
6674                 break;
6675             case CORINFO_HELPER_ARG_TYPE_Module:
6676                 currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle);
6677                 break;
6678             case CORINFO_HELPER_ARG_TYPE_Const:
6679                 currentArg = gtNewIconNode(helperArg.constant);
6680                 break;
6681             default:
6682                 NO_WAY("Illegal helper arg type");
6683         }
6684         args = (currentArg == nullptr) ? gtNewArgList(currentArg) : gtNewListNode(currentArg, args);
6685     }
6686
6687     /* TODO-Review:
6688      * Mark as CSE'able, and hoistable.  Consider marking hoistable unless you're in the inlinee.
6689      * Also, consider sticking this in the first basic block.
6690      */
6691     GenTree* callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, args);
6692     impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6693 }
6694
6695 // Checks whether the return types of caller and callee are compatible
6696 // so that callee can be tail called. Note that here we don't check
6697 // compatibility in the IL Verifier sense, but only that the return type
6698 // sizes are equal and the values are returned in the same return register.
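// For example, on Windows x64 a caller declared to return one 8-byte struct
// can generally tail call a callee returning a different 8-byte struct: both
// values come back in RAX, so no normalization of the return value is needed.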
6699 bool Compiler::impTailCallRetTypeCompatible(var_types            callerRetType,
6700                                             CORINFO_CLASS_HANDLE callerRetTypeClass,
6701                                             var_types            calleeRetType,
6702                                             CORINFO_CLASS_HANDLE calleeRetTypeClass)
6703 {
6704     // Note that we can not relax this condition with genActualType() as the
6705     // calling convention dictates that the caller of a function with a small
6706     // typed return value is responsible for normalizing the return val.
6707     if (callerRetType == calleeRetType)
6708     {
6709         return true;
6710     }
6711
6712     // If the class handles are the same and not null, the return types are compatible.
6713     if ((callerRetTypeClass != nullptr) && (callerRetTypeClass == calleeRetTypeClass))
6714     {
6715         return true;
6716     }
6717
6718 #if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
6719     // Jit64 compat:
6720     if (callerRetType == TYP_VOID)
6721     {
6722         // This needs to be allowed to support the following IL pattern that Jit64 allows:
6723         //     tail.call
6724         //     pop
6725         //     ret
6726         //
6727         // Note that the above IL pattern is not valid as per IL verification rules.
6728         // Therefore, only full trust code can take advantage of this pattern.
6729         return true;
6730     }
6731
6732     // These checks return true if the return value type sizes are the same and
6733     // get returned in the same return register i.e. caller doesn't need to normalize
6734     // return value. Some of the tail calls permitted by below checks would have
6735     // been rejected by IL Verifier before we reached here.  Therefore, only full
6736     // trust code can make those tail calls.
6737     unsigned callerRetTypeSize = 0;
6738     unsigned calleeRetTypeSize = 0;
6739     bool     isCallerRetTypMBEnreg =
6740         VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize, true);
6741     bool isCalleeRetTypMBEnreg =
6742         VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize, true);
6743
6744     if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg)
6745     {
6746         return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
6747     }
6748 #endif // _TARGET_AMD64_ || _TARGET_ARM64_
6749
6750     return false;
6751 }
6752
6753 // For prefixFlags
6754 enum
6755 {
6756     PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix
6757     PREFIX_TAILCALL_IMPLICIT =
6758         0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix
6759     PREFIX_TAILCALL    = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT),
6760     PREFIX_VOLATILE    = 0x00000100,
6761     PREFIX_UNALIGNED   = 0x00001000,
6762     PREFIX_CONSTRAINED = 0x00010000,
6763     PREFIX_READONLY    = 0x00100000
6764 };
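// Note: each prefix flag occupies its own hex digit, and PREFIX_TAILCALL covers both forms,
// so (prefixFlags & PREFIX_TAILCALL) != 0 tests for either an explicit "tail." prefix or an
// implicit tail call.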
6765
6766 /********************************************************************************
6767  *
6768  * Returns true if the current opcode and the opcodes following it correspond
6769  * to a supported tail call IL pattern.
6770  *
6771  */
6772 bool Compiler::impIsTailCallILPattern(bool        tailPrefixed,
6773                                       OPCODE      curOpcode,
6774                                       const BYTE* codeAddrOfNextOpcode,
6775                                       const BYTE* codeEnd,
6776                                       bool        isRecursive,
6777                                       bool*       isCallPopAndRet /* = nullptr */)
6778 {
6779     // Bail out if the current opcode is not a call.
6780     if (!impOpcodeIsCallOpcode(curOpcode))
6781     {
6782         return false;
6783     }
6784
6785 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6786     // If the shared-return tail call optimization is not enabled, we still
6787     // allow it for recursive methods.
6788     if (isRecursive)
6789 #endif
6790     {
6791         // We can actually handle the case where the ret is in a fallthrough block, as long as that is the only
6792         // part of the sequence. Make sure we don't go past the end of the IL, however.
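        // (Extending codeEnd by one byte lets the scan below read the opcode that starts the
        // fallthrough block, while the min() keeps us inside the method's IL.)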
6793         codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize);
6794     }
6795
6796     // Bail out if there is no next opcode after call
6797     if (codeAddrOfNextOpcode >= codeEnd)
6798     {
6799         return false;
6800     }
6801
6802     // Scan the opcodes to look for the following IL patterns if either
6803     //   i) the call is not tail prefixed (i.e. implicit tail call) or
6804     //  ii) if tail prefixed, IL verification is not needed for the method.
6805     //
6806     // Only in the above two cases can we allow the tail call patterns below,
6807     // which violate the ECMA spec.
6808     //
6809     // Pattern1:
6810     //       call
6811     //       nop*
6812     //       ret
6813     //
6814     // Pattern2:
6815     //       call
6816     //       nop*
6817     //       pop
6818     //       nop*
6819     //       ret
6820     int    cntPop = 0;
6821     OPCODE nextOpcode;
6822
6823 #if !defined(FEATURE_CORECLR) && defined(_TARGET_AMD64_)
6824     do
6825     {
6826         nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6827         codeAddrOfNextOpcode += sizeof(__int8);
6828     } while ((codeAddrOfNextOpcode < codeEnd) &&         // Haven't reached end of method
6829              (!tailPrefixed || !tiVerificationNeeded) && // Not ".tail" prefixed or method requires no IL verification
6830              ((nextOpcode == CEE_NOP) || ((nextOpcode == CEE_POP) && (++cntPop == 1)))); // Next opcode = nop or exactly
6831                                                                                          // one pop seen so far.
6832 #else
6833     nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6834 #endif // !FEATURE_CORECLR && _TARGET_AMD64_
6835
6836     if (isCallPopAndRet)
6837     {
6838         // Allow call+pop+ret to be tail call optimized if caller ret type is void
6839         *isCallPopAndRet = (nextOpcode == CEE_RET) && (cntPop == 1);
6840     }
6841
6842 #if !defined(FEATURE_CORECLR) && defined(_TARGET_AMD64_)
6843     // Jit64 Compat:
6844     // Tail call IL pattern could be either of the following
6845     // 1) call/callvirt/calli + ret
6846     // 2) call/callvirt/calli + pop + ret in a method returning void.
6847     return (nextOpcode == CEE_RET) && ((cntPop == 0) || ((cntPop == 1) && (info.compRetType == TYP_VOID)));
6848 #else
6849     return (nextOpcode == CEE_RET) && (cntPop == 0);
6850 #endif // !FEATURE_CORECLR && _TARGET_AMD64_
6851 }
6852
6853 /*****************************************************************************
6854  *
6855  * Determine whether the call could be converted to an implicit tail call
6856  *
6857  */
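// For example, a plain "call ...; ret" sequence with no "tail." prefix can become an
// implicit tail call here, provided opts.compTailCallOpt is set and the remaining checks pass.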
6858 bool Compiler::impIsImplicitTailCallCandidate(
6859     OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive)
6860 {
6861
6862 #if FEATURE_TAILCALL_OPT
6863     if (!opts.compTailCallOpt)
6864     {
6865         return false;
6866     }
6867
6868     if (opts.compDbgCode || opts.MinOpts())
6869     {
6870         return false;
6871     }
6872
6873     // must not be tail prefixed
6874     if (prefixFlags & PREFIX_TAILCALL_EXPLICIT)
6875     {
6876         return false;
6877     }
6878
6879 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6880     // The block containing the call must be marked as BBJ_RETURN.
6881     // We allow shared ret tail call optimization on recursive calls even under
6882     // !FEATURE_TAILCALL_OPT_SHARED_RETURN.
6883     if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN))
6884         return false;
6885 #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN
6886
6887     // must be call+ret or call+pop+ret
6888     if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive))
6889     {
6890         return false;
6891     }
6892
6893     return true;
6894 #else
6895     return false;
6896 #endif // FEATURE_TAILCALL_OPT
6897 }
6898
6899 //------------------------------------------------------------------------
6900 // impImportCall: import a call-inspiring opcode
6901 //
6902 // Arguments:
6903 //    opcode                    - opcode that inspires the call
6904 //    pResolvedToken            - resolved token for the call target
6905 //    pConstrainedResolvedToken - resolved constraint token (or nullptr)
6906 //    newobjThis                - tree for this pointer or uninitialized newobj temp (or nullptr)
6907 //    prefixFlags               - IL prefix flags for the call
6908 //    callInfo                  - EE supplied info for the call
6909 //    rawILOffset               - IL offset of the opcode
6910 //
6911 // Returns:
6912 //    Type of the call's return value.
6913 //    If we're importing an inlinee and have realized the inline must fail, the call return type should be TYP_UNDEF.
6914 //    However we can't assert for this here yet because there are cases we miss. See issue #13272.
6915 //
6916 //
6917 // Notes:
6918 //    opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ.
6919 //
6920 //    For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated
6921 //    uninitialized object.
6922
6923 #ifdef _PREFAST_
6924 #pragma warning(push)
6925 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
6926 #endif
6927
6928 var_types Compiler::impImportCall(OPCODE                  opcode,
6929                                   CORINFO_RESOLVED_TOKEN* pResolvedToken,
6930                                   CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
6931                                   GenTree*                newobjThis,
6932                                   int                     prefixFlags,
6933                                   CORINFO_CALL_INFO*      callInfo,
6934                                   IL_OFFSET               rawILOffset)
6935 {
6936     assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI);
6937
6938     IL_OFFSETX             ilOffset                       = impCurILOffset(rawILOffset, true);
6939     var_types              callRetTyp                     = TYP_COUNT;
6940     CORINFO_SIG_INFO*      sig                            = nullptr;
6941     CORINFO_METHOD_HANDLE  methHnd                        = nullptr;
6942     CORINFO_CLASS_HANDLE   clsHnd                         = nullptr;
6943     unsigned               clsFlags                       = 0;
6944     unsigned               mflags                         = 0;
6945     unsigned               argFlags                       = 0;
6946     GenTree*               call                           = nullptr;
6947     GenTreeArgList*        args                           = nullptr;
6948     CORINFO_THIS_TRANSFORM constraintCallThisTransform    = CORINFO_NO_THIS_TRANSFORM;
6949     CORINFO_CONTEXT_HANDLE exactContextHnd                = nullptr;
6950     bool                   exactContextNeedsRuntimeLookup = false;
6951     bool                   canTailCall                    = true;
6952     const char*            szCanTailCallFailReason        = nullptr;
6953     int                    tailCall                       = prefixFlags & PREFIX_TAILCALL;
6954     bool                   readonlyCall                   = (prefixFlags & PREFIX_READONLY) != 0;
6955
6956     CORINFO_RESOLVED_TOKEN* ldftnToken = nullptr;
6957
6958     // Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could
6959     // do that before tailcalls, but that is probably not the intended
6960     // semantic. So just disallow tailcalls from synchronized methods.
6961     // Also, popping arguments in a varargs function is more work and is NYI.
6962     // If we have a security object, we have to keep our frame around for callers
6963     // to see any imperative security.
6964     if (info.compFlags & CORINFO_FLG_SYNCH)
6965     {
6966         canTailCall             = false;
6967         szCanTailCallFailReason = "Caller is synchronized";
6968     }
6969 #if !FEATURE_FIXED_OUT_ARGS
6970     else if (info.compIsVarArgs)
6971     {
6972         canTailCall             = false;
6973         szCanTailCallFailReason = "Caller is varargs";
6974     }
6975 #endif // FEATURE_FIXED_OUT_ARGS
6976     else if (opts.compNeedSecurityCheck)
6977     {
6978         canTailCall             = false;
6979         szCanTailCallFailReason = "Caller requires a security check.";
6980     }
6981
6982     // We only need to cast the return value of pinvoke inlined calls that return small types
6983
6984     // TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop
6985     // widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there.
6986     // The existing x64 JIT doesn't bother widening all types to int, so we have to assume for
6987     // the time being that the callee might be compiled by the other JIT and thus the return
6988     // value will need to be widened by us (or not widened at all...)
6989
6990     // ReadyToRun code sticks with default calling convention that does not widen small return types.
6991
6992     bool checkForSmallType  = opts.IsJit64Compat() || opts.IsReadyToRun();
6993     bool bIntrinsicImported = false;
6994
6995     CORINFO_SIG_INFO calliSig;
6996     GenTreeArgList*  extraArg = nullptr;
6997
6998     /*-------------------------------------------------------------------------
6999      * First create the call node
7000      */
7001
7002     if (opcode == CEE_CALLI)
7003     {
7004         /* Get the call site sig */
7005         eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &calliSig);
7006
7007         callRetTyp = JITtype2varType(calliSig.retType);
7008
7009         call = impImportIndirectCall(&calliSig, ilOffset);
7010
7011         // We don't know the target method, so we have to infer the flags, or
7012         // assume the worst case.
7013         mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
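        // For an indirect call the signature's HASTHIS bit is the only flag we can infer;
        // everything else about the target stays unknown/conservative.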
7014
7015 #ifdef DEBUG
7016         if (verbose)
7017         {
7018             unsigned structSize =
7019                 (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0;
7020             printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
7021                    opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
7022         }
7023 #endif
7024         // This should be checked in impImportBlockCode.
7025         assert(!compIsForInlining() || !(impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY));
7026
7027         sig = &calliSig;
7028
7029 #ifdef DEBUG
7030         // We cannot lazily obtain the signature of a CALLI call because it has no method
7031         // handle that we can use, so we need to save its full call signature here.
7032         assert(call->gtCall.callSig == nullptr);
7033         call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7034         *call->gtCall.callSig = calliSig;
7035 #endif // DEBUG
7036
7037         if (IsTargetAbi(CORINFO_CORERT_ABI))
7038         {
7039             bool managedCall = (((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_STDCALL) &&
7040                                 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_C) &&
7041                                 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_THISCALL) &&
7042                                 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_FASTCALL));
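            // Under the CoreRT ABI a managed calli target may be a "fat" function pointer
            // (shared generic code carrying a hidden instantiation argument), so mark the
            // call as a candidate to be checked and unwrapped by a later phase.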
7043             if (managedCall)
7044             {
7045                 addFatPointerCandidate(call->AsCall());
7046             }
7047         }
7048     }
7049     else // (opcode != CEE_CALLI)
7050     {
7051         CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Count;
7052
7053         // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to
7054         // supply the instantiation parameters necessary to make direct calls to underlying
7055         // shared generic code, rather than calling through instantiating stubs.  If the
7056         // returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT
7057         // must indeed pass an instantiation parameter.
7058
7059         methHnd = callInfo->hMethod;
7060
7061         sig        = &(callInfo->sig);
7062         callRetTyp = JITtype2varType(sig->retType);
7063
7064         mflags = callInfo->methodFlags;
7065
7066 #ifdef DEBUG
7067         if (verbose)
7068         {
7069             unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0;
7070             printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
7071                    opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
7072         }
7073 #endif
7074         if (compIsForInlining())
7075         {
7076             /* Does this call site have security boundary restrictions? */
7077
7078             if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
7079             {
7080                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
7081                 return TYP_UNDEF;
7082             }
7083
7084             /* Does the inlinee need a security check token on the frame */
7085
7086             if (mflags & CORINFO_FLG_SECURITYCHECK)
7087             {
7088                 compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
7089                 return TYP_UNDEF;
7090             }
7091
7092             /* Does the inlinee use StackCrawlMark */
7093
7094             if (mflags & CORINFO_FLG_DONT_INLINE_CALLER)
7095             {
7096                 compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK);
7097                 return TYP_UNDEF;
7098             }
7099
7100             /* For now ignore delegate invoke */
7101
7102             if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
7103             {
7104                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_DELEGATE_INVOKE);
7105                 return TYP_UNDEF;
7106             }
7107
7108             /* For now ignore varargs */
7109             if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7110             {
7111                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS);
7112                 return TYP_UNDEF;
7113             }
7114
7115             if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
7116             {
7117                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
7118                 return TYP_UNDEF;
7119             }
7120
7121             if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT))
7122             {
7123                 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL);
7124                 return TYP_UNDEF;
7125             }
7126         }
7127
7128         clsHnd = pResolvedToken->hClass;
7129
7130         clsFlags = callInfo->classFlags;
7131
7132 #ifdef DEBUG
7133         // If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute.
7134
7135         // This recognition should really be done by knowing the methHnd of the relevant Mark method(s).
7136         // These should be in mscorlib.h, and available through a JIT/EE interface call.
7137         const char* modName;
7138         const char* className;
7139         const char* methodName;
7140         if ((className = eeGetClassName(clsHnd)) != nullptr &&
7141             strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 &&
7142             (methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0)
7143         {
7144             return impImportJitTestLabelMark(sig->numArgs);
7145         }
7146 #endif // DEBUG
7147
7148         // <NICE> Factor this into getCallInfo </NICE>
7149         bool isSpecialIntrinsic = false;
7150         if ((mflags & (CORINFO_FLG_INTRINSIC | CORINFO_FLG_JIT_INTRINSIC)) != 0)
7151         {
7152             const bool isTail = canTailCall && (tailCall != 0);
7153
7154             call = impIntrinsic(newobjThis, clsHnd, methHnd, sig, mflags, pResolvedToken->token, readonlyCall, isTail,
7155                                 pConstrainedResolvedToken, callInfo->thisTransform, &intrinsicID, &isSpecialIntrinsic);
7156
7157             if (compDonotInline())
7158             {
7159                 return TYP_UNDEF;
7160             }
7161
7162             if (call != nullptr)
7163             {
7164                 assert(!(mflags & CORINFO_FLG_VIRTUAL) || (mflags & CORINFO_FLG_FINAL) ||
7165                        (clsFlags & CORINFO_FLG_FINAL));
7166
7167 #ifdef FEATURE_READYTORUN_COMPILER
7168                 if (call->OperGet() == GT_INTRINSIC)
7169                 {
7170                     if (opts.IsReadyToRun())
7171                     {
7172                         noway_assert(callInfo->kind == CORINFO_CALL);
7173                         call->gtIntrinsic.gtEntryPoint = callInfo->codePointerLookup.constLookup;
7174                     }
7175                     else
7176                     {
7177                         call->gtIntrinsic.gtEntryPoint.addr       = nullptr;
7178                         call->gtIntrinsic.gtEntryPoint.accessType = IAT_VALUE;
7179                     }
7180                 }
7181 #endif
7182
7183                 bIntrinsicImported = true;
7184                 goto DONE_CALL;
7185             }
7186         }
7187
7188 #ifdef FEATURE_SIMD
7189         if (featureSIMD)
7190         {
7191             call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, pResolvedToken->token);
7192             if (call != nullptr)
7193             {
7194                 bIntrinsicImported = true;
7195                 goto DONE_CALL;
7196             }
7197         }
7198 #endif // FEATURE_SIMD
7199
7200         if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT))
7201         {
7202             NO_WAY("Virtual call to a function added via EnC is not supported");
7203         }
7204
7205         if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT &&
7206             (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
7207             (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG)
7208         {
7209             BADCODE("Bad calling convention");
7210         }
7211
7212         //-------------------------------------------------------------------------
7213         //  Construct the call node
7214         //
7215         // Work out what sort of call we're making.
7216         // Dispense with virtual calls implemented via LDVIRTFTN immediately.
7217
7218         constraintCallThisTransform    = callInfo->thisTransform;
7219         exactContextHnd                = callInfo->contextHandle;
7220         exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup == TRUE;
7221
7222         // A recursive call is treated as a loop back to the beginning of the method.
7223         if (gtIsRecursiveCall(methHnd))
7224         {
7225 #ifdef DEBUG
7226             if (verbose)
7227             {
7228                 JITDUMP("\nFound recursive call in the method. Mark BB%02u to BB%02u as having a backward branch.\n",
7229                         fgFirstBB->bbNum, compCurBB->bbNum);
7230             }
7231 #endif
7232             fgMarkBackwardJump(fgFirstBB, compCurBB);
7233         }
7234
7235         switch (callInfo->kind)
7236         {
7237
7238             case CORINFO_VIRTUALCALL_STUB:
7239             {
7240                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7241                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
7242                 if (callInfo->stubLookup.lookupKind.needsRuntimeLookup)
7243                 {
7244
7245                     if (compIsForInlining())
7246                     {
7247                         // Don't import runtime lookups when inlining
7248                         // Inlining has to be aborted in such a case
7249                         /* XXX Fri 3/20/2009
7250                          * By the way, this would never succeed.  If the handle lookup is into the generic
7251                          * dictionary for a candidate, you'll generate different dictionary offsets and the
7252                          * inlined code will crash.
7253                          *
7254                          * To anyone code reviewing this, when could this ever succeed in the future?  It'll
7255                          * always have a handle lookup.  These lookups are safe intra-module, but we're just
7256                          * failing here.
7257                          */
7258                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE);
7259                         return TYP_UNDEF;
7260                     }
7261
7262                     GenTree* stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd);
7263                     assert(!compDonotInline());
7264
7265                     // This is the rough code to set up an indirect stub call
7266                     assert(stubAddr != nullptr);
7267
7268                     // The stubAddr may be a
7269                     // complex expression. As it is evaluated after the args,
7270                     // it may cause registered args to be spilled. Simply spill it.
7271
7272                     unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup"));
7273                     impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_ALL);
7274                     stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL);
7275
7276                     // Create the actual call node
7277
7278                     assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
7279                            (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
7280
7281                     call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr);
7282
7283                     call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT);
7284                     call->gtFlags |= GTF_CALL_VIRT_STUB;
7285
7286 #ifdef _TARGET_X86_
7287                     // No tailcalls allowed for these yet...
7288                     canTailCall             = false;
7289                     szCanTailCallFailReason = "VirtualCall with runtime lookup";
7290 #endif
7291                 }
7292                 else
7293                 {
7294                     // OK, the stub address is available at compile time.
7295
7296                     call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
7297                     call->gtCall.gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr;
7298                     call->gtFlags |= GTF_CALL_VIRT_STUB;
7299                     assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE);
7300                     if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE)
7301                     {
7302                         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
7303                     }
7304                 }
7305
7306 #ifdef FEATURE_READYTORUN_COMPILER
7307                 if (opts.IsReadyToRun())
7308                 {
7309                     // Null check is sometimes needed for ready to run to handle
7310                     // non-virtual <-> virtual changes between versions
7311                     if (callInfo->nullInstanceCheck)
7312                     {
7313                         call->gtFlags |= GTF_CALL_NULLCHECK;
7314                     }
7315                 }
7316 #endif
7317
7318                 break;
7319             }
7320
7321             case CORINFO_VIRTUALCALL_VTABLE:
7322             {
7323                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7324                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
7325                 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
7326                 call->gtFlags |= GTF_CALL_VIRT_VTABLE;
7327                 break;
7328             }
7329
7330             case CORINFO_VIRTUALCALL_LDVIRTFTN:
7331             {
7332                 if (compIsForInlining())
7333                 {
7334                     compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN);
7335                     return TYP_UNDEF;
7336                 }
7337
7338                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7339                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
7340                 // OK, we've been told to call via LDVIRTFTN, so just
7341                 // take the call now....
7342
7343                 args = impPopList(sig->numArgs, sig);
7344
7345                 GenTree* thisPtr = impPopStack().val;
7346                 thisPtr          = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
7347                 assert(thisPtr != nullptr);
7348
7349                 // Clone the (possibly transformed) "this" pointer
7350                 GenTree* thisPtrCopy;
7351                 thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
7352                                        nullptr DEBUGARG("LDVIRTFTN this pointer"));
7353
7354                 GenTree* fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
7355                 assert(fptr != nullptr);
7356
7357                 thisPtr = nullptr; // can't reuse it
7358
7359                 // Now make an indirect call through the function pointer
7360
7361                 unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer"));
7362                 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
7363                 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
7364
7365                 // Create the actual call node
7366
7367                 call                    = gtNewIndCallNode(fptr, callRetTyp, args, ilOffset);
7368                 call->gtCall.gtCallObjp = thisPtrCopy;
7369                 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
7370
7371                 if ((sig->sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
7372                 {
7373                     // CoreRT generic virtual method: need to handle potential fat function pointers
7374                     addFatPointerCandidate(call->AsCall());
7375                 }
7376 #ifdef FEATURE_READYTORUN_COMPILER
7377                 if (opts.IsReadyToRun())
7378                 {
7379                     // Null check is needed for ready to run to handle
7380                     // non-virtual <-> virtual changes between versions
7381                     call->gtFlags |= GTF_CALL_NULLCHECK;
7382                 }
7383 #endif
7384
7385                 // Since we are jumping over some code, check that it's OK to skip that code
7386                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
7387                        (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
7388                 goto DONE;
7389             }
7390
7391             case CORINFO_CALL:
7392             {
7393                 // This is for a non-virtual, non-interface etc. call
7394                 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
7395
7396                 // We remove the nullcheck for the GetType call intrinsic.
7397                 // TODO-CQ: JIT64 does not introduce the null check for many more helper calls
7398                 // and intrinsics.
7399                 if (callInfo->nullInstanceCheck &&
7400                     !((mflags & CORINFO_FLG_INTRINSIC) != 0 && (intrinsicID == CORINFO_INTRINSIC_Object_GetType)))
7401                 {
7402                     call->gtFlags |= GTF_CALL_NULLCHECK;
7403                 }
7404
7405 #ifdef FEATURE_READYTORUN_COMPILER
7406                 if (opts.IsReadyToRun())
7407                 {
7408                     call->gtCall.setEntryPoint(callInfo->codePointerLookup.constLookup);
7409                 }
7410 #endif
7411                 break;
7412             }
7413
7414             case CORINFO_CALL_CODE_POINTER:
7415             {
7416                 // The EE has asked us to call by computing a code pointer and then doing an
7417                 // indirect call.  This is because a runtime lookup is required to get the code entry point.
7418
7419                 // These calls always follow a uniform calling convention, i.e. no extra hidden params
7420                 assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0);
7421
7422                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG);
7423                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
7424
7425                 GenTree* fptr =
7426                     impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod);
7427
7428                 if (compDonotInline())
7429                 {
7430                     return TYP_UNDEF;
7431                 }
7432
7433                 // Now make an indirect call through the function pointer
7434
7435                 unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer"));
7436                 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
7437                 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
7438
7439                 call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
7440                 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
7441                 if (callInfo->nullInstanceCheck)
7442                 {
7443                     call->gtFlags |= GTF_CALL_NULLCHECK;
7444                 }
7445
7446                 break;
7447             }
7448
7449             default:
7450                 assert(!"unknown call kind");
7451                 break;
7452         }
7453
7454         //-------------------------------------------------------------------------
7455         // Set more flags
7456
7457         PREFIX_ASSUME(call != nullptr);
7458
7459         if (mflags & CORINFO_FLG_NOGCCHECK)
7460         {
7461             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK;
7462         }
7463
7464         // Mark call if it's one of the ones we will maybe treat as an intrinsic
7465         if (isSpecialIntrinsic)
7466         {
7467             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC;
7468         }
7469     }
7470     assert(sig);
7471     assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set.
7472
7473     /* Some sanity checks */
7474
7475     // CALL_VIRT and NEWOBJ must have a THIS pointer
7476     assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS));
7477     // static bit and hasThis are negations of one another
7478     assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0));
7479     assert(call != nullptr);
7480
7481     /*-------------------------------------------------------------------------
7482      * Check special-cases etc
7483      */
7484
7485     /* Special case - Check if it is a call to Delegate.Invoke(). */
7486
7487     if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
7488     {
7489         assert(!compIsForInlining());
7490         assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7491         assert(mflags & CORINFO_FLG_FINAL);
7492
7493         /* Set the delegate flag */
7494         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV;
7495
7496         if (callInfo->secureDelegateInvoke)
7497         {
7498             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SECURE_DELEGATE_INV;
7499         }
7500
7501         if (opcode == CEE_CALLVIRT)
7502         {
7503             assert(mflags & CORINFO_FLG_FINAL);
7504
7505             /* It should have the GTF_CALL_NULLCHECK flag set. Reset it */
7506             assert(call->gtFlags & GTF_CALL_NULLCHECK);
7507             call->gtFlags &= ~GTF_CALL_NULLCHECK;
7508         }
7509     }
7510
7511     CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass;
7512     actualMethodRetTypeSigClass = sig->retTypeSigClass;
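    // A struct return may normalize to a more specific type (e.g. a SIMD vector type when
    // FEATURE_SIMD is enabled); keep the call node's type in sync with the normalized type.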
7513     if (varTypeIsStruct(callRetTyp))
7514     {
7515         callRetTyp   = impNormStructType(actualMethodRetTypeSigClass);
7516         call->gtType = callRetTyp;
7517     }
7518
7519 #if !FEATURE_VARARG
7520     /* Check for varargs */
7521     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
7522         (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7523     {
7524         BADCODE("Varargs not supported.");
7525     }
7526 #endif // !FEATURE_VARARG
7527
7528 #ifdef UNIX_X86_ABI
7529     if (call->gtCall.callSig == nullptr)
7530     {
7531         call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7532         *call->gtCall.callSig = *sig;
7533     }
7534 #endif // UNIX_X86_ABI
7535
7536     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
7537         (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7538     {
7539         assert(!compIsForInlining());
7540
7541         /* Set the right flags */
7542
7543         call->gtFlags |= GTF_CALL_POP_ARGS;
7544         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VARARGS;
7545
7546         /* Can't allow tailcall for varargs as it is caller-pop. The caller
7547            will be expecting to pop a certain number of arguments, but if we
7548            tailcall to a function with a different number of arguments, we
7549            are hosed. There are ways around this (caller remembers esp value,
7550            varargs is not caller-pop, etc), but not worth it. */
7551         CLANG_FORMAT_COMMENT_ANCHOR;
7552
7553 #ifdef _TARGET_X86_
7554         if (canTailCall)
7555         {
7556             canTailCall             = false;
7557             szCanTailCallFailReason = "Callee is varargs";
7558         }
7559 #endif
7560
7561         /* Get the total number of arguments - this is already correct
7562          * for CALLI - for methods we have to get it from the call site */
7563
7564         if (opcode != CEE_CALLI)
7565         {
7566 #ifdef DEBUG
7567             unsigned numArgsDef = sig->numArgs;
7568 #endif
7569             eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, sig);
7570
7571 #ifdef DEBUG
7572             // We cannot lazily obtain the signature of a vararg call because using its method
7573             // handle will give us only the declared argument list, not the full argument list.
7574             assert(call->gtCall.callSig == nullptr);
7575             call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7576             *call->gtCall.callSig = *sig;
7577 #endif
7578
7579             // For vararg calls we must be sure to load the return type of the
7580             // method actually being called, as well as the return type
7581             // specified in the vararg signature. With type equivalency, these types
7582             // may not be the same.
7583             if (sig->retTypeSigClass != actualMethodRetTypeSigClass)
7584             {
7585                 if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
7586                     sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR &&
7587                     sig->retType != CORINFO_TYPE_VAR)
7588                 {
7589                     // Make sure that all valuetypes (including enums) that we push are loaded.
7590                     // This is to guarantee that if a GC is triggered from the prestub of this method,
7591                     // all valuetypes in the method signature are already loaded.
7592                     // We need to be able to find the size of the valuetypes, but we cannot
7593                     // do a class-load from within GC.
7594                     info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass);
7595                 }
7596             }
7597
7598             assert(numArgsDef <= sig->numArgs);
7599         }
7600
7601         /* We will have the "cookie" as the last argument, but we cannot push
7602          * it on the operand stack because we may overflow, so we append it
7603          * to the arg list after we pop the other arguments */
7604     }
7605
7606     if (mflags & CORINFO_FLG_SECURITYCHECK)
7607     {
7608         assert(!compIsForInlining());
7609
7610         // Need security prolog/epilog callouts when there is
7611         // imperative security in the method. This is to give security a
7612         // chance to do any setup in the prolog and cleanup in the epilog if needed.
7613
7614         if (compIsForInlining())
7615         {
7616             // Cannot handle this if the method being imported is itself an inlinee,
7617             // because an inlinee method does not have its own frame.
7618
7619             compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
7620             return TYP_UNDEF;
7621         }
7622         else
7623         {
7624             tiSecurityCalloutNeeded = true;
7625
7626             // If the current method calls a method which needs a security check,
7627             // (i.e. the method being compiled has imperative security)
7628             // we need to reserve a slot for the security object in
7629             // the current method's stack frame
7630             opts.compNeedSecurityCheck = true;
7631         }
7632     }
7633
7634     //--------------------------- Inline NDirect ------------------------------
7635
7636     // For inline cases we technically should look at both the current
7637     // block and the call site block (or just the latter if we've
7638     // fused the EH trees). However the block-related checks pertain to
7639     // EH and we currently won't inline a method with EH. So for
7640     // inlinees, just checking the call site block is sufficient.
7641     {
7642         // New lexical block here to avoid compilation errors because of GOTOs.
7643         BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
7644         impCheckForPInvokeCall(call->AsCall(), methHnd, sig, mflags, block);
7645     }
7646
7647     if (call->gtFlags & GTF_CALL_UNMANAGED)
7648     {
7649         // We set up the unmanaged call by linking the frame, disabling GC, etc
7650         // This needs to be cleaned up on return
7651         if (canTailCall)
7652         {
7653             canTailCall             = false;
7654             szCanTailCallFailReason = "Callee is native";
7655         }
7656
7657         checkForSmallType = true;
7658
7659         impPopArgsForUnmanagedCall(call, sig);
7660
7661         goto DONE;
7662     }
7663     else if ((opcode == CEE_CALLI) && (((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_STDCALL) ||
7664                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_C) ||
7665                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_THISCALL) ||
7666                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_FASTCALL)))
7667     {
7668         if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig))
7669         {
7670             // Normally this only happens with inlining.
7671             // However, a generic method (or type) being NGENd into another module
7672             // can run into this issue as well.  There's not an easy fall-back for NGEN
7673             // so instead we fall back to the JIT.
7674             if (compIsForInlining())
7675             {
7676                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE);
7677             }
7678             else
7679             {
7680                 IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)");
7681             }
7682
7683             return TYP_UNDEF;
7684         }
7685
7686         GenTree* cookie = eeGetPInvokeCookie(sig);
7687
7688         // This cookie is required to be either a simple GT_CNS_INT or
7689         // an indirection of a GT_CNS_INT
7690         //
7691         GenTree* cookieConst = cookie;
7692         if (cookie->gtOper == GT_IND)
7693         {
7694             cookieConst = cookie->gtOp.gtOp1;
7695         }
7696         assert(cookieConst->gtOper == GT_CNS_INT);
7697
7698         // Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that
7699         // we won't allow this tree to participate in any CSE logic
7700         //
7701         cookie->gtFlags |= GTF_DONT_CSE;
7702         cookieConst->gtFlags |= GTF_DONT_CSE;
7703
7704         call->gtCall.gtCallCookie = cookie;
7705
7706         if (canTailCall)
7707         {
7708             canTailCall             = false;
7709             szCanTailCallFailReason = "PInvoke calli";
7710         }
7711     }
7712
7713     /*-------------------------------------------------------------------------
7714      * Create the argument list
7715      */
7716
7717     //-------------------------------------------------------------------------
7718     // Special case - for varargs we have an implicit last argument
7719
7720     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
7721     {
7722         assert(!compIsForInlining());
7723
7724         void *varCookie, *pVarCookie;
7725         if (!info.compCompHnd->canGetVarArgsHandle(sig))
7726         {
7727             compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE);
7728             return TYP_UNDEF;
7729         }
7730
7731         varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie);
7732         assert((!varCookie) != (!pVarCookie));
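        // Exactly one of the direct cookie and its indirection cell must be non-null.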
7733         GenTree* cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL, sig);
7734
7735         assert(extraArg == nullptr);
7736         extraArg = gtNewArgList(cookie);
7737     }
7738
7739     //-------------------------------------------------------------------------
7740     // Extra arg for shared generic code and array methods
7741     //
7742     // Extra argument containing instantiation information is passed in the
7743     // following circumstances:
7744     // (a) To the "Address" method on array classes; the extra parameter is
7745     //     the array's type handle (a TypeDesc)
7746     // (b) To shared-code instance methods in generic structs; the extra parameter
7747     //     is the struct's type handle (a vtable ptr)
7748     // (c) To shared-code per-instantiation non-generic static methods in generic
7749     //     classes and structs; the extra parameter is the type handle
7750     // (d) To shared-code generic methods; the extra parameter is an
7751     //     exact-instantiation MethodDesc
7752     //
7753     // We also set the exact type context associated with the call so we can
7754     // inline the call correctly later on.
7755
7756     if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE)
7757     {
7758         assert(call->gtCall.gtCallType == CT_USER_FUNC);
7759         if (clsHnd == nullptr)
7760         {
7761             NO_WAY("CALLI on parameterized type");
7762         }
7763
7764         assert(opcode != CEE_CALLI);
7765
7766         GenTree* instParam;
7767         BOOL     runtimeLookup;
7768
7769         // Instantiated generic method
7770         if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD)
7771         {
7772             CORINFO_METHOD_HANDLE exactMethodHandle =
7773                 (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7774
7775             if (!exactContextNeedsRuntimeLookup)
7776             {
7777 #ifdef FEATURE_READYTORUN_COMPILER
7778                 if (opts.IsReadyToRun())
7779                 {
7780                     instParam =
7781                         impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle);
7782                     if (instParam == nullptr)
7783                     {
7784                         assert(compDonotInline());
7785                         return TYP_UNDEF;
7786                     }
7787                 }
7788                 else
7789 #endif
7790                 {
7791                     instParam = gtNewIconEmbMethHndNode(exactMethodHandle);
7792                     info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle);
7793                 }
7794             }
7795             else
7796             {
7797                 instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7798                 if (instParam == nullptr)
7799                 {
7800                     assert(compDonotInline());
7801                     return TYP_UNDEF;
7802                 }
7803             }
7804         }
7805
7806         // otherwise must be an instance method in a generic struct,
7807         // a static method in a generic type, or a runtime-generated array method
7808         else
7809         {
7810             assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS);
7811             CORINFO_CLASS_HANDLE exactClassHandle =
7812                 (CORINFO_CLASS_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7813
7814             if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0)
7815             {
7816                 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD);
7817                 return TYP_UNDEF;
7818             }
7819
7820             if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall)
7821             {
7822                 // We indicate "readonly" to the Address operation by using a null
7823                 // instParam.
7824                 instParam = gtNewIconNode(0, TYP_REF);
7825             }
7826             else if (!exactContextNeedsRuntimeLookup)
7827             {
7828 #ifdef FEATURE_READYTORUN_COMPILER
7829                 if (opts.IsReadyToRun())
7830                 {
7831                     instParam =
7832                         impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle);
7833                     if (instParam == nullptr)
7834                     {
7835                         assert(compDonotInline());
7836                         return TYP_UNDEF;
7837                     }
7838                 }
7839                 else
7840 #endif
7841                 {
7842                     instParam = gtNewIconEmbClsHndNode(exactClassHandle);
7843                     info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle);
7844                 }
7845             }
7846             else
7847             {
7848                 // If the EE was able to resolve a constrained call, the instantiating parameter to use is the type
7849                 // with which the call was constrained. We embed pConstrainedResolvedToken as the extra argument
7850                 // because pResolvedToken is an interface method and interface types make a poor generic context.
7851                 if (pConstrainedResolvedToken)
7852                 {
7853                     instParam = impTokenToHandle(pConstrainedResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/,
7854                                                  FALSE /* importParent */);
7855                 }
7856                 else
7857                 {
7858                     instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7859                 }
7860
7861                 if (instParam == nullptr)
7862                 {
7863                     assert(compDonotInline());
7864                     return TYP_UNDEF;
7865                 }
7866             }
7867         }
7868
7869         assert(extraArg == nullptr);
7870         extraArg = gtNewArgList(instParam);
7871     }
7872
7873     // Inlining may need the exact type context (exactContextHnd) if we're inlining shared generic code, in particular
7874     // to inline 'polytypic' operations such as static field accesses, type tests and method calls which
7875     // rely on the exact context. The exactContextHnd is passed back to the JitInterface at appropriate points.
7876     // exactContextHnd is not currently required when inlining shared generic code into shared
7877     // generic code, since the inliner aborts whenever shared code polytypic operations are encountered
7878     // (e.g. anything marked needsRuntimeLookup)
7879     if (exactContextNeedsRuntimeLookup)
7880     {
7881         exactContextHnd = nullptr;
7882     }
7883
7884     if ((opcode == CEE_NEWOBJ) && ((clsFlags & CORINFO_FLG_DELEGATE) != 0))
7885     {
7886         // Only verifiable cases are supported.
7887         // dup; ldvirtftn; newobj; or ldftn; newobj.
7888         // An IL test case could contain an unverifiable sequence; in that case the optimization should not be done.
7889         if (impStackHeight() > 0)
7890         {
7891             typeInfo delegateTypeInfo = impStackTop().seTypeInfo;
7892             if (delegateTypeInfo.IsToken())
7893             {
7894                 ldftnToken = delegateTypeInfo.GetToken();
7895             }
7896         }
7897     }
7898
7899     //-------------------------------------------------------------------------
7900     // The main group of arguments
7901
7902     args = call->gtCall.gtCallArgs = impPopList(sig->numArgs, sig, extraArg);
7903
7904     if (args)
7905     {
7906         call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
7907     }
7908
7909     //-------------------------------------------------------------------------
7910     // The "this" pointer
7911
7912     if (!(mflags & CORINFO_FLG_STATIC) && !((opcode == CEE_NEWOBJ) && (newobjThis == nullptr)))
7913     {
7914         GenTree* obj;
7915
7916         if (opcode == CEE_NEWOBJ)
7917         {
7918             obj = newobjThis;
7919         }
7920         else
7921         {
7922             obj = impPopStack().val;
7923             obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform);
7924             if (compDonotInline())
7925             {
7926                 return TYP_UNDEF;
7927             }
7928         }
7929
7930         // Store the "this" value in the call
7931         call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT;
7932         call->gtCall.gtCallObjp = obj;
7933
7934         // Is this a virtual or interface call?
7935         if (call->gtCall.IsVirtual())
7936         {
7937             // only true object pointers can be virtual
7938             assert(obj->gtType == TYP_REF);
7939
7940             // See if we can devirtualize.
7941             impDevirtualizeCall(call->AsCall(), &callInfo->hMethod, &callInfo->methodFlags, &callInfo->contextHandle,
7942                                 &exactContextHnd);
7943         }
7944
7945         if (impIsThis(obj))
7946         {
7947             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS;
7948         }
7949     }
7950
7951     //-------------------------------------------------------------------------
7952     // The "this" pointer for "newobj"
7953
7954     if (opcode == CEE_NEWOBJ)
7955     {
7956         if (clsFlags & CORINFO_FLG_VAROBJSIZE)
7957         {
7958             assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately
7959             // This is a 'new' of a variable-sized object, where
7960             // the constructor is to return the object.  In this case
7961             // the constructor claims to return VOID but we know it
7962             // actually returns the new object
7963             assert(callRetTyp == TYP_VOID);
7964             callRetTyp   = TYP_REF;
7965             call->gtType = TYP_REF;
7966             impSpillSpecialSideEff();
7967
7968             impPushOnStack(call, typeInfo(TI_REF, clsHnd));
7969         }
7970         else
7971         {
7972             if (clsFlags & CORINFO_FLG_DELEGATE)
7973             {
7974                 // The new inliner morphs the delegate constructor call here in impImportCall.
7975                 // This will allow us to inline the call to the delegate constructor.
7976                 call = fgOptimizeDelegateConstructor(call->AsCall(), &exactContextHnd, ldftnToken);
7977             }
7978
7979             if (!bIntrinsicImported)
7980             {
7981
7982 #if defined(DEBUG) || defined(INLINE_DATA)
7983
7984                 // Keep track of the raw IL offset of the call
7985                 call->gtCall.gtRawILOffset = rawILOffset;
7986
7987 #endif // defined(DEBUG) || defined(INLINE_DATA)
7988
7989                 // Is it an inline candidate?
7990                 impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
7991             }
7992
7993             // append the call node.
7994             impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7995
7996             // Now push the value of the 'new' onto the stack
7997
7998             // This is a 'new' of a non-variable sized object.
7999             // Append the new node (op1) to the statement list,
8000             // and then push the local holding the value of this
8001             // new instruction on the stack.
8002
8003             if (clsFlags & CORINFO_FLG_VALUECLASS)
8004             {
8005                 assert(newobjThis->gtOper == GT_ADDR && newobjThis->gtOp.gtOp1->gtOper == GT_LCL_VAR);
8006
8007                 unsigned tmp = newobjThis->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
8008                 impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack());
8009             }
8010             else
8011             {
8012                 if (newobjThis->gtOper == GT_COMMA)
8013                 {
8014                     // In coreclr the callout can be inserted even if verification is disabled
8015                     // so we cannot rely on tiVerificationNeeded alone
8016
8017                     // We must have inserted the callout. Get the real newobj.
8018                     newobjThis = newobjThis->gtOp.gtOp2;
8019                 }
8020
8021                 assert(newobjThis->gtOper == GT_LCL_VAR);
8022                 impPushOnStack(gtNewLclvNode(newobjThis->gtLclVarCommon.gtLclNum, TYP_REF), typeInfo(TI_REF, clsHnd));
8023             }
8024         }
8025         return callRetTyp;
8026     }
8027
8028 DONE:
8029
8030     if (tailCall)
8031     {
8032         // This check cannot be performed for implicit tail calls because
8033         // impIsImplicitTailCallCandidate() does not check whether return
8034         // types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT.
8035         // As a result, in the following case we may find that the type stack is
8036         // non-empty when Callee() is considered for implicit
8037         // tail calling.
8038         //      int Caller(..) { .... void Callee(); ret val; ... }
8039         //
8040         // Note that we cannot check return type compatibility before ImpImportCall():
8041         // we don't have the required info, or we would need to duplicate some of the
8042         // logic of ImpImportCall().
8043         //
8044         // For implicit tail calls, we perform this check after return types are
8045         // known to be compatible.
8046         if ((tailCall & PREFIX_TAILCALL_EXPLICIT) && (verCurrentState.esStackDepth != 0))
8047         {
8048             BADCODE("Stack should be empty after tailcall");
8049         }
8050
8051         // Note that we cannot relax this condition with genActualType() as
8052         // the calling convention dictates that the caller of a function with
8053         // a small-typed return value is responsible for normalizing the return value.
8054
8055         if (canTailCall &&
8056             !impTailCallRetTypeCompatible(info.compRetType, info.compMethodInfo->args.retTypeClass, callRetTyp,
8057                                           callInfo->sig.retTypeClass))
8058         {
8059             canTailCall             = false;
8060             szCanTailCallFailReason = "Return types are not tail call compatible";
8061         }
8062
8063         // Stack empty check for implicit tail calls.
8064         if (canTailCall && (tailCall & PREFIX_TAILCALL_IMPLICIT) && (verCurrentState.esStackDepth != 0))
8065         {
8066 #ifdef _TARGET_AMD64_
8067             // JIT64 Compatibility:  Opportunistic tail call stack mismatch throws a VerificationException
8068             // in JIT64, not an InvalidProgramException.
8069             Verify(false, "Stack should be empty after tailcall");
8070 #else  // !_TARGET_AMD64_
8071             BADCODE("Stack should be empty after tailcall");
8072 #endif // _TARGET_AMD64_
8073         }
8074
8075         // assert(compCurBB is not a catch, finally or filter block);
8076         // assert(compCurBB is not a try block protected by a finally block);
8077
8078         // Check for permission to tailcall
8079         bool explicitTailCall = (tailCall & PREFIX_TAILCALL_EXPLICIT) != 0;
8080
8081         assert(!explicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN);
8082
8083         if (canTailCall)
8084         {
8085             // True virtual or indirect calls shouldn't pass in a callee handle.
8086             CORINFO_METHOD_HANDLE exactCalleeHnd =
8087                 ((call->gtCall.gtCallType != CT_USER_FUNC) || call->gtCall.IsVirtual()) ? nullptr : methHnd;
8088             GenTree* thisArg = call->gtCall.gtCallObjp;
8089
8090             if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, explicitTailCall))
8091             {
8092                 canTailCall = true;
8093                 if (explicitTailCall)
8094                 {
8095                     // In case of explicit tail calls, mark it so that it is not considered
8096                     // for in-lining.
8097                     call->gtCall.gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL;
8098 #ifdef DEBUG
8099                     if (verbose)
8100                     {
8101                         printf("\nGTF_CALL_M_EXPLICIT_TAILCALL bit set for call ");
8102                         printTreeID(call);
8103                         printf("\n");
8104                     }
8105 #endif
8106                 }
8107                 else
8108                 {
8109 #if FEATURE_TAILCALL_OPT
8110                     // Must be an implicit tail call.
8111                     assert((tailCall & PREFIX_TAILCALL_IMPLICIT) != 0);
8112
8113                     // It is possible that a call node is both an inline candidate and marked
8114                     // for opportunistic tail calling.  In-lining happens before morphing of
8115                     // trees.  If in-lining of an inline candidate gets aborted for whatever
8116                     // reason, it will survive to the morphing stage, at which point it will be
8117                     // transformed into a tail call after performing additional checks.
8118
8119                     call->gtCall.gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL;
8120 #ifdef DEBUG
8121                     if (verbose)
8122                     {
8123                         printf("\nGTF_CALL_M_IMPLICIT_TAILCALL bit set for call ");
8124                         printTreeID(call);
8125                         printf("\n");
8126                     }
8127 #endif
8128
8129 #else //! FEATURE_TAILCALL_OPT
8130                     NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls");
8131
8132 #endif // FEATURE_TAILCALL_OPT
8133                 }
8134
8135                 // we can't report success just yet...
8136             }
8137             else
8138             {
8139                 canTailCall = false;
8140 // canTailCall reported its reasons already
8141 #ifdef DEBUG
8142                 if (verbose)
8143                 {
8144                     printf("\ninfo.compCompHnd->canTailCall returned false for call ");
8145                     printTreeID(call);
8146                     printf("\n");
8147                 }
8148 #endif
8149             }
8150         }
8151         else
8152         {
8153             // If this assert fires it means that canTailCall was set to false without setting a reason!
8154             assert(szCanTailCallFailReason != nullptr);
8155
8156 #ifdef DEBUG
8157             if (verbose)
8158             {
8159                 printf("\nRejecting %splicit tail call for call ", explicitTailCall ? "ex" : "im");
8160                 printTreeID(call);
8161                 printf(": %s\n", szCanTailCallFailReason);
8162             }
8163 #endif
8164             info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, explicitTailCall, TAILCALL_FAIL,
8165                                                      szCanTailCallFailReason);
8166         }
8167     }
8168
8169     // Note: we assume that small return types are already normalized by the managed callee
8170     // or by the pinvoke stub for calls to unmanaged code.
8171
8172     if (!bIntrinsicImported)
8173     {
8174         //
8175         // Things needed to be checked when bIntrinsicImported is false.
8176         //
8177
8178         assert(call->gtOper == GT_CALL);
8179         assert(sig != nullptr);
8180
8181         // Tail calls require us to save the call site's sig info so we can obtain an argument
8182         // copying thunk from the EE later on.
8183         if (call->gtCall.callSig == nullptr)
8184         {
8185             call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
8186             *call->gtCall.callSig = *sig;
8187         }
8188
8189         if (compIsForInlining() && opcode == CEE_CALLVIRT)
8190         {
8191             GenTree* callObj = call->gtCall.gtCallObjp;
8192             assert(callObj != nullptr);
8193
8194             if ((call->gtCall.IsVirtual() || (call->gtFlags & GTF_CALL_NULLCHECK)) &&
8195                 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(call->gtCall.gtCallArgs, callObj,
8196                                                                    impInlineInfo->inlArgInfo))
8197             {
8198                 impInlineInfo->thisDereferencedFirst = true;
8199             }
8200         }
8201
8202 #if defined(DEBUG) || defined(INLINE_DATA)
8203
8204         // Keep track of the raw IL offset of the call
8205         call->gtCall.gtRawILOffset = rawILOffset;
8206
8207 #endif // defined(DEBUG) || defined(INLINE_DATA)
8208
8209         // Is it an inline candidate?
8210         impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
8211     }
8212
8213 DONE_CALL:
8214     // Push or append the result of the call
8215     if (callRetTyp == TYP_VOID)
8216     {
8217         if (opcode == CEE_NEWOBJ)
8218         {
8219             // we actually did push something, so don't spill the thing we just pushed.
8220             assert(verCurrentState.esStackDepth > 0);
8221             impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtOffs);
8222         }
8223         else
8224         {
8225             impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
8226         }
8227     }
8228     else
8229     {
8230         impSpillSpecialSideEff();
8231
8232         if (clsFlags & CORINFO_FLG_ARRAY)
8233         {
8234             eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
8235         }
8236
8237         // Find the return type used for verification by interpreting the method signature.
8238         // NB: we are clobbering the already established sig.
8239         if (tiVerificationNeeded)
8240         {
8241             // Actually, we never get the sig for the original method.
8242             sig = &(callInfo->verSig);
8243         }
8244
8245         typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass);
8246         tiRetVal.NormaliseForStack();
8247
8248         // The CEE_READONLY prefix modifies the verification semantics of an Address
8249         // operation on an array type.
8250         if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall && tiRetVal.IsByRef())
8251         {
8252             tiRetVal.SetIsReadonlyByRef();
8253         }
8254
8255         if (tiVerificationNeeded)
8256         {
8257             // We assume all calls return permanent home byrefs. If they
8258             // didn't they wouldn't be verifiable. This is also covering
8259             // the Address() helper for multidimensional arrays.
8260             if (tiRetVal.IsByRef())
8261             {
8262                 tiRetVal.SetIsPermanentHomeByRef();
8263             }
8264         }
8265
8266         if (call->IsCall())
8267         {
8268             // Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call)
8269
8270             bool fatPointerCandidate = call->AsCall()->IsFatPointerCandidate();
8271             if (varTypeIsStruct(callRetTyp))
8272             {
8273                 call = impFixupCallStructReturn(call->AsCall(), sig->retTypeClass);
8274             }
8275
8276             if ((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0)
8277             {
8278                 assert(opts.OptEnabled(CLFLG_INLINING));
8279                 assert(!fatPointerCandidate); // We should not try to inline calli.
8280
8281                 // Make the call its own tree (spill the stack if needed).
8282                 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
8283
8284                 // TODO: Still using the widened type.
8285                 call = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp));
8286             }
8287             else
8288             {
8289                 if (fatPointerCandidate)
8290                 {
8291                     // fatPointer candidates should be in statements of the form call() or var = call().
8292                     // Such a form lets us find statements with fat calls without walking whole trees,
8293                     // and avoids problems with splitting trees.
8294                     assert(!bIntrinsicImported);
8295                     assert(IsTargetAbi(CORINFO_CORERT_ABI));
8296                     if (call->OperGet() != GT_LCL_VAR) // can be already converted by impFixupCallStructReturn.
8297                     {
8298                         unsigned   calliSlot  = lvaGrabTemp(true DEBUGARG("calli"));
8299                         LclVarDsc* varDsc     = &lvaTable[calliSlot];
8300                         varDsc->lvVerTypeInfo = tiRetVal;
8301                         impAssignTempGen(calliSlot, call, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_NONE);
8302                         // impAssignTempGen can change the src arg list and return type for a call that returns a struct.
8303                         var_types type = genActualType(lvaTable[calliSlot].TypeGet());
8304                         call           = gtNewLclvNode(calliSlot, type);
8305                     }
8306                 }
8307
8308                 // For non-candidates we must also spill, since we
8309                 // might have locals live on the eval stack that this
8310                 // call can modify.
8311                 //
8312                 // Suppress this for certain well-known call targets
8313                 // that we know won't modify locals, e.g. calls that are
8314                 // recognized in gtCanOptimizeTypeEquality. Otherwise
8315                 // we may break key fragile pattern matches later on.
8316                 bool spillStack = true;
8317                 if (call->IsCall())
8318                 {
8319                     GenTreeCall* callNode = call->AsCall();
8320                     if ((callNode->gtCallType == CT_HELPER) && gtIsTypeHandleToRuntimeTypeHelper(callNode))
8321                     {
8322                         spillStack = false;
8323                     }
8324                     else if ((callNode->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0)
8325                     {
8326                         spillStack = false;
8327                     }
8328                 }
8329
8330                 if (spillStack)
8331                 {
8332                     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call"));
8333                 }
8334             }
8335         }
8336
8337         if (!bIntrinsicImported)
8338         {
8339             //-------------------------------------------------------------------------
8340             //
8341             /* If the call is of a small type and the callee is managed, the callee will normalize the result
8342                 before returning.
8343                 However, we need to normalize small type values returned by unmanaged
8344                 functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here
8345                 if we use the shorter inlined pinvoke stub. */
8346
8347             if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT))
8348             {
8349                 call = gtNewCastNode(genActualType(callRetTyp), call, callRetTyp);
8350             }
8351         }
8352
8353         impPushOnStack(call, tiRetVal);
8354     }
8355
8356     // VSD functions get a new call target each time we call getCallInfo, so clear the cache.
8357     // Also, the call info cache for CALLI instructions is largely incomplete, so clear it out.
8358     // if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB))
8359     //  callInfoCache.uncacheCallInfo();
8360
8361     return callRetTyp;
8362 }
8363 #ifdef _PREFAST_
8364 #pragma warning(pop)
8365 #endif
8366
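//------------------------------------------------------------------------
// impMethodInfo_hasRetBuffArg: Check whether a method returns its struct
//    value via a hidden return buffer argument.
//
// Arguments:
//    methInfo - method info to examine
//
// Return Value:
//    true if the struct return is done via a return buffer (SPK_ByReference),
//    false otherwise.
//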
8367 bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo)
8368 {
8369     CorInfoType corType = methInfo->args.retType;
8370
8371     if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY))
8372     {
8373         // We have some kind of STRUCT being returned
8374
8375         structPassingKind howToReturnStruct = SPK_Unknown;
8376
8377         var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, &howToReturnStruct);
8378
8379         if (howToReturnStruct == SPK_ByReference)
8380         {
8381             return true;
8382         }
8383     }
8384
8385     return false;
8386 }
8387
8388 #ifdef DEBUG
8389 //
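//------------------------------------------------------------------------
// impImportJitTestLabelMark: Import a JIT test-label marking call (DEBUG only).
//    Pops the constant test-label argument (and an optional numeric argument),
//    then pops the expression being annotated, records the annotation in the
//    node test data, and pushes the expression back on the stack.
//
// Arguments:
//    numArgs - number of arguments to the marking call (2 or 3)
//
// Return Value:
//    The type of the annotated expression.
//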
8390 var_types Compiler::impImportJitTestLabelMark(int numArgs)
8391 {
8392     TestLabelAndNum tlAndN;
8393     if (numArgs == 2)
8394     {
8395         tlAndN.m_num  = 0;
8396         StackEntry se = impPopStack();
8397         assert(se.seTypeInfo.GetType() == TI_INT);
8398         GenTree* val = se.val;
8399         assert(val->IsCnsIntOrI());
8400         tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
8401     }
8402     else if (numArgs == 3)
8403     {
8404         StackEntry se = impPopStack();
8405         assert(se.seTypeInfo.GetType() == TI_INT);
8406         GenTree* val = se.val;
8407         assert(val->IsCnsIntOrI());
8408         tlAndN.m_num = val->AsIntConCommon()->IconValue();
8409         se           = impPopStack();
8410         assert(se.seTypeInfo.GetType() == TI_INT);
8411         val = se.val;
8412         assert(val->IsCnsIntOrI());
8413         tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
8414     }
8415     else
8416     {
8417         assert(false);
8418     }
8419
8420     StackEntry expSe = impPopStack();
8421     GenTree*   node  = expSe.val;
8422
8423     // There are a small number of special cases, where we actually put the annotation on a subnode.
8424     if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100)
8425     {
8426         // A loop hoist annotation with value >= 100 means that the expression should be a static field access,
8427         // a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some
8428         // offset within the static field block whose address is returned by the helper call.
8429         // The annotation is saying that this address calculation, but not the entire access, should be hoisted.
8430         GenTree* helperCall = nullptr;
8431         assert(node->OperGet() == GT_IND);
8432         tlAndN.m_num -= 100;
8433         GetNodeTestData()->Set(node->gtOp.gtOp1, tlAndN);
8434         GetNodeTestData()->Remove(node);
8435     }
8436     else
8437     {
8438         GetNodeTestData()->Set(node, tlAndN);
8439     }
8440
8441     impPushOnStack(node, expSe.seTypeInfo);
8442     return node->TypeGet();
8443 }
8444 #endif // DEBUG
8445
8446 //-----------------------------------------------------------------------------------
8447 //  impFixupCallStructReturn: For a call node that returns a struct type either
8448 //  adjust the return type to an enregisterable type, or set the flag to indicate
8449 //  struct return via retbuf arg.
8450 //
8451 //  Arguments:
8452 //    call       -  GT_CALL GenTree node
8453 //    retClsHnd  -  Class handle of return type of the call
8454 //
8455 //  Return Value:
8456 //    Returns new GenTree node after fixing struct return of call node
8457 //
8458 GenTree* Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd)
8459 {
8460     if (!varTypeIsStruct(call))
8461     {
8462         return call;
8463     }
8464
8465     call->gtRetClsHnd = retClsHnd;
8466
8467 #if FEATURE_MULTIREG_RET
8468     // Initialize Return type descriptor of call node
8469     ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
8470     retTypeDesc->InitializeStructReturnType(this, retClsHnd);
8471 #endif // FEATURE_MULTIREG_RET
8472
8473 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
8474
8475     // Not allowed for FEATURE_CORECLR, which is the only SKU available for System V OSs.
8476     assert(!call->IsVarargs() && "varargs not allowed for System V OSs.");
8477
8478     // The return type will remain as the incoming struct type unless normalized to a
8479     // single eightbyte return type below.
8480     call->gtReturnType = call->gtType;
8481
8482     unsigned retRegCount = retTypeDesc->GetReturnRegCount();
8483     if (retRegCount != 0)
8484     {
8485         if (retRegCount == 1)
8486         {
8487             // struct returned in a single register
8488             call->gtReturnType = retTypeDesc->GetReturnRegType(0);
8489         }
8490         else
8491         {
8492             // must be a struct returned in two registers
8493             assert(retRegCount == 2);
8494
8495             if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
8496             {
8497                 // Force a call returning multi-reg struct to be always of the IR form
8498                 //   tmp = call
8499                 //
8500                 // No need to assign a multi-reg struct to a local var if:
8501                 //  - It is a tail call or
8502                 //  - The call is marked for in-lining later
8503                 return impAssignMultiRegTypeToVar(call, retClsHnd);
8504             }
8505         }
8506     }
8507     else
8508     {
8509         // struct not returned in registers, i.e. returned via a hidden retbuf arg.
8510         call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
8511     }
8512
8513 #else // not FEATURE_UNIX_AMD64_STRUCT_PASSING
8514
8515     // Check for TYP_STRUCT type that wraps a primitive type
8516     // Such structs are returned using a single register
8517     // and we change the return type on those calls here.
8518     //
8519     structPassingKind howToReturnStruct;
8520     var_types         returnType = getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
8521
8522     if (howToReturnStruct == SPK_ByReference)
8523     {
8524         assert(returnType == TYP_UNKNOWN);
8525         call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
8526     }
8527     else
8528     {
8529         assert(returnType != TYP_UNKNOWN);
8530         call->gtReturnType = returnType;
8531
8532         // ToDo: Refactor this common code sequence into its own method as it is used 4+ times
8533         if ((returnType == TYP_LONG) && (compLongUsed == false))
8534         {
8535             compLongUsed = true;
8536         }
8537         else if (((returnType == TYP_FLOAT) || (returnType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
8538         {
8539             compFloatingPointUsed = true;
8540         }
8541
8542 #if FEATURE_MULTIREG_RET
8543         unsigned retRegCount = retTypeDesc->GetReturnRegCount();
8544         assert(retRegCount != 0);
8545
8546         if (retRegCount >= 2)
8547         {
8548             if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
8549             {
8550                 // Force a call returning multi-reg struct to be always of the IR form
8551                 //   tmp = call
8552                 //
8553                 // No need to assign a multi-reg struct to a local var if:
8554                 //  - It is a tail call or
8555                 //  - The call is marked for in-lining later
8556                 return impAssignMultiRegTypeToVar(call, retClsHnd);
8557             }
8558         }
8559 #endif // FEATURE_MULTIREG_RET
8560     }
8561
8562 #endif // not FEATURE_UNIX_AMD64_STRUCT_PASSING
8563
8564     return call;
8565 }
8566
8567 /*****************************************************************************
8568    For struct return values, re-type the operand in the case where the ABI
8569    does not use a struct return buffer.
8570    Note that this method is only called for !_TARGET_X86_.
8571  */
8572
8573 GenTree* Compiler::impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE retClsHnd)
8574 {
8575     assert(varTypeIsStruct(info.compRetType));
8576     assert(info.compRetBuffArg == BAD_VAR_NUM);
8577
8578 #if defined(_TARGET_XARCH_)
8579
8580 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
8581     // No VarArgs for CoreCLR on x64 Unix
8582     assert(!info.compIsVarArgs);
8583
8584     // Is method returning a multi-reg struct?
8585     if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd))
8586     {
8587         // In case of multi-reg struct return, we force IR to be one of the following:
8588         // GT_RETURN(lclvar) or GT_RETURN(call).  If op is anything other than a
8589         // lclvar or call, it is assigned to a temp to create: temp = op and GT_RETURN(tmp).
8590
8591         if (op->gtOper == GT_LCL_VAR)
8592         {
8593             // Make sure that this struct stays in memory and doesn't get promoted.
8594             unsigned lclNum                  = op->gtLclVarCommon.gtLclNum;
8595             lvaTable[lclNum].lvIsMultiRegRet = true;
8596
8597             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8598             op->gtFlags |= GTF_DONT_CSE;
8599
8600             return op;
8601         }
8602
8603         if (op->gtOper == GT_CALL)
8604         {
8605             return op;
8606         }
8607
8608         return impAssignMultiRegTypeToVar(op, retClsHnd);
8609     }
8610 #else  // !FEATURE_UNIX_AMD64_STRUCT_PASSING
8611     assert(info.compRetNativeType != TYP_STRUCT);
8612 #endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
8613
8614 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
8615
8616     if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd))
8617     {
8618         if (op->gtOper == GT_LCL_VAR)
8619         {
8620             // This LCL_VAR is an HFA return value, it stays as a TYP_STRUCT
8621             unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8622             // Make sure this struct type stays as struct so that we can return it as an HFA
8623             lvaTable[lclNum].lvIsMultiRegRet = true;
8624
8625             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8626             op->gtFlags |= GTF_DONT_CSE;
8627
8628             return op;
8629         }
8630
8631         if (op->gtOper == GT_CALL)
8632         {
8633             if (op->gtCall.IsVarargs())
8634             {
8635                 // We cannot tail call because control needs to return to fixup the calling
8636                 // convention for result return.
8637                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
8638                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8639             }
8640             else
8641             {
8642                 return op;
8643             }
8644         }
8645         return impAssignMultiRegTypeToVar(op, retClsHnd);
8646     }
8647
8648 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM64_)
8649
8650     // Is method returning a multi-reg struct?
8651     if (IsMultiRegReturnedType(retClsHnd))
8652     {
8653         if (op->gtOper == GT_LCL_VAR)
8654         {
8655             // This LCL_VAR stays as a TYP_STRUCT
8656             unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8657
8658             // Make sure this struct type is not struct promoted
8659             lvaTable[lclNum].lvIsMultiRegRet = true;
8660
8661             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8662             op->gtFlags |= GTF_DONT_CSE;
8663
8664             return op;
8665         }
8666
8667         if (op->gtOper == GT_CALL)
8668         {
8669             if (op->gtCall.IsVarargs())
8670             {
8671                 // We cannot tail call because control needs to return to fixup the calling
8672                 // convention for result return.
8673                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
8674                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8675             }
8676             else
8677             {
8678                 return op;
8679             }
8680         }
8681         return impAssignMultiRegTypeToVar(op, retClsHnd);
8682     }
8683
8684 #endif // _TARGET_XARCH_ || (FEATURE_MULTIREG_RET && (_TARGET_ARM_ || _TARGET_ARM64_))
8685
8686 REDO_RETURN_NODE:
8687     // Adjust the type away from struct to integral,
8688     // and do not normalize.
8689     if (op->gtOper == GT_LCL_VAR)
8690     {
8691         op->ChangeOper(GT_LCL_FLD);
8692     }
8693     else if (op->gtOper == GT_OBJ)
8694     {
8695         GenTree* op1 = op->AsObj()->Addr();
8696
8697         // We will fold away OBJ/ADDR
8698         // except for OBJ/ADDR/INDEX
8699         //     as the array type influences the array element's offset
8700         //     Later in this method we change op->gtType to info.compRetNativeType
8701         //     This is not correct when op is a GT_INDEX as the starting offset
8702         //     for the array elements 'elemOffs' is different for an array of
8703         //     TYP_REF than an array of TYP_STRUCT (which simply wraps a TYP_REF)
8704         //     Also refer to the GTF_INX_REFARR_LAYOUT flag
8705         //
8706         if ((op1->gtOper == GT_ADDR) && (op1->gtOp.gtOp1->gtOper != GT_INDEX))
8707         {
8708             // Change '*(&X)' to 'X' and see if we can do better
8709             op = op1->gtOp.gtOp1;
8710             goto REDO_RETURN_NODE;
8711         }
8712         op->gtObj.gtClass = NO_CLASS_HANDLE;
8713         op->ChangeOperUnchecked(GT_IND);
8714         op->gtFlags |= GTF_IND_TGTANYWHERE;
8715     }
8716     else if (op->gtOper == GT_CALL)
8717     {
8718         if (op->AsCall()->TreatAsHasRetBufArg(this))
8719         {
8720             // This must be one of those 'special' helpers that don't
8721             // really have a return buffer, but instead use it as a way
8722             // to keep the trees cleaner with fewer address-taken temps.
8723             //
8724             // Well, now we have to materialize the return buffer as
8725             // an address-taken temp. Then we can return the temp.
8726             //
8727             // NOTE: this code assumes that since the call directly
8728             // feeds the return, then the call must be returning the
8729             // same structure/class/type.
8730             //
8731             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer"));
8732
8733             // No need to spill anything as we're about to return.
8734             impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE);
8735
8736             // Don't create both a GT_ADDR & GT_OBJ just to undo all of that; instead,
8737             // jump directly to a GT_LCL_FLD.
8738             op = gtNewLclvNode(tmpNum, info.compRetNativeType);
8739             op->ChangeOper(GT_LCL_FLD);
8740         }
8741         else
8742         {
8743             assert(info.compRetNativeType == op->gtCall.gtReturnType);
8744
8745             // Don't change the gtType of the node just yet, it will get changed later.
8746             return op;
8747         }
8748     }
8749 #if defined(FEATURE_HW_INTRINSICS) && defined(_TARGET_ARM64_)
8750     else if ((op->gtOper == GT_HWIntrinsic) && varTypeIsSIMD(op->gtType))
8751     {
8752         // TODO-ARM64-FIXME Implement ARM64 ABI for Short Vectors properly
8753         // assert(op->gtType == info.compRetNativeType)
8754         if (op->gtType != info.compRetNativeType)
8755         {
8756             // Insert a register move to keep target type of SIMD intrinsic intact
8757             op = gtNewScalarHWIntrinsicNode(info.compRetNativeType, op, NI_ARM64_NONE_MOV);
8758         }
8759     }
8760 #endif
8761     else if (op->gtOper == GT_COMMA)
8762     {
8763         op->gtOp.gtOp2 = impFixupStructReturnType(op->gtOp.gtOp2, retClsHnd);
8764     }
8765
8766     op->gtType = info.compRetNativeType;
8767
8768     return op;
8769 }
8770
8771 /*****************************************************************************
8772    CEE_LEAVE may be jumping out of a protected block, viz, a catch or a
8773    finally-protected try. We find the finally blocks protecting the current
8774    offset (in order) by walking over the complete exception table and
8775    finding enclosing clauses. This assumes that the table is sorted.
8776    This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS.
8777
8778    If we are leaving a catch handler, we need to attach the
8779    CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks.
8780
8781    After this function, the BBJ_LEAVE block has been converted to a different type.
8782  */
8783
8784 #if !FEATURE_EH_FUNCLETS
8785
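// Non-funclet EH model: walk the EH table, accumulating CORINFO_HELP_ENDCATCH
// calls for each catch handler we are leaving and creating a chain of
// BBJ_CALLFINALLY blocks (with GT_END_LFIN markers) for each enclosing
// finally-protected try, ending in a BBJ_ALWAYS to the leave target.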
8786 void Compiler::impImportLeave(BasicBlock* block)
8787 {
8788 #ifdef DEBUG
8789     if (verbose)
8790     {
8791         printf("\nBefore import CEE_LEAVE:\n");
8792         fgDispBasicBlocks();
8793         fgDispHandlerTab();
8794     }
8795 #endif // DEBUG
8796
8797     bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8798     unsigned    blkAddr         = block->bbCodeOffs;
8799     BasicBlock* leaveTarget     = block->bbJumpDest;
8800     unsigned    jmpAddr         = leaveTarget->bbCodeOffs;
8801
8802     // LEAVE clears the stack: spill any side effects and set the stack depth to 0
8803
8804     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8805     verCurrentState.esStackDepth = 0;
8806
8807     assert(block->bbJumpKind == BBJ_LEAVE);
8808     assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary
8809
8810     BasicBlock* step         = DUMMY_INIT(NULL);
8811     unsigned    encFinallies = 0; // Number of enclosing finallies.
8812     GenTree*    endCatches   = NULL;
8813     GenTree*    endLFin      = NULL; // The statement tree to indicate the end of locally-invoked finally.
8814
8815     unsigned  XTnum;
8816     EHblkDsc* HBtab;
8817
8818     for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8819     {
8820         // Grab the handler offsets
8821
8822         IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8823         IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8824         IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8825         IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8826
8827         /* Is this a catch-handler we are CEE_LEAVEing out of?
8828          * If so, we need to call CORINFO_HELP_ENDCATCH.
8829          */
8830
8831         if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8832         {
8833             // Can't CEE_LEAVE out of a finally/fault handler
8834             if (HBtab->HasFinallyOrFaultHandler())
8835                 BADCODE("leave out of fault/finally block");
8836
8837             // Create the call to CORINFO_HELP_ENDCATCH
8838             GenTree* endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID);
8839
8840             // Make a list of all the currently pending endCatches
8841             if (endCatches)
8842                 endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch);
8843             else
8844                 endCatches = endCatch;
8845
8846 #ifdef DEBUG
8847             if (verbose)
8848             {
8849                 printf("impImportLeave - BB%02u jumping out of catch handler EH#%u, adding call to "
8850                        "CORINFO_HELP_ENDCATCH\n",
8851                        block->bbNum, XTnum);
8852             }
8853 #endif
8854         }
8855         else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8856                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8857         {
8858             /* This is a finally-protected try we are jumping out of */
8859
8860             /* If there are any pending endCatches, and we have already
8861                jumped out of a finally-protected try, then the endCatches
8862                have to be put in a block in an outer try for async
8863                exceptions to work correctly.
8864                Else, just append to the original block */
8865
8866             BasicBlock* callBlock;
8867
8868             assert(!encFinallies == !endLFin); // if we have finallies, we better have an endLFin tree, and vice-versa
8869
8870             if (encFinallies == 0)
8871             {
8872                 assert(step == DUMMY_INIT(NULL));
8873                 callBlock             = block;
8874                 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8875
8876                 if (endCatches)
8877                     impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8878
8879 #ifdef DEBUG
8880                 if (verbose)
8881                 {
8882                     printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY "
8883                            "block %s\n",
8884                            callBlock->dspToString());
8885                 }
8886 #endif
8887             }
8888             else
8889             {
8890                 assert(step != DUMMY_INIT(NULL));
8891
8892                 /* Calling the finally block */
8893                 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step);
8894                 assert(step->bbJumpKind == BBJ_ALWAYS);
8895                 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8896                                               // finally in the chain)
8897                 step->bbJumpDest->bbRefs++;
8898
8899                 /* The new block will inherit this block's weight */
8900                 callBlock->setBBWeight(block->bbWeight);
8901                 callBlock->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8902
8903 #ifdef DEBUG
8904                 if (verbose)
8905                 {
8906                     printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block %s\n",
8907                            callBlock->dspToString());
8908                 }
8909 #endif
8910
8911                 GenTree* lastStmt;
8912
8913                 if (endCatches)
8914                 {
8915                     lastStmt         = gtNewStmt(endCatches);
8916                     endLFin->gtNext  = lastStmt;
8917                     lastStmt->gtPrev = endLFin;
8918                 }
8919                 else
8920                 {
8921                     lastStmt = endLFin;
8922                 }
8923
8924                 // note that this sets BBF_IMPORTED on the block
8925                 impEndTreeList(callBlock, endLFin, lastStmt);
8926             }
8927
8928             step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8929             /* The new block will inherit this block's weight */
8930             step->setBBWeight(block->bbWeight);
8931             step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8932
8933 #ifdef DEBUG
8934             if (verbose)
8935             {
8936                 printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block %s\n",
8937                        step->dspToString());
8938             }
8939 #endif
8940
8941             unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel;
8942             assert(finallyNesting <= compHndBBtabCount);
8943
8944             callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8945             endLFin               = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting);
8946             endLFin               = gtNewStmt(endLFin);
8947             endCatches            = NULL;
8948
8949             encFinallies++;
8950
8951             invalidatePreds = true;
8952         }
8953     }
8954
8955     /* Append any remaining endCatches, if any */
8956
8957     assert(!encFinallies == !endLFin);
8958
8959     if (encFinallies == 0)
8960     {
8961         assert(step == DUMMY_INIT(NULL));
8962         block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8963
8964         if (endCatches)
8965             impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8966
8967 #ifdef DEBUG
8968         if (verbose)
8969         {
8970             printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS "
8971                    "block %s\n",
8972                    block->dspToString());
8973         }
8974 #endif
8975     }
8976     else
8977     {
8978         // If leaveTarget is the start of another try block, we want to make sure that
8979         // we do not insert finalStep into that try block. Hence, we find the enclosing
8980         // try block.
8981         unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget);
8982
8983         // Insert a new BB either in the try region indicated by tryIndex or
8984         // the handler region indicated by leaveTarget->bbHndIndex,
8985         // depending on which is the inner region.
8986         BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step);
8987         finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS;
8988         step->bbJumpDest = finalStep;
8989
8990         /* The new block will inherit this block's weight */
8991         finalStep->setBBWeight(block->bbWeight);
8992         finalStep->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8993
8994 #ifdef DEBUG
8995         if (verbose)
8996         {
8997             printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block %s\n", encFinallies,
8998                    finalStep->dspToString());
8999         }
9000 #endif
9001
9002         GenTree* lastStmt;
9003
9004         if (endCatches)
9005         {
9006             lastStmt         = gtNewStmt(endCatches);
9007             endLFin->gtNext  = lastStmt;
9008             lastStmt->gtPrev = endLFin;
9009         }
9010         else
9011         {
9012             lastStmt = endLFin;
9013         }
9014
9015         impEndTreeList(finalStep, endLFin, lastStmt);
9016
9017         finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
9018
9019         // Queue up the jump target for importing
9020
9021         impImportBlockPending(leaveTarget);
9022
9023         invalidatePreds = true;
9024     }
9025
9026     if (invalidatePreds && fgComputePredsDone)
9027     {
9028         JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
9029         fgRemovePreds();
9030     }
9031
9032 #ifdef DEBUG
9033     fgVerifyHandlerTab();
9034
9035     if (verbose)
9036     {
9037         printf("\nAfter import CEE_LEAVE:\n");
9038         fgDispBasicBlocks();
9039         fgDispHandlerTab();
9040     }
9041 #endif // DEBUG
9042 }
9043
9044 #else // FEATURE_EH_FUNCLETS
9045
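// Funclet EH model: finally and catch handlers are funclets, so leaving them
// is modeled with step blocks. Catch exits become BBJ_EHCATCHRET blocks and
// each enclosing finally gets a BBJ_CALLFINALLY/BBJ_ALWAYS pair; no ENDCATCH
// helper calls or GT_END_LFIN markers are needed.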
9046 void Compiler::impImportLeave(BasicBlock* block)
9047 {
9048 #ifdef DEBUG
9049     if (verbose)
9050     {
9051         printf("\nBefore import CEE_LEAVE in BB%02u (targeting BB%02u):\n", block->bbNum, block->bbJumpDest->bbNum);
9052         fgDispBasicBlocks();
9053         fgDispHandlerTab();
9054     }
9055 #endif // DEBUG
9056
9057     bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
9058     unsigned    blkAddr         = block->bbCodeOffs;
9059     BasicBlock* leaveTarget     = block->bbJumpDest;
9060     unsigned    jmpAddr         = leaveTarget->bbCodeOffs;
9061
9062     // LEAVE clears the stack: spill any side effects and set the stack depth to 0
9063
9064     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
9065     verCurrentState.esStackDepth = 0;
9066
9067     assert(block->bbJumpKind == BBJ_LEAVE);
9068     assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary
9069
9070     BasicBlock* step = nullptr;
9071
9072     enum StepType
9073     {
9074         // No step type; step == NULL.
9075         ST_None,
9076
9077         // Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair?
9078         // That is, is step->bbJumpDest where a finally will return to?
9079         ST_FinallyReturn,
9080
9081         // The step block is a catch return.
9082         ST_Catch,
9083
9084         // The step block is in a "try", created as the target for a finally return or the target for a catch return.
9085         ST_Try
9086     };
9087     StepType stepType = ST_None;
9088
9089     unsigned  XTnum;
9090     EHblkDsc* HBtab;
9091
9092     for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
9093     {
9094         // Grab the handler offsets
9095
9096         IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
9097         IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
9098         IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
9099         IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
9100
9101         /* Is this a catch-handler we are CEE_LEAVEing out of?
9102          */
9103
9104         if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
9105         {
9106             // Can't CEE_LEAVE out of a finally/fault handler
9107             if (HBtab->HasFinallyOrFaultHandler())
9108             {
9109                 BADCODE("leave out of fault/finally block");
9110             }
9111
9112             /* We are jumping out of a catch */
9113
9114             if (step == nullptr)
9115             {
9116                 step             = block;
9117                 step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET
9118                 stepType         = ST_Catch;
9119
9120 #ifdef DEBUG
9121                 if (verbose)
9122                 {
9123                     printf("impImportLeave - jumping out of a catch (EH#%u), convert block BB%02u to BBJ_EHCATCHRET "
9124                            "block\n",
9125                            XTnum, step->bbNum);
9126                 }
9127 #endif
9128             }
9129             else
9130             {
9131                 BasicBlock* exitBlock;
9132
9133                 /* Create a new catch exit block in the catch region for the existing step block to jump to in this
9134                  * scope */
9135                 exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step);
9136
9137                 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
9138                 step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch
9139                                               // exit) returns to this block
9140                 step->bbJumpDest->bbRefs++;
9141
9142 #if defined(_TARGET_ARM_)
9143                 if (stepType == ST_FinallyReturn)
9144                 {
9145                     assert(step->bbJumpKind == BBJ_ALWAYS);
9146                     // Mark the target of a finally return
9147                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9148                 }
9149 #endif // defined(_TARGET_ARM_)
9150
9151                 /* The new block will inherit this block's weight */
9152                 exitBlock->setBBWeight(block->bbWeight);
9153                 exitBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9154
9155                 /* This exit block is the new step */
9156                 step     = exitBlock;
9157                 stepType = ST_Catch;
9158
9159                 invalidatePreds = true;
9160
9161 #ifdef DEBUG
9162                 if (verbose)
9163                 {
9164                     printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block BB%02u\n", XTnum,
9165                            exitBlock->bbNum);
9166                 }
9167 #endif
9168             }
9169         }
9170         else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
9171                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
9172         {
9173             /* We are jumping out of a finally-protected try */
9174
9175             BasicBlock* callBlock;
9176
9177             if (step == nullptr)
9178             {
9179 #if FEATURE_EH_CALLFINALLY_THUNKS
9180
9181                 // Put the call to the finally in the enclosing region.
9182                 unsigned callFinallyTryIndex =
9183                     (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
9184                 unsigned callFinallyHndIndex =
9185                     (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
9186                 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block);
9187
9188                 // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because
9189                 // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE,
9190                 // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the
9191                 // next block, and flow optimizations will remove it.
9192                 block->bbJumpKind = BBJ_ALWAYS;
9193                 block->bbJumpDest = callBlock;
9194                 block->bbJumpDest->bbRefs++;
9195
9196                 /* The new block will inherit this block's weight */
9197                 callBlock->setBBWeight(block->bbWeight);
9198                 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9199
9200 #ifdef DEBUG
9201                 if (verbose)
9202                 {
9203                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
9204                            "BBJ_ALWAYS, add BBJ_CALLFINALLY block BB%02u\n",
9205                            XTnum, block->bbNum, callBlock->bbNum);
9206                 }
9207 #endif
9208
9209 #else // !FEATURE_EH_CALLFINALLY_THUNKS
9210
9211                 callBlock             = block;
9212                 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
9213
9214 #ifdef DEBUG
9215                 if (verbose)
9216                 {
9217                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
9218                            "BBJ_CALLFINALLY block\n",
9219                            XTnum, callBlock->bbNum);
9220                 }
9221 #endif
9222
9223 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
9224             }
9225             else
9226             {
9227                 // Calling the finally block. We already have a step block that is either the call-to-finally from a
9228                 // more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by
9229                 // a 'finally'), or the step block is the return from a catch.
9230                 //
9231                 // Due to ThreadAbortException, we can't have the catch return target the call-to-finally block
9232                 // directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will
9233                 // automatically re-raise the exception, using the return address of the catch (that is, the target
9234                 // block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will
9235                 // refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64,
9236                 // we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a
9237                 // finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a
9238                 // BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly
9239                 // within the 'try' region protected by the finally, since we generate code in such a way that execution
9240                 // never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on
9241                 // stack walks.)
9242
9243                 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
9244
9245 #if FEATURE_EH_CALLFINALLY_THUNKS
9246                 if (step->bbJumpKind == BBJ_EHCATCHRET)
9247                 {
9248                     // Need to create another step block in the 'try' region that will actually branch to the
9249                     // call-to-finally thunk.
9250                     BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
9251                     step->bbJumpDest  = step2;
9252                     step->bbJumpDest->bbRefs++;
9253                     step2->setBBWeight(block->bbWeight);
9254                     step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9255
9256 #ifdef DEBUG
9257                     if (verbose)
9258                     {
9259                         printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is "
9260                                "BBJ_EHCATCHRET (BB%02u), new BBJ_ALWAYS step-step block BB%02u\n",
9261                                XTnum, step->bbNum, step2->bbNum);
9262                     }
9263 #endif
9264
9265                     step = step2;
9266                     assert(stepType == ST_Catch); // Leave it as catch type for now.
9267                 }
9268 #endif // FEATURE_EH_CALLFINALLY_THUNKS
9269
9270 #if FEATURE_EH_CALLFINALLY_THUNKS
9271                 unsigned callFinallyTryIndex =
9272                     (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
9273                 unsigned callFinallyHndIndex =
9274                     (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
9275 #else  // !FEATURE_EH_CALLFINALLY_THUNKS
9276                 unsigned callFinallyTryIndex = XTnum + 1;
9277                 unsigned callFinallyHndIndex = 0; // don't care
9278 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
9279
9280                 callBlock        = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step);
9281                 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
9282                                               // finally in the chain)
9283                 step->bbJumpDest->bbRefs++;
9284
9285 #if defined(_TARGET_ARM_)
9286                 if (stepType == ST_FinallyReturn)
9287                 {
9288                     assert(step->bbJumpKind == BBJ_ALWAYS);
9289                     // Mark the target of a finally return
9290                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9291                 }
9292 #endif // defined(_TARGET_ARM_)
9293
9294                 /* The new block will inherit this block's weight */
9295                 callBlock->setBBWeight(block->bbWeight);
9296                 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9297
9298 #ifdef DEBUG
9299                 if (verbose)
9300                 {
9301                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY block "
9302                            "BB%02u\n",
9303                            XTnum, callBlock->bbNum);
9304                 }
9305 #endif
9306             }
9307
9308             step     = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
9309             stepType = ST_FinallyReturn;
9310
9311             /* The new block will inherit this block's weight */
9312             step->setBBWeight(block->bbWeight);
9313             step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
9314
9315 #ifdef DEBUG
9316             if (verbose)
9317             {
9318                 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) "
9319                        "block BB%02u\n",
9320                        XTnum, step->bbNum);
9321             }
9322 #endif
9323
9324             callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
9325
9326             invalidatePreds = true;
9327         }
9328         else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
9329                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
9330         {
9331             // We are jumping out of a catch-protected try.
9332             //
9333             // If we are returning from a call to a finally, then we must have a step block within a try
9334             // that is protected by a catch. This is so that, when unwinding from that finally (e.g., if code within the
9335             // finally raises an exception), the VM will find this step block, notice that it is in a protected region,
9336             // and invoke the appropriate catch.
9337             //
9338             // We also need to handle a special case with the handling of ThreadAbortException. If a try/catch
9339             // catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception),
9340             // and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM,
9341             // the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target
9342             // address of the catch return as the new exception address. That is, the re-raised exception appears to
9343             // occur at the catch return address. If this exception return address skips an enclosing try/catch that
9344             // catches ThreadAbortException, then the enclosing try/catch will not catch the exception, even though it should.
9345             // For example:
9346             //
9347             // try {
9348             //    try {
9349             //       // something here raises ThreadAbortException
9350             //       LEAVE LABEL_1; // no need to stop at LABEL_2
9351             //    } catch (Exception) {
9352             //       // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so
9353             //       // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode.
9354             //       // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised
9355             //       // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only
9356             //       // need to do this transformation if the current EH block is a try/catch that catches
9357             //       // ThreadAbortException (or one of its parents), however we might not be able to find that
9358             //       // information, so currently we do it for all catch types.
9359             //       LEAVE LABEL_1; // Convert this to LEAVE LABEL_2;
9360             //    }
9361             //    LABEL_2: LEAVE LABEL_1; // inserted by this step creation code
9362             // } catch (ThreadAbortException) {
9363             // }
9364             // LABEL_1:
9365             //
9366             // Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C#
9367             // compiler.
9368
9369             if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch))
9370             {
9371                 BasicBlock* catchStep;
9372
9373                 assert(step);
9374
9375                 if (stepType == ST_FinallyReturn)
9376                 {
9377                     assert(step->bbJumpKind == BBJ_ALWAYS);
9378                 }
9379                 else
9380                 {
9381                     assert(stepType == ST_Catch);
9382                     assert(step->bbJumpKind == BBJ_EHCATCHRET);
9383                 }
9384
9385                 /* Create a new exit block in the try region for the existing step block to jump to in this scope */
9386                 catchStep        = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
9387                 step->bbJumpDest = catchStep;
9388                 step->bbJumpDest->bbRefs++;
9389
9390 #if defined(_TARGET_ARM_)
9391                 if (stepType == ST_FinallyReturn)
9392                 {
9393                     // Mark the target of a finally return
9394                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9395                 }
9396 #endif // defined(_TARGET_ARM_)
9397
9398                 /* The new block will inherit this block's weight */
9399                 catchStep->setBBWeight(block->bbWeight);
9400                 catchStep->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9401
9402 #ifdef DEBUG
9403                 if (verbose)
9404                 {
9405                     if (stepType == ST_FinallyReturn)
9406                     {
9407                         printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new "
9408                                "BBJ_ALWAYS block BB%02u\n",
9409                                XTnum, catchStep->bbNum);
9410                     }
9411                     else
9412                     {
9413                         assert(stepType == ST_Catch);
9414                         printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new "
9415                                "BBJ_ALWAYS block BB%02u\n",
9416                                XTnum, catchStep->bbNum);
9417                     }
9418                 }
9419 #endif // DEBUG
9420
9421                 /* This block is the new step */
9422                 step     = catchStep;
9423                 stepType = ST_Try;
9424
9425                 invalidatePreds = true;
9426             }
9427         }
9428     }
9429
9430     if (step == nullptr)
9431     {
9432         block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
9433
9434 #ifdef DEBUG
9435         if (verbose)
9436         {
9437             printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE "
9438                    "block BB%02u to BBJ_ALWAYS\n",
9439                    block->bbNum);
9440         }
9441 #endif
9442     }
9443     else
9444     {
9445         step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
9446
9447 #if defined(_TARGET_ARM_)
9448         if (stepType == ST_FinallyReturn)
9449         {
9450             assert(step->bbJumpKind == BBJ_ALWAYS);
9451             // Mark the target of a finally return
9452             step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9453         }
9454 #endif // defined(_TARGET_ARM_)
9455
9456 #ifdef DEBUG
9457         if (verbose)
9458         {
9459             printf("impImportLeave - final destination of step blocks set to BB%02u\n", leaveTarget->bbNum);
9460         }
9461 #endif
9462
9463         // Queue up the jump target for importing
9464
9465         impImportBlockPending(leaveTarget);
9466     }
9467
9468     if (invalidatePreds && fgComputePredsDone)
9469     {
9470         JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
9471         fgRemovePreds();
9472     }
9473
9474 #ifdef DEBUG
9475     fgVerifyHandlerTab();
9476
9477     if (verbose)
9478     {
9479         printf("\nAfter import CEE_LEAVE:\n");
9480         fgDispBasicBlocks();
9481         fgDispHandlerTab();
9482     }
9483 #endif // DEBUG
9484 }
9485
9486 #endif // FEATURE_EH_FUNCLETS
9487
9488 /*****************************************************************************/
9489 // This is called when reimporting a leave block. It resets the JumpKind,
9490 // JumpDest, and bbNext to the original values
9491
9492 void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr)
9493 {
9494 #if FEATURE_EH_FUNCLETS
9495     // With EH Funclets, while importing a leave opcode we create another block ending with BBJ_ALWAYS (call it B1)
9496     // and the block containing the leave (say B0) is marked as BBJ_CALLFINALLY.   Say for some reason we reimport B0:
9497     // it is reset (in this routine) by marking it as ending with BBJ_LEAVE, and further down, when B0 is reimported, we
9498     // create another BBJ_ALWAYS (call it B2). In this process B1 gets orphaned, and any blocks to which B1 is the
9499     // only predecessor are also considered orphans and become candidates for deletion.
9500     //
9501     //  try  {
9502     //     ....
9503     //     try
9504     //     {
9505     //         ....
9506     //         leave OUTSIDE;  // B0 is the block containing this leave, following this would be B1
9507     //     } finally { }
9508     //  } finally { }
9509     //  OUTSIDE:
9510     //
9511     // In the above nested try-finally example, we create a step block (call it Bstep) which branches to a block
9512     // that the finally would branch to (and such a block is marked as a finally target).  Block B1 branches to the step
9513     // block. Because of the re-import of B0, Bstep is also orphaned. Since Bstep is a finally target it cannot be removed.  To
9514     // work around this we will duplicate B0 (call it B0Dup) before resetting. B0Dup is marked as BBJ_CALLFINALLY and
9515     // only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block deletion B0Dup and B1
9516     // will be treated as a pair and handled correctly.
9517     if (block->bbJumpKind == BBJ_CALLFINALLY)
9518     {
9519         BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind);
9520         dupBlock->bbFlags    = block->bbFlags;
9521         dupBlock->bbJumpDest = block->bbJumpDest;
9522         dupBlock->copyEHRegion(block);
9523         dupBlock->bbCatchTyp = block->bbCatchTyp;
9524
9525         // Mark this block as:
9526         //  a) not referenced by any other block, to make sure that it gets deleted
9527         //  b) weight zero
9528         //  c) prevented from being imported
9529         //  d) internal
9530         //  e) rarely run
9531         dupBlock->bbRefs   = 0;
9532         dupBlock->bbWeight = 0;
9533         dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY;
9534
9535         // Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS
9536         // will be next to each other.
9537         fgInsertBBafter(block, dupBlock);
9538
9539 #ifdef DEBUG
9540         if (verbose)
9541         {
9542             printf("New Basic Block BB%02u duplicate of BB%02u created.\n", dupBlock->bbNum, block->bbNum);
9543         }
9544 #endif
9545     }
9546 #endif // FEATURE_EH_FUNCLETS
9547
9548     block->bbJumpKind = BBJ_LEAVE;
9549     fgInitBBLookup();
9550     block->bbJumpDest = fgLookupBB(jmpAddr);
9551
9552     // We will leave the BBJ_ALWAYS block we introduced in place. When it's reimported
9553     // the BBJ_ALWAYS block will be unreachable, and will be removed afterwards. The
9554     // reason we don't want to remove the block at this point is that if we call
9555     // fgInitBBLookup() again we will do it wrong, as the BBJ_ALWAYS block won't be
9556     // added and the linked-list length will be different from fgBBcount.
9557 }
9558
9559 /*****************************************************************************/
9560 // Get the first non-prefix opcode. Used for verification of valid combinations
9561 // of prefixes and actual opcodes.
9562
9563 static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp)
9564 {
9565     while (codeAddr < codeEndp)
9566     {
9567         OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
9568         codeAddr += sizeof(__int8);
9569
9570         if (opcode == CEE_PREFIX1)
9571         {
9572             if (codeAddr >= codeEndp)
9573             {
9574                 break;
9575             }
9576             opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
9577             codeAddr += sizeof(__int8);
9578         }
9579
9580         switch (opcode)
9581         {
9582             case CEE_UNALIGNED:
9583             case CEE_VOLATILE:
9584             case CEE_TAILCALL:
9585             case CEE_CONSTRAINED:
9586             case CEE_READONLY:
9587                 break;
9588             default:
9589                 return opcode;
9590         }
9591
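        // Skip over this prefix's operand bytes (if any) and keep scanning for the actual opcode.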
9592         codeAddr += opcodeSizes[opcode];
9593     }
9594
9595     return CEE_ILLEGAL;
9596 }
9597
9598 /*****************************************************************************/
9599 // Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes
9600
9601 static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix)
9602 {
9603     OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
9604
9605     if (!(
9606             // The opcodes of all the ldind and stind instructions happen to be contiguous, except for stind.i.
9607             ((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) ||
9608             (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) ||
9609             (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) ||
9610             // the volatile. prefix is also allowed with ldsfld and stsfld
9611             (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD)))))
9612     {
9613         BADCODE("Invalid opcode for unaligned. or volatile. prefix");
9614     }
9615 }
9616
9617 /*****************************************************************************/
9618
9619 #ifdef DEBUG
9620
9621 #undef RETURN // undefine the contracts RETURN macro
9622
9623 enum controlFlow_t
9624 {
9625     NEXT,
9626     CALL,
9627     RETURN,
9628     THROW,
9629     BRANCH,
9630     COND_BRANCH,
9631     BREAK,
9632     PHI,
9633     META,
9634 };
9635
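// Control-flow classification for every IL opcode, generated from the flow column of opcode.def
// (DEBUG builds only).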
9636 const static controlFlow_t controlFlow[] = {
9637 #define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow,
9638 #include "opcode.def"
9639 #undef OPDEF
9640 };
9641
9642 #endif // DEBUG
9643
9644 /*****************************************************************************
9645  *  Determine the result type of an arithmetic operation
9646  *  On 64-bit targets, inserts upcasts when native int is mixed with int32
9647  */
9648 var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTree** pOp1, GenTree** pOp2)
9649 {
9650     var_types type = TYP_UNDEF;
9651     GenTree*  op1  = *pOp1;
9652     GenTree*  op2  = *pOp2;
9653
9654     // Arithmetic operations are generally only allowed with
9655     // primitive types, but certain operations are allowed
9656     // with byrefs.
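    //
    // In summary, the cases handled below are:
    //   byref - byref            => native int
    //   byref +/- [native] int   => byref
    //   [native] int - byref     => native int
    //   everything else          => the usual numeric widening rules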
9657
9658     if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9659     {
9660         if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9661         {
9662             // byref1-byref2 => gives a native int
9663             type = TYP_I_IMPL;
9664         }
9665         else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9666         {
9667             // [native] int - byref => gives a native int
9668
9669             //
9670             // The reason is that it is possible, in managed C++,
9671             // to have a tree like this:
9672             //
9673             //              -
9674             //             / \
9675             //            /   \
9676             //           /     \
9677             //          /       \
9678             // const(h) int     addr byref
9679             //
9680             // <BUGNUM> VSW 318822 </BUGNUM>
9681             //
9682             // So here we decide to make the resulting type to be a native int.
9683             CLANG_FORMAT_COMMENT_ANCHOR;
9684
9685 #ifdef _TARGET_64BIT_
9686             if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9687             {
9688                 // insert an explicit upcast
9689                 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9690             }
9691 #endif // _TARGET_64BIT_
9692
9693             type = TYP_I_IMPL;
9694         }
9695         else
9696         {
9697             // byref - [native] int => gives a byref
9698             assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet()));
9699
9700 #ifdef _TARGET_64BIT_
9701             if ((genActualType(op2->TypeGet()) != TYP_I_IMPL))
9702             {
9703                 // insert an explicit upcast
9704                 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9705             }
9706 #endif // _TARGET_64BIT_
9707
9708             type = TYP_BYREF;
9709         }
9710     }
9711     else if ((oper == GT_ADD) &&
9712              (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9713     {
9714         // byref + [native] int => gives a byref
9715         // (or)
9716         // [native] int + byref => gives a byref
9717
9718         // only one can be a byref : byref op byref not allowed
9719         assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF);
9720         assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet()));
9721
9722 #ifdef _TARGET_64BIT_
9723         if (genActualType(op2->TypeGet()) == TYP_BYREF)
9724         {
9725             if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9726             {
9727                 // insert an explicit upcast
9728                 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9729             }
9730         }
9731         else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9732         {
9733             // insert an explicit upcast
9734             op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9735         }
9736 #endif // _TARGET_64BIT_
9737
9738         type = TYP_BYREF;
9739     }
9740 #ifdef _TARGET_64BIT_
9741     else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL)
9742     {
9743         assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9744
9745         // int + native int => gives native int
9746         // native int + int => gives native int
9747         // we get this because in the IL the operand isn't Int64, it's just native int (IntPtr)
9748
9749         if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9750         {
9751             // insert an explicit upcast
9752             op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9753         }
9754         else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9755         {
9756             // insert an explicit upcast
9757             op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9758         }
9759
9760         type = TYP_I_IMPL;
9761     }
9762 #else  // 32-bit TARGET
9763     else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG)
9764     {
9765         assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9766
9767         // int + long => gives long
9768         // long + int => gives long
9769
9770         type = TYP_LONG;
9771     }
9772 #endif // _TARGET_64BIT_
9773     else
9774     {
9775         // int + int => gives an int
9776         assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF);
9777
9778         assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
9779                varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
9780
9781         type = genActualType(op1->gtType);
9782
9783 #if FEATURE_X87_DOUBLES
9784
9785         // For x87, since we only have 1 size of registers, prefer double
9786         // For everybody else, be more precise
9787         if (type == TYP_FLOAT)
9788             type = TYP_DOUBLE;
9789
9790 #else // !FEATURE_X87_DOUBLES
9791
9792         // If both operands are TYP_FLOAT, then leave it as TYP_FLOAT.
9793         // Otherwise, turn floats into doubles
9794         if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT))
9795         {
9796             assert(genActualType(op2->gtType) == TYP_DOUBLE);
9797             type = TYP_DOUBLE;
9798         }
9799
9800 #endif // FEATURE_X87_DOUBLES
9801     }
9802
9803 #if FEATURE_X87_DOUBLES
9804     assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_LONG || type == TYP_INT);
9805 #else  // FEATURE_X87_DOUBLES
9806     assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT);
9807 #endif // FEATURE_X87_DOUBLES
9808
9809     return type;
9810 }
9811
9812 //------------------------------------------------------------------------
9813 // impOptimizeCastClassOrIsInst: attempt to resolve a cast when jitting
9814 //
9815 // Arguments:
9816 //   op1 - value to cast
9817 //   pResolvedToken - resolved token for type to cast to
9818 //   isCastClass - true if this is a castclass, false if isinst
9819 //
9820 // Return Value:
9821 //   tree representing optimized cast, or null if no optimization possible
9822
9823 GenTree* Compiler::impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass)
9824 {
9825     assert(op1->TypeGet() == TYP_REF);
9826
9827     // Don't optimize for minopts or debug codegen.
9828     if (opts.compDbgCode || opts.MinOpts())
9829     {
9830         return nullptr;
9831     }
9832
9833     // See what we know about the type of the object being cast.
9834     bool                 isExact   = false;
9835     bool                 isNonNull = false;
9836     CORINFO_CLASS_HANDLE fromClass = gtGetClassHandle(op1, &isExact, &isNonNull);
9837     GenTree*             optResult = nullptr;
9838
9839     if (fromClass != nullptr)
9840     {
9841         CORINFO_CLASS_HANDLE toClass = pResolvedToken->hClass;
9842         JITDUMP("\nConsidering optimization of %s from %s%p (%s) to %p (%s)\n", isCastClass ? "castclass" : "isinst",
9843                 isExact ? "exact " : "", dspPtr(fromClass), info.compCompHnd->getClassName(fromClass), dspPtr(toClass),
9844                 info.compCompHnd->getClassName(toClass));
9845
9846         // Perhaps we know if the cast will succeed or fail.
9847         TypeCompareState castResult = info.compCompHnd->compareTypesForCast(fromClass, toClass);
9848
9849         if (castResult == TypeCompareState::Must)
9850         {
9851             // Cast will succeed, result is simply op1.
9852             JITDUMP("Cast will succeed, optimizing to simply return input\n");
9853             return op1;
9854         }
9855         else if (castResult == TypeCompareState::MustNot)
9856         {
9857             // See if we can sharpen exactness by looking for final classes
9858             if (!isExact)
9859             {
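                // Treat fromClass as exact if it is final and has none of the flags that complicate
                // type identity (marshal-byref, contextful, variance, array).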
9860                 DWORD flags     = info.compCompHnd->getClassAttribs(fromClass);
9861                 DWORD flagsMask = CORINFO_FLG_FINAL | CORINFO_FLG_MARSHAL_BYREF | CORINFO_FLG_CONTEXTFUL |
9862                                   CORINFO_FLG_VARIANCE | CORINFO_FLG_ARRAY;
9863                 isExact = ((flags & flagsMask) == CORINFO_FLG_FINAL);
9864             }
9865
9866             // Cast to exact type will fail. Handle case where we have
9867             // an exact type (that is, fromClass is not a subtype)
9868             // and we're not going to throw on failure.
9869             if (isExact && !isCastClass)
9870             {
9871                 JITDUMP("Cast will fail, optimizing to return null\n");
9872                 GenTree* result = gtNewIconNode(0, TYP_REF);
9873
9874                 // If the cast was fed by a box, we can remove that too.
9875                 if (op1->IsBoxedValue())
9876                 {
9877                     JITDUMP("Also removing upstream box\n");
9878                     gtTryRemoveBoxUpstreamEffects(op1);
9879                 }
9880
9881                 return result;
9882             }
9883             else if (isExact)
9884             {
9885                 JITDUMP("Not optimizing failing castclass (yet)\n");
9886             }
9887             else
9888             {
9889                 JITDUMP("Can't optimize since fromClass is inexact\n");
9890             }
9891         }
9892         else
9893         {
9894             JITDUMP("Result of cast unknown, must generate runtime test\n");
9895         }
9896     }
9897     else
9898     {
9899         JITDUMP("\nCan't optimize since fromClass is unknown\n");
9900     }
9901
9902     return nullptr;
9903 }
9904
9905 //------------------------------------------------------------------------
9906 // impCastClassOrIsInstToTree: build and import castclass/isinst
9907 //
9908 // Arguments:
9909 //   op1 - value to cast
9910 //   op2 - type handle for type to cast to
9911 //   pResolvedToken - resolved token from the cast operation
9912 //   isCastClass - true if this is castclass, false means isinst
9913 //
9914 // Return Value:
9915 //   Tree representing the cast
9916 //
9917 // Notes:
9918 //   May expand into a series of runtime checks or a helper call.
9919
9920 GenTree* Compiler::impCastClassOrIsInstToTree(GenTree*                op1,
9921                                               GenTree*                op2,
9922                                               CORINFO_RESOLVED_TOKEN* pResolvedToken,
9923                                               bool                    isCastClass)
9924 {
9925     assert(op1->TypeGet() == TYP_REF);
9926
9927     // Optimistically assume the jit should expand this as an inline test
9928     bool shouldExpandInline = true;
9929
9930     // Profitability check.
9931     //
9932     // Don't bother with inline expansion when jit is trying to
9933     // generate code quickly, or the cast is in code that won't run very
9934     // often, or the method already is pretty big.
9935     if (compCurBB->isRunRarely() || opts.compDbgCode || opts.MinOpts())
9936     {
9937         // not worth the code expansion if jitting fast or in a rarely run block
9938         shouldExpandInline = false;
9939     }
9940     else if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals())
9941     {
9942         // not worth creating an untracked local variable
9943         shouldExpandInline = false;
9944     }
9945
9946     // Pessimistically assume the jit cannot expand this as an inline test
9947     bool                  canExpandInline = false;
9948     const CorInfoHelpFunc helper          = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass);
9949
9950     // Legality check.
9951     //
9952     // Not all castclass/isinst operations can be inline expanded.
9953     // Check legality only if an inline expansion is desirable.
9954     if (shouldExpandInline)
9955     {
9956         if (isCastClass)
9957         {
9958             // Jit can only inline expand the normal CHKCASTCLASS helper.
9959             canExpandInline = (helper == CORINFO_HELP_CHKCASTCLASS);
9960         }
9961         else
9962         {
9963             if (helper == CORINFO_HELP_ISINSTANCEOFCLASS)
9964             {
9965                 // Check the class attributes.
9966                 DWORD flags = info.compCompHnd->getClassAttribs(pResolvedToken->hClass);
9967
9968                 // If the class is final and is not marshal byref or
9969                 // contextful, the jit can expand the IsInst check inline.
9970                 DWORD flagsMask = CORINFO_FLG_FINAL | CORINFO_FLG_MARSHAL_BYREF | CORINFO_FLG_CONTEXTFUL;
9971                 canExpandInline = ((flags & flagsMask) == CORINFO_FLG_FINAL);
9972             }
9973         }
9974     }
9975
9976     const bool expandInline = canExpandInline && shouldExpandInline;
9977
9978     if (!expandInline)
9979     {
9980         JITDUMP("\nExpanding %s as call because %s\n", isCastClass ? "castclass" : "isinst",
9981                 canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal");
9982
9983         // If we CSE this class handle we prevent assertionProp from making SubType assertions
9984         // so instead we force the CSE logic to not consider CSE-ing this class handle.
9985         //
9986         op2->gtFlags |= GTF_DONT_CSE;
9987
9988         return gtNewHelperCallNode(helper, TYP_REF, gtNewArgList(op2, op1));
9989     }
9990
9991     JITDUMP("\nExpanding %s inline\n", isCastClass ? "castclass" : "isinst");
9992
9993     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2"));
9994
9995     GenTree* temp;
9996     GenTree* condMT;
9997     //
9998     // expand the methodtable match:
9999     //
10000     //  condMT ==>   GT_NE
10001     //               /    \
10002     //           GT_IND   op2 (typically CNS_INT)
10003     //              |
10004     //           op1Copy
10005     //
10006
10007     // This can replace op1 with a GT_COMMA that evaluates op1 into a local
10008     //
10009     op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1"));
10010     //
10011     // op1 is now known to be a non-complex tree
10012     // thus we can use gtClone(op1) from now on
10013     //
10014
10015     GenTree* op2Var = op2;
10016     if (isCastClass)
10017     {
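        // Evaluate the class handle (op2) into a temp so that the fallback helper call below can
        // reuse it without re-evaluating op2.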
10018         op2Var                                                  = fgInsertCommaFormTemp(&op2);
10019         lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true;
10020     }
10021     temp = gtNewOperNode(GT_IND, TYP_I_IMPL, temp);
10022     temp->gtFlags |= GTF_EXCEPT;
10023     condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2);
10024
10025     GenTree* condNull;
10026     //
10027     // expand the null check:
10028     //
10029     //  condNull ==>   GT_EQ
10030     //                 /    \
10031     //             op1Copy CNS_INT
10032     //                      null
10033     //
10034     condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF));
10035
10036     //
10037     // expand the true and false trees for the condMT
10038     //
10039     GenTree* condFalse = gtClone(op1);
10040     GenTree* condTrue;
10041     if (isCastClass)
10042     {
10043         //
10044         // use the special helper that skips the cases checked by our inlined cast
10045         //
10046         const CorInfoHelpFunc specialHelper = CORINFO_HELP_CHKCASTCLASS_SPECIAL;
10047
10048         condTrue = gtNewHelperCallNode(specialHelper, TYP_REF, gtNewArgList(op2Var, gtClone(op1)));
10049     }
10050     else
10051     {
10052         condTrue = gtNewIconNode(0, TYP_REF);
10053     }
10054
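    // Taken together, the two QMARK trees built below compose to (informally):
    //   (op1 == null) ? op1
    //                 : (op1->methodTable != op2) ? (isCastClass ? CHKCASTCLASS_SPECIAL(op2, op1) : null)
    //                                             : op1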
10055 #define USE_QMARK_TREES
10056
10057 #ifdef USE_QMARK_TREES
10058     GenTree* qmarkMT;
10059     //
10060     // Generate first QMARK - COLON tree
10061     //
10062     //  qmarkMT ==>   GT_QMARK
10063     //                 /     \
10064     //            condMT   GT_COLON
10065     //                      /     \
10066     //                condFalse  condTrue
10067     //
10068     temp    = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse);
10069     qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp);
10070     condMT->gtFlags |= GTF_RELOP_QMARK;
10071
10072     GenTree* qmarkNull;
10073     //
10074     // Generate second QMARK - COLON tree
10075     //
10076     //  qmarkNull ==>  GT_QMARK
10077     //                 /     \
10078     //           condNull  GT_COLON
10079     //                      /     \
10080     //                qmarkMT   op1Copy
10081     //
10082     temp      = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT);
10083     qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp);
10084     qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF;
10085     condNull->gtFlags |= GTF_RELOP_QMARK;
10086
10087     // Make QMark node a top level node by spilling it.
10088     unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2"));
10089     impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE);
10090
10091     // TODO: Is it possible op1 has a better type?
10092     lvaSetClass(tmp, pResolvedToken->hClass);
10093     return gtNewLclvNode(tmp, TYP_REF);
10094 #endif
10095 }
10096
10097 #ifndef DEBUG
10098 #define assertImp(cond) ((void)0)
10099 #else
10100 #define assertImp(cond)                                                                                                \
10101     do                                                                                                                 \
10102     {                                                                                                                  \
10103         if (!(cond))                                                                                                   \
10104         {                                                                                                              \
10105             const int cchAssertImpBuf = 600;                                                                           \
10106             char*     assertImpBuf    = (char*)alloca(cchAssertImpBuf);                                                \
10107             _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1,                                            \
10108                         "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond,         \
10109                         impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL",                      \
10110                         op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth);                     \
10111             assertAbort(assertImpBuf, __FILE__, __LINE__);                                                             \
10112         }                                                                                                              \
10113     } while (0)
10114 #endif // DEBUG
10115
10116 #ifdef _PREFAST_
10117 #pragma warning(push)
10118 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
10119 #endif
10120 /*****************************************************************************
10121  *  Import the instr for the given basic block
10122  */
10123 void Compiler::impImportBlockCode(BasicBlock* block)
10124 {
10125 #define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind)
10126
10127 #ifdef DEBUG
10128
10129     if (verbose)
10130     {
10131         printf("\nImporting BB%02u (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName);
10132     }
10133 #endif
10134
10135     unsigned  nxtStmtIndex = impInitBlockLineInfo();
10136     IL_OFFSET nxtStmtOffs;
10137
10138     GenTree*                     arrayNodeFrom;
10139     GenTree*                     arrayNodeTo;
10140     GenTree*                     arrayNodeToIndex;
10141     CorInfoHelpFunc              helper;
10142     CorInfoIsAccessAllowedResult accessAllowedResult;
10143     CORINFO_HELPER_DESC          calloutHelper;
10144     const BYTE*                  lastLoadToken = nullptr;
10145
10146     // reject cyclic constraints
10147     if (tiVerificationNeeded)
10148     {
10149         Verify(!info.hasCircularClassConstraints, "Method parent has circular class type parameter constraints.");
10150         Verify(!info.hasCircularMethodConstraints, "Method has circular method type parameter constraints.");
10151     }
10152
10153     /* Get the tree list started */
10154
10155     impBeginTreeList();
10156
10157     /* Walk the opcodes that comprise the basic block */
10158
10159     const BYTE* codeAddr = info.compCode + block->bbCodeOffs;
10160     const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd;
10161
10162     IL_OFFSET opcodeOffs    = block->bbCodeOffs;
10163     IL_OFFSET lastSpillOffs = opcodeOffs;
10164
10165     signed jmpDist;
10166
10167     /* remember the start of the delegate creation sequence (used for verification) */
10168     const BYTE* delegateCreateStart = nullptr;
10169
10170     int  prefixFlags = 0;
10171     bool explicitTailCall, constraintCall, readonlyCall;
10172
10173     typeInfo tiRetVal;
10174
10175     unsigned numArgs = info.compArgsCount;
10176
10177     /* Now process all the opcodes in the block */
10178
10179     var_types callTyp    = TYP_COUNT;
10180     OPCODE    prevOpcode = CEE_ILLEGAL;
10181
10182     if (block->bbCatchTyp)
10183     {
10184         if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
10185         {
10186             impCurStmtOffsSet(block->bbCodeOffs);
10187         }
10188
10189         // We will spill the GT_CATCH_ARG and the input of the BB_QMARK block
10190         // to a temp. This is a trade-off for code simplicity.
10191         impSpillSpecialSideEff();
10192     }
10193
10194     while (codeAddr < codeEndp)
10195     {
10196         bool                   usingReadyToRunHelper = false;
10197         CORINFO_RESOLVED_TOKEN resolvedToken;
10198         CORINFO_RESOLVED_TOKEN constrainedResolvedToken;
10199         CORINFO_CALL_INFO      callInfo;
10200         CORINFO_FIELD_INFO     fieldInfo;
10201
10202         tiRetVal = typeInfo(); // Default type info
10203
10204         //---------------------------------------------------------------------
10205
10206         /* We need to restrict the max tree depth as many of the Compiler
10207            functions are recursive. We do this by spilling the stack */
10208
10209         if (verCurrentState.esStackDepth)
10210         {
10211             /* Has it been a while since we last saw a non-empty stack (which
10212                guarantees that the tree depth isn't accumulating)? */
10213
10214             if ((opcodeOffs - lastSpillOffs) > MAX_TREE_SIZE && impCanSpillNow(prevOpcode))
10215             {
10216                 impSpillStackEnsure();
10217                 lastSpillOffs = opcodeOffs;
10218             }
10219         }
10220         else
10221         {
10222             lastSpillOffs   = opcodeOffs;
10223             impBoxTempInUse = false; // nothing on the stack, box temp OK to use again
10224         }
10225
10226         /* Compute the current instr offset */
10227
10228         opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
10229
10230 #ifndef DEBUG
10231         if (opts.compDbgInfo)
10232 #endif
10233         {
10234             if (!compIsForInlining())
10235             {
10236                 nxtStmtOffs =
10237                     (nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET;
10238
10239                 /* Have we reached the next stmt boundary ? */
10240
10241                 if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs)
10242                 {
10243                     assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]);
10244
10245                     if (verCurrentState.esStackDepth != 0 && opts.compDbgCode)
10246                     {
10247                         /* We need to provide accurate IP-mapping at this point.
10248                            So spill anything on the stack so that it will form
10249                            gtStmts with the correct stmt offset noted */
10250
10251                         impSpillStackEnsure(true);
10252                     }
10253
10254                     // Has impCurStmtOffs been reported in any tree?
10255
10256                     if (impCurStmtOffs != BAD_IL_OFFSET && opts.compDbgCode)
10257                     {
10258                         GenTree* placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
10259                         impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
10260
10261                         assert(impCurStmtOffs == BAD_IL_OFFSET);
10262                     }
10263
10264                     if (impCurStmtOffs == BAD_IL_OFFSET)
10265                     {
10266                         /* Make sure that nxtStmtIndex is in sync with opcodeOffs.
10267                            If opcodeOffs has gone past nxtStmtIndex, catch up */
10268
10269                         while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount &&
10270                                info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs)
10271                         {
10272                             nxtStmtIndex++;
10273                         }
10274
10275                         /* Go to the new stmt */
10276
10277                         impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]);
10278
10279                         /* Update the stmt boundary index */
10280
10281                         nxtStmtIndex++;
10282                         assert(nxtStmtIndex <= info.compStmtOffsetsCount);
10283
10284                         /* Are there any more line# entries after this one? */
10285
10286                         if (nxtStmtIndex < info.compStmtOffsetsCount)
10287                         {
10288                             /* Remember where the next line# starts */
10289
10290                             nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex];
10291                         }
10292                         else
10293                         {
10294                             /* No more line# entries */
10295
10296                             nxtStmtOffs = BAD_IL_OFFSET;
10297                         }
10298                     }
10299                 }
10300                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) &&
10301                          (verCurrentState.esStackDepth == 0))
10302                 {
10303                     /* At stack-empty locations, we have already added the tree to
10304                        the stmt list with the last offset. We just need to update
10305                        impCurStmtOffs
10306                      */
10307
10308                     impCurStmtOffsSet(opcodeOffs);
10309                 }
10310                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) &&
10311                          impOpcodeIsCallSiteBoundary(prevOpcode))
10312                 {
10313                     /* Make sure we have a type cached */
10314                     assert(callTyp != TYP_COUNT);
10315
10316                     if (callTyp == TYP_VOID)
10317                     {
10318                         impCurStmtOffsSet(opcodeOffs);
10319                     }
10320                     else if (opts.compDbgCode)
10321                     {
10322                         impSpillStackEnsure(true);
10323                         impCurStmtOffsSet(opcodeOffs);
10324                     }
10325                 }
10326                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP))
10327                 {
10328                     if (opts.compDbgCode)
10329                     {
10330                         impSpillStackEnsure(true);
10331                     }
10332
10333                     impCurStmtOffsSet(opcodeOffs);
10334                 }
10335
10336                 assert(impCurStmtOffs == BAD_IL_OFFSET || nxtStmtOffs == BAD_IL_OFFSET ||
10337                        jitGetILoffs(impCurStmtOffs) <= nxtStmtOffs);
10338             }
10339         }
10340
10341         CORINFO_CLASS_HANDLE clsHnd       = DUMMY_INIT(NULL);
10342         CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL);
10343         CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL);
10344
10345         var_types       lclTyp, ovflType = TYP_UNKNOWN;
10346         GenTree*        op1           = DUMMY_INIT(NULL);
10347         GenTree*        op2           = DUMMY_INIT(NULL);
10348         GenTreeArgList* args          = nullptr; // What good do these "DUMMY_INIT"s do?
10349         GenTree*        newObjThisPtr = DUMMY_INIT(NULL);
10350         bool            uns           = DUMMY_INIT(false);
10351         bool            isLocal       = false;
10352
10353         /* Get the next opcode and the size of its parameters */
10354
10355         OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
10356         codeAddr += sizeof(__int8);
10357
10358 #ifdef DEBUG
10359         impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
10360         JITDUMP("\n    [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs);
10361 #endif
10362
10363     DECODE_OPCODE:
10364
10365         // Return if any previous code has caused inline to fail.
10366         if (compDonotInline())
10367         {
10368             return;
10369         }
10370
10371         /* Get the size of additional parameters */
10372
10373         signed int sz = opcodeSizes[opcode];
10374
10375 #ifdef DEBUG
10376         clsHnd  = NO_CLASS_HANDLE;
10377         lclTyp  = TYP_COUNT;
10378         callTyp = TYP_COUNT;
10379
10380         impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
10381         impCurOpcName = opcodeNames[opcode];
10382
10383         if (verbose && (opcode != CEE_PREFIX1))
10384         {
10385             printf("%s", impCurOpcName);
10386         }
10387
10388         /* Use assertImp() to display the opcode */
10389
10390         op1 = op2 = nullptr;
10391 #endif
10392
10393         /* See what kind of an opcode we have, then */
10394
10395         unsigned mflags   = 0;
10396         unsigned clsFlags = 0;
10397
10398         switch (opcode)
10399         {
10400             unsigned  lclNum;
10401             var_types type;
10402
10403             GenTree*   op3;
10404             genTreeOps oper;
10405             unsigned   size;
10406
10407             int val;
10408
10409             CORINFO_SIG_INFO     sig;
10410             IL_OFFSET            jmpAddr;
10411             bool                 ovfl, unordered, callNode;
10412             bool                 ldstruct;
10413             CORINFO_CLASS_HANDLE tokenType;
10414
10415             union {
10416                 int     intVal;
10417                 float   fltVal;
10418                 __int64 lngVal;
10419                 double  dblVal;
10420             } cval;
10421
10422             case CEE_PREFIX1:
10423                 opcode     = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
10424                 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
10425                 codeAddr += sizeof(__int8);
10426                 goto DECODE_OPCODE;
10427
10428             SPILL_APPEND:
10429
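                // SPILL_APPEND appends 'op1' after spilling the entire evaluation stack (CHECK_SPILL_ALL);
                // the APPEND label below appends without spilling (CHECK_SPILL_NONE).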
10430                 // We need to call impSpillLclRefs() for a struct type lclVar.
10431                 // This is done for non-block assignments in the handling of stloc.
10432                 if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtOp.gtOp1) &&
10433                     (op1->gtOp.gtOp1->gtOper == GT_LCL_VAR))
10434                 {
10435                     impSpillLclRefs(op1->gtOp.gtOp1->AsLclVarCommon()->gtLclNum);
10436                 }
10437
10438                 /* Append 'op1' to the list of statements */
10439                 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
10440                 goto DONE_APPEND;
10441
10442             APPEND:
10443
10444                 /* Append 'op1' to the list of statements */
10445
10446                 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
10447                 goto DONE_APPEND;
10448
10449             DONE_APPEND:
10450
10451 #ifdef DEBUG
10452                 // Remember at which BC offset the tree was finished
10453                 impNoteLastILoffs();
10454 #endif
10455                 break;
10456
10457             case CEE_LDNULL:
10458                 impPushNullObjRefOnStack();
10459                 break;
10460
10461             case CEE_LDC_I4_M1:
10462             case CEE_LDC_I4_0:
10463             case CEE_LDC_I4_1:
10464             case CEE_LDC_I4_2:
10465             case CEE_LDC_I4_3:
10466             case CEE_LDC_I4_4:
10467             case CEE_LDC_I4_5:
10468             case CEE_LDC_I4_6:
10469             case CEE_LDC_I4_7:
10470             case CEE_LDC_I4_8:
10471                 cval.intVal = (opcode - CEE_LDC_I4_0);
10472                 assert(-1 <= cval.intVal && cval.intVal <= 8);
10473                 goto PUSH_I4CON;
10474
10475             case CEE_LDC_I4_S:
10476                 cval.intVal = getI1LittleEndian(codeAddr);
10477                 goto PUSH_I4CON;
10478             case CEE_LDC_I4:
10479                 cval.intVal = getI4LittleEndian(codeAddr);
10480                 goto PUSH_I4CON;
10481             PUSH_I4CON:
10482                 JITDUMP(" %d", cval.intVal);
10483                 impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT));
10484                 break;
10485
10486             case CEE_LDC_I8:
10487                 cval.lngVal = getI8LittleEndian(codeAddr);
10488                 JITDUMP(" 0x%016llx", cval.lngVal);
10489                 impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG));
10490                 break;
10491
10492             case CEE_LDC_R8:
10493                 cval.dblVal = getR8LittleEndian(codeAddr);
10494                 JITDUMP(" %#.17g", cval.dblVal);
10495                 impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE));
10496                 break;
10497
10498             case CEE_LDC_R4:
10499                 cval.dblVal = getR4LittleEndian(codeAddr);
10500                 JITDUMP(" %#.17g", cval.dblVal);
10501                 {
10502                     GenTree* cnsOp = gtNewDconNode(cval.dblVal);
10503 #if !FEATURE_X87_DOUBLES
10504                     // The x87 FP stack doesn't differentiate between float and double, so on x87
10505                     // an R4 constant is treated as R8; every other target keeps it as TYP_FLOAT.
10506                     cnsOp->gtType = TYP_FLOAT;
10507 #endif // FEATURE_X87_DOUBLES
10508                     impPushOnStack(cnsOp, typeInfo(TI_DOUBLE));
10509                 }
10510                 break;
10511
10512             case CEE_LDSTR:
10513
10514                 if (compIsForInlining())
10515                 {
10516                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_NO_CALLEE_LDSTR)
10517                     {
10518                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_LDSTR_RESTRICTION);
10519                         return;
10520                     }
10521                 }
10522
10523                 val = getU4LittleEndian(codeAddr);
10524                 JITDUMP(" %08X", val);
10525                 if (tiVerificationNeeded)
10526                 {
10527                     Verify(info.compCompHnd->isValidStringRef(info.compScopeHnd, val), "bad string");
10528                     tiRetVal = typeInfo(TI_REF, impGetStringClass());
10529                 }
10530                 impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal);
10531
10532                 break;
10533
10534             case CEE_LDARG:
10535                 lclNum = getU2LittleEndian(codeAddr);
10536                 JITDUMP(" %u", lclNum);
10537                 impLoadArg(lclNum, opcodeOffs + sz + 1);
10538                 break;
10539
10540             case CEE_LDARG_S:
10541                 lclNum = getU1LittleEndian(codeAddr);
10542                 JITDUMP(" %u", lclNum);
10543                 impLoadArg(lclNum, opcodeOffs + sz + 1);
10544                 break;
10545
10546             case CEE_LDARG_0:
10547             case CEE_LDARG_1:
10548             case CEE_LDARG_2:
10549             case CEE_LDARG_3:
10550                 lclNum = (opcode - CEE_LDARG_0);
10551                 assert(lclNum >= 0 && lclNum < 4);
10552                 impLoadArg(lclNum, opcodeOffs + sz + 1);
10553                 break;
10554
10555             case CEE_LDLOC:
10556                 lclNum = getU2LittleEndian(codeAddr);
10557                 JITDUMP(" %u", lclNum);
10558                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
10559                 break;
10560
10561             case CEE_LDLOC_S:
10562                 lclNum = getU1LittleEndian(codeAddr);
10563                 JITDUMP(" %u", lclNum);
10564                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
10565                 break;
10566
10567             case CEE_LDLOC_0:
10568             case CEE_LDLOC_1:
10569             case CEE_LDLOC_2:
10570             case CEE_LDLOC_3:
10571                 lclNum = (opcode - CEE_LDLOC_0);
10572                 assert(lclNum >= 0 && lclNum < 4);
10573                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
10574                 break;
10575
10576             case CEE_STARG:
10577                 lclNum = getU2LittleEndian(codeAddr);
10578                 goto STARG;
10579
10580             case CEE_STARG_S:
10581                 lclNum = getU1LittleEndian(codeAddr);
10582             STARG:
10583                 JITDUMP(" %u", lclNum);
10584
10585                 if (tiVerificationNeeded)
10586                 {
10587                     Verify(lclNum < info.compILargsCount, "bad arg num");
10588                 }
10589
10590                 if (compIsForInlining())
10591                 {
10592                     op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
10593                     noway_assert(op1->gtOper == GT_LCL_VAR);
10594                     lclNum = op1->AsLclVar()->gtLclNum;
10595
10596                     goto VAR_ST_VALID;
10597                 }
10598
10599                 lclNum = compMapILargNum(lclNum); // account for possible hidden param
10600                 assertImp(lclNum < numArgs);
10601
10602                 if (lclNum == info.compThisArg)
10603                 {
10604                     lclNum = lvaArg0Var;
10605                 }
10606
10607                 // We should have seen this arg write in the prescan
10608                 assert(lvaTable[lclNum].lvHasILStoreOp);
10609
10610                 if (tiVerificationNeeded)
10611                 {
10612                     typeInfo& tiLclVar = lvaTable[lclNum].lvVerTypeInfo;
10613                     Verify(tiCompatibleWith(impStackTop().seTypeInfo, NormaliseForStack(tiLclVar), true),
10614                            "type mismatch");
10615
10616                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
10617                     {
10618                         Verify(!tiLclVar.IsThisPtr(), "storing to uninit this ptr");
10619                     }
10620                 }
10621
10622                 goto VAR_ST;
10623
10624             case CEE_STLOC:
10625                 lclNum  = getU2LittleEndian(codeAddr);
10626                 isLocal = true;
10627                 JITDUMP(" %u", lclNum);
10628                 goto LOC_ST;
10629
10630             case CEE_STLOC_S:
10631                 lclNum  = getU1LittleEndian(codeAddr);
10632                 isLocal = true;
10633                 JITDUMP(" %u", lclNum);
10634                 goto LOC_ST;
10635
10636             case CEE_STLOC_0:
10637             case CEE_STLOC_1:
10638             case CEE_STLOC_2:
10639             case CEE_STLOC_3:
10640                 isLocal = true;
10641                 lclNum  = (opcode - CEE_STLOC_0);
10642                 assert(lclNum >= 0 && lclNum < 4);
10643
10644             LOC_ST:
10645                 if (tiVerificationNeeded)
10646                 {
10647                     Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
10648                     Verify(tiCompatibleWith(impStackTop().seTypeInfo,
10649                                             NormaliseForStack(lvaTable[lclNum + numArgs].lvVerTypeInfo), true),
10650                            "type mismatch");
10651                 }
10652
10653                 if (compIsForInlining())
10654                 {
10655                     lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
10656
10657                     /* Have we allocated a temp for this local? */
10658
10659                     lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp"));
10660
10661                     goto _PopValue;
10662                 }
10663
10664                 lclNum += numArgs;
10665
10666             VAR_ST:
10667
10668                 if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var)
10669                 {
10670                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
10671                     BADCODE("Bad IL");
10672                 }
10673
10674             VAR_ST_VALID:
10675
10676                 /* if it is a struct assignment, make certain we don't overflow the buffer */
10677                 assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd));
10678
10679                 if (lvaTable[lclNum].lvNormalizeOnLoad())
10680                 {
10681                     lclTyp = lvaGetRealType(lclNum);
10682                 }
10683                 else
10684                 {
10685                     lclTyp = lvaGetActualType(lclNum);
10686                 }
10687
10688             _PopValue:
10689                 /* Pop the value being assigned */
10690
10691                 {
10692                     StackEntry se = impPopStack();
10693                     clsHnd        = se.seTypeInfo.GetClassHandle();
10694                     op1           = se.val;
10695                     tiRetVal      = se.seTypeInfo;
10696                 }
10697
10698 #ifdef FEATURE_SIMD
10699                 if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet()))
10700                 {
10701                     assert(op1->TypeGet() == TYP_STRUCT);
10702                     op1->gtType = lclTyp;
10703                 }
10704 #endif // FEATURE_SIMD
10705
10706                 op1 = impImplicitIorI4Cast(op1, lclTyp);
10707
10708 #ifdef _TARGET_64BIT_
10709                 // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
10710                 if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT))
10711                 {
10712                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
10713                     op1 = gtNewCastNode(TYP_INT, op1, TYP_INT);
10714                 }
10715 #endif // _TARGET_64BIT_
10716
10717                 // We had better assign it a value of the correct type
10718                 assertImp(
10719                     genActualType(lclTyp) == genActualType(op1->gtType) ||
10720                     genActualType(lclTyp) == TYP_I_IMPL && op1->IsVarAddr() ||
10721                     (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) ||
10722                     (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) ||
10723                     (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) ||
10724                     ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF));
10725
10726                 /* If op1 is "&var" then its type is the transient "*" and it can
10727                    be used either as TYP_BYREF or TYP_I_IMPL */
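                      /* For example, in the IL sequence "ldloca.s 0; stloc.1" where local 1 is
                         declared as native int, the address produced by ldloca is consumed as
                         TYP_I_IMPL rather than TYP_BYREF, so no GC reporting is needed for it. */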
10728
10729                 if (op1->IsVarAddr())
10730                 {
10731                     assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF);
10732
10733                     /* When "&var" is created, we assume it is a byref. If it is
10734                        being assigned to a TYP_I_IMPL var, change the type to
10735                        prevent unnecessary GC info */
10736
10737                     if (genActualType(lclTyp) == TYP_I_IMPL)
10738                     {
10739                         op1->gtType = TYP_I_IMPL;
10740                     }
10741                 }
10742
10743                 // If this is a local and the local is a ref type, see
10744                 // if we can improve type information based on the
10745                 // value being assigned.
10746                 if (isLocal && (lclTyp == TYP_REF))
10747                 {
10748                     // We should have seen a stloc in our IL prescan.
10749                     assert(lvaTable[lclNum].lvHasILStoreOp);
10750
10751                     const bool isSingleILStoreLocal =
10752                         !lvaTable[lclNum].lvHasMultipleILStoreOp && !lvaTable[lclNum].lvHasLdAddrOp;
10753
10754                     // Conservative check that there is just one
10755                     // definition that reaches this store.
10756                     const bool hasSingleReachingDef = (block->bbStackDepthOnEntry() == 0);
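                          // If the stack were non-empty on entry, the value being stored could have
                          // been pushed by more than one predecessor, so the type observed here would
                          // not necessarily describe every value that can reach this store.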
10757
10758                     if (isSingleILStoreLocal && hasSingleReachingDef)
10759                     {
10760                         lvaUpdateClass(lclNum, op1, clsHnd);
10761                     }
10762                 }
10763
10764                 /* Filter out simple assignments to itself */
10765
10766                 if (op1->gtOper == GT_LCL_VAR && lclNum == op1->gtLclVarCommon.gtLclNum)
10767                 {
10768                     if (opts.compDbgCode)
10769                     {
10770                         op1 = gtNewNothingNode();
10771                         goto SPILL_APPEND;
10772                     }
10773                     else
10774                     {
10775                         break;
10776                     }
10777                 }
10778
10779                 /* Create the assignment node */
10780
10781                 op2 = gtNewLclvNode(lclNum, lclTyp, opcodeOffs + sz + 1);
10782
10783                 /* If the local is aliased or pinned, we need to spill calls and
10784                    indirections from the stack. */
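                      /* For example, if the stack holds an indirection through a byref taken from
                         this local, that pending read must observe the local's old value; spilling
                         side effects to temps before appending the store preserves IL evaluation
                         order. */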
10785
10786                 if ((lvaTable[lclNum].lvAddrExposed || lvaTable[lclNum].lvHasLdAddrOp || lvaTable[lclNum].lvPinned) &&
10787                     (verCurrentState.esStackDepth > 0))
10788                 {
10789                     impSpillSideEffects(false,
10790                                         (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased or is pinned"));
10791                 }
10792
10793                 /* Spill any refs to the local from the stack */
10794
10795                 impSpillLclRefs(lclNum);
10796
10797 #if !FEATURE_X87_DOUBLES
10798                 // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
10799                 // We insert a cast to the dest 'op2' type
10800                 //
10801                 if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
10802                     varTypeIsFloating(op2->gtType))
10803                 {
10804                     op1 = gtNewCastNode(op2->TypeGet(), op1, op2->TypeGet());
10805                 }
10806 #endif // !FEATURE_X87_DOUBLES
10807
10808                 if (varTypeIsStruct(lclTyp))
10809                 {
10810                     op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL);
10811                 }
10812                 else
10813                 {
10814                     // The code generator generates GC tracking information
10815                     // based on the RHS of the assignment.  Later the LHS (which is
10816                     // a BYREF) gets used and the emitter checks that that variable
10817                     // is being tracked.  It is not (since the RHS was an int and did
10818                     // not need tracking).  To keep this assert happy, we change the RHS
10819                     if (lclTyp == TYP_BYREF && !varTypeIsGC(op1->gtType))
10820                     {
10821                         op1->gtType = TYP_BYREF;
10822                     }
10823                     op1 = gtNewAssignNode(op2, op1);
10824                 }
10825
10826                 goto SPILL_APPEND;
10827
10828             case CEE_LDLOCA:
10829                 lclNum = getU2LittleEndian(codeAddr);
10830                 goto LDLOCA;
10831
10832             case CEE_LDLOCA_S:
10833                 lclNum = getU1LittleEndian(codeAddr);
10834             LDLOCA:
10835                 JITDUMP(" %u", lclNum);
10836                 if (tiVerificationNeeded)
10837                 {
10838                     Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
10839                     Verify(info.compInitMem, "initLocals not set");
10840                 }
10841
10842                 if (compIsForInlining())
10843                 {
10844                     // Get the local type
10845                     lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
10846
10847                     /* Have we allocated a temp for this local? */
10848
10849                     lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp"));
10850
10851                     op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum));
10852
10853                     goto _PUSH_ADRVAR;
10854                 }
10855
10856                 lclNum += numArgs;
10857                 assertImp(lclNum < info.compLocalsCount);
10858                 goto ADRVAR;
10859
10860             case CEE_LDARGA:
10861                 lclNum = getU2LittleEndian(codeAddr);
10862                 goto LDARGA;
10863
10864             case CEE_LDARGA_S:
10865                 lclNum = getU1LittleEndian(codeAddr);
10866             LDARGA:
10867                 JITDUMP(" %u", lclNum);
10868                 Verify(lclNum < info.compILargsCount, "bad arg num");
10869
10870                 if (compIsForInlining())
10871                 {
10872                     // In IL, LDARGA(_S) is used to load the byref managed pointer of struct argument,
10873                     // followed by a ldfld to load the field.
10874
10875                     op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
10876                     if (op1->gtOper != GT_LCL_VAR)
10877                     {
10878                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR);
10879                         return;
10880                     }
10881
10882                     assert(op1->gtOper == GT_LCL_VAR);
10883
10884                     goto _PUSH_ADRVAR;
10885                 }
10886
10887                 lclNum = compMapILargNum(lclNum); // account for possible hidden param
10888                 assertImp(lclNum < numArgs);
10889
10890                 if (lclNum == info.compThisArg)
10891                 {
10892                     lclNum = lvaArg0Var;
10893                 }
10894
10895                 goto ADRVAR;
10896
10897             ADRVAR:
10898
10899                 op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum), opcodeOffs + sz + 1);
10900
10901             _PUSH_ADRVAR:
10902                 assert(op1->gtOper == GT_LCL_VAR);
10903
10904                 /* Note that this is supposed to create the transient type "*"
10905                    which may be used as a TYP_I_IMPL. However, we catch places
10906                    where it is used as a TYP_I_IMPL and change the node if needed.
10907                    Thus we are pessimistic and may report byrefs in the GC info
10908                    where it was not absolutely needed, but it is safer this way.
10909                  */
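                      // Over-reporting a byref is tolerable because the GC ignores reported interior
                      // pointers that do not point into the GC heap, whereas failing to report a live
                      // interior pointer would be a correctness bug.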
10910                 op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10911
10912                 // &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does
10913                 assert((op1->gtFlags & GTF_GLOB_REF) == 0);
10914
10915                 tiRetVal = lvaTable[lclNum].lvVerTypeInfo;
10916                 if (tiVerificationNeeded)
10917                 {
10918                     // Don't allow taking address of uninit this ptr.
10919                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
10920                     {
10921                         Verify(!tiRetVal.IsThisPtr(), "address of uninit this ptr");
10922                     }
10923
10924                     if (!tiRetVal.IsByRef())
10925                     {
10926                         tiRetVal.MakeByRef();
10927                     }
10928                     else
10929                     {
10930                         Verify(false, "byref to byref");
10931                     }
10932                 }
10933
10934                 impPushOnStack(op1, tiRetVal);
10935                 break;
10936
10937             case CEE_ARGLIST:
10938
10939                 if (!info.compIsVarArgs)
10940                 {
10941                     BADCODE("arglist in non-vararg method");
10942                 }
10943
10944                 if (tiVerificationNeeded)
10945                 {
10946                     tiRetVal = typeInfo(TI_STRUCT, impGetRuntimeArgumentHandle());
10947                 }
10948                 assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG);
10949
10950                 /* The ARGLIST cookie is a hidden 'last' parameter; we have already
10951                    adjusted the arg count because this is like fetching the last param */
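                      /* For example, a C# method declared "static void M(__arglist)" receives the
                         cookie as a hidden trailing argument; CEE_ARGLIST simply pushes the address
                         of that hidden argument (lvaVarargsHandleArg) below. */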
10952                 assertImp(0 < numArgs);
10953                 assert(lvaTable[lvaVarargsHandleArg].lvAddrExposed);
10954                 lclNum = lvaVarargsHandleArg;
10955                 op1    = gtNewLclvNode(lclNum, TYP_I_IMPL, opcodeOffs + sz + 1);
10956                 op1    = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10957                 impPushOnStack(op1, tiRetVal);
10958                 break;
10959
10960             case CEE_ENDFINALLY:
10961
10962                 if (compIsForInlining())
10963                 {
10964                     assert(!"Shouldn't have exception handlers in the inliner!");
10965                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY);
10966                     return;
10967                 }
10968
10969                 if (verCurrentState.esStackDepth > 0)
10970                 {
10971                     impEvalSideEffects();
10972                 }
10973
10974                 if (info.compXcptnsCount == 0)
10975                 {
10976                     BADCODE("endfinally outside finally");
10977                 }
10978
10979                 assert(verCurrentState.esStackDepth == 0);
10980
10981                 op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr);
10982                 goto APPEND;
10983
10984             case CEE_ENDFILTER:
10985
10986                 if (compIsForInlining())
10987                 {
10988                     assert(!"Shouldn't have exception handlers in the inliner!");
10989                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER);
10990                     return;
10991                 }
10992
10993                 block->bbSetRunRarely(); // filters are rare
10994
10995                 if (info.compXcptnsCount == 0)
10996                 {
10997                     BADCODE("endfilter outside filter");
10998                 }
10999
11000                 if (tiVerificationNeeded)
11001                 {
11002                     Verify(impStackTop().seTypeInfo.IsType(TI_INT), "bad endfilt arg");
11003                 }
11004
11005                 op1 = impPopStack().val;
11006                 assertImp(op1->gtType == TYP_INT);
11007                 if (!bbInFilterILRange(block))
11008                 {
11009                     BADCODE("EndFilter outside a filter handler");
11010                 }
11011
11012                 /* Mark current bb as end of filter */
11013
11014                 assert(compCurBB->bbFlags & BBF_DONT_REMOVE);
11015                 assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET);
11016
11017                 /* Mark catch handler as successor */
11018
11019                 op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1);
11020                 if (verCurrentState.esStackDepth != 0)
11021                 {
11022                     verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__)
11023                                                 DEBUGARG(__LINE__));
11024                 }
11025                 goto APPEND;
11026
11027             case CEE_RET:
11028                 prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it
11029             RET:
11030                 if (!impReturnInstruction(block, prefixFlags, opcode))
11031                 {
11032                     return; // abort
11033                 }
11034                 else
11035                 {
11036                     break;
11037                 }
11038
11039             case CEE_JMP:
11040
11041                 assert(!compIsForInlining());
11042
11043                 if (tiVerificationNeeded)
11044                 {
11045                     Verify(false, "Invalid opcode: CEE_JMP");
11046                 }
11047
11048                 if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex())
11049                 {
11050                     /* CEE_JMP does not make sense in some "protected" regions. */
11051
11052                     BADCODE("Jmp not allowed in protected region");
11053                 }
11054
11055                 if (verCurrentState.esStackDepth != 0)
11056                 {
11057                     BADCODE("Stack must be empty after CEE_JMPs");
11058                 }
11059
11060                 _impResolveToken(CORINFO_TOKENKIND_Method);
11061
11062                 JITDUMP(" %08X", resolvedToken.token);
11063
11064                 /* The signature of the target has to be identical to ours.
11065                    At least check that argCnt and returnType match */
11066
11067                 eeGetMethodSig(resolvedToken.hMethod, &sig);
11068                 if (sig.numArgs != info.compMethodInfo->args.numArgs ||
11069                     sig.retType != info.compMethodInfo->args.retType ||
11070                     sig.callConv != info.compMethodInfo->args.callConv)
11071                 {
11072                     BADCODE("Incompatible target for CEE_JMPs");
11073                 }
11074
11075                 op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod);
11076
11077                 /* Mark the basic block as being a JUMP instead of RETURN */
11078
11079                 block->bbFlags |= BBF_HAS_JMP;
11080
11081                 /* Set this flag to make sure register arguments have a location assigned
11082                  * even if we don't use them inside the method */
11083
11084                 compJmpOpUsed = true;
11085
11086                 fgNoStructPromotion = true;
11087
11088                 goto APPEND;
11089
11090             case CEE_LDELEMA:
11091                 assertImp(sz == sizeof(unsigned));
11092
11093                 _impResolveToken(CORINFO_TOKENKIND_Class);
11094
11095                 JITDUMP(" %08X", resolvedToken.token);
11096
11097                 ldelemClsHnd = resolvedToken.hClass;
11098
11099                 if (tiVerificationNeeded)
11100                 {
11101                     typeInfo tiArray = impStackTop(1).seTypeInfo;
11102                     typeInfo tiIndex = impStackTop().seTypeInfo;
11103
11104                     // As per ECMA, the 'index' specified can be either int32 or native int.
11105                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11106
11107                     typeInfo arrayElemType = verMakeTypeInfo(ldelemClsHnd);
11108                     Verify(tiArray.IsNullObjRef() ||
11109                                typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElemType),
11110                            "bad array");
11111
11112                     tiRetVal = arrayElemType;
11113                     tiRetVal.MakeByRef();
11114                     if (prefixFlags & PREFIX_READONLY)
11115                     {
11116                         tiRetVal.SetIsReadonlyByRef();
11117                     }
11118
11119                     // an array interior pointer is always in the heap
11120                     tiRetVal.SetIsPermanentHomeByRef();
11121                 }
11122
11123                 // If it's a value class array we just do a simple address-of
11124                 if (eeIsValueClass(ldelemClsHnd))
11125                 {
11126                     CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd);
11127                     if (cit == CORINFO_TYPE_UNDEF)
11128                     {
11129                         lclTyp = TYP_STRUCT;
11130                     }
11131                     else
11132                     {
11133                         lclTyp = JITtype2varType(cit);
11134                     }
11135                     goto ARR_LD_POST_VERIFY;
11136                 }
11137
11138                 // Similarly, if it's a readonly access, we can do a simple address-of
11139                 // without doing a runtime type-check
11140                 if (prefixFlags & PREFIX_READONLY)
11141                 {
11142                     lclTyp = TYP_REF;
11143                     goto ARR_LD_POST_VERIFY;
11144                 }
11145
11146                 // Otherwise we need the full helper function with run-time type check
11147                 op1 = impTokenToHandle(&resolvedToken);
11148                 if (op1 == nullptr)
11149                 { // compDonotInline()
11150                     return;
11151                 }
11152
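                      // The helper takes the array, the index, and the element type handle; it
                      // performs the run-time checks (bounds and exact element-type match, throwing
                      // ArrayTypeMismatchException on a mismatch) and returns a byref to the element.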
11153                 args = gtNewArgList(op1);                      // Type
11154                 args = gtNewListNode(impPopStack().val, args); // index
11155                 args = gtNewListNode(impPopStack().val, args); // array
11156                 op1  = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, args);
11157
11158                 impPushOnStack(op1, tiRetVal);
11159                 break;
11160
11161             // ldelem for reference and value types
11162             case CEE_LDELEM:
11163                 assertImp(sz == sizeof(unsigned));
11164
11165                 _impResolveToken(CORINFO_TOKENKIND_Class);
11166
11167                 JITDUMP(" %08X", resolvedToken.token);
11168
11169                 ldelemClsHnd = resolvedToken.hClass;
11170
11171                 if (tiVerificationNeeded)
11172                 {
11173                     typeInfo tiArray = impStackTop(1).seTypeInfo;
11174                     typeInfo tiIndex = impStackTop().seTypeInfo;
11175
11176                     // As per ECMA, the 'index' specified can be either int32 or native int.
11177                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11178                     tiRetVal = verMakeTypeInfo(ldelemClsHnd);
11179
11180                     Verify(tiArray.IsNullObjRef() || tiCompatibleWith(verGetArrayElemType(tiArray), tiRetVal, false),
11181                            "type of array incompatible with type operand");
11182                     tiRetVal.NormaliseForStack();
11183                 }
11184
11185                 // If it's a reference type or generic variable type
11186                 // then just generate code as though it's a ldelem.ref instruction
11187                 if (!eeIsValueClass(ldelemClsHnd))
11188                 {
11189                     lclTyp = TYP_REF;
11190                     opcode = CEE_LDELEM_REF;
11191                 }
11192                 else
11193                 {
11194                     CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd);
11195                     lclTyp             = JITtype2varType(jitTyp);
11196                     tiRetVal           = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct
11197                     tiRetVal.NormaliseForStack();
11198                 }
11199                 goto ARR_LD_POST_VERIFY;
11200
11201             case CEE_LDELEM_I1:
11202                 lclTyp = TYP_BYTE;
11203                 goto ARR_LD;
11204             case CEE_LDELEM_I2:
11205                 lclTyp = TYP_SHORT;
11206                 goto ARR_LD;
11207             case CEE_LDELEM_I:
11208                 lclTyp = TYP_I_IMPL;
11209                 goto ARR_LD;
11210
11211             // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter
11212             // and treating it as TYP_INT avoids other asserts.
11213             case CEE_LDELEM_U4:
11214                 lclTyp = TYP_INT;
11215                 goto ARR_LD;
11216
11217             case CEE_LDELEM_I4:
11218                 lclTyp = TYP_INT;
11219                 goto ARR_LD;
11220             case CEE_LDELEM_I8:
11221                 lclTyp = TYP_LONG;
11222                 goto ARR_LD;
11223             case CEE_LDELEM_REF:
11224                 lclTyp = TYP_REF;
11225                 goto ARR_LD;
11226             case CEE_LDELEM_R4:
11227                 lclTyp = TYP_FLOAT;
11228                 goto ARR_LD;
11229             case CEE_LDELEM_R8:
11230                 lclTyp = TYP_DOUBLE;
11231                 goto ARR_LD;
11232             case CEE_LDELEM_U1:
11233                 lclTyp = TYP_UBYTE;
11234                 goto ARR_LD;
11235             case CEE_LDELEM_U2:
11236                 lclTyp = TYP_USHORT;
11237                 goto ARR_LD;
11238
11239             ARR_LD:
11240
11241                 if (tiVerificationNeeded)
11242                 {
11243                     typeInfo tiArray = impStackTop(1).seTypeInfo;
11244                     typeInfo tiIndex = impStackTop().seTypeInfo;
11245
11246                     // As per ECMA, the 'index' specified can be either int32 or native int.
11247                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11248                     if (tiArray.IsNullObjRef())
11249                     {
11250                         if (lclTyp == TYP_REF)
11251                         { // we will say a deref of a null array yields a null ref
11252                             tiRetVal = typeInfo(TI_NULL);
11253                         }
11254                         else
11255                         {
11256                             tiRetVal = typeInfo(lclTyp);
11257                         }
11258                     }
11259                     else
11260                     {
11261                         tiRetVal             = verGetArrayElemType(tiArray);
11262                         typeInfo arrayElemTi = typeInfo(lclTyp);
11263 #ifdef _TARGET_64BIT_
11264                         if (opcode == CEE_LDELEM_I)
11265                         {
11266                             arrayElemTi = typeInfo::nativeInt();
11267                         }
11268
11269                         if (lclTyp != TYP_REF && lclTyp != TYP_STRUCT)
11270                         {
11271                             Verify(typeInfo::AreEquivalent(tiRetVal, arrayElemTi), "bad array");
11272                         }
11273                         else
11274 #endif // _TARGET_64BIT_
11275                         {
11276                             Verify(tiRetVal.IsType(arrayElemTi.GetType()), "bad array");
11277                         }
11278                     }
11279                     tiRetVal.NormaliseForStack();
11280                 }
11281             ARR_LD_POST_VERIFY:
11282
11283                 /* Pull the index value and array address */
11284                 op2 = impPopStack().val;
11285                 op1 = impPopStack().val;
11286                 assertImp(op1->gtType == TYP_REF);
11287
11288                 /* Check for null pointer - in the inliner case we simply abort */
11289
11290                 if (compIsForInlining())
11291                 {
11292                     if (op1->gtOper == GT_CNS_INT)
11293                     {
11294                         compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM);
11295                         return;
11296                     }
11297                 }
11298
11299                 op1 = impCheckForNullPointer(op1);
11300
11301                 /* Mark the block as containing an index expression */
11302
11303                 if (op1->gtOper == GT_LCL_VAR)
11304                 {
11305                     if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD)
11306                     {
11307                         block->bbFlags |= BBF_HAS_IDX_LEN;
11308                         optMethodFlags |= OMF_HAS_ARRAYREF;
11309                     }
11310                 }
11311
11312                 /* Create the index node and push it on the stack */
11313
11314                 op1 = gtNewIndexRef(lclTyp, op1, op2);
11315
11316                 ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT);
11317
11318                 if ((opcode == CEE_LDELEMA) || ldstruct ||
11319                     (ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd)))
11320                 {
11321                     assert(ldelemClsHnd != DUMMY_INIT(NULL));
11322
11323                     // remember the element size
11324                     if (lclTyp == TYP_REF)
11325                     {
11326                         op1->gtIndex.gtIndElemSize = TARGET_POINTER_SIZE;
11327                     }
11328                     else
11329                     {
11330                         // If ldElemClass is precisely a primitive type, use that; otherwise, preserve the struct type.
11331                         if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF)
11332                         {
11333                             op1->gtIndex.gtStructElemClass = ldelemClsHnd;
11334                         }
11335                         assert(lclTyp != TYP_STRUCT || op1->gtIndex.gtStructElemClass != nullptr);
11336                         if (lclTyp == TYP_STRUCT)
11337                         {
11338                             size                       = info.compCompHnd->getClassSize(ldelemClsHnd);
11339                             op1->gtIndex.gtIndElemSize = size;
11340                             op1->gtType                = lclTyp;
11341                         }
11342                     }
11343
11344                     if ((opcode == CEE_LDELEMA) || ldstruct)
11345                     {
11346                         // wrap it in a &
11347                         lclTyp = TYP_BYREF;
11348
11349                         op1 = gtNewOperNode(GT_ADDR, lclTyp, op1);
11350                     }
11351                     else
11352                     {
11353                         assert(lclTyp != TYP_STRUCT);
11354                     }
11355                 }
11356
11357                 if (ldstruct)
11358                 {
11359                     // Create an OBJ for the result
11360                     op1 = gtNewObjNode(ldelemClsHnd, op1);
11361                     op1->gtFlags |= GTF_EXCEPT;
11362                 }
11363                 impPushOnStack(op1, tiRetVal);
11364                 break;
11365
11366             // stelem for reference and value types
11367             case CEE_STELEM:
11368
11369                 assertImp(sz == sizeof(unsigned));
11370
11371                 _impResolveToken(CORINFO_TOKENKIND_Class);
11372
11373                 JITDUMP(" %08X", resolvedToken.token);
11374
11375                 stelemClsHnd = resolvedToken.hClass;
11376
11377                 if (tiVerificationNeeded)
11378                 {
11379                     typeInfo tiArray = impStackTop(2).seTypeInfo;
11380                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
11381                     typeInfo tiValue = impStackTop().seTypeInfo;
11382
11383                     // As per ECMA, the 'index' specified can be either int32 or native int.
11384                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11385                     typeInfo arrayElem = verMakeTypeInfo(stelemClsHnd);
11386
11387                     Verify(tiArray.IsNullObjRef() || tiCompatibleWith(arrayElem, verGetArrayElemType(tiArray), false),
11388                            "type operand incompatible with array element type");
11389                     arrayElem.NormaliseForStack();
11390                     Verify(tiCompatibleWith(tiValue, arrayElem, true), "value incompatible with type operand");
11391                 }
11392
11393                 // If it's a reference type just behave as though it's a stelem.ref instruction
11394                 if (!eeIsValueClass(stelemClsHnd))
11395                 {
11396                     goto STELEM_REF_POST_VERIFY;
11397                 }
11398
11399                 // Otherwise extract the type
11400                 {
11401                     CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd);
11402                     lclTyp             = JITtype2varType(jitTyp);
11403                     goto ARR_ST_POST_VERIFY;
11404                 }
11405
11406             case CEE_STELEM_REF:
11407
11408                 if (tiVerificationNeeded)
11409                 {
11410                     typeInfo tiArray = impStackTop(2).seTypeInfo;
11411                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
11412                     typeInfo tiValue = impStackTop().seTypeInfo;
11413
11414                     // As per ECMA, the 'index' specified can be either int32 or native int.
11415                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11416                     Verify(tiValue.IsObjRef(), "bad value");
11417
11418                     // We only check that it is an object reference; the helper does additional checks
11419                     Verify(tiArray.IsNullObjRef() || verGetArrayElemType(tiArray).IsType(TI_REF), "bad array");
11420                 }
11421
11422             STELEM_REF_POST_VERIFY:
11423
11424                 arrayNodeTo      = impStackTop(2).val;
11425                 arrayNodeToIndex = impStackTop(1).val;
11426                 arrayNodeFrom    = impStackTop().val;
11427
11428                 //
11429                 // Note that it is not legal to optimize away CORINFO_HELP_ARRADDR_ST in
11430                 // many cases because of covariance, i.e. foo[] can be cast to object[].
11431                 //
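                      // For example:
                      //     string[] s = ...; object[] o = s; o[i] = new object();
                      // must throw ArrayTypeMismatchException at run time, so the covariant store
                      // check can only be skipped when we can prove it is unnecessary, as in the
                      // two cases below.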
11432
11433                 // Check for assignment to the same array, i.e. arrLcl[i] = arrLcl[j]
11434                 // This does not need CORINFO_HELP_ARRADDR_ST
11435                 if (arrayNodeFrom->OperGet() == GT_INDEX && arrayNodeFrom->gtOp.gtOp1->gtOper == GT_LCL_VAR &&
11436                     arrayNodeTo->gtOper == GT_LCL_VAR &&
11437                     arrayNodeTo->gtLclVarCommon.gtLclNum == arrayNodeFrom->gtOp.gtOp1->gtLclVarCommon.gtLclNum &&
11438                     !lvaTable[arrayNodeTo->gtLclVarCommon.gtLclNum].lvAddrExposed)
11439                 {
11440                     JITDUMP("\nstelem of ref from same array: skipping covariant store check\n");
11441                     lclTyp = TYP_REF;
11442                     goto ARR_ST_POST_VERIFY;
11443                 }
11444
11445                 // Check for assignment of NULL. This does not need CORINFO_HELP_ARRADDR_ST
11446                 if (arrayNodeFrom->OperGet() == GT_CNS_INT)
11447                 {
11448                     JITDUMP("\nstelem of null: skipping covariant store check\n");
11449                     assert(arrayNodeFrom->gtType == TYP_REF && arrayNodeFrom->gtIntCon.gtIconVal == 0);
11450                     lclTyp = TYP_REF;
11451                     goto ARR_ST_POST_VERIFY;
11452                 }
11453
11454                 /* Call a helper function to do the assignment */
11455                 op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, impPopList(3, nullptr));
11456
11457                 goto SPILL_APPEND;
11458
11459             case CEE_STELEM_I1:
11460                 lclTyp = TYP_BYTE;
11461                 goto ARR_ST;
11462             case CEE_STELEM_I2:
11463                 lclTyp = TYP_SHORT;
11464                 goto ARR_ST;
11465             case CEE_STELEM_I:
11466                 lclTyp = TYP_I_IMPL;
11467                 goto ARR_ST;
11468             case CEE_STELEM_I4:
11469                 lclTyp = TYP_INT;
11470                 goto ARR_ST;
11471             case CEE_STELEM_I8:
11472                 lclTyp = TYP_LONG;
11473                 goto ARR_ST;
11474             case CEE_STELEM_R4:
11475                 lclTyp = TYP_FLOAT;
11476                 goto ARR_ST;
11477             case CEE_STELEM_R8:
11478                 lclTyp = TYP_DOUBLE;
11479                 goto ARR_ST;
11480
11481             ARR_ST:
11482
11483                 if (tiVerificationNeeded)
11484                 {
11485                     typeInfo tiArray = impStackTop(2).seTypeInfo;
11486                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
11487                     typeInfo tiValue = impStackTop().seTypeInfo;
11488
11489                     // As per ECMA, the 'index' specified can be either int32 or native int.
11490                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11491                     typeInfo arrayElem = typeInfo(lclTyp);
11492 #ifdef _TARGET_64BIT_
11493                     if (opcode == CEE_STELEM_I)
11494                     {
11495                         arrayElem = typeInfo::nativeInt();
11496                     }
11497 #endif // _TARGET_64BIT_
11498                     Verify(tiArray.IsNullObjRef() || typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElem),
11499                            "bad array");
11500
11501                     Verify(tiCompatibleWith(NormaliseForStack(tiValue), arrayElem.NormaliseForStack(), true),
11502                            "bad value");
11503                 }
11504
11505             ARR_ST_POST_VERIFY:
11506                 /* The strict order of evaluation is LHS-operands, RHS-operands,
11507                    range-check, and then assignment. However, codegen currently
11508                    does the range-check before evaluating the RHS-operands. So to
11509                    maintain strict ordering, we spill the stack. */
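                      /* For example, in "arr[i] = M()" where M() may throw, the call must be
                         evaluated before any IndexOutOfRangeException from the range check; spilling
                         the side-effecting value to a temp here forces that order. */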
11510
11511                 if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT)
11512                 {
11513                     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11514                                                    "Strict ordering of exceptions for Array store"));
11515                 }
11516
11517                 /* Pull the new value from the stack */
11518                 op2 = impPopStack().val;
11519
11520                 /* Pull the index value */
11521                 op1 = impPopStack().val;
11522
11523                 /* Pull the array address */
11524                 op3 = impPopStack().val;
11525
11526                 assertImp(op3->gtType == TYP_REF);
11527                 if (op2->IsVarAddr())
11528                 {
11529                     op2->gtType = TYP_I_IMPL;
11530                 }
11531
11532                 op3 = impCheckForNullPointer(op3);
11533
11534                 // Mark the block as containing an index expression
11535
11536                 if (op3->gtOper == GT_LCL_VAR)
11537                 {
11538                     if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD)
11539                     {
11540                         block->bbFlags |= BBF_HAS_IDX_LEN;
11541                         optMethodFlags |= OMF_HAS_ARRAYREF;
11542                     }
11543                 }
11544
11545                 /* Create the index node */
11546
11547                 op1 = gtNewIndexRef(lclTyp, op3, op1);
11548
11549                 /* Create the assignment node and append it */
11550
11551                 if (lclTyp == TYP_STRUCT)
11552                 {
11553                     assert(stelemClsHnd != DUMMY_INIT(NULL));
11554
11555                     op1->gtIndex.gtStructElemClass = stelemClsHnd;
11556                     op1->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(stelemClsHnd);
11557                 }
11558                 if (varTypeIsStruct(op1))
11559                 {
11560                     op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL);
11561                 }
11562                 else
11563                 {
11564                     op2 = impImplicitR4orR8Cast(op2, op1->TypeGet());
11565                     op1 = gtNewAssignNode(op1, op2);
11566                 }
11567
11568                 /* Mark the expression as containing an assignment */
11569
11570                 op1->gtFlags |= GTF_ASG;
11571
11572                 goto SPILL_APPEND;
11573
11574             case CEE_ADD:
11575                 oper = GT_ADD;
11576                 goto MATH_OP2;
11577
11578             case CEE_ADD_OVF:
11579                 uns = false;
11580                 goto ADD_OVF;
11581             case CEE_ADD_OVF_UN:
11582                 uns = true;
11583                 goto ADD_OVF;
11584
11585             ADD_OVF:
11586                 ovfl     = true;
11587                 callNode = false;
11588                 oper     = GT_ADD;
11589                 goto MATH_OP2_FLAGS;
11590
11591             case CEE_SUB:
11592                 oper = GT_SUB;
11593                 goto MATH_OP2;
11594
11595             case CEE_SUB_OVF:
11596                 uns = false;
11597                 goto SUB_OVF;
11598             case CEE_SUB_OVF_UN:
11599                 uns = true;
11600                 goto SUB_OVF;
11601
11602             SUB_OVF:
11603                 ovfl     = true;
11604                 callNode = false;
11605                 oper     = GT_SUB;
11606                 goto MATH_OP2_FLAGS;
11607
11608             case CEE_MUL:
11609                 oper = GT_MUL;
11610                 goto MATH_MAYBE_CALL_NO_OVF;
11611
11612             case CEE_MUL_OVF:
11613                 uns = false;
11614                 goto MUL_OVF;
11615             case CEE_MUL_OVF_UN:
11616                 uns = true;
11617                 goto MUL_OVF;
11618
11619             MUL_OVF:
11620                 ovfl = true;
11621                 oper = GT_MUL;
11622                 goto MATH_MAYBE_CALL_OVF;
11623
11624             // Other binary math operations
11625
11626             case CEE_DIV:
11627                 oper = GT_DIV;
11628                 goto MATH_MAYBE_CALL_NO_OVF;
11629
11630             case CEE_DIV_UN:
11631                 oper = GT_UDIV;
11632                 goto MATH_MAYBE_CALL_NO_OVF;
11633
11634             case CEE_REM:
11635                 oper = GT_MOD;
11636                 goto MATH_MAYBE_CALL_NO_OVF;
11637
11638             case CEE_REM_UN:
11639                 oper = GT_UMOD;
11640                 goto MATH_MAYBE_CALL_NO_OVF;
11641
11642             MATH_MAYBE_CALL_NO_OVF:
11643                 ovfl = false;
11644             MATH_MAYBE_CALL_OVF:
11645                 // Morpher has some complex logic about when to turn differently
11646                 // typed nodes on different platforms into helper calls. We
11647                 // need to either duplicate that logic here, or just
11648                 // pessimistically make all the nodes large enough to become
11649                 // call nodes.  Since call nodes aren't that much larger and
11650                 // these opcodes are infrequent enough I chose the latter.
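                      // For example, on 32-bit targets a 64-bit CEE_DIV is later morphed into a
                      // helper call (e.g. CORINFO_HELP_LDIV), so the node allocated here must be
                      // large enough to be retyped as a GT_CALL in place.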
11651                 callNode = true;
11652                 goto MATH_OP2_FLAGS;
11653
11654             case CEE_AND:
11655                 oper = GT_AND;
11656                 goto MATH_OP2;
11657             case CEE_OR:
11658                 oper = GT_OR;
11659                 goto MATH_OP2;
11660             case CEE_XOR:
11661                 oper = GT_XOR;
11662                 goto MATH_OP2;
11663
11664             MATH_OP2: // For default values of 'ovfl' and 'callNode'
11665
11666                 ovfl     = false;
11667                 callNode = false;
11668
11669             MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set
11670
11671                 /* Pull two values and push back the result */
11672
11673                 if (tiVerificationNeeded)
11674                 {
11675                     const typeInfo& tiOp1 = impStackTop(1).seTypeInfo;
11676                     const typeInfo& tiOp2 = impStackTop().seTypeInfo;
11677
11678                     Verify(tiCompatibleWith(tiOp1, tiOp2, true), "different arg type");
11679                     if (oper == GT_ADD || oper == GT_DIV || oper == GT_SUB || oper == GT_MUL || oper == GT_MOD)
11680                     {
11681                         Verify(tiOp1.IsNumberType(), "not number");
11682                     }
11683                     else
11684                     {
11685                         Verify(tiOp1.IsIntegerType(), "not integer");
11686                     }
11687
11688                     Verify(!ovfl || tiOp1.IsIntegerType(), "not integer");
11689
11690                     tiRetVal = tiOp1;
11691
11692 #ifdef _TARGET_64BIT_
11693                     if (tiOp2.IsNativeIntType())
11694                     {
11695                         tiRetVal = tiOp2;
11696                     }
11697 #endif // _TARGET_64BIT_
11698                 }
11699
11700                 op2 = impPopStack().val;
11701                 op1 = impPopStack().val;
11702
11703 #if !CPU_HAS_FP_SUPPORT
11704                 if (varTypeIsFloating(op1->gtType))
11705                 {
11706                     callNode = true;
11707                 }
11708 #endif
11709                 /* Can't do arithmetic with references */
11710                 assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF);
11711
11712                 // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change it if it's a true byref, only
11713                 // if it is in the stack)
11714                 impBashVarAddrsToI(op1, op2);
11715
11716                 type = impGetByRefResultType(oper, uns, &op1, &op2);
11717
11718                 assert(!ovfl || !varTypeIsFloating(op1->gtType));
11719
11720                 /* Special case: "int+0", "int-0", "int*1", "int/1" */
11721
11722                 if (op2->gtOper == GT_CNS_INT)
11723                 {
11724                     if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) ||
11725                         (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV)))
11726
11727                     {
11728                         impPushOnStack(op1, tiRetVal);
11729                         break;
11730                     }
11731                 }
11732
11733 #if !FEATURE_X87_DOUBLES
11734                 // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand
11735                 //
11736                 if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))
11737                 {
11738                     if (op1->TypeGet() != type)
11739                     {
11740                         // We insert a cast of op1 to 'type'
11741                         op1 = gtNewCastNode(type, op1, type);
11742                     }
11743                     if (op2->TypeGet() != type)
11744                     {
11745                         // We insert a cast of op2 to 'type'
11746                         op2 = gtNewCastNode(type, op2, type);
11747                     }
11748                 }
11749 #endif // !FEATURE_X87_DOUBLES
11750
11751 #if SMALL_TREE_NODES
11752                 if (callNode)
11753                 {
11754                     /* These operators can later be transformed into 'GT_CALL' */
11755
11756                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]);
11757 #ifndef _TARGET_ARM_
11758                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]);
11759                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]);
11760                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]);
11761                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]);
11762 #endif
11763                     // It's tempting to use LargeOpOpcode() here, but this logic is *not* saying
11764                     // that we'll need to transform into a general large node, but rather specifically
11765                     // to a call: by doing it this way, things keep working if there are multiple sizes,
11766                     // and a CALL is no longer the largest.
11767                     // That said, as of now it *is* a large node, so we'll do this with an assert rather
11768                     // than an "if".
11769                     assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE);
11770                     op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true));
11771                 }
11772                 else
11773 #endif // SMALL_TREE_NODES
11774                 {
11775                     op1 = gtNewOperNode(oper, type, op1, op2);
11776                 }
11777
11778                 /* Special case: integer/long division may throw an exception */
11779
11780                 if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow(this))
11781                 {
11782                     op1->gtFlags |= GTF_EXCEPT;
11783                 }
11784
11785                 if (ovfl)
11786                 {
11787                     assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL);
11788                     if (ovflType != TYP_UNKNOWN)
11789                     {
11790                         op1->gtType = ovflType;
11791                     }
11792                     op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW);
11793                     if (uns)
11794                     {
11795                         op1->gtFlags |= GTF_UNSIGNED;
11796                     }
11797                 }
11798
11799                 impPushOnStack(op1, tiRetVal);
11800                 break;
11801
11802             case CEE_SHL:
11803                 oper = GT_LSH;
11804                 goto CEE_SH_OP2;
11805
11806             case CEE_SHR:
11807                 oper = GT_RSH;
11808                 goto CEE_SH_OP2;
11809             case CEE_SHR_UN:
11810                 oper = GT_RSZ;
11811                 goto CEE_SH_OP2;
11812
11813             CEE_SH_OP2:
11814                 if (tiVerificationNeeded)
11815                 {
11816                     const typeInfo& tiVal   = impStackTop(1).seTypeInfo;
11817                     const typeInfo& tiShift = impStackTop(0).seTypeInfo;
11818                     Verify(tiVal.IsIntegerType() && tiShift.IsType(TI_INT), "Bad shift args");
11819                     tiRetVal = tiVal;
11820                 }
11821                 op2 = impPopStack().val;
11822                 op1 = impPopStack().val; // operand to be shifted
11823                 impBashVarAddrsToI(op1, op2);
11824
11825                 type = genActualType(op1->TypeGet());
11826                 op1  = gtNewOperNode(oper, type, op1, op2);
11827
11828                 impPushOnStack(op1, tiRetVal);
11829                 break;
11830
11831             case CEE_NOT:
11832                 if (tiVerificationNeeded)
11833                 {
11834                     tiRetVal = impStackTop().seTypeInfo;
11835                     Verify(tiRetVal.IsIntegerType(), "bad int value");
11836                 }
11837
11838                 op1 = impPopStack().val;
11839                 impBashVarAddrsToI(op1, nullptr);
11840                 type = genActualType(op1->TypeGet());
11841                 impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal);
11842                 break;
11843
11844             case CEE_CKFINITE:
11845                 if (tiVerificationNeeded)
11846                 {
11847                     tiRetVal = impStackTop().seTypeInfo;
11848                     Verify(tiRetVal.IsType(TI_DOUBLE), "bad R value");
11849                 }
11850                 op1  = impPopStack().val;
11851                 type = op1->TypeGet();
11852                 op1  = gtNewOperNode(GT_CKFINITE, type, op1);
11853                 op1->gtFlags |= GTF_EXCEPT;
11854
11855                 impPushOnStack(op1, tiRetVal);
11856                 break;
11857
11858             case CEE_LEAVE:
11859
11860                 val     = getI4LittleEndian(codeAddr); // jump distance
11861                 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val);
11862                 goto LEAVE;
11863
11864             case CEE_LEAVE_S:
11865                 val     = getI1LittleEndian(codeAddr); // jump distance
11866                 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val);
11867
11868             LEAVE:
11869
11870                 if (compIsForInlining())
11871                 {
11872                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE);
11873                     return;
11874                 }
11875
11876                 JITDUMP(" %04X", jmpAddr);
11877                 if (block->bbJumpKind != BBJ_LEAVE)
11878                 {
11879                     impResetLeaveBlock(block, jmpAddr);
11880                 }
11881
11882                 assert(jmpAddr == block->bbJumpDest->bbCodeOffs);
11883                 impImportLeave(block);
11884                 impNoteBranchOffs();
11885
11886                 break;
11887
11888             case CEE_BR:
11889             case CEE_BR_S:
11890                 jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
11891
11892                 if (compIsForInlining() && jmpDist == 0)
11893                 {
11894                     break; /* NOP */
11895                 }
11896
11897                 impNoteBranchOffs();
11898                 break;
11899
11900             case CEE_BRTRUE:
11901             case CEE_BRTRUE_S:
11902             case CEE_BRFALSE:
11903             case CEE_BRFALSE_S:
11904
11905                 /* Pop the comparand (now there's a neat term) from the stack */
11906                 if (tiVerificationNeeded)
11907                 {
11908                     typeInfo& tiVal = impStackTop().seTypeInfo;
11909                     Verify(tiVal.IsObjRef() || tiVal.IsByRef() || tiVal.IsIntegerType() || tiVal.IsMethod(),
11910                            "bad value");
11911                 }
11912
11913                 op1  = impPopStack().val;
11914                 type = op1->TypeGet();
11915
11916                 // brfalse and brtrue are only allowed on I4, refs, and byrefs.
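                      // If the branch target is the lexically next block, the branch is degenerate:
                      // the taken and not-taken paths meet at the same block, so convert it to a
                      // fall-through and keep the condition only if it has side effects that must
                      // still be evaluated.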
11917                 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11918                 {
11919                     block->bbJumpKind = BBJ_NONE;
11920
11921                     if (op1->gtFlags & GTF_GLOB_EFFECT)
11922                     {
11923                         op1 = gtUnusedValNode(op1);
11924                         goto SPILL_APPEND;
11925                     }
11926                     else
11927                     {
11928                         break;
11929                     }
11930                 }
11931
11932                 if (op1->OperIsCompare())
11933                 {
11934                     if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S)
11935                     {
11936                         // Flip the sense of the compare
11937
11938                         op1 = gtReverseCond(op1);
11939                     }
11940                 }
11941                 else
11942                 {
11943                     /* We'll compare against an equally-sized integer 0 */
11944                     /* For small types, we always compare against int   */
11945                     op2 = gtNewZeroConNode(genActualType(op1->gtType));
11946
11947                     /* Create the comparison operator and try to fold it */
11948
11949                     oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? GT_NE : GT_EQ;
11950                     op1  = gtNewOperNode(oper, TYP_INT, op1, op2);
11951                 }
11952
11953             // fall through
11954
11955             COND_JUMP:
11956
11957                 /* Fold comparison if we can */
11958
11959                 op1 = gtFoldExpr(op1);
11960
11961                 /* Try to fold the really simple cases like 'iconst *, ifne/ifeq' */
11962                 /* Don't make any blocks unreachable in import only mode */
11963
11964                 if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly())
11965                 {
11966                     /* gtFoldExpr() should prevent this as we don't want to make any blocks
11967                        unreachable under compDbgCode */
11968                     assert(!opts.compDbgCode);
11969
11970                     BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->gtIntCon.gtIconVal ? BBJ_ALWAYS : BBJ_NONE);
11971                     assertImp((block->bbJumpKind == BBJ_COND)            // normal case
11972                               || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the
11973                                                                          // block for the second time
11974
11975                     block->bbJumpKind = foldedJumpKind;
11976 #ifdef DEBUG
11977                     if (verbose)
11978                     {
11979                         if (op1->gtIntCon.gtIconVal)
11980                         {
11981                             printf("\nThe conditional jump becomes an unconditional jump to BB%02u\n",
11982                                    block->bbJumpDest->bbNum);
11983                         }
11984                         else
11985                         {
11986                             printf("\nThe block falls through into the next BB%02u\n", block->bbNext->bbNum);
11987                         }
11988                     }
11989 #endif
11990                     break;
11991                 }
11992
11993                 op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1);
11994
11995                 /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt'
11996                    in impImportBlock(block). For correct line numbers, spill stack. */
11997
11998                 if (opts.compDbgCode && impCurStmtOffs != BAD_IL_OFFSET)
11999                 {
12000                     impSpillStackEnsure(true);
12001                 }
12002
12003                 goto SPILL_APPEND;
12004
12005             case CEE_CEQ:
12006                 oper = GT_EQ;
12007                 uns  = false;
12008                 goto CMP_2_OPs;
12009             case CEE_CGT_UN:
12010                 oper = GT_GT;
12011                 uns  = true;
12012                 goto CMP_2_OPs;
12013             case CEE_CGT:
12014                 oper = GT_GT;
12015                 uns  = false;
12016                 goto CMP_2_OPs;
12017             case CEE_CLT_UN:
12018                 oper = GT_LT;
12019                 uns  = true;
12020                 goto CMP_2_OPs;
12021             case CEE_CLT:
12022                 oper = GT_LT;
12023                 uns  = false;
12024                 goto CMP_2_OPs;
12025
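            // ceq/cgt/clt (and their .un forms) pop two operands, build a GT_EQ/GT_GT/GT_LT
            // node of type TYP_INT, try to fold it, and push the result; e.g.
            // "ldloc.0; ldc.i4.5; cgt" leaves roughly GT_GT(lclVar0, 5) on the stack.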
12026             CMP_2_OPs:
12027                 if (tiVerificationNeeded)
12028                 {
12029                     verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
12030                     tiRetVal = typeInfo(TI_INT);
12031                 }
12032
12033                 op2 = impPopStack().val;
12034                 op1 = impPopStack().val;
12035
12036 #ifdef _TARGET_64BIT_
12037                 if (varTypeIsI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_INT))
12038                 {
12039                     op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
12040                 }
12041                 else if (varTypeIsI(op2->TypeGet()) && (genActualType(op1->TypeGet()) == TYP_INT))
12042                 {
12043                     op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
12044                 }
12045 #endif // _TARGET_64BIT_
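                // On 64-bit targets a mixed native-int/int32 compare is made uniform by
                // widening the 32-bit operand; e.g. comparing a TYP_I_IMPL value against an
                // int32 inserts a cast of the int32 operand to TYP_I_IMPL (TYP_U_IMPL for
                // the unsigned forms).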
12046
12047                 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
12048                           varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
12049                           varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
12050
12051                 /* Create the comparison node */
12052
12053                 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
12054
12055                 /* TODO: setting both flags when only one is appropriate */
12056                 if (opcode == CEE_CGT_UN || opcode == CEE_CLT_UN)
12057                 {
12058                     op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED;
12059                 }
12060
12061                 // Fold result, if possible.
12062                 op1 = gtFoldExpr(op1);
12063
12064                 impPushOnStack(op1, tiRetVal);
12065                 break;
12066
12067             case CEE_BEQ_S:
12068             case CEE_BEQ:
12069                 oper = GT_EQ;
12070                 goto CMP_2_OPs_AND_BR;
12071
12072             case CEE_BGE_S:
12073             case CEE_BGE:
12074                 oper = GT_GE;
12075                 goto CMP_2_OPs_AND_BR;
12076
12077             case CEE_BGE_UN_S:
12078             case CEE_BGE_UN:
12079                 oper = GT_GE;
12080                 goto CMP_2_OPs_AND_BR_UN;
12081
12082             case CEE_BGT_S:
12083             case CEE_BGT:
12084                 oper = GT_GT;
12085                 goto CMP_2_OPs_AND_BR;
12086
12087             case CEE_BGT_UN_S:
12088             case CEE_BGT_UN:
12089                 oper = GT_GT;
12090                 goto CMP_2_OPs_AND_BR_UN;
12091
12092             case CEE_BLE_S:
12093             case CEE_BLE:
12094                 oper = GT_LE;
12095                 goto CMP_2_OPs_AND_BR;
12096
12097             case CEE_BLE_UN_S:
12098             case CEE_BLE_UN:
12099                 oper = GT_LE;
12100                 goto CMP_2_OPs_AND_BR_UN;
12101
12102             case CEE_BLT_S:
12103             case CEE_BLT:
12104                 oper = GT_LT;
12105                 goto CMP_2_OPs_AND_BR;
12106
12107             case CEE_BLT_UN_S:
12108             case CEE_BLT_UN:
12109                 oper = GT_LT;
12110                 goto CMP_2_OPs_AND_BR_UN;
12111
12112             case CEE_BNE_UN_S:
12113             case CEE_BNE_UN:
12114                 oper = GT_NE;
12115                 goto CMP_2_OPs_AND_BR_UN;
12116
12117             CMP_2_OPs_AND_BR_UN:
12118                 uns       = true;
12119                 unordered = true;
12120                 goto CMP_2_OPs_AND_BR_ALL;
12121             CMP_2_OPs_AND_BR:
12122                 uns       = false;
12123                 unordered = false;
12124                 goto CMP_2_OPs_AND_BR_ALL;
12125             CMP_2_OPs_AND_BR_ALL:
12126
12127                 if (tiVerificationNeeded)
12128                 {
12129                     verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
12130                 }
12131
12132                 /* Pull two values */
12133                 op2 = impPopStack().val;
12134                 op1 = impPopStack().val;
12135
12136 #ifdef _TARGET_64BIT_
12137                 if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT))
12138                 {
12139                     op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
12140                 }
12141                 else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT))
12142                 {
12143                     op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
12144                 }
12145 #endif // _TARGET_64BIT_
12146
12147                 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
12148                           varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
12149                           varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
12150
12151                 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
12152                 {
12153                     block->bbJumpKind = BBJ_NONE;
12154
12155                     if (op1->gtFlags & GTF_GLOB_EFFECT)
12156                     {
12157                         impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
12158                                                        "Branch to next Optimization, op1 side effect"));
12159                         impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
12160                     }
12161                     if (op2->gtFlags & GTF_GLOB_EFFECT)
12162                     {
12163                         impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
12164                                                        "Branch to next Optimization, op2 side effect"));
12165                         impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
12166                     }
12167
12168 #ifdef DEBUG
12169                     if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT)
12170                     {
12171                         impNoteLastILoffs();
12172                     }
12173 #endif
12174                     break;
12175                 }
12176 #if !FEATURE_X87_DOUBLES
12177                 // We can generate an compare of different sized floating point op1 and op2
12178                 // We insert a cast
12179                 //
12180                 if (varTypeIsFloating(op1->TypeGet()))
12181                 {
12182                     if (op1->TypeGet() != op2->TypeGet())
12183                     {
12184                         assert(varTypeIsFloating(op2->TypeGet()));
12185
12186                         // say op1=double, op2=float. To avoid loss of precision
12187                         // while comparing, op2 is converted to double and double
12188                         // comparison is done.
12189                         if (op1->TypeGet() == TYP_DOUBLE)
12190                         {
12191                             // We insert a cast of op2 to TYP_DOUBLE
12192                             op2 = gtNewCastNode(TYP_DOUBLE, op2, TYP_DOUBLE);
12193                         }
12194                         else if (op2->TypeGet() == TYP_DOUBLE)
12195                         {
12196                             // We insert a cast of op1 to TYP_DOUBLE
12197                             op1 = gtNewCastNode(TYP_DOUBLE, op1, TYP_DOUBLE);
12198                         }
12199                     }
12200                 }
12201 #endif // !FEATURE_X87_DOUBLES
12202
12203                 /* Create and append the operator */
12204
12205                 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
12206
12207                 if (uns)
12208                 {
12209                     op1->gtFlags |= GTF_UNSIGNED;
12210                 }
12211
12212                 if (unordered)
12213                 {
12214                     op1->gtFlags |= GTF_RELOP_NAN_UN;
12215                 }
12216
12217                 goto COND_JUMP;
12218
12219             case CEE_SWITCH:
12220                 assert(!compIsForInlining());
12221
12222                 if (tiVerificationNeeded)
12223                 {
12224                     Verify(impStackTop().seTypeInfo.IsType(TI_INT), "Bad switch val");
12225                 }
12226                 /* Pop the switch value off the stack */
12227                 op1 = impPopStack().val;
12228                 assertImp(genActualTypeIsIntOrI(op1->TypeGet()));
12229
12230                 /* We can create a switch node */
12231
12232                 op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1);
12233
12234                 val = (int)getU4LittleEndian(codeAddr);
12235                 codeAddr += 4 + val * 4; // skip over the switch-table
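                // e.g. "switch (L1, L2, L3)" encodes a 4-byte target count followed by three
                // 4-byte jump offsets; val holds the count, so codeAddr now points past the table.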
12236
12237                 goto SPILL_APPEND;
12238
12239             /************************** Casting OPCODES ***************************/
12240
12241             case CEE_CONV_OVF_I1:
12242                 lclTyp = TYP_BYTE;
12243                 goto CONV_OVF;
12244             case CEE_CONV_OVF_I2:
12245                 lclTyp = TYP_SHORT;
12246                 goto CONV_OVF;
12247             case CEE_CONV_OVF_I:
12248                 lclTyp = TYP_I_IMPL;
12249                 goto CONV_OVF;
12250             case CEE_CONV_OVF_I4:
12251                 lclTyp = TYP_INT;
12252                 goto CONV_OVF;
12253             case CEE_CONV_OVF_I8:
12254                 lclTyp = TYP_LONG;
12255                 goto CONV_OVF;
12256
12257             case CEE_CONV_OVF_U1:
12258                 lclTyp = TYP_UBYTE;
12259                 goto CONV_OVF;
12260             case CEE_CONV_OVF_U2:
12261                 lclTyp = TYP_USHORT;
12262                 goto CONV_OVF;
12263             case CEE_CONV_OVF_U:
12264                 lclTyp = TYP_U_IMPL;
12265                 goto CONV_OVF;
12266             case CEE_CONV_OVF_U4:
12267                 lclTyp = TYP_UINT;
12268                 goto CONV_OVF;
12269             case CEE_CONV_OVF_U8:
12270                 lclTyp = TYP_ULONG;
12271                 goto CONV_OVF;
12272
12273             case CEE_CONV_OVF_I1_UN:
12274                 lclTyp = TYP_BYTE;
12275                 goto CONV_OVF_UN;
12276             case CEE_CONV_OVF_I2_UN:
12277                 lclTyp = TYP_SHORT;
12278                 goto CONV_OVF_UN;
12279             case CEE_CONV_OVF_I_UN:
12280                 lclTyp = TYP_I_IMPL;
12281                 goto CONV_OVF_UN;
12282             case CEE_CONV_OVF_I4_UN:
12283                 lclTyp = TYP_INT;
12284                 goto CONV_OVF_UN;
12285             case CEE_CONV_OVF_I8_UN:
12286                 lclTyp = TYP_LONG;
12287                 goto CONV_OVF_UN;
12288
12289             case CEE_CONV_OVF_U1_UN:
12290                 lclTyp = TYP_UBYTE;
12291                 goto CONV_OVF_UN;
12292             case CEE_CONV_OVF_U2_UN:
12293                 lclTyp = TYP_USHORT;
12294                 goto CONV_OVF_UN;
12295             case CEE_CONV_OVF_U_UN:
12296                 lclTyp = TYP_U_IMPL;
12297                 goto CONV_OVF_UN;
12298             case CEE_CONV_OVF_U4_UN:
12299                 lclTyp = TYP_UINT;
12300                 goto CONV_OVF_UN;
12301             case CEE_CONV_OVF_U8_UN:
12302                 lclTyp = TYP_ULONG;
12303                 goto CONV_OVF_UN;
12304
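            // The conv.ovf.* opcodes set ovfl so an overflow check is emitted, and the *.un
            // forms additionally set uns so the source is treated as unsigned; e.g.
            // conv.ovf.i1.un arrives at _CONV with lclTyp = TYP_BYTE, uns = true, ovfl = true.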
12305             CONV_OVF_UN:
12306                 uns = true;
12307                 goto CONV_OVF_COMMON;
12308             CONV_OVF:
12309                 uns = false;
12310                 goto CONV_OVF_COMMON;
12311
12312             CONV_OVF_COMMON:
12313                 ovfl = true;
12314                 goto _CONV;
12315
12316             case CEE_CONV_I1:
12317                 lclTyp = TYP_BYTE;
12318                 goto CONV;
12319             case CEE_CONV_I2:
12320                 lclTyp = TYP_SHORT;
12321                 goto CONV;
12322             case CEE_CONV_I:
12323                 lclTyp = TYP_I_IMPL;
12324                 goto CONV;
12325             case CEE_CONV_I4:
12326                 lclTyp = TYP_INT;
12327                 goto CONV;
12328             case CEE_CONV_I8:
12329                 lclTyp = TYP_LONG;
12330                 goto CONV;
12331
12332             case CEE_CONV_U1:
12333                 lclTyp = TYP_UBYTE;
12334                 goto CONV;
12335             case CEE_CONV_U2:
12336                 lclTyp = TYP_USHORT;
12337                 goto CONV;
12338 #if (REGSIZE_BYTES == 8)
12339             case CEE_CONV_U:
12340                 lclTyp = TYP_U_IMPL;
12341                 goto CONV_UN;
12342 #else
12343             case CEE_CONV_U:
12344                 lclTyp = TYP_U_IMPL;
12345                 goto CONV;
12346 #endif
12347             case CEE_CONV_U4:
12348                 lclTyp = TYP_UINT;
12349                 goto CONV;
12350             case CEE_CONV_U8:
12351                 lclTyp = TYP_ULONG;
12352                 goto CONV_UN;
12353
12354             case CEE_CONV_R4:
12355                 lclTyp = TYP_FLOAT;
12356                 goto CONV;
12357             case CEE_CONV_R8:
12358                 lclTyp = TYP_DOUBLE;
12359                 goto CONV;
12360
12361             case CEE_CONV_R_UN:
12362                 lclTyp = TYP_DOUBLE;
12363                 goto CONV_UN;
12364
12365             CONV_UN:
12366                 uns  = true;
12367                 ovfl = false;
12368                 goto _CONV;
12369
12370             CONV:
12371                 uns  = false;
12372                 ovfl = false;
12373                 goto _CONV;
12374
12375             _CONV:
12376                 // just check that we have a number on the stack
12377                 if (tiVerificationNeeded)
12378                 {
12379                     const typeInfo& tiVal = impStackTop().seTypeInfo;
12380                     Verify(tiVal.IsNumberType(), "bad arg");
12381
12382 #ifdef _TARGET_64BIT_
12383                     bool isNative = false;
12384
12385                     switch (opcode)
12386                     {
12387                         case CEE_CONV_OVF_I:
12388                         case CEE_CONV_OVF_I_UN:
12389                         case CEE_CONV_I:
12390                         case CEE_CONV_OVF_U:
12391                         case CEE_CONV_OVF_U_UN:
12392                         case CEE_CONV_U:
12393                             isNative = true;
12394                         default:
12395                             // leave 'isNative' = false;
12396                             break;
12397                     }
12398                     if (isNative)
12399                     {
12400                         tiRetVal = typeInfo::nativeInt();
12401                     }
12402                     else
12403 #endif // _TARGET_64BIT_
12404                     {
12405                         tiRetVal = typeInfo(lclTyp).NormaliseForStack();
12406                     }
12407                 }
12408
12409                 // Only conversions from FLOAT or DOUBLE to an integer type,
12410                 // and conversions from ULONG (or LONG on ARM) to DOUBLE, are morphed into helper calls.
12411
12412                 if (varTypeIsFloating(lclTyp))
12413                 {
12414                     callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl
12415 #ifdef _TARGET_64BIT_
12416                                // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK?
12417                                // TYP_BYREF could be used as TYP_I_IMPL which is long.
12418                                // TODO-CQ: remove this when we lower casts long/ulong --> float/double
12419                                // and generate SSE2 code instead of going through helper calls.
12420                                || (impStackTop().val->TypeGet() == TYP_BYREF)
12421 #endif
12422                         ;
12423                 }
12424                 else
12425                 {
12426                     callNode = varTypeIsFloating(impStackTop().val->TypeGet());
12427                 }
12428
12429                 // At this point uns, ovfl and callNode are all set
12430
12431                 op1 = impPopStack().val;
12432                 impBashVarAddrsToI(op1);
12433
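                // Peephole below: a narrowing, non-overflow cast of (x & icon) can be
                // simplified; e.g. for conv.i1 the cast of (x & 0x7F) is dropped entirely,
                // and for (x & 0xFF) only the masking is dropped since the cast will
                // sign-extend from the small value anyway.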
12434                 if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND)
12435                 {
12436                     op2 = op1->gtOp.gtOp2;
12437
12438                     if (op2->gtOper == GT_CNS_INT)
12439                     {
12440                         ssize_t ival = op2->gtIntCon.gtIconVal;
12441                         ssize_t mask, umask;
12442
12443                         switch (lclTyp)
12444                         {
12445                             case TYP_BYTE:
12446                             case TYP_UBYTE:
12447                                 mask  = 0x00FF;
12448                                 umask = 0x007F;
12449                                 break;
12450                             case TYP_USHORT:
12451                             case TYP_SHORT:
12452                                 mask  = 0xFFFF;
12453                                 umask = 0x7FFF;
12454                                 break;
12455
12456                             default:
12457                                 assert(!"unexpected type");
12458                                 return;
12459                         }
12460
12461                         if (((ival & umask) == ival) || ((ival & mask) == ival && uns))
12462                         {
12463                             /* Toss the cast, it's a waste of time */
12464
12465                             impPushOnStack(op1, tiRetVal);
12466                             break;
12467                         }
12468                         else if (ival == mask)
12469                         {
12470                             /* Toss the masking, it's a waste of time, since
12471                                we sign-extend from the small value anyway */
12472
12473                             op1 = op1->gtOp.gtOp1;
12474                         }
12475                     }
12476                 }
12477
12478                 /*  The 'op2' sub-operand of a cast is the 'real' type number,
12479                     since the result of a cast to one of the 'small' integer
12480                     types is an integer.
12481                  */
12482
12483                 type = genActualType(lclTyp);
12484
12485 #if SMALL_TREE_NODES
12486                 if (callNode)
12487                 {
12488                     op1 = gtNewCastNodeL(type, op1, lclTyp);
12489                 }
12490                 else
12491 #endif // SMALL_TREE_NODES
12492                 {
12493                     op1 = gtNewCastNode(type, op1, lclTyp);
12494                 }
12495
12496                 if (ovfl)
12497                 {
12498                     op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT);
12499                 }
12500                 if (uns)
12501                 {
12502                     op1->gtFlags |= GTF_UNSIGNED;
12503                 }
12504                 impPushOnStack(op1, tiRetVal);
12505                 break;
12506
12507             case CEE_NEG:
12508                 if (tiVerificationNeeded)
12509                 {
12510                     tiRetVal = impStackTop().seTypeInfo;
12511                     Verify(tiRetVal.IsNumberType(), "Bad arg");
12512                 }
12513
12514                 op1 = impPopStack().val;
12515                 impBashVarAddrsToI(op1, nullptr);
12516                 impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal);
12517                 break;
12518
12519             case CEE_POP:
12520             {
12521                 /* Pull the top value from the stack */
12522
12523                 StackEntry se = impPopStack();
12524                 clsHnd        = se.seTypeInfo.GetClassHandle();
12525                 op1           = se.val;
12526
12527                 /* Get hold of the type of the value being popped */
12528
12529                 lclTyp = genActualType(op1->gtType);
12530
12531                 /* Does the value have any side effects? */
12532
12533                 if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode)
12534                 {
12535                     // Since we are throwing away the value, just normalize
12536                     // it to its address.  This is more efficient.
12537
12538                     if (varTypeIsStruct(op1))
12539                     {
12540 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
12541                         // Non-calls, such as obj or ret_expr, have to go through this.
12542                         // Calls with large struct return value have to go through this.
12543                         // Helper calls with small struct return value also have to go
12544                         // through this since they do not follow the Unix calling convention.
12545                         if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd) ||
12546                             op1->AsCall()->gtCallType == CT_HELPER)
12547 #endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
12548                         {
12549                             op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false);
12550                         }
12551                     }
12552
12553                     // If op1 is a non-overflow cast, throw it away since it is useless.
12554                     // Another reason for throwing away the useless cast is in the context of
12555                     // implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)).
12556                     // The cast gets added as part of importing GT_CALL, which gets in the way
12557                     // of fgMorphCall() on the forms of tail call nodes that we assert.
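                    // e.g. popping GT_CAST(GT_CALL(...)) where the cast does not overflow
                    // leaves just the GT_CALL, so an implicit tail call remains recognizable.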
12558                     if ((op1->gtOper == GT_CAST) && !op1->gtOverflow())
12559                     {
12560                         op1 = op1->gtOp.gtOp1;
12561                     }
12562
12563                     // If 'op1' is an expression, create an assignment node.
12564                     // This helps analyses (like CSE) work properly.
12565
12566                     if (op1->gtOper != GT_CALL)
12567                     {
12568                         op1 = gtUnusedValNode(op1);
12569                     }
12570
12571                     /* Append the value to the tree list */
12572                     goto SPILL_APPEND;
12573                 }
12574
12575                 /* No side effects - just throw the <BEEP> thing away */
12576             }
12577             break;
12578
12579             case CEE_DUP:
12580             {
12581                 if (tiVerificationNeeded)
12582                 {
12583                     // Dup could start the beginning of a delegate creation sequence; remember that
12584                     delegateCreateStart = codeAddr - 1;
12585                     impStackTop(0);
12586                 }
12587
12588                 // If the expression to dup is simple, just clone it.
12589                 // Otherwise spill it to a temp, and reload the temp
12590                 // twice.
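                // e.g. "dup" of a simple lclVar just pushes two clones of the GT_LCL_VAR,
                // while "dup" of a call is first assigned to a fresh temp and that temp is
                // then pushed twice.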
12591                 StackEntry se   = impPopStack();
12592                 GenTree*   tree = se.val;
12593                 tiRetVal        = se.seTypeInfo;
12594                 op1             = tree;
12595
12596                 if (!opts.compDbgCode && !op1->IsIntegralConst(0) && !op1->IsFPZero() && !op1->IsLocal())
12597                 {
12598                     const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("dup spill"));
12599                     impAssignTempGen(tmpNum, op1, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL);
12600                     var_types type = genActualType(lvaTable[tmpNum].TypeGet());
12601                     op1            = gtNewLclvNode(tmpNum, type);
12602
12603                     // Propagate type info to the temp from the stack and the original tree
12604                     if (type == TYP_REF)
12605                     {
12606                         lvaSetClass(tmpNum, tree, tiRetVal.GetClassHandle());
12607                     }
12608                 }
12609
12610                 op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL,
12611                                    nullptr DEBUGARG("DUP instruction"));
12612
12613                 assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT));
12614                 impPushOnStack(op1, tiRetVal);
12615                 impPushOnStack(op2, tiRetVal);
12616             }
12617             break;
12618
12619             case CEE_STIND_I1:
12620                 lclTyp = TYP_BYTE;
12621                 goto STIND;
12622             case CEE_STIND_I2:
12623                 lclTyp = TYP_SHORT;
12624                 goto STIND;
12625             case CEE_STIND_I4:
12626                 lclTyp = TYP_INT;
12627                 goto STIND;
12628             case CEE_STIND_I8:
12629                 lclTyp = TYP_LONG;
12630                 goto STIND;
12631             case CEE_STIND_I:
12632                 lclTyp = TYP_I_IMPL;
12633                 goto STIND;
12634             case CEE_STIND_REF:
12635                 lclTyp = TYP_REF;
12636                 goto STIND;
12637             case CEE_STIND_R4:
12638                 lclTyp = TYP_FLOAT;
12639                 goto STIND;
12640             case CEE_STIND_R8:
12641                 lclTyp = TYP_DOUBLE;
12642                 goto STIND;
12643             STIND:
12644
12645                 if (tiVerificationNeeded)
12646                 {
12647                     typeInfo instrType(lclTyp);
12648 #ifdef _TARGET_64BIT_
12649                     if (opcode == CEE_STIND_I)
12650                     {
12651                         instrType = typeInfo::nativeInt();
12652                     }
12653 #endif // _TARGET_64BIT_
12654                     verVerifySTIND(impStackTop(1).seTypeInfo, impStackTop(0).seTypeInfo, instrType);
12655                 }
12656                 else
12657                 {
12658                     compUnsafeCastUsed = true; // Have to go conservative
12659                 }
12660
12661             STIND_POST_VERIFY:
12662
12663                 op2 = impPopStack().val; // value to store
12664                 op1 = impPopStack().val; // address to store to
12665
12666                 // you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF
12667                 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12668
12669                 impBashVarAddrsToI(op1, op2);
12670
12671                 op2 = impImplicitR4orR8Cast(op2, lclTyp);
12672
12673 #ifdef _TARGET_64BIT_
12674                 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
12675                 if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
12676                 {
12677                     op2->gtType = TYP_I_IMPL;
12678                 }
12679                 else
12680                 {
12681                     // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
12682                     //
12683                     if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
12684                     {
12685                         assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12686                         op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
12687                     }
12688                     // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12689                     //
12690                     if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
12691                     {
12692                         assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12693                         op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
12694                     }
12695                 }
12696 #endif // _TARGET_64BIT_
12697
12698                 if (opcode == CEE_STIND_REF)
12699                 {
12700                     // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF
12701                     assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType));
12702                     lclTyp = genActualType(op2->TypeGet());
12703                 }
12704
12705 // Check target type.
12706 #ifdef DEBUG
12707                 if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF)
12708                 {
12709                     if (op2->gtType == TYP_BYREF)
12710                     {
12711                         assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL);
12712                     }
12713                     else if (lclTyp == TYP_BYREF)
12714                     {
12715                         assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType));
12716                     }
12717                 }
12718                 else
12719                 {
12720                     assertImp(genActualType(op2->gtType) == genActualType(lclTyp) ||
12721                               ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) ||
12722                               (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp)));
12723                 }
12724 #endif
12725
12726                 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12727
12728                 // stind could point anywhere, for example at a boxed class static int
12729                 op1->gtFlags |= GTF_IND_TGTANYWHERE;
12730
12731                 if (prefixFlags & PREFIX_VOLATILE)
12732                 {
12733                     assert(op1->OperGet() == GT_IND);
12734                     op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
12735                     op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12736                     op1->gtFlags |= GTF_IND_VOLATILE;
12737                 }
12738
12739                 if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
12740                 {
12741                     assert(op1->OperGet() == GT_IND);
12742                     op1->gtFlags |= GTF_IND_UNALIGNED;
12743                 }
12744
12745                 op1 = gtNewAssignNode(op1, op2);
12746                 op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
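                // At this point e.g. "ldloc.0; ldc.i4 42; stind.i4" has produced roughly
                // GT_ASG(GT_IND<int>(lclVar0), 42), marked as possibly faulting.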
12747
12748                 // Spill side-effects AND global-data-accesses
12749                 if (verCurrentState.esStackDepth > 0)
12750                 {
12751                     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND"));
12752                 }
12753
12754                 goto APPEND;
12755
12756             case CEE_LDIND_I1:
12757                 lclTyp = TYP_BYTE;
12758                 goto LDIND;
12759             case CEE_LDIND_I2:
12760                 lclTyp = TYP_SHORT;
12761                 goto LDIND;
12762             case CEE_LDIND_U4:
12763             case CEE_LDIND_I4:
12764                 lclTyp = TYP_INT;
12765                 goto LDIND;
12766             case CEE_LDIND_I8:
12767                 lclTyp = TYP_LONG;
12768                 goto LDIND;
12769             case CEE_LDIND_REF:
12770                 lclTyp = TYP_REF;
12771                 goto LDIND;
12772             case CEE_LDIND_I:
12773                 lclTyp = TYP_I_IMPL;
12774                 goto LDIND;
12775             case CEE_LDIND_R4:
12776                 lclTyp = TYP_FLOAT;
12777                 goto LDIND;
12778             case CEE_LDIND_R8:
12779                 lclTyp = TYP_DOUBLE;
12780                 goto LDIND;
12781             case CEE_LDIND_U1:
12782                 lclTyp = TYP_UBYTE;
12783                 goto LDIND;
12784             case CEE_LDIND_U2:
12785                 lclTyp = TYP_USHORT;
12786                 goto LDIND;
12787             LDIND:
12788
12789                 if (tiVerificationNeeded)
12790                 {
12791                     typeInfo lclTiType(lclTyp);
12792 #ifdef _TARGET_64BIT_
12793                     if (opcode == CEE_LDIND_I)
12794                     {
12795                         lclTiType = typeInfo::nativeInt();
12796                     }
12797 #endif // _TARGET_64BIT_
12798                     tiRetVal = verVerifyLDIND(impStackTop().seTypeInfo, lclTiType);
12799                     tiRetVal.NormaliseForStack();
12800                 }
12801                 else
12802                 {
12803                     compUnsafeCastUsed = true; // Have to go conservative
12804                 }
12805
12806             LDIND_POST_VERIFY:
12807
12808                 op1 = impPopStack().val; // address to load from
12809                 impBashVarAddrsToI(op1);
12810
12811 #ifdef _TARGET_64BIT_
12812                 // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12813                 //
12814                 if (genActualType(op1->gtType) == TYP_INT)
12815                 {
12816                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12817                     op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
12818                 }
12819 #endif
12820
12821                 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12822
12823                 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12824
12825                 // ldind could point anywhere, for example at a boxed class static int
12826                 op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
12827
12828                 if (prefixFlags & PREFIX_VOLATILE)
12829                 {
12830                     assert(op1->OperGet() == GT_IND);
12831                     op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
12832                     op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12833                     op1->gtFlags |= GTF_IND_VOLATILE;
12834                 }
12835
12836                 if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
12837                 {
12838                     assert(op1->OperGet() == GT_IND);
12839                     op1->gtFlags |= GTF_IND_UNALIGNED;
12840                 }
12841
12842                 impPushOnStack(op1, tiRetVal);
12843
12844                 break;
12845
12846             case CEE_UNALIGNED:
12847
12848                 assert(sz == 1);
12849                 val = getU1LittleEndian(codeAddr);
12850                 ++codeAddr;
12851                 JITDUMP(" %u", val);
12852                 if ((val != 1) && (val != 2) && (val != 4))
12853                 {
12854                     BADCODE("Alignment unaligned. must be 1, 2, or 4");
12855                 }
12856
12857                 Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. prefixes");
12858                 prefixFlags |= PREFIX_UNALIGNED;
12859
12860                 impValidateMemoryAccessOpcode(codeAddr, codeEndp, false);
12861
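            // e.g. "unaligned. 1; ldind.i4" records PREFIX_UNALIGNED above and then falls
            // into PREFIX to decode the following memory-access opcode (validated just above).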
12862             PREFIX:
12863                 opcode     = (OPCODE)getU1LittleEndian(codeAddr);
12864                 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
12865                 codeAddr += sizeof(__int8);
12866                 goto DECODE_OPCODE;
12867
12868             case CEE_VOLATILE:
12869
12870                 Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes");
12871                 prefixFlags |= PREFIX_VOLATILE;
12872
12873                 impValidateMemoryAccessOpcode(codeAddr, codeEndp, true);
12874
12875                 assert(sz == 0);
12876                 goto PREFIX;
12877
12878             case CEE_LDFTN:
12879             {
12880                 // Need to do a lookup here so that we perform an access check
12881                 // and do a NOWAY if protections are violated
12882                 _impResolveToken(CORINFO_TOKENKIND_Method);
12883
12884                 JITDUMP(" %08X", resolvedToken.token);
12885
12886                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12887                               addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN)),
12888                               &callInfo);
12889
12890                 // This check really only applies to intrinsic Array.Address methods
12891                 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12892                 {
12893                     NO_WAY("Currently do not support LDFTN of Parameterized functions");
12894                 }
12895
12896                 // Do this before DO_LDFTN since CEE_LDVIRTFTN does it on its own.
12897                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12898
12899                 if (tiVerificationNeeded)
12900                 {
12901                     // LDFTN could start the beginning of a delegate creation sequence; remember that
12902                     delegateCreateStart = codeAddr - 2;
12903
12904                     // check any constraints on the callee's class and type parameters
12905                     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12906                                    "method has unsatisfied class constraints");
12907                     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12908                                                                                 resolvedToken.hMethod),
12909                                    "method has unsatisfied method constraints");
12910
12911                     mflags = callInfo.verMethodFlags;
12912                     Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDFTN on a constructor");
12913                 }
12914
12915             DO_LDFTN:
12916                 op1 = impMethodPointer(&resolvedToken, &callInfo);
12917                 if (compDonotInline())
12918                 {
12919                     return;
12920                 }
12921
12922                 CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken);
12923                 impPushOnStack(op1, typeInfo(heapToken));
12924
12925                 break;
12926             }
12927
12928             case CEE_LDVIRTFTN:
12929             {
12930                 /* Get the method token */
12931
12932                 _impResolveToken(CORINFO_TOKENKIND_Method);
12933
12934                 JITDUMP(" %08X", resolvedToken.token);
12935
12936                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */,
12937                               addVerifyFlag(combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN),
12938                                                     CORINFO_CALLINFO_CALLVIRT)),
12939                               &callInfo);
12940
12941                 // This check really only applies to intrinsic Array.Address methods
12942                 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12943                 {
12944                     NO_WAY("Currently do not support LDFTN of Parameterized functions");
12945                 }
12946
12947                 mflags = callInfo.methodFlags;
12948
12949                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12950
12951                 if (compIsForInlining())
12952                 {
12953                     if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12954                     {
12955                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL);
12956                         return;
12957                     }
12958                 }
12959
12960                 CORINFO_SIG_INFO& ftnSig = callInfo.sig;
12961
12962                 if (tiVerificationNeeded)
12963                 {
12964
12965                     Verify(ftnSig.hasThis(), "ldvirtftn on a static method");
12966                     Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDVIRTFTN on a constructor");
12967
12968                     // JIT32 verifier rejects verifiable ldvirtftn pattern
12969                     typeInfo declType =
12970                         verMakeTypeInfo(resolvedToken.hClass, true); // Change TI_STRUCT to TI_REF when necessary
12971
12972                     typeInfo arg = impStackTop().seTypeInfo;
12973                     Verify((arg.IsType(TI_REF) || arg.IsType(TI_NULL)) && tiCompatibleWith(arg, declType, true),
12974                            "bad ldvirtftn");
12975
12976                     CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
12977                     if (!(arg.IsType(TI_NULL) || (mflags & CORINFO_FLG_STATIC)))
12978                     {
12979                         instanceClassHnd = arg.GetClassHandleForObjRef();
12980                     }
12981
12982                     // check any constraints on the method's class and type parameters
12983                     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12984                                    "method has unsatisfied class constraints");
12985                     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12986                                                                                 resolvedToken.hMethod),
12987                                    "method has unsatisfied method constraints");
12988
12989                     if (mflags & CORINFO_FLG_PROTECTED)
12990                     {
12991                         Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
12992                                "Accessing protected method through wrong type.");
12993                     }
12994                 }
12995
12996                 /* Get the object-ref */
12997                 op1 = impPopStack().val;
12998                 assertImp(op1->gtType == TYP_REF);
12999
13000                 if (opts.IsReadyToRun())
13001                 {
13002                     if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN)
13003                     {
13004                         if (op1->gtFlags & GTF_SIDE_EFFECT)
13005                         {
13006                             op1 = gtUnusedValNode(op1);
13007                             impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13008                         }
13009                         goto DO_LDFTN;
13010                     }
13011                 }
13012                 else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
13013                 {
13014                     if (op1->gtFlags & GTF_SIDE_EFFECT)
13015                     {
13016                         op1 = gtUnusedValNode(op1);
13017                         impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13018                     }
13019                     goto DO_LDFTN;
13020                 }
13021
13022                 GenTree* fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo);
13023                 if (compDonotInline())
13024                 {
13025                     return;
13026                 }
13027
13028                 CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken);
13029                 assert(heapToken->tokenType == CORINFO_TOKENKIND_Method);
13030                 heapToken->tokenType = CORINFO_TOKENKIND_Ldvirtftn;
13031                 impPushOnStack(fptr, typeInfo(heapToken));
13032
13033                 break;
13034             }
13035
13036             case CEE_CONSTRAINED:
13037
13038                 assertImp(sz == sizeof(unsigned));
13039                 impResolveToken(codeAddr, &constrainedResolvedToken, CORINFO_TOKENKIND_Constrained);
13040                 codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually
13041                 JITDUMP(" (%08X) ", constrainedResolvedToken.token);
13042
13043                 Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes");
13044                 prefixFlags |= PREFIX_CONSTRAINED;
13045
13046                 {
13047                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
13048                     if (actualOpcode != CEE_CALLVIRT)
13049                     {
13050                         BADCODE("constrained. has to be followed by callvirt");
13051                     }
13052                 }
13053
13054                 goto PREFIX;
13055
13056             case CEE_READONLY:
13057                 JITDUMP(" readonly.");
13058
13059                 Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes");
13060                 prefixFlags |= PREFIX_READONLY;
13061
13062                 {
13063                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
13064                     if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode))
13065                     {
13066                         BADCODE("readonly. has to be followed by ldelema or call");
13067                     }
13068                 }
13069
13070                 assert(sz == 0);
13071                 goto PREFIX;
13072
13073             case CEE_TAILCALL:
13074                 JITDUMP(" tail.");
13075
13076                 Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes");
13077                 prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
13078
13079                 {
13080                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
13081                     if (!impOpcodeIsCallOpcode(actualOpcode))
13082                     {
13083                         BADCODE("tailcall. has to be followed by call, callvirt or calli");
13084                     }
13085                 }
13086                 assert(sz == 0);
13087                 goto PREFIX;
13088
13089             case CEE_NEWOBJ:
13090
13091                 /* Since we will implicitly insert newObjThisPtr at the start of the
13092                    argument list, spill any GTF_ORDER_SIDEEFF */
13093                 impSpillSpecialSideEff();
13094
13095                 /* NEWOBJ does not respond to TAIL */
13096                 prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT;
13097
13098                 /* NEWOBJ does not respond to CONSTRAINED */
13099                 prefixFlags &= ~PREFIX_CONSTRAINED;
13100
13101                 _impResolveToken(CORINFO_TOKENKIND_NewObj);
13102
13103                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
13104                               addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM)),
13105                               &callInfo);
13106
13107                 if (compIsForInlining())
13108                 {
13109                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
13110                     {
13111                         // Check to see if this call violates the boundary.
13112                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
13113                         return;
13114                     }
13115                 }
13116
13117                 mflags = callInfo.methodFlags;
13118
13119                 if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0)
13120                 {
13121                     BADCODE("newobj on static or abstract method");
13122                 }
13123
13124                 // Insert the security callout before any actual code is generated
13125                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
13126
13127                 // There are three different cases for newobj; in the first two the
13128                 // object size is variable (it depends on the arguments):
13129                 //      1) Object is an array (arrays are treated specially by the EE)
13130                 //      2) Object is some other variable-sized object (e.g. String)
13131                 //      3) Class size can be determined beforehand (normal case)
13132                 // In the first case we need to call a NEWOBJ helper (multinewarray),
13133                 // in the second case we call the constructor with a null 'this' pointer,
13134                 // and in the third case we alloc the memory and then call the constructor.
13135
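                // e.g. "newobj instance void C::.ctor(int32)" for an ordinary class is
                // case 3: the object is allocated first (a stack temp, zero-initialized when
                // needed, for value classes; an allocation helper call otherwise) and the
                // constructor is then called on it.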
13136                 clsFlags = callInfo.classFlags;
13137                 if (clsFlags & CORINFO_FLG_ARRAY)
13138                 {
13139                     if (tiVerificationNeeded)
13140                     {
13141                         CORINFO_CLASS_HANDLE elemTypeHnd;
13142                         INDEBUG(CorInfoType corType =)
13143                         info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
13144                         assert(!(elemTypeHnd == nullptr && corType == CORINFO_TYPE_VALUECLASS));
13145                         Verify(elemTypeHnd == nullptr ||
13146                                    !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
13147                                "newarr of byref-like objects");
13148                         verVerifyCall(opcode, &resolvedToken, nullptr, ((prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0),
13149                                       ((prefixFlags & PREFIX_READONLY) != 0), delegateCreateStart, codeAddr - 1,
13150                                       &callInfo DEBUGARG(info.compFullName));
13151                     }
13152                     // Arrays need to call the NEWOBJ helper.
13153                     assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE);
13154
13155                     impImportNewObjArray(&resolvedToken, &callInfo);
13156                     if (compDonotInline())
13157                     {
13158                         return;
13159                     }
13160
13161                     callTyp = TYP_REF;
13162                     break;
13163                 }
13164                 // At present this can only be String
13165                 else if (clsFlags & CORINFO_FLG_VAROBJSIZE)
13166                 {
13167                     if (IsTargetAbi(CORINFO_CORERT_ABI))
13168                     {
13169                         // The dummy argument does not exist in CoreRT
13170                         newObjThisPtr = nullptr;
13171                     }
13172                     else
13173                     {
13174                         // This is the case for variable-sized objects that are not
13175                         // arrays.  In this case, call the constructor with a null 'this'
13176                         // pointer
13177                         newObjThisPtr = gtNewIconNode(0, TYP_REF);
13178                     }
13179
13180                     /* Remember that this basic block contains 'new' of an object */
13181                     block->bbFlags |= BBF_HAS_NEWOBJ;
13182                     optMethodFlags |= OMF_HAS_NEWOBJ;
13183                 }
13184                 else
13185                 {
13186                     // This is the normal case where the size of the object is
13187                     // fixed.  Allocate the memory and call the constructor.
13188
13189                     // Note: We cannot add a peep to avoid use of a temp here
13190                     // because we don't have enough interference info to detect when
13191                     // the source and destination interfere, for example: s = new S(ref);
13192
13193                     // TODO: We should find the correct place to introduce a general
13194                     // reverse copy prop for struct return values from newobj or
13195                     // any function returning structs.
13196
13197                     /* get a temporary for the new object */
13198                     lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp"));
13199                     if (compDonotInline())
13200                     {
13201                         // Fail fast if lvaGrabTemp fails with CALLSITE_TOO_MANY_LOCALS.
13202                         assert(compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS);
13203                         return;
13204                     }
13205
13206                     // In the value class case we only need clsHnd for size calcs.
13207                     //
13208                     // The lookup of the code pointer will be handled by CALL in this case
13209                     if (clsFlags & CORINFO_FLG_VALUECLASS)
13210                     {
13211                         if (compIsForInlining())
13212                         {
13213                             // If the value class has GC fields, inform the inliner; it may choose to
13214                             // bail out on the inline.
13215                             DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13216                             if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
13217                             {
13218                                 compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
13219                                 if (compInlineResult->IsFailure())
13220                                 {
13221                                     return;
13222                                 }
13223
13224                                 // Do further notification in the case where the call site is rare;
13225                                 // some policies do not track the relative hotness of call sites for
13226                                 // "always" inline cases.
13227                                 if (impInlineInfo->iciBlock->isRunRarely())
13228                                 {
13229                                     compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
13230                                     if (compInlineResult->IsFailure())
13231                                     {
13232                                         return;
13233                                     }
13234                                 }
13235                             }
13236                         }
13237
13238                         CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
13239                         unsigned    size   = info.compCompHnd->getClassSize(resolvedToken.hClass);
13240
13241                         if (impIsPrimitive(jitTyp))
13242                         {
13243                             lvaTable[lclNum].lvType = JITtype2varType(jitTyp);
13244                         }
13245                         else
13246                         {
13247                             // The local variable itself is the allocated space.
13248                             // Here we need the unsafe value class check, since the address of the struct is taken
13249                             // for further use and is potentially exploitable.
13250                             lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */);
13251                         }
13252                         if (compIsForInlining() || fgStructTempNeedsExplicitZeroInit(lvaTable + lclNum, block))
13253                         {
13254                             // Append a tree to zero-out the temp
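                                  // Informally (illustrative sketch, not an exact JIT dump): this appends the
                                  // equivalent of "memset(&tmp, 0, size)" as a statement ahead of the constructor
                                  // call, using a non-copying block op.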
13255                             newObjThisPtr = gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet());
13256
13257                             newObjThisPtr = gtNewBlkOpNode(newObjThisPtr,    // Dest
13258                                                            gtNewIconNode(0), // Value
13259                                                            size,             // Size
13260                                                            false,            // isVolatile
13261                                                            false);           // not copyBlock
13262                             impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
13263                         }
13264
13265                         // Obtain the address of the temp
13266                         newObjThisPtr =
13267                             gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet()));
13268                     }
13269                     else
13270                     {
13271 #ifdef FEATURE_READYTORUN_COMPILER
13272                         if (opts.IsReadyToRun())
13273                         {
13274                             op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
13275                             usingReadyToRunHelper = (op1 != nullptr);
13276                         }
13277
13278                         if (!usingReadyToRunHelper)
13279 #endif
13280                         {
13281                             op1 = impParentClassTokenToHandle(&resolvedToken, nullptr, TRUE);
13282                             if (op1 == nullptr)
13283                             { // compDonotInline()
13284                                 return;
13285                             }
13286
13287                             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13288                             // and the newfast call with a single call to a dynamic R2R cell that will:
13289                             //      1) Load the context
13290                             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate
13291                             //      stub
13292                             //      3) Allocate and return the new object
13293                             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13294
13295                             op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd),
13296                                                     resolvedToken.hClass, TYP_REF, op1);
13297                         }
13298
13299                         // Remember that this basic block contains 'new' of an object
13300                         block->bbFlags |= BBF_HAS_NEWOBJ;
13301                         optMethodFlags |= OMF_HAS_NEWOBJ;
13302
13303                         // Append the assignment to the temp/local. We don't need to spill
13304                         // at all as we are just calling an EE-Jit helper which can only
13305                         // cause an (async) OutOfMemoryException.
13306
13307                         // We assign the newly allocated object (by a GT_ALLOCOBJ node)
13308                         // to a temp. Note that the pattern "temp = allocObj" is required
13309                         // by ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes
13310                         // without exhaustive walk over all expressions.
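                              // Rough shape of the trees built here (illustrative, not an exact JIT dump):
                              //     tmpN = GT_ALLOCOBJ(newHelper, clsHnd)   // appended via impAssignTempGen below
                              //     newObjThisPtr = GT_LCL_VAR tmpN (TYP_REF)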
13311
13312                         impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE);
13313                         lvaSetClass(lclNum, resolvedToken.hClass, true /* is Exact */);
13314
13315                         newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF);
13316                     }
13317                 }
13318                 goto CALL;
13319
13320             case CEE_CALLI:
13321
13322                 /* CALLI does not respond to CONSTRAINED */
13323                 prefixFlags &= ~PREFIX_CONSTRAINED;
13324
13325                 if (compIsForInlining())
13326                 {
13327                     // CALLI doesn't have a method handle, so assume the worst.
13328                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
13329                     {
13330                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_CALLI);
13331                         return;
13332                     }
13333                 }
13334
13335             // fall through
13336
13337             case CEE_CALLVIRT:
13338             case CEE_CALL:
13339
13340                 // We can't call getCallInfo on the token from a CALLI, but we need it in
13341                 // many other places.  We unfortunately embed that knowledge here.
13342                 if (opcode != CEE_CALLI)
13343                 {
13344                     _impResolveToken(CORINFO_TOKENKIND_Method);
13345
13346                     eeGetCallInfo(&resolvedToken,
13347                                   (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr,
13348                                   // this is how impImportCall invokes getCallInfo
13349                                   addVerifyFlag(
13350                                       combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS),
13351                                               (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT
13352                                                                        : CORINFO_CALLINFO_NONE)),
13353                                   &callInfo);
13354                 }
13355                 else
13356                 {
13357                     // Suppress uninitialized use warning.
13358                     memset(&resolvedToken, 0, sizeof(resolvedToken));
13359                     memset(&callInfo, 0, sizeof(callInfo));
13360
13361                     resolvedToken.token = getU4LittleEndian(codeAddr);
13362                 }
13363
13364             CALL: // memberRef should be set.
13365                 // newObjThisPtr should be set for CEE_NEWOBJ
13366
13367                 JITDUMP(" %08X", resolvedToken.token);
13368                 constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0;
13369
13370                 bool newBBcreatedForTailcallStress;
13371
13372                 newBBcreatedForTailcallStress = false;
13373
13374                 if (compIsForInlining())
13375                 {
13376                     if (compDonotInline())
13377                     {
13378                         return;
13379                     }
13380                     // We rule out inlinees with explicit tail calls in fgMakeBasicBlocks.
13381                     assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0);
13382                 }
13383                 else
13384                 {
13385                     if (compTailCallStress())
13386                     {
13387                         // Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()?
13388                         // Tail call stress only recognizes call+ret patterns and forces them to be
13389                         // explicit tail prefixed calls.  Also, fgMakeBasicBlocks() under tail call stress
13390                         // doesn't import the 'ret' opcode following the call into the basic block containing
13391                         // the call; instead it imports it into a new basic block.  Note that fgMakeBasicBlocks()
13392                         // is already checking that there is an opcode following the call, and hence it is
13393                         // safe here to read the next opcode without a bounds check.
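                              // Illustrative IL shape recognized here (simplified, for exposition only):
                              //     call   instance void SomeClass::SomeMethod()
                              //     ret
                              // Under stress, such a call is then treated as if it carried the "tail." prefix.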
13394                         newBBcreatedForTailcallStress =
13395                             impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL, (not a CEE_NEWOBJ). So, don't
13396                                                              // make it jump to RET.
13397                             (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET
13398
13399                         if (newBBcreatedForTailcallStress &&
13400                             !(prefixFlags & PREFIX_TAILCALL_EXPLICIT) && // User hasn't set "tail." prefix yet.
13401                             verCheckTailCallConstraint(opcode, &resolvedToken,
13402                                                        constraintCall ? &constrainedResolvedToken : nullptr,
13403                                                        true) // Is it legal to do tailcall?
13404                             )
13405                         {
13406                             // Stress the tailcall.
13407                             JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)");
13408                             prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
13409                         }
13410                     }
13411                 }
13412
13413                 // This is split up to avoid goto flow warnings.
13414                 bool isRecursive;
13415                 isRecursive = !compIsForInlining() && (callInfo.hMethod == info.compMethodHnd);
13416
13417                 // Note that when running under tail call stress, a call will be marked as explicit tail prefixed,
13418                 // and hence will not be considered for implicit tail calling.
13419                 if (impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive))
13420                 {
13421                     if (compIsForInlining())
13422                     {
13423 #if FEATURE_TAILCALL_OPT_SHARED_RETURN
13424                         // Are we inlining at an implicit tail call site? If so, then we can flag
13425                         // implicit tail call sites in the inline body. These call sites
13426                         // often end up in non BBJ_RETURN blocks, so only flag them when
13427                         // we're able to handle shared returns.
13428                         if (impInlineInfo->iciCall->IsImplicitTailCall())
13429                         {
13430                             JITDUMP(" (Inline Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
13431                             prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
13432                         }
13433 #endif // FEATURE_TAILCALL_OPT_SHARED_RETURN
13434                     }
13435                     else
13436                     {
13437                         JITDUMP(" (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
13438                         prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
13439                     }
13440                 }
13441
13442                 // Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call).
13443                 explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
13444                 readonlyCall     = (prefixFlags & PREFIX_READONLY) != 0;
13445
13446                 if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ)
13447                 {
13448                     // All calls and delegates need a security callout.
13449                     // For delegates, this is the call to the delegate constructor, not the access check on the
13450                     // LD(virt)FTN.
13451                     impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
13452
13453 #if 0 // DevDiv 410397 - This breaks too many obfuscated apps to do this in an in-place release
13454
13455                 // DevDiv 291703 - we need to check for accessibility between the caller of InitializeArray
13456                 // and the field it is reading, thus it is now unverifiable to not immediately precede with
13457                 // ldtoken <field token>, and we now check accessibility
13458                 if ((callInfo.methodFlags & CORINFO_FLG_INTRINSIC) &&
13459                     (info.compCompHnd->getIntrinsicID(callInfo.hMethod) == CORINFO_INTRINSIC_InitializeArray))
13460                 {
13461                     if (prevOpcode != CEE_LDTOKEN)
13462                     {
13463                         Verify(prevOpcode == CEE_LDTOKEN, "Need ldtoken for InitializeArray");
13464                     }
13465                     else
13466                     {
13467                         assert(lastLoadToken != NULL);
13468                         // Now that we know we have a token, verify that it is accessible for loading
13469                         CORINFO_RESOLVED_TOKEN resolvedLoadField;
13470                         impResolveToken(lastLoadToken, &resolvedLoadField, CORINFO_TOKENKIND_Field);
13471                         eeGetFieldInfo(&resolvedLoadField, CORINFO_ACCESS_INIT_ARRAY, &fieldInfo);
13472                         impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13473                     }
13474                 }
13475
13476 #endif // DevDiv 410397
13477                 }
13478
13479                 if (tiVerificationNeeded)
13480                 {
13481                     verVerifyCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
13482                                   explicitTailCall, readonlyCall, delegateCreateStart, codeAddr - 1,
13483                                   &callInfo DEBUGARG(info.compFullName));
13484                 }
13485
13486                 // Insert delegate callout here.
13487                 if (opcode == CEE_NEWOBJ && (mflags & CORINFO_FLG_CONSTRUCTOR) && (clsFlags & CORINFO_FLG_DELEGATE))
13488                 {
13489 #ifdef DEBUG
13490                     // We should do this only if verification is enabled
13491                     // If verification is disabled, delegateCreateStart will not be initialized correctly
13492                     if (tiVerificationNeeded)
13493                     {
13494                         mdMemberRef delegateMethodRef = mdMemberRefNil;
13495                         // We should get here only for well formed delegate creation.
13496                         assert(verCheckDelegateCreation(delegateCreateStart, codeAddr - 1, delegateMethodRef));
13497                     }
13498 #endif
13499                 }
13500
13501                 callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
13502                                         newObjThisPtr, prefixFlags, &callInfo, opcodeOffs);
13503                 if (compDonotInline())
13504                 {
13505                     // We do not check for failures after lvaGrabTemp; this is tracked by the CoreCLR_13272 issue.
13506                     assert((callTyp == TYP_UNDEF) ||
13507                            (compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS));
13508                     return;
13509                 }
13510
13511                 if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we have
13512                                                                        // created a new BB after the "call" instruction in
13513                                                                        // fgMakeBasicBlocks(), so we need to jump to RET regardless.
13514                 {
13515                     assert(!compIsForInlining());
13516                     goto RET;
13517                 }
13518
13519                 break;
13520
13521             case CEE_LDFLD:
13522             case CEE_LDSFLD:
13523             case CEE_LDFLDA:
13524             case CEE_LDSFLDA:
13525             {
13526
13527                 BOOL isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA);
13528                 BOOL isLoadStatic  = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA);
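                      // Informal stack transitions for these opcodes (per ECMA-335, for reference):
                      //     ldfld   T C::f    ..., obj -> ..., value
                      //     ldflda  T C::f    ..., obj -> ..., address
                      //     ldsfld  T C::f    ...      -> ..., value
                      //     ldsflda T C::f    ...      -> ..., address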
13529
13530                 /* Get the CP_Fieldref index */
13531                 assertImp(sz == sizeof(unsigned));
13532
13533                 _impResolveToken(CORINFO_TOKENKIND_Field);
13534
13535                 JITDUMP(" %08X", resolvedToken.token);
13536
13537                 int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET;
13538
13539                 GenTree*             obj     = nullptr;
13540                 typeInfo*            tiObj   = nullptr;
13541                 CORINFO_CLASS_HANDLE objType = nullptr; // used for fields
13542
13543                 if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA)
13544                 {
13545                     tiObj         = &impStackTop().seTypeInfo;
13546                     StackEntry se = impPopStack();
13547                     objType       = se.seTypeInfo.GetClassHandle();
13548                     obj           = se.val;
13549
13550                     if (impIsThis(obj))
13551                     {
13552                         aflags |= CORINFO_ACCESS_THIS;
13553
13554                         // An optimization for Contextful classes:
13555                         // we unwrap the proxy when we have a 'this reference'
13556
13557                         if (info.compUnwrapContextful)
13558                         {
13559                             aflags |= CORINFO_ACCESS_UNWRAP;
13560                         }
13561                     }
13562                 }
13563
13564                 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
13565
13566                 // Figure out the type of the member.  We always call canAccessField, so you always need this
13567                 // handle
13568                 CorInfoType ciType = fieldInfo.fieldType;
13569                 clsHnd             = fieldInfo.structType;
13570
13571                 lclTyp = JITtype2varType(ciType);
13572
13573 #ifdef _TARGET_AMD64_
13574                 noway_assert(varTypeIsIntegralOrI(lclTyp) || varTypeIsFloating(lclTyp) || lclTyp == TYP_STRUCT);
13575 #endif // _TARGET_AMD64_
13576
13577                 if (compIsForInlining())
13578                 {
13579                     switch (fieldInfo.fieldAccessor)
13580                     {
13581                         case CORINFO_FIELD_INSTANCE_HELPER:
13582                         case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13583                         case CORINFO_FIELD_STATIC_ADDR_HELPER:
13584                         case CORINFO_FIELD_STATIC_TLS:
13585
13586                             compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER);
13587                             return;
13588
13589                         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13590                         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13591                             /* We may be able to inline the field accessors in specific instantiations of generic
13592                              * methods */
13593                             compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER);
13594                             return;
13595
13596                         default:
13597                             break;
13598                     }
13599
13600                     if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT &&
13601                         clsHnd)
13602                     {
13603                         if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) &&
13604                             !(info.compFlags & CORINFO_FLG_FORCEINLINE))
13605                         {
13606                             // Loading a static valuetype field usually will cause a JitHelper to be called
13607                             // for the static base. This will bloat the code.
13608                             compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS);
13609
13610                             if (compInlineResult->IsFailure())
13611                             {
13612                                 return;
13613                             }
13614                         }
13615                     }
13616                 }
13617
13618                 tiRetVal = verMakeTypeInfo(ciType, clsHnd);
13619                 if (isLoadAddress)
13620                 {
13621                     tiRetVal.MakeByRef();
13622                 }
13623                 else
13624                 {
13625                     tiRetVal.NormaliseForStack();
13626                 }
13627
13628                 // Perform this check always to ensure that we get field access exceptions even with
13629                 // SkipVerification.
13630                 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13631
13632                 if (tiVerificationNeeded)
13633                 {
13634                     // You can also pass the unboxed struct to LDFLD
13635                     BOOL bAllowPlainValueTypeAsThis = FALSE;
13636                     if (opcode == CEE_LDFLD && impIsValueType(tiObj))
13637                     {
13638                         bAllowPlainValueTypeAsThis = TRUE;
13639                     }
13640
13641                     verVerifyField(&resolvedToken, fieldInfo, tiObj, isLoadAddress, bAllowPlainValueTypeAsThis);
13642
13643                     // If we're doing this on a heap object or from a 'safe' byref
13644                     // then the result is a safe byref too
13645                     if (isLoadAddress) // load address
13646                     {
13647                         if (fieldInfo.fieldFlags &
13648                             CORINFO_FLG_FIELD_STATIC) // statics marked as safe will have permanent home
13649                         {
13650                             if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_SAFESTATIC_BYREF_RETURN)
13651                             {
13652                                 tiRetVal.SetIsPermanentHomeByRef();
13653                             }
13654                         }
13655                         else if (tiObj->IsObjRef() || tiObj->IsPermanentHomeByRef())
13656                         {
13657                             // ldflda of a byref is safe if done on a GC object or on a
13658                             // safe byref
13659                             tiRetVal.SetIsPermanentHomeByRef();
13660                         }
13661                     }
13662                 }
13663                 else
13664                 {
13665                     // tiVerificationNeeded is false.
13666                     // Raise InvalidProgramException if static load accesses non-static field
13667                     if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
13668                     {
13669                         BADCODE("static access on an instance field");
13670                     }
13671                 }
13672
13673                 // We are using ldfld/a on a static field. We allow it, but we need to evaluate any side effects of obj.
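                      // Hypothetical example: the object expression has side effects (say, the result of a call)
                      // but the field turns out to be static. The object is not needed to address the static, yet
                      // its side effects must still run; the gtUnusedValNode/impAppendTree pair below handles that.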
13674                 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
13675                 {
13676                     if (obj->gtFlags & GTF_SIDE_EFFECT)
13677                     {
13678                         obj = gtUnusedValNode(obj);
13679                         impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13680                     }
13681                     obj = nullptr;
13682                 }
13683
13684                 /* Preserve 'small' int types */
13685                 if (!varTypeIsSmall(lclTyp))
13686                 {
13687                     lclTyp = genActualType(lclTyp);
13688                 }
13689
13690                 bool usesHelper = false;
13691
13692                 switch (fieldInfo.fieldAccessor)
13693                 {
13694                     case CORINFO_FIELD_INSTANCE:
13695 #ifdef FEATURE_READYTORUN_COMPILER
13696                     case CORINFO_FIELD_INSTANCE_WITH_BASE:
13697 #endif
13698                     {
13699                         bool nullcheckNeeded = false;
13700
13701                         obj = impCheckForNullPointer(obj);
13702
13703                         if (isLoadAddress && (obj->gtType == TYP_BYREF) && fgAddrCouldBeNull(obj))
13704                         {
13705                             nullcheckNeeded = true;
13706                         }
13707
13708                         // If the object is a struct, what we really want is
13709                         // for the field to operate on the address of the struct.
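                              // Illustrative example (assumption, C#-level view): reading "s.f" where 's' is a
                              // struct on the IL stack: we take the address of the struct and access the field
                              // at its offset, rather than operating on the struct value itself.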
13710                         if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj))
13711                         {
13712                             assert(opcode == CEE_LDFLD && objType != nullptr);
13713
13714                             obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true);
13715                         }
13716
13717                         /* Create the data member node */
13718                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset, nullcheckNeeded);
13719
13720 #ifdef FEATURE_READYTORUN_COMPILER
13721                         if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
13722                         {
13723                             op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
13724                         }
13725 #endif
13726
13727                         op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
13728
13729                         if (fgAddrCouldBeNull(obj))
13730                         {
13731                             op1->gtFlags |= GTF_EXCEPT;
13732                         }
13733
13734                         // If gtFldObj is a BYREF then our target is a value class and
13735                         // it could point anywhere, for example a boxed class static int
13736                         if (obj->gtType == TYP_BYREF)
13737                         {
13738                             op1->gtFlags |= GTF_IND_TGTANYWHERE;
13739                         }
13740
13741                         DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13742                         if (StructHasOverlappingFields(typeFlags))
13743                         {
13744                             op1->gtField.gtFldMayOverlap = true;
13745                         }
13746
13747                         // Wrap it in an address-of operator if necessary
13748                         if (isLoadAddress)
13749                         {
13750                             op1 = gtNewOperNode(GT_ADDR,
13751                                                 (var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1);
13752                         }
13753                         else
13754                         {
13755                             if (compIsForInlining() &&
13756                                 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, obj,
13757                                                                                    impInlineInfo->inlArgInfo))
13758                             {
13759                                 impInlineInfo->thisDereferencedFirst = true;
13760                             }
13761                         }
13762                     }
13763                     break;
13764
13765                     case CORINFO_FIELD_STATIC_TLS:
13766 #ifdef _TARGET_X86_
13767                         // Legacy TLS access is implemented as intrinsic on x86 only
13768
13769                         /* Create the data member node */
13770                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
13771                         op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13772
13773                         if (isLoadAddress)
13774                         {
13775                             op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1);
13776                         }
13777                         break;
13778 #else
13779                         fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13780
13781                         __fallthrough;
13782 #endif
13783
13784                     case CORINFO_FIELD_STATIC_ADDR_HELPER:
13785                     case CORINFO_FIELD_INSTANCE_HELPER:
13786                     case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13787                         op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13788                                                clsHnd, nullptr);
13789                         usesHelper = true;
13790                         break;
13791
13792                     case CORINFO_FIELD_STATIC_ADDRESS:
13793                         // Replace static read-only fields with constant if possible
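                              // Illustrative example (not from the original source): a field such as
                              //     static readonly int MaxRetries = 3;
                              // whose class is already initialized can be folded to the constant 3 here.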
13794                         if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) &&
13795                             !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) &&
13796                             (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp)))
13797                         {
13798                             CorInfoInitClassResult initClassResult =
13799                                 info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd,
13800                                                             impTokenLookupContextHandle);
13801
13802                             if (initClassResult & CORINFO_INITCLASS_INITIALIZED)
13803                             {
13804                                 void** pFldAddr = nullptr;
13805                                 void*  fldAddr =
13806                                     info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr);
13807
13808                                 // We should always be able to access this static's address directly
13809                                 assert(pFldAddr == nullptr);
13810
13811                                 op1 = impImportStaticReadOnlyField(fldAddr, lclTyp);
13812                                 goto FIELD_DONE;
13813                             }
13814                         }
13815
13816                         __fallthrough;
13817
13818                     case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13819                     case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13820                     case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13821                     case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13822                         op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13823                                                          lclTyp);
13824                         break;
13825
13826                     case CORINFO_FIELD_INTRINSIC_ZERO:
13827                     {
13828                         assert(aflags & CORINFO_ACCESS_GET);
13829                         op1 = gtNewIconNode(0, lclTyp);
13830                         goto FIELD_DONE;
13831                     }
13832                     break;
13833
13834                     case CORINFO_FIELD_INTRINSIC_EMPTY_STRING:
13835                     {
13836                         assert(aflags & CORINFO_ACCESS_GET);
13837
13838                         LPVOID         pValue;
13839                         InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue);
13840                         op1                = gtNewStringLiteralNode(iat, pValue);
13841                         goto FIELD_DONE;
13842                     }
13843                     break;
13844
13845                     case CORINFO_FIELD_INTRINSIC_ISLITTLEENDIAN:
13846                     {
13847                         assert(aflags & CORINFO_ACCESS_GET);
13848 #if BIGENDIAN
13849                         op1 = gtNewIconNode(0, lclTyp);
13850 #else
13851                         op1                     = gtNewIconNode(1, lclTyp);
13852 #endif
13853                         goto FIELD_DONE;
13854                     }
13855                     break;
13856
13857                     default:
13858                         assert(!"Unexpected fieldAccessor");
13859                 }
13860
13861                 if (!isLoadAddress)
13862                 {
13863
13864                     if (prefixFlags & PREFIX_VOLATILE)
13865                     {
13866                         op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
13867                         op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13868
13869                         if (!usesHelper)
13870                         {
13871                             assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13872                                    (op1->OperGet() == GT_OBJ));
13873                             op1->gtFlags |= GTF_IND_VOLATILE;
13874                         }
13875                     }
13876
13877                     if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
13878                     {
13879                         if (!usesHelper)
13880                         {
13881                             assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13882                                    (op1->OperGet() == GT_OBJ));
13883                             op1->gtFlags |= GTF_IND_UNALIGNED;
13884                         }
13885                     }
13886                 }
13887
13888                 /* Check if the class needs explicit initialization */
13889
13890                 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13891                 {
13892                     GenTree* helperNode = impInitClass(&resolvedToken);
13893                     if (compDonotInline())
13894                     {
13895                         return;
13896                     }
13897                     if (helperNode != nullptr)
13898                     {
13899                         op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13900                     }
13901                 }
13902
13903             FIELD_DONE:
13904                 impPushOnStack(op1, tiRetVal);
13905             }
13906             break;
13907
13908             case CEE_STFLD:
13909             case CEE_STSFLD:
13910             {
13911
13912                 BOOL isStoreStatic = (opcode == CEE_STSFLD);
13913
13914                 CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type)
13915
13916                 /* Get the CP_Fieldref index */
13917
13918                 assertImp(sz == sizeof(unsigned));
13919
13920                 _impResolveToken(CORINFO_TOKENKIND_Field);
13921
13922                 JITDUMP(" %08X", resolvedToken.token);
13923
13924                 int       aflags = CORINFO_ACCESS_SET;
13925                 GenTree*  obj    = nullptr;
13926                 typeInfo* tiObj  = nullptr;
13927                 typeInfo  tiVal;
13928
13929                 /* Pull the value from the stack */
13930                 StackEntry se = impPopStack();
13931                 op2           = se.val;
13932                 tiVal         = se.seTypeInfo;
13933                 clsHnd        = tiVal.GetClassHandle();
13934
13935                 if (opcode == CEE_STFLD)
13936                 {
13937                     tiObj = &impStackTop().seTypeInfo;
13938                     obj   = impPopStack().val;
13939
13940                     if (impIsThis(obj))
13941                     {
13942                         aflags |= CORINFO_ACCESS_THIS;
13943
13944                         // An optimization for Contextful classes:
13945                         // we unwrap the proxy when we have a 'this reference'
13946
13947                         if (info.compUnwrapContextful)
13948                         {
13949                             aflags |= CORINFO_ACCESS_UNWRAP;
13950                         }
13951                     }
13952                 }
13953
13954                 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
13955
13956                 // Figure out the type of the member.  We always call canAccessField, so you always need this
13957                 // handle
13958                 CorInfoType ciType = fieldInfo.fieldType;
13959                 fieldClsHnd        = fieldInfo.structType;
13960
13961                 lclTyp = JITtype2varType(ciType);
13962
13963                 if (compIsForInlining())
13964                 {
13965                     /* Is this a 'special' (COM) field, a TLS ref static field, a field stored in the GC heap,
13966                      * or a per-inst static? */
13967
13968                     switch (fieldInfo.fieldAccessor)
13969                     {
13970                         case CORINFO_FIELD_INSTANCE_HELPER:
13971                         case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13972                         case CORINFO_FIELD_STATIC_ADDR_HELPER:
13973                         case CORINFO_FIELD_STATIC_TLS:
13974
13975                             compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER);
13976                             return;
13977
13978                         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13979                         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13980                             /* We may be able to inline the field accessors in specific instantiations of generic
13981                              * methods */
13982                             compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER);
13983                             return;
13984
13985                         default:
13986                             break;
13987                     }
13988                 }
13989
13990                 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13991
13992                 if (tiVerificationNeeded)
13993                 {
13994                     verVerifyField(&resolvedToken, fieldInfo, tiObj, TRUE);
13995                     typeInfo fieldType = verMakeTypeInfo(ciType, fieldClsHnd);
13996                     Verify(tiCompatibleWith(tiVal, fieldType.NormaliseForStack(), true), "type mismatch");
13997                 }
13998                 else
13999                 {
14000                     // tiVerificationNeeded is false.
14001                     // Raise InvalidProgramException if static store accesses non-static field
14002                     if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
14003                     {
14004                         BADCODE("static access on an instance field");
14005                     }
14006                 }
14007
14008                 // We are using stfld on a static field.
14009                 // We allow it, but we need to evaluate any side effects of obj
14010                 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
14011                 {
14012                     if (obj->gtFlags & GTF_SIDE_EFFECT)
14013                     {
14014                         obj = gtUnusedValNode(obj);
14015                         impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
14016                     }
14017                     obj = nullptr;
14018                 }
14019
14020                 /* Preserve 'small' int types */
14021                 if (!varTypeIsSmall(lclTyp))
14022                 {
14023                     lclTyp = genActualType(lclTyp);
14024                 }
14025
14026                 switch (fieldInfo.fieldAccessor)
14027                 {
14028                     case CORINFO_FIELD_INSTANCE:
14029 #ifdef FEATURE_READYTORUN_COMPILER
14030                     case CORINFO_FIELD_INSTANCE_WITH_BASE:
14031 #endif
14032                     {
14033                         obj = impCheckForNullPointer(obj);
14034
14035                         /* Create the data member node */
14036                         op1             = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
14037                         DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
14038                         if (StructHasOverlappingFields(typeFlags))
14039                         {
14040                             op1->gtField.gtFldMayOverlap = true;
14041                         }
14042
14043 #ifdef FEATURE_READYTORUN_COMPILER
14044                         if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
14045                         {
14046                             op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
14047                         }
14048 #endif
14049
14050                         op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
14051
14052                         if (fgAddrCouldBeNull(obj))
14053                         {
14054                             op1->gtFlags |= GTF_EXCEPT;
14055                         }
14056
14057                         // If gtFldObj is a BYREF then our target is a value class and
14058                         // it could point anywhere, for example a boxed class static int
14059                         if (obj->gtType == TYP_BYREF)
14060                         {
14061                             op1->gtFlags |= GTF_IND_TGTANYWHERE;
14062                         }
14063
14064                         if (compIsForInlining() &&
14065                             impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, obj, impInlineInfo->inlArgInfo))
14066                         {
14067                             impInlineInfo->thisDereferencedFirst = true;
14068                         }
14069                     }
14070                     break;
14071
14072                     case CORINFO_FIELD_STATIC_TLS:
14073 #ifdef _TARGET_X86_
14074                         // Legacy TLS access is implemented as intrinsic on x86 only
14075
14076                         /* Create the data member node */
14077                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
14078                         op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
14079
14080                         break;
14081 #else
14082                         fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
14083
14084                         __fallthrough;
14085 #endif
14086
14087                     case CORINFO_FIELD_STATIC_ADDR_HELPER:
14088                     case CORINFO_FIELD_INSTANCE_HELPER:
14089                     case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
14090                         op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
14091                                                clsHnd, op2);
14092                         goto SPILL_APPEND;
14093
14094                     case CORINFO_FIELD_STATIC_ADDRESS:
14095                     case CORINFO_FIELD_STATIC_RVA_ADDRESS:
14096                     case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
14097                     case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
14098                     case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
14099                         op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
14100                                                          lclTyp);
14101                         break;
14102
14103                     default:
14104                         assert(!"Unexpected fieldAccessor");
14105                 }
14106
14107                 // Create the member assignment, unless we have a struct.
14108                 // TODO-1stClassStructs: This could be limited to TYP_STRUCT, to avoid extra copies.
14109                 bool deferStructAssign = varTypeIsStruct(lclTyp);
14110
14111                 if (!deferStructAssign)
14112                 {
14113                     if (prefixFlags & PREFIX_VOLATILE)
14114                     {
14115                         assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
14116                         op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
14117                         op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
14118                         op1->gtFlags |= GTF_IND_VOLATILE;
14119                     }
14120                     if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
14121                     {
14122                         assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
14123                         op1->gtFlags |= GTF_IND_UNALIGNED;
14124                     }
14125
14126                     /* V4.0 allows assignment of i4 constant values to i8 type vars when IL verifier is bypassed (full
14127                        trust apps). The reason this works is that the JIT stores an i4 constant in the GenTree union
14128                        during importation and reads from the union as if it were a long during code generation. Though
14129                        this can potentially read garbage, one can get lucky and have it work correctly.
14130
14131                        This code pattern is generated by the Dev10 MC++ compiler when storing to fields compiled with
14132                        the /O2 switch (the default for retail configs in Dev10), and a customer app has taken a
14133                        dependency on it. To be backward compatible, we explicitly add an upward cast here so that
14134                        it always works correctly.
14135
14136                        Note that this is limited to x86 alone as there is no back compat to be addressed for Arm JIT
14137                        for V4.0.
14138                     */
14139                     CLANG_FORMAT_COMMENT_ANCHOR;
14140
14141 #ifndef _TARGET_64BIT_
14142                     // In UWP6.0 and beyond (post-.NET Core 2.0), we decided to let this cast from int to long be
14143                     // generated for ARM as well as x86, so the following IR will be accepted:
14144                     //     *  STMT      void
14145                     //         |  /--*  CNS_INT   int    2
14146                     //         \--*  ASG       long
14147                     //            \--*  CLS_VAR   long
14148
14149                     if ((op1->TypeGet() != op2->TypeGet()) && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) &&
14150                         varTypeIsLong(op1->TypeGet()))
14151                     {
14152                         op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
14153                     }
14154 #endif
14155
14156 #ifdef _TARGET_64BIT_
14157                     // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
14158                     if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
14159                     {
14160                         op2->gtType = TYP_I_IMPL;
14161                     }
14162                     else
14163                     {
14164                         // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
14165                         //
14166                         if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
14167                         {
14168                             op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
14169                         }
14170                         // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
14171                         //
14172                         if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
14173                         {
14174                             op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
14175                         }
14176                     }
14177 #endif
14178
14179 #if !FEATURE_X87_DOUBLES
14180                     // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
14181                     // We insert a cast to the dest 'op1' type
14182                     //
14183                     if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
14184                         varTypeIsFloating(op2->gtType))
14185                     {
14186                         op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
14187                     }
14188 #endif // !FEATURE_X87_DOUBLES
14189
14190                     op1 = gtNewAssignNode(op1, op2);
14191
14192                     /* Mark the expression as containing an assignment */
14193
14194                     op1->gtFlags |= GTF_ASG;
14195                 }
14196
14197                 /* Check if the class needs explicit initialization */
14198
14199                 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
14200                 {
14201                     GenTree* helperNode = impInitClass(&resolvedToken);
14202                     if (compDonotInline())
14203                     {
14204                         return;
14205                     }
14206                     if (helperNode != nullptr)
14207                     {
14208                         op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
14209                     }
14210                 }
14211
14212                 /* stfld can interfere with value classes (consider the sequence
14213                    ldloc, ldloca, ..., stfld, stloc).  We will be conservative and
14214                    spill all value class references from the stack. */
14215
14216                 if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL)))
14217                 {
14218                     assert(tiObj);
14219
14220                     if (impIsValueType(tiObj))
14221                     {
14222                         impSpillEvalStack();
14223                     }
14224                     else
14225                     {
14226                         impSpillValueClasses();
14227                     }
14228                 }
14229
14230                 /* Spill any refs to the same member from the stack */
14231
14232                 impSpillLclRefs((ssize_t)resolvedToken.hField);
14233
14234                 /* stsfld also interferes with indirect accesses (for aliased
14235                    statics) and calls. But we don't need to spill other statics
14236                    as we have explicitly spilled this particular static field. */
14237
14238                 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD"));
14239
14240                 if (deferStructAssign)
14241                 {
14242                     op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL);
14243                 }
14244             }
14245                 goto APPEND;
14246
14247             case CEE_NEWARR:
14248             {
14249
14250                 /* Get the class type index operand */
14251
14252                 _impResolveToken(CORINFO_TOKENKIND_Newarr);
14253
14254                 JITDUMP(" %08X", resolvedToken.token);
14255
14256                 if (!opts.IsReadyToRun())
14257                 {
14258                     // Need to restore array classes before creating array objects on the heap
14259                     op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
14260                     if (op1 == nullptr)
14261                     { // compDonotInline()
14262                         return;
14263                     }
14264                 }
14265
14266                 if (tiVerificationNeeded)
14267                 {
14268                     // As per ECMA, the 'numElems' operand can be either an int32 or a native int.
14269                     Verify(impStackTop().seTypeInfo.IsIntOrNativeIntType(), "bad bound");
14270
14271                     CORINFO_CLASS_HANDLE elemTypeHnd;
14272                     info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
14273                     Verify(elemTypeHnd == nullptr ||
14274                                !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
14275                            "array of byref-like type");
14276                 }
14277
14278                 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14279
14280                 accessAllowedResult =
14281                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14282                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14283
14284                 /* Form the arglist: array class handle, size */
14285                 op2 = impPopStack().val;
14286                 assertImp(genActualTypeIsIntOrI(op2->gtType));
14287
14288 #ifdef FEATURE_READYTORUN_COMPILER
14289                 if (opts.IsReadyToRun())
14290                 {
14291                     op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF,
14292                                                     gtNewArgList(op2));
14293                     usingReadyToRunHelper = (op1 != nullptr);
14294
14295                     if (!usingReadyToRunHelper)
14296                     {
14297                         // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14298                         // and the newarr call with a single call to a dynamic R2R cell that will:
14299                         //      1) Load the context
14300                         //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
14301                         //      3) Allocate the new array
14302                         // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14303
14304                         // Need to restore array classes before creating array objects on the heap
14305                         op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
14306                         if (op1 == nullptr)
14307                         { // compDonotInline()
14308                             return;
14309                         }
14310                     }
14311                 }
14312
14313                 if (!usingReadyToRunHelper)
14314 #endif
14315                 {
14316                     args = gtNewArgList(op1, op2);
14317
14318                     /* Create a call to 'new' */
14319
14320                     // Note that this only works for shared generic code because the same helper is used for all
14321                     // reference array types
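                          // For instance (illustrative): "new string[n]" and "new object[n]" can use the same
                          // helper; the element type is conveyed through the array class handle argument (op1)
                          // rather than by the helper's identity.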
14322                     op1 = gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, args);
14323                 }
14324
14325                 op1->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass;
14326
14327                 /* Remember that this basic block contains 'new' of a single-dimension (SD) array */
14328
14329                 block->bbFlags |= BBF_HAS_NEWARRAY;
14330                 optMethodFlags |= OMF_HAS_NEWARRAY;
14331
14332                 /* Push the result of the call on the stack */
14333
14334                 impPushOnStack(op1, tiRetVal);
14335
14336                 callTyp = TYP_REF;
14337             }
14338             break;
14339
14340             case CEE_LOCALLOC:
14341                 if (tiVerificationNeeded)
14342                 {
14343                     Verify(false, "bad opcode");
14344                 }
14345
14346                 // We don't allow locallocs inside handlers
14347                 if (block->hasHndIndex())
14348                 {
14349                     BADCODE("Localloc can't be inside handler");
14350                 }
14351
14352                 setNeedsGSSecurityCookie();
14353
14354                 // Get the size to allocate
14355
14356                 op2 = impPopStack().val;
14357                 assertImp(genActualTypeIsIntOrI(op2->gtType));
14358
14359                 if (verCurrentState.esStackDepth != 0)
14360                 {
14361                     BADCODE("Localloc can only be used when the stack is empty");
14362                 }
14363
14364                 // If the localloc is not in a loop and its size is a small constant,
14365                 // create a new local var of TYP_BLK and return its address.
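                      // For example (illustrative only), a small fixed-size C# 'stackalloc' such as
                      //     byte* p = stackalloc byte[32];
                      // compiles to 'localloc' in IL; when the site is not in a loop and the size is
                      // below the threshold checked below, it becomes a plain TYP_BLK local rather
                      // than a GT_LCLHEAP.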
14366                 {
14367                     bool convertedToLocal = false;
14368
14369                     // Need to aggressively fold here, as even fixed-size locallocs
14370                     // will have casts in the way.
14371                     op2 = gtFoldExpr(op2);
14372
14373                     if (op2->IsIntegralConst())
14374                     {
14375                         const ssize_t allocSize = op2->AsIntCon()->IconValue();
14376
14377                         if (allocSize == 0)
14378                         {
14379                             // Result is nullptr
14380                             JITDUMP("Converting stackalloc of 0 bytes to push null unmanaged pointer\n");
14381                             op1              = gtNewIconNode(0, TYP_I_IMPL);
14382                             convertedToLocal = true;
14383                         }
14384                         else if ((allocSize > 0) && ((compCurBB->bbFlags & BBF_BACKWARD_JUMP) == 0))
14385                         {
14386                             // Get the size threshold for local conversion
14387                             ssize_t maxSize = DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE;
14388
14389 #ifdef DEBUG
14390                             // Optionally allow this to be modified
14391                             maxSize = JitConfig.JitStackAllocToLocalSize();
14392 #endif // DEBUG
14393
14394                             if (allocSize <= maxSize)
14395                             {
14396                                 const unsigned stackallocAsLocal = lvaGrabTemp(false DEBUGARG("stackallocLocal"));
14397                                 JITDUMP("Converting stackalloc of %lld bytes to new local V%02u\n", allocSize,
14398                                         stackallocAsLocal);
14399                                 lvaTable[stackallocAsLocal].lvType           = TYP_BLK;
14400                                 lvaTable[stackallocAsLocal].lvExactSize      = (unsigned)allocSize;
14401                                 lvaTable[stackallocAsLocal].lvIsUnsafeBuffer = true;
14402                                 op1                      = gtNewLclvNode(stackallocAsLocal, TYP_BLK);
14403                                 op1                      = gtNewOperNode(GT_ADDR, TYP_I_IMPL, op1);
14404                                 convertedToLocal         = true;
14405                                 compGSReorderStackLayout = true;
14406                             }
14407                         }
14408                     }
14409
14410                     if (!convertedToLocal)
14411                     {
14412                         // Bail out if inlining and the localloc was not converted.
14413                         //
14414                         // Note we might consider allowing the inline, if the call
14415                         // site is not in a loop.
14416                         if (compIsForInlining())
14417                         {
14418                             InlineObservation obs = op2->IsIntegralConst()
14419                                                         ? InlineObservation::CALLEE_LOCALLOC_TOO_LARGE
14420                                                         : InlineObservation::CALLSITE_LOCALLOC_SIZE_UNKNOWN;
14421                             compInlineResult->NoteFatal(obs);
14422                             return;
14423                         }
14424
14425                         op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2);
14426                         // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd.
14427                         op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE);
14428
14429                         /* The FP register may not be back to the original value at the end
14430                            of the method, even if the frame size is 0, as localloc may
14431                            have modified it. So we will HAVE to reset it */
14432                         compLocallocUsed = true;
14433                     }
14434                     else
14435                     {
14436                         compLocallocOptimized = true;
14437                     }
14438                 }
14439
14440                 impPushOnStack(op1, tiRetVal);
14441                 break;
14442
14443             case CEE_ISINST:
14444             {
14445                 /* Get the type token */
14446                 assertImp(sz == sizeof(unsigned));
14447
14448                 _impResolveToken(CORINFO_TOKENKIND_Casting);
14449
14450                 JITDUMP(" %08X", resolvedToken.token);
14451
14452                 if (!opts.IsReadyToRun())
14453                 {
14454                     op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14455                     if (op2 == nullptr)
14456                     { // compDonotInline()
14457                         return;
14458                     }
14459                 }
14460
14461                 if (tiVerificationNeeded)
14462                 {
14463                     Verify(impStackTop().seTypeInfo.IsObjRef(), "obj reference needed");
14464                     // Even if this is a value class, we know it is boxed.
14465                     tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
14466                 }
14467                 accessAllowedResult =
14468                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14469                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14470
14471                 op1 = impPopStack().val;
14472
14473                 GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, false);
14474
14475                 if (optTree != nullptr)
14476                 {
14477                     impPushOnStack(optTree, tiRetVal);
14478                 }
14479                 else
14480                 {
14481
14482 #ifdef FEATURE_READYTORUN_COMPILER
14483                     if (opts.IsReadyToRun())
14484                     {
14485                         GenTreeCall* opLookup =
14486                             impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF,
14487                                                       gtNewArgList(op1));
14488                         usingReadyToRunHelper = (opLookup != nullptr);
14489                         op1                   = (usingReadyToRunHelper ? opLookup : op1);
14490
14491                         if (!usingReadyToRunHelper)
14492                         {
14493                             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14494                             // and the isinstanceof_any call with a single call to a dynamic R2R cell that will:
14495                             //      1) Load the context
14496                             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate
14497                             //      stub
14498                             //      3) Perform the 'is instance' check on the input object
14499                             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14500
14501                             op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14502                             if (op2 == nullptr)
14503                             { // compDonotInline()
14504                                 return;
14505                             }
14506                         }
14507                     }
14508
14509                     if (!usingReadyToRunHelper)
14510 #endif
14511                     {
14512                         op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false);
14513                     }
14514                     if (compDonotInline())
14515                     {
14516                         return;
14517                     }
14518
14519                     impPushOnStack(op1, tiRetVal);
14520                 }
14521                 break;
14522             }
14523
14524             case CEE_REFANYVAL:
14525
14526                 // get the class handle and make an ICON node out of it
14527
14528                 _impResolveToken(CORINFO_TOKENKIND_Class);
14529
14530                 JITDUMP(" %08X", resolvedToken.token);
14531
14532                 op2 = impTokenToHandle(&resolvedToken);
14533                 if (op2 == nullptr)
14534                 { // compDonotInline()
14535                     return;
14536                 }
14537
14538                 if (tiVerificationNeeded)
14539                 {
14540                     Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
14541                            "need refany");
14542                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass).MakeByRef();
14543                 }
14544
14545                 op1 = impPopStack().val;
14546                 // make certain it is normalized;
14547                 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
14548
14549                 // Call helper GETREFANY(classHandle, op1);
14550                 args = gtNewArgList(op2, op1);
14551                 op1  = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, args);
14552
14553                 impPushOnStack(op1, tiRetVal);
14554                 break;
14555
14556             case CEE_REFANYTYPE:
14557
14558                 if (tiVerificationNeeded)
14559                 {
14560                     Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
14561                            "need refany");
14562                 }
14563
14564                 op1 = impPopStack().val;
14565
14566                 // make certain it is normalized;
14567                 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
14568
14569                 if (op1->gtOper == GT_OBJ)
14570                 {
14571                     // Get the address of the refany
14572                     op1 = op1->gtOp.gtOp1;
14573
14574                     // Fetch the type from the correct slot
14575                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
14576                                         gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL));
14577                     op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1);
14578                 }
14579                 else
14580                 {
14581                     assertImp(op1->gtOper == GT_MKREFANY);
14582
14583                     // The pointer may have side-effects
14584                     if (op1->gtOp.gtOp1->gtFlags & GTF_SIDE_EFFECT)
14585                     {
14586                         impAppendTree(op1->gtOp.gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
14587 #ifdef DEBUG
14588                         impNoteLastILoffs();
14589 #endif
14590                     }
14591
14592                     // We already have the class handle
14593                     op1 = op1->gtOp.gtOp2;
14594                 }
14595
14596                 // convert native TypeHandle to RuntimeTypeHandle
14597                 {
14598                     GenTreeArgList* helperArgs = gtNewArgList(op1);
14599
14600                     op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL, TYP_STRUCT, helperArgs);
14601
14602                     // The handle struct is returned in a register
14603                     op1->gtCall.gtReturnType = GetRuntimeHandleUnderlyingType();
14604
14605                     tiRetVal = typeInfo(TI_STRUCT, impGetTypeHandleClass());
14606                 }
14607
14608                 impPushOnStack(op1, tiRetVal);
14609                 break;
14610
14611             case CEE_LDTOKEN:
14612             {
14613                 /* Get the Class index */
14614                 assertImp(sz == sizeof(unsigned));
14615                 lastLoadToken = codeAddr;
14616                 _impResolveToken(CORINFO_TOKENKIND_Ldtoken);
14617
14618                 tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken);
14619
14620                 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
14621                 if (op1 == nullptr)
14622                 { // compDonotInline()
14623                     return;
14624                 }
14625
14626                 helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE;
14627                 assert(resolvedToken.hClass != nullptr);
14628
14629                 if (resolvedToken.hMethod != nullptr)
14630                 {
14631                     helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD;
14632                 }
14633                 else if (resolvedToken.hField != nullptr)
14634                 {
14635                     helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD;
14636                 }
14637
14638                 GenTreeArgList* helperArgs = gtNewArgList(op1);
14639
14640                 op1 = gtNewHelperCallNode(helper, TYP_STRUCT, helperArgs);
14641
14642                 // The handle struct is returned in a register
14643                 op1->gtCall.gtReturnType = GetRuntimeHandleUnderlyingType();
14644
14645                 tiRetVal = verMakeTypeInfo(tokenType);
14646                 impPushOnStack(op1, tiRetVal);
14647             }
14648             break;
14649
14650             case CEE_UNBOX:
14651             case CEE_UNBOX_ANY:
14652             {
14653                 /* Get the Class index */
14654                 assertImp(sz == sizeof(unsigned));
14655
14656                 _impResolveToken(CORINFO_TOKENKIND_Class);
14657
14658                 JITDUMP(" %08X", resolvedToken.token);
14659
14660                 BOOL runtimeLookup;
14661                 op2 = impTokenToHandle(&resolvedToken, &runtimeLookup);
14662                 if (op2 == nullptr)
14663                 {
14664                     assert(compDonotInline());
14665                     return;
14666                 }
14667
14668                 // Always run this so we can get access exceptions even with SkipVerification.
14669                 accessAllowedResult =
14670                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14671                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14672
14673                 if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass))
14674                 {
14675                     if (tiVerificationNeeded)
14676                     {
14677                         typeInfo tiUnbox = impStackTop().seTypeInfo;
14678                         Verify(tiUnbox.IsObjRef(), "bad unbox.any arg");
14679                         tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14680                         tiRetVal.NormaliseForStack();
14681                     }
14682                     JITDUMP("\n Importing UNBOX.ANY(refClass) as CASTCLASS\n");
14683                     op1 = impPopStack().val;
14684                     goto CASTCLASS;
14685                 }
14686
14687                 /* Pop the object and create the unbox helper call */
14688                 /* You might think that for UNBOX_ANY we need to push a different */
14689                 /* (non-byref) type, but here we're making the tiRetVal that is used */
14690                 /* for the intermediate pointer which we then transfer onto the OBJ */
14691                 /* instruction.  OBJ then creates the appropriate tiRetVal. */
14692                 if (tiVerificationNeeded)
14693                 {
14694                     typeInfo tiUnbox = impStackTop().seTypeInfo;
14695                     Verify(tiUnbox.IsObjRef(), "Bad unbox arg");
14696
14697                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14698                     Verify(tiRetVal.IsValueClass(), "not value class");
14699                     tiRetVal.MakeByRef();
14700
14701                     // We always come from an objref, so this is a safe byref
14702                     tiRetVal.SetIsPermanentHomeByRef();
14703                     tiRetVal.SetIsReadonlyByRef();
14704                 }
14705
14706                 op1 = impPopStack().val;
14707                 assertImp(op1->gtType == TYP_REF);
14708
14709                 helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass);
14710                 assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE);
14711
14712                 // Check legality and profitability of inline expansion for unboxing.
14713                 const bool canExpandInline    = (helper == CORINFO_HELP_UNBOX);
14714                 const bool shouldExpandInline = !(compCurBB->isRunRarely() || opts.compDbgCode || opts.MinOpts());
14715
14716                 if (canExpandInline && shouldExpandInline)
14717                 {
14718                     JITDUMP("\n Importing %s as inline sequence\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY");
14719                     // we are doing normal unboxing
14720                     // inline the common case of the unbox helper
14721                     // UNBOX(exp) morphs into
14722                     // clone = pop(exp);
14723                     // ((*clone == typeToken) ? nop : helper(clone, typeToken));
14724                     // push(clone + TARGET_POINTER_SIZE)
14725                     //
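                          // Roughly, the trees built below look like this ('obj' is the popped operand):
                          //   appended stmt: QMARK( EQ(IND(obj), typeToken), COLON(NOP, CALL UNBOX(typeToken, obj)) )
                          //   pushed value:  ADD(obj, TARGET_POINTER_SIZE)   -- byref to the unboxed payload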
14726                     GenTree* cloneOperand;
14727                     op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
14728                                        nullptr DEBUGARG("inline UNBOX clone1"));
14729                     op1 = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
14730
14731                     GenTree* condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2);
14732
14733                     op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
14734                                        nullptr DEBUGARG("inline UNBOX clone2"));
14735                     op2 = impTokenToHandle(&resolvedToken);
14736                     if (op2 == nullptr)
14737                     { // compDonotInline()
14738                         return;
14739                     }
14740                     args = gtNewArgList(op2, op1);
14741                     op1  = gtNewHelperCallNode(helper, TYP_VOID, args);
14742
14743                     op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1);
14744                     op1 = gtNewQmarkNode(TYP_VOID, condBox, op1);
14745                     condBox->gtFlags |= GTF_RELOP_QMARK;
14746
14747                     // QMARK nodes cannot reside on the evaluation stack. Because there
14748                     // may be other trees on the evaluation stack that side-effect the
14749                     // sources of the UNBOX operation we must spill the stack.
14750
14751                     impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
14752
14753                     // Create the address-expression to reference past the object header
14754                     // to the beginning of the value-type. Today this means adjusting
14755                     // past the base of the object's vtable field, which is pointer sized.
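                          // That is, the unboxed payload starts at [obj + TARGET_POINTER_SIZE], just past the
                          // method table pointer that occupies the first pointer-sized slot of the object.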
14756
14757                     op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
14758                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2);
14759                 }
14760                 else
14761                 {
14762                     JITDUMP("\n Importing %s as helper call because %s\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY",
14763                             canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal");
14764
14765                     // Don't optimize, just call the helper and be done with it
14766                     args = gtNewArgList(op2, op1);
14767                     op1 =
14768                         gtNewHelperCallNode(helper,
14769                                             (var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT), args);
14770                 }
14771
14772                 assert(((helper == CORINFO_HELP_UNBOX) && (op1->gtType == TYP_BYREF)) || // Unbox helper returns a byref.
14773                        ((helper == CORINFO_HELP_UNBOX_NULLABLE) &&
14774                         varTypeIsStruct(op1)) // UnboxNullable helper returns a struct.
14775                        );
14776
14777                 /*
14778                   ----------------------------------------------------------------------
14779                   | \ helper  |                         |                              |
14780                   |   \       |                         |                              |
14781                   |     \     | CORINFO_HELP_UNBOX      | CORINFO_HELP_UNBOX_NULLABLE  |
14782                   |       \   | (which returns a BYREF) | (which returns a STRUCT)     |
14783                   | opcode  \ |                         |                              |
14784                   |---------------------------------------------------------------------
14785                   | UNBOX     | push the BYREF          | spill the STRUCT to a local, |
14786                   |           |                         | push the BYREF to this local |
14787                   |---------------------------------------------------------------------
14788                   | UNBOX_ANY | push a GT_OBJ of        | push the STRUCT              |
14789                   |           | the BYREF               | For Linux when the           |
14790                   |           |                         |  struct is returned in two   |
14791                   |           |                         |  registers create a temp     |
14792                   |           |                         |  whose address is passed to  |
14793                   |           |                         |  the unbox_nullable helper.  |
14794                   |---------------------------------------------------------------------
14795                 */
14796
14797                 if (opcode == CEE_UNBOX)
14798                 {
14799                     if (helper == CORINFO_HELP_UNBOX_NULLABLE)
14800                     {
14801                         // Unbox nullable helper returns a struct type.
14802                         // We need to spill it to a temp so that we can take the address of it.
14803                         // Here we need the unsafe value cls check, since the address of the struct is taken to be used
14804                         // further along and could potentially be exploitable.
14805
14806                         unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable"));
14807                         lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14808
14809                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14810                         op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14811                         assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14812
14813                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14814                         op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14815                         op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14816                     }
14817
14818                     assert(op1->gtType == TYP_BYREF);
14819                     assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14820                 }
14821                 else
14822                 {
14823                     assert(opcode == CEE_UNBOX_ANY);
14824
14825                     if (helper == CORINFO_HELP_UNBOX)
14826                     {
14827                         // Normal unbox helper returns a TYP_BYREF.
14828                         impPushOnStack(op1, tiRetVal);
14829                         oper = GT_OBJ;
14830                         goto OBJ;
14831                     }
14832
14833                     assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!");
14834
14835 #if FEATURE_MULTIREG_RET
14836
14837                     if (varTypeIsStruct(op1) && IsMultiRegReturnedType(resolvedToken.hClass))
14838                     {
14839                         // Unbox nullable helper returns a TYP_STRUCT.
14840                         // For the multi-reg case we need to spill it to a temp so that
14841                         // we can pass the address to the unbox_nullable jit helper.
14842
14843                         unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable"));
14844                         lvaTable[tmp].lvIsMultiRegArg = true;
14845                         lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14846
14847                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14848                         op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14849                         assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14850
14851                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14852                         op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14853                         op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14854
14855                         // In this case the return value of the unbox helper is TYP_BYREF.
14856                         // Make sure the right type is placed on the operand type stack.
14857                         impPushOnStack(op1, tiRetVal);
14858
14859                         // Load the struct.
14860                         oper = GT_OBJ;
14861
14862                         assert(op1->gtType == TYP_BYREF);
14863                         assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14864
14865                         goto OBJ;
14866                     }
14867                     else
14868
14869 #endif // FEATURE_MULTIREG_RET
14870
14871                     {
14872                         // If the struct is not returnable in registers, we have it materialized in the RetBuf.
14873                         assert(op1->gtType == TYP_STRUCT);
14874                         tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14875                         assert(tiRetVal.IsValueClass());
14876                     }
14877                 }
14878
14879                 impPushOnStack(op1, tiRetVal);
14880             }
14881             break;
14882
14883             case CEE_BOX:
14884             {
14885                 /* Get the Class index */
14886                 assertImp(sz == sizeof(unsigned));
14887
14888                 _impResolveToken(CORINFO_TOKENKIND_Box);
14889
14890                 JITDUMP(" %08X", resolvedToken.token);
14891
14892                 if (tiVerificationNeeded)
14893                 {
14894                     typeInfo tiActual = impStackTop().seTypeInfo;
14895                     typeInfo tiBox    = verMakeTypeInfo(resolvedToken.hClass);
14896
14897                     Verify(verIsBoxable(tiBox), "boxable type expected");
14898
14899                     // check the class constraints of the boxed type in case we are boxing an uninitialized value
14900                     Verify(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
14901                            "boxed type has unsatisfied class constraints");
14902
14903                     Verify(tiCompatibleWith(tiActual, tiBox.NormaliseForStack(), true), "type mismatch");
14904
14905                     // Observation: the following code introduces a boxed value class on the stack, but,
14906                     // according to the ECMA spec, one would simply expect: tiRetVal =
14907                     // typeInfo(TI_REF,impGetObjectClass());
14908
14909                     // Push the result back on the stack:
14910                     // even if clsHnd is a value class we want the TI_REF.
14911                     // We call back to the EE to find out what type we should push (for Nullable<T> we push T).
14912                     tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(resolvedToken.hClass));
14913                 }
14914
14915                 accessAllowedResult =
14916                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14917                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14918
14919                 // Note BOX can be used on things that are not value classes, in which
14920                 // case we get a NOP.  However the verifier's view of the type on the
14921                 // stack changes (in generic code a 'T' becomes a 'boxed T')
14922                 if (!eeIsValueClass(resolvedToken.hClass))
14923                 {
14924                     JITDUMP("\n Importing BOX(refClass) as NOP\n");
14925                     verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal;
14926                     break;
14927                 }
14928
14929                 // Look ahead for unbox.any
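                      // e.g. the IL sequence
                      //     box       !!T
                      //     unbox.any !!T
                      // round-trips the value, so when both tokens resolve to the same type the pair can be
                      // dropped entirely (see the TypeCompareState::Must check below).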
14930                 if (codeAddr + (sz + 1 + sizeof(mdToken)) <= codeEndp && codeAddr[sz] == CEE_UNBOX_ANY)
14931                 {
14932                     CORINFO_RESOLVED_TOKEN unboxResolvedToken;
14933
14934                     impResolveToken(codeAddr + (sz + 1), &unboxResolvedToken, CORINFO_TOKENKIND_Class);
14935
14936                     // See if the resolved tokens describe types that are equal.
14937                     const TypeCompareState compare =
14938                         info.compCompHnd->compareTypesForEquality(unboxResolvedToken.hClass, resolvedToken.hClass);
14939
14940                     // If so, box/unbox.any is a nop.
14941                     if (compare == TypeCompareState::Must)
14942                     {
14943                         JITDUMP("\n Importing BOX; UNBOX.ANY as NOP\n");
14944                         // Skip the next unbox.any instruction
14945                         sz += sizeof(mdToken) + 1;
14946                         break;
14947                     }
14948                 }
14949
14950                 impImportAndPushBox(&resolvedToken);
14951                 if (compDonotInline())
14952                 {
14953                     return;
14954                 }
14955             }
14956             break;
14957
14958             case CEE_SIZEOF:
14959
14960                 /* Get the Class index */
14961                 assertImp(sz == sizeof(unsigned));
14962
14963                 _impResolveToken(CORINFO_TOKENKIND_Class);
14964
14965                 JITDUMP(" %08X", resolvedToken.token);
14966
14967                 if (tiVerificationNeeded)
14968                 {
14969                     tiRetVal = typeInfo(TI_INT);
14970                 }
14971
14972                 op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass));
14973                 impPushOnStack(op1, tiRetVal);
14974                 break;
14975
14976             case CEE_CASTCLASS:
14977
14978                 /* Get the Class index */
14979
14980                 assertImp(sz == sizeof(unsigned));
14981
14982                 _impResolveToken(CORINFO_TOKENKIND_Casting);
14983
14984                 JITDUMP(" %08X", resolvedToken.token);
14985
14986                 if (!opts.IsReadyToRun())
14987                 {
14988                     op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14989                     if (op2 == nullptr)
14990                     { // compDonotInline()
14991                         return;
14992                     }
14993                 }
14994
14995                 if (tiVerificationNeeded)
14996                 {
14997                     Verify(impStackTop().seTypeInfo.IsObjRef(), "object ref expected");
14998                     // box it
14999                     tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
15000                 }
15001
15002                 accessAllowedResult =
15003                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
15004                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
15005
15006                 op1 = impPopStack().val;
15007
15008             /* Pop the address and create the 'checked cast' helper call */
15009
15010             // At this point we expect typeRef to contain the token, op1 to contain the value being cast,
15011             // and op2 to contain code that creates the type handle corresponding to typeRef
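                  // Note: this label is reached both from CEE_CASTCLASS and from the UNBOX.ANY path above,
                  // which imports 'unbox.any' of a reference type as a plain CASTCLASS (via 'goto CASTCLASS').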
15012             CASTCLASS:
15013             {
15014                 GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, true);
15015
15016                 if (optTree != nullptr)
15017                 {
15018                     impPushOnStack(optTree, tiRetVal);
15019                 }
15020                 else
15021                 {
15022
15023 #ifdef FEATURE_READYTORUN_COMPILER
15024                     if (opts.IsReadyToRun())
15025                     {
15026                         GenTreeCall* opLookup =
15027                             impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST, TYP_REF,
15028                                                       gtNewArgList(op1));
15029                         usingReadyToRunHelper = (opLookup != nullptr);
15030                         op1                   = (usingReadyToRunHelper ? opLookup : op1);
15031
15032                         if (!usingReadyToRunHelper)
15033                         {
15034                             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
15035                             // and the chkcastany call with a single call to a dynamic R2R cell that will:
15036                             //      1) Load the context
15037                             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate
15038                             //      stub
15039                             //      3) Check the object on the stack for the type-cast
15040                             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
15041
15042                             op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
15043                             if (op2 == nullptr)
15044                             { // compDonotInline()
15045                                 return;
15046                             }
15047                         }
15048                     }
15049
15050                     if (!usingReadyToRunHelper)
15051 #endif
15052                     {
15053                         op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true);
15054                     }
15055                     if (compDonotInline())
15056                     {
15057                         return;
15058                     }
15059
15060                     /* Push the result back on the stack */
15061                     impPushOnStack(op1, tiRetVal);
15062                 }
15063             }
15064             break;
15065
15066             case CEE_THROW:
15067
15068                 if (compIsForInlining())
15069                 {
15070                     // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
15071                     // TODO: Will this be too strict, given that we will inline many basic blocks?
15072                     // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
15073
15074                     /* Do we have just the exception on the stack? */
15075
15076                     if (verCurrentState.esStackDepth != 1)
15077                     {
15078                         /* if not, just don't inline the method */
15079
15080                         compInlineResult->NoteFatal(InlineObservation::CALLEE_THROW_WITH_INVALID_STACK);
15081                         return;
15082                     }
15083                 }
15084
15085                 if (tiVerificationNeeded)
15086                 {
15087                     tiRetVal = impStackTop().seTypeInfo;
15088                     Verify(tiRetVal.IsObjRef(), "object ref expected");
15089                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
15090                     {
15091                         Verify(!tiRetVal.IsThisPtr(), "throw uninitialized this");
15092                     }
15093                 }
15094
15095                 block->bbSetRunRarely(); // any block with a throw is rare
15096                 /* Pop the exception object and create the 'throw' helper call */
15097
15098                 op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, gtNewArgList(impPopStack().val));
15099
15100             EVAL_APPEND:
15101                 if (verCurrentState.esStackDepth > 0)
15102                 {
15103                     impEvalSideEffects();
15104                 }
15105
15106                 assert(verCurrentState.esStackDepth == 0);
15107
15108                 goto APPEND;
15109
15110             case CEE_RETHROW:
15111
15112                 assert(!compIsForInlining());
15113
15114                 if (info.compXcptnsCount == 0)
15115                 {
15116                     BADCODE("rethrow outside catch");
15117                 }
15118
15119                 if (tiVerificationNeeded)
15120                 {
15121                     Verify(block->hasHndIndex(), "rethrow outside catch");
15122                     if (block->hasHndIndex())
15123                     {
15124                         EHblkDsc* HBtab = ehGetDsc(block->getHndIndex());
15125                         Verify(!HBtab->HasFinallyOrFaultHandler(), "rethrow in finally or fault");
15126                         if (HBtab->HasFilter())
15127                         {
15128                             // we had better be in the handler clause part, not the filter part
15129                             Verify(jitIsBetween(compCurBB->bbCodeOffs, HBtab->ebdHndBegOffs(), HBtab->ebdHndEndOffs()),
15130                                    "rethrow in filter");
15131                         }
15132                     }
15133                 }
15134
15135                 /* Create the 'rethrow' helper call */
15136
15137                 op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID);
15138
15139                 goto EVAL_APPEND;
15140
15141             case CEE_INITOBJ:
15142
15143                 assertImp(sz == sizeof(unsigned));
15144
15145                 _impResolveToken(CORINFO_TOKENKIND_Class);
15146
15147                 JITDUMP(" %08X", resolvedToken.token);
15148
15149                 if (tiVerificationNeeded)
15150                 {
15151                     typeInfo tiTo    = impStackTop().seTypeInfo;
15152                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
15153
15154                     Verify(tiTo.IsByRef(), "byref expected");
15155                     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
15156
15157                     Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
15158                            "type operand incompatible with type of address");
15159                 }
15160
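                      // Import 'initobj' as a zero-init block store: fill getClassSize(hClass) bytes at the
                      // destination address with zero (honoring any 'volatile.' prefix).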
15161                 size = info.compCompHnd->getClassSize(resolvedToken.hClass); // Size
15162                 op2  = gtNewIconNode(0);                                     // Value
15163                 op1  = impPopStack().val;                                    // Dest
15164                 op1  = gtNewBlockVal(op1, size);
15165                 op1  = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
15166                 goto SPILL_APPEND;
15167
15168             case CEE_INITBLK:
15169
15170                 if (tiVerificationNeeded)
15171                 {
15172                     Verify(false, "bad opcode");
15173                 }
15174
15175                 op3 = impPopStack().val; // Size
15176                 op2 = impPopStack().val; // Value
15177                 op1 = impPopStack().val; // Dest
15178
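                      // A constant size lets us build a fixed-size GT_BLK over the destination; otherwise we
                      // fall back to a GT_DYN_BLK that carries the size tree.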
15179                 if (op3->IsCnsIntOrI())
15180                 {
15181                     size = (unsigned)op3->AsIntConCommon()->IconValue();
15182                     op1  = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
15183                 }
15184                 else
15185                 {
15186                     op1  = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
15187                     size = 0;
15188                 }
15189                 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
15190
15191                 goto SPILL_APPEND;
15192
15193             case CEE_CPBLK:
15194
15195                 if (tiVerificationNeeded)
15196                 {
15197                     Verify(false, "bad opcode");
15198                 }
15199                 op3 = impPopStack().val; // Size
15200                 op2 = impPopStack().val; // Src
15201                 op1 = impPopStack().val; // Dest
15202
15203                 if (op3->IsCnsIntOrI())
15204                 {
15205                     size = (unsigned)op3->AsIntConCommon()->IconValue();
15206                     op1  = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
15207                 }
15208                 else
15209                 {
15210                     op1  = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
15211                     size = 0;
15212                 }
15213                 if (op2->OperGet() == GT_ADDR)
15214                 {
15215                     op2 = op2->gtOp.gtOp1;
15216                 }
15217                 else
15218                 {
15219                     op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2);
15220                 }
15221
15222                 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, true);
15223                 goto SPILL_APPEND;
15224
15225             case CEE_CPOBJ:
15226
15227                 assertImp(sz == sizeof(unsigned));
15228
15229                 _impResolveToken(CORINFO_TOKENKIND_Class);
15230
15231                 JITDUMP(" %08X", resolvedToken.token);
15232
15233                 if (tiVerificationNeeded)
15234                 {
15235                     typeInfo tiFrom  = impStackTop().seTypeInfo;
15236                     typeInfo tiTo    = impStackTop(1).seTypeInfo;
15237                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
15238
15239                     Verify(tiFrom.IsByRef(), "expected byref source");
15240                     Verify(tiTo.IsByRef(), "expected byref destination");
15241
15242                     Verify(tiCompatibleWith(tiFrom.DereferenceByRef(), tiInstr, false),
15243                            "type of source address incompatible with type operand");
15244                     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
15245                     Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
15246                            "type operand incompatible with type of destination address");
15247                 }
15248
15249                 if (!eeIsValueClass(resolvedToken.hClass))
15250                 {
15251                     op1 = impPopStack().val; // address to load from
15252
15253                     impBashVarAddrsToI(op1);
15254
15255                     assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
15256
15257                     op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
15258                     op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
15259
15260                     impPushOnStack(op1, typeInfo());
15261                     opcode = CEE_STIND_REF;
15262                     lclTyp = TYP_REF;
15263                     goto STIND_POST_VERIFY;
15264                 }
15265
15266                 op2 = impPopStack().val; // Src
15267                 op1 = impPopStack().val; // Dest
15268                 op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0));
15269                 goto SPILL_APPEND;
15270
15271             case CEE_STOBJ:
15272             {
15273                 assertImp(sz == sizeof(unsigned));
15274
15275                 _impResolveToken(CORINFO_TOKENKIND_Class);
15276
15277                 JITDUMP(" %08X", resolvedToken.token);
15278
15279                 if (eeIsValueClass(resolvedToken.hClass))
15280                 {
15281                     lclTyp = TYP_STRUCT;
15282                 }
15283                 else
15284                 {
15285                     lclTyp = TYP_REF;
15286                 }
15287
15288                 if (tiVerificationNeeded)
15289                 {
15290
15291                     typeInfo tiPtr = impStackTop(1).seTypeInfo;
15292
15293                     // Make sure we have a good looking byref
15294                     Verify(tiPtr.IsByRef(), "pointer not byref");
15295                     Verify(!tiPtr.IsReadonlyByRef(), "write to readonly byref");
15296                     if (!tiPtr.IsByRef() || tiPtr.IsReadonlyByRef())
15297                     {
15298                         compUnsafeCastUsed = true;
15299                     }
15300
15301                     typeInfo ptrVal = DereferenceByRef(tiPtr);
15302                     typeInfo argVal = verMakeTypeInfo(resolvedToken.hClass);
15303
15304                     if (!tiCompatibleWith(impStackTop(0).seTypeInfo, NormaliseForStack(argVal), true))
15305                     {
15306                         Verify(false, "type of value incompatible with type operand");
15307                         compUnsafeCastUsed = true;
15308                     }
15309
15310                     if (!tiCompatibleWith(argVal, ptrVal, false))
15311                     {
15312                         Verify(false, "type operand incompatible with type of address");
15313                         compUnsafeCastUsed = true;
15314                     }
15315                 }
15316                 else
15317                 {
15318                     compUnsafeCastUsed = true;
15319                 }
15320
15321                 if (lclTyp == TYP_REF)
15322                 {
15323                     opcode = CEE_STIND_REF;
15324                     goto STIND_POST_VERIFY;
15325                 }
15326
15327                 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
15328                 if (impIsPrimitive(jitTyp))
15329                 {
15330                     lclTyp = JITtype2varType(jitTyp);
15331                     goto STIND_POST_VERIFY;
15332                 }
15333
15334                 op2 = impPopStack().val; // Value
15335                 op1 = impPopStack().val; // Ptr
15336
15337                 assertImp(varTypeIsStruct(op2));
15338
15339                 op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
15340
15341                 if (op1->OperIsBlkOp() && (prefixFlags & PREFIX_UNALIGNED))
15342                 {
15343                     op1->gtFlags |= GTF_BLK_UNALIGNED;
15344                 }
15345                 goto SPILL_APPEND;
15346             }
15347
15348             case CEE_MKREFANY:
15349
15350                 assert(!compIsForInlining());
15351
15352                 // Being lazy here. Refanys are tricky in terms of gc tracking.
15353                 // Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany.
15354
15355                 JITDUMP("disabling struct promotion because of mkrefany\n");
15356                 fgNoStructPromotion = true;
15357
15358                 oper = GT_MKREFANY;
15359                 assertImp(sz == sizeof(unsigned));
15360
15361                 _impResolveToken(CORINFO_TOKENKIND_Class);
15362
15363                 JITDUMP(" %08X", resolvedToken.token);
15364
15365                 op2 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
15366                 if (op2 == nullptr)
15367                 { // compDonotInline()
15368                     return;
15369                 }
15370
15371                 if (tiVerificationNeeded)
15372                 {
15373                     typeInfo tiPtr   = impStackTop().seTypeInfo;
15374                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
15375
15376                     Verify(!verIsByRefLike(tiInstr), "mkrefany of byref-like class");
15377                     Verify(!tiPtr.IsReadonlyByRef(), "readonly byref used with mkrefany");
15378                     Verify(typeInfo::AreEquivalent(tiPtr.DereferenceByRef(), tiInstr), "type mismatch");
15379                 }
15380
15381                 accessAllowedResult =
15382                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
15383                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
15384
15385                 op1 = impPopStack().val;
15386
15387                 // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec.
15388                 // But JIT32 allowed it, so we continue to allow it.
15389                 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT);
15390
15391                 // MKREFANY returns a struct.  op2 is the class token.
15392                 op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2);
15393
15394                 impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass()));
15395                 break;
15396
15397             case CEE_LDOBJ:
15398             {
15399                 oper = GT_OBJ;
15400                 assertImp(sz == sizeof(unsigned));
15401
15402                 _impResolveToken(CORINFO_TOKENKIND_Class);
15403
15404                 JITDUMP(" %08X", resolvedToken.token);
15405
15406             OBJ:
15407
15408                 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
15409
15410                 if (tiVerificationNeeded)
15411                 {
15412                     typeInfo tiPtr = impStackTop().seTypeInfo;
15413
15414                     // Make sure we have a byref
15415                     if (!tiPtr.IsByRef())
15416                     {
15417                         Verify(false, "pointer not byref");
15418                         compUnsafeCastUsed = true;
15419                     }
15420                     typeInfo tiPtrVal = DereferenceByRef(tiPtr);
15421
15422                     if (!tiCompatibleWith(tiPtrVal, tiRetVal, false))
15423                     {
15424                         Verify(false, "type of address incompatible with type operand");
15425                         compUnsafeCastUsed = true;
15426                     }
15427                     tiRetVal.NormaliseForStack();
15428                 }
15429                 else
15430                 {
15431                     compUnsafeCastUsed = true;
15432                 }
15433
15434                 if (eeIsValueClass(resolvedToken.hClass))
15435                 {
15436                     lclTyp = TYP_STRUCT;
15437                 }
15438                 else
15439                 {
15440                     lclTyp = TYP_REF;
15441                     opcode = CEE_LDIND_REF;
15442                     goto LDIND_POST_VERIFY;
15443                 }
15444
15445                 op1 = impPopStack().val;
15446
15447                 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL);
15448
15449                 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
15450                 if (impIsPrimitive(jitTyp))
15451                 {
15452                     op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1);
15453
15454                     // Could point anywhere, for example a boxed class static int
15455                     op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF;
15456                     assertImp(varTypeIsArithmetic(op1->gtType));
15457                 }
15458                 else
15459                 {
15460                     // OBJ returns a struct
15461                     // and takes an inline argument which is the class token of the loaded obj
15462                     op1 = gtNewObjNode(resolvedToken.hClass, op1);
15463                 }
15464                 op1->gtFlags |= GTF_EXCEPT;
15465
15466                 if (prefixFlags & PREFIX_UNALIGNED)
15467                 {
15468                     op1->gtFlags |= GTF_IND_UNALIGNED;
15469                 }
15470
15471                 impPushOnStack(op1, tiRetVal);
15472                 break;
15473             }
15474
15475             case CEE_LDLEN:
15476                 if (tiVerificationNeeded)
15477                 {
15478                     typeInfo tiArray = impStackTop().seTypeInfo;
15479                     Verify(verIsSDArray(tiArray), "bad array");
15480                     tiRetVal = typeInfo(TI_INT);
15481                 }
15482
15483                 op1 = impPopStack().val;
15484                 if (!opts.MinOpts() && !opts.compDbgCode)
15485                 {
15486                     /* Use GT_ARR_LENGTH operator so rng check opts see this */
15487                     GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, offsetof(CORINFO_Array, length));
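                          // Keeping the length as an explicit GT_ARR_LENGTH node (rather than a raw indirection)
                          // lets the range-check optimizer match it against later bounds checks on the same
                          // array and, where possible, remove them.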
15488
15489                     /* Mark the block as containing a length expression */
15490
15491                     if (op1->gtOper == GT_LCL_VAR)
15492                     {
15493                         block->bbFlags |= BBF_HAS_IDX_LEN;
15494                     }
15495
15496                     op1 = arrLen;
15497                 }
15498                 else
15499                 {
15500                     /* Create the expression "*(array_addr + ArrLenOffs)" */
15501                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
15502                                         gtNewIconNode(offsetof(CORINFO_Array, length), TYP_I_IMPL));
15503                     op1 = gtNewIndir(TYP_INT, op1);
15504                     op1->gtFlags |= GTF_IND_ARR_LEN;
15505                 }
15506
15507                 /* Push the result back on the stack */
15508                 impPushOnStack(op1, tiRetVal);
15509                 break;
15510
15511             case CEE_BREAK:
15512                 op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID);
15513                 goto SPILL_APPEND;
15514
15515             case CEE_NOP:
15516                 if (opts.compDbgCode)
15517                 {
15518                     op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
15519                     goto SPILL_APPEND;
15520                 }
15521                 break;
15522
15523             /******************************** NYI *******************************/
15524
15525             case 0xCC:
15526                 OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n");
15527
15528             case CEE_ILLEGAL:
15529             case CEE_MACRO_END:
15530
15531             default:
15532                 BADCODE3("unknown opcode", ": %02X", (int)opcode);
15533         }
15534
15535         codeAddr += sz;
15536         prevOpcode = opcode;
15537
15538         prefixFlags = 0;
15539     }
15540
15541     return;
15542 #undef _impResolveToken
15543 }
15544 #ifdef _PREFAST_
15545 #pragma warning(pop)
15546 #endif
15547
15548 // Push a local/argument tree on the operand stack
15549 void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal)
15550 {
15551     tiRetVal.NormaliseForStack();
15552
15553     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr())
15554     {
15555         tiRetVal.SetUninitialisedObjRef();
15556     }
15557
15558     impPushOnStack(op, tiRetVal);
15559 }
15560
15561 // Load a local/argument on the operand stack
15562 // lclNum is an index into lvaTable *NOT* the arg/lcl index in the IL
15563 void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, typeInfo tiRetVal)
15564 {
15565     var_types lclTyp;
15566
15567     if (lvaTable[lclNum].lvNormalizeOnLoad())
15568     {
15569         lclTyp = lvaGetRealType(lclNum);
15570     }
15571     else
15572     {
15573         lclTyp = lvaGetActualType(lclNum);
15574     }
15575
15576     impPushVar(gtNewLclvNode(lclNum, lclTyp, offset), tiRetVal);
15577 }
15578
15579 // Load an argument on the operand stack
15580 // Shared by the various CEE_LDARG opcodes
15581 // ilArgNum is the argument index as specified in IL.
15582 // It will be mapped to the correct lvaTable index
15583 void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset)
15584 {
15585     Verify(ilArgNum < info.compILargsCount, "bad arg num");
15586
15587     if (compIsForInlining())
15588     {
15589         if (ilArgNum >= info.compArgsCount)
15590         {
15591             compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER);
15592             return;
15593         }
15594
15595         impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo),
15596                    impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo);
15597     }
15598     else
15599     {
15600         if (ilArgNum >= info.compArgsCount)
15601         {
15602             BADCODE("Bad IL");
15603         }
15604
15605         unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param
15606
15607         if (lclNum == info.compThisArg)
15608         {
15609             lclNum = lvaArg0Var;
15610         }
15611
15612         impLoadVar(lclNum, offset);
15613     }
15614 }
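// Illustrative sketch of the IL-to-lclNum mapping performed above (hypothetical method, for exposition):
//
//     .method instance int32 Foo(int32 x)
//         ldarg.0    // impLoadArg(0): the 'this' pointer; redirected to lvaArg0Var
//         ldarg.1    // impLoadArg(1): 'x'; compMapILargNum(1) accounts for any hidden parameters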
15615
15616 // Load a local on the operand stack
15617 // Shared by the various CEE_LDLOC opcodes
15618 // ilLclNum is the local index as specified in IL.
15619 // It will be mapped to the correct lvaTable index
15620 void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset)
15621 {
15622     if (tiVerificationNeeded)
15623     {
15624         Verify(ilLclNum < info.compMethodInfo->locals.numArgs, "bad loc num");
15625         Verify(info.compInitMem, "initLocals not set");
15626     }
15627
15628     if (compIsForInlining())
15629     {
15630         if (ilLclNum >= info.compMethodInfo->locals.numArgs)
15631         {
15632             compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER);
15633             return;
15634         }
15635
15636         // Get the local type
15637         var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo;
15638
15639         typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo;
15640
15641         /* Have we allocated a temp for this local? */
15642
15643         unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp"));
15644
15645         // All vars of inlined methods should be !lvNormalizeOnLoad()
15646
15647         assert(!lvaTable[lclNum].lvNormalizeOnLoad());
15648         lclTyp = genActualType(lclTyp);
15649
15650         impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal);
15651     }
15652     else
15653     {
15654         if (ilLclNum >= info.compMethodInfo->locals.numArgs)
15655         {
15656             BADCODE("Bad IL");
15657         }
15658
15659         unsigned lclNum = info.compArgsCount + ilLclNum;
15660
15661         impLoadVar(lclNum, offset);
15662     }
15663 }
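// Illustrative example for the non-inlining path above (hypothetical numbers, for exposition):
// IL locals simply follow the arguments in lvaTable, so with info.compArgsCount == 3,
// 'ldloc.1' (ilLclNum == 1) maps to lvaTable index 3 + 1 == 4.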
15664
15665 #ifdef _TARGET_ARM_
15666 /**************************************************************************************
15667  *
15668  *  When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the
15669  *  dst struct, because struct promotion will turn it into a float/double variable while
15670  *  the rhs will be an int/long variable. We don't code generate assignment of int into
15671  *  a float, but there is nothing that might prevent us from doing so. The tree however
15672  *  would like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int))
15673  *
15674  *  tmpNum - the lcl dst variable num that is a struct.
15675  *  src    - the src tree assigned to the dest that is a struct/int (when varargs call.)
15676  *  hClass - the type handle for the struct variable.
15677  *
15678  *  TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play,
15679  *        however, we could do a codegen of transferring from int to float registers
15680  *        (transfer, not a cast.)
15681  *
15682  */
15683 void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* src, CORINFO_CLASS_HANDLE hClass)
15684 {
15685     if (src->gtOper == GT_CALL && src->gtCall.IsVarargs() && IsHfa(hClass))
15686     {
15687         int       hfaSlots = GetHfaCount(hClass);
15688         var_types hfaType  = GetHfaType(hClass);
15689
15690         // If we have varargs, the importer morphs the method's return type to "int" irrespective of its
15691         // original struct/float type, because the ABI specifies that the value is returned in integer registers.
15692         // We don't want struct promotion to turn an assignment like:
15693         //   lclFld_int = callvar_int()   into   lclFld_float = callvar_int();
15694         // because that would assign an int to a float without a cast. Prevent the promotion.
15695         if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) ||
15696             (hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES))
15697         {
15698             // Make sure this struct type stays as struct so we can receive the call in a struct.
15699             lvaTable[tmpNum].lvIsMultiRegRet = true;
15700         }
15701     }
15702 }
15703 #endif // _TARGET_ARM_
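// Illustrative C# shape of the scenario handled above (hypothetical, for exposition):
//
//     struct Hfa { public float f; }            // an HFA on ARM
//     static Hfa GetValue(__arglist);           // varargs: the ABI returns the value in integer registers
//     ...
//     Hfa h = GetValue(__arglist());            // promoting 'h' would retype the destination as float
//                                               // while the call produces an int, so promotion is blocked
//                                               // by setting lvIsMultiRegRet on the destination local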
15704
15705 #if FEATURE_MULTIREG_RET
15706 GenTree* Compiler::impAssignMultiRegTypeToVar(GenTree* op, CORINFO_CLASS_HANDLE hClass)
15707 {
15708     unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return."));
15709     impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_ALL);
15710     GenTree* ret = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType);
15711
15712     // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
15713     ret->gtFlags |= GTF_DONT_CSE;
15714
15715     assert(IsMultiRegReturnedType(hClass));
15716
15717     // Mark the var so that fields are not promoted and stay together.
15718     lvaTable[tmpNum].lvIsMultiRegRet = true;
15719
15720     return ret;
15721 }
15722 #endif // FEATURE_MULTIREG_RET
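// Illustrative sketch of the transform performed by impAssignMultiRegTypeToVar above (for exposition):
//
//     before:   ... = CALL F()            // F returns a multi-reg struct
//     after:    tmpN = CALL F()           // impAssignTempGen, spilled with CHECK_SPILL_ALL
//               ... = LCL_VAR tmpN        // marked GTF_DONT_CSE; lvaTable[tmpN].lvIsMultiRegRet = true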
15723
15724 // do import for a return
15725 // returns false if inlining was aborted
15726 // opcode can be ret or call in the case of a tail.call
15727 bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE& opcode)
15728 {
15729     if (tiVerificationNeeded)
15730     {
15731         verVerifyThisPtrInitialised();
15732
15733         unsigned expectedStack = 0;
15734         if (info.compRetType != TYP_VOID)
15735         {
15736             typeInfo tiVal = impStackTop().seTypeInfo;
15737             typeInfo tiDeclared =
15738                 verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
15739
15740             Verify(!verIsByRefLike(tiDeclared) || verIsSafeToReturnByRef(tiVal), "byref return");
15741
15742             Verify(tiCompatibleWith(tiVal, tiDeclared.NormaliseForStack(), true), "type mismatch");
15743             expectedStack = 1;
15744         }
15745         Verify(verCurrentState.esStackDepth == expectedStack, "stack non-empty on return");
15746     }
15747
15748 #ifdef DEBUG
15749     // If we are importing an inlinee and have GC ref locals we always
15750     // need to have a spill temp for the return value.  This temp
15751     // should have been set up in advance, over in fgFindBasicBlocks.
15752     if (compIsForInlining() && impInlineInfo->HasGcRefLocals() && (info.compRetType != TYP_VOID))
15753     {
15754         assert(lvaInlineeReturnSpillTemp != BAD_VAR_NUM);
15755     }
15756 #endif // DEBUG
15757
15758     GenTree*             op2       = nullptr;
15759     GenTree*             op1       = nullptr;
15760     CORINFO_CLASS_HANDLE retClsHnd = nullptr;
15761
15762     if (info.compRetType != TYP_VOID)
15763     {
15764         StackEntry se = impPopStack();
15765         retClsHnd     = se.seTypeInfo.GetClassHandle();
15766         op2           = se.val;
15767
15768         if (!compIsForInlining())
15769         {
15770             impBashVarAddrsToI(op2);
15771             op2 = impImplicitIorI4Cast(op2, info.compRetType);
15772             op2 = impImplicitR4orR8Cast(op2, info.compRetType);
15773             assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) ||
15774                       ((op2->TypeGet() == TYP_I_IMPL) && (info.compRetType == TYP_BYREF)) ||
15775                       ((op2->TypeGet() == TYP_BYREF) && (info.compRetType == TYP_I_IMPL)) ||
15776                       (varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) ||
15777                       (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType)));
15778
15779 #ifdef DEBUG
15780             if (opts.compGcChecks && info.compRetType == TYP_REF)
15781             {
15782                 // DDB 3483  : JIT Stress: early termination of GC ref's life time in exception code path
15783                 // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with
15784                 // one-return BB.
15785
15786                 assert(op2->gtType == TYP_REF);
15787
15788                 // confirm that the argument is a GC pointer (for debugging (GC stress))
15789                 GenTreeArgList* args = gtNewArgList(op2);
15790                 op2                  = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, args);
15791
15792                 if (verbose)
15793                 {
15794                     printf("\ncompGcChecks tree:\n");
15795                     gtDispTree(op2);
15796                 }
15797             }
15798 #endif
15799         }
15800         else
15801         {
15802             // inlinee's stack should be empty now.
15803             assert(verCurrentState.esStackDepth == 0);
15804
15805 #ifdef DEBUG
15806             if (verbose)
15807             {
15808                 printf("\n\n    Inlinee Return expression (before normalization)  =>\n");
15809                 gtDispTree(op2);
15810             }
15811 #endif
15812
15813             // Make sure the type matches the original call.
15814
15815             var_types returnType       = genActualType(op2->gtType);
15816             var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType;
15817             if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT))
15818             {
15819                 originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass);
15820             }
15821
15822             if (returnType != originalCallType)
15823             {
15824                 JITDUMP("Return type mismatch, have %s, needed %s\n", varTypeName(returnType),
15825                         varTypeName(originalCallType));
15826                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH);
15827                 return false;
15828             }
15829
15830             // Below, we are going to set impInlineInfo->retExpr to the tree with the return
15831             // expression. At this point, retExpr could already be set if there are multiple
15832             // return blocks (meaning fgNeedReturnSpillTemp() == true) and one of
15833             // the other blocks already set it. If there is only a single return block,
15834             // retExpr shouldn't be set. However, this is not true if we reimport a block
15835             // with a return. In that case, retExpr will be set, then the block will be
15836             // reimported, but retExpr won't get cleared as part of setting the block to
15837             // be reimported. The reimported retExpr value should be the same, so even if
15838             // we don't unconditionally overwrite it, it shouldn't matter.
15839             if (info.compRetNativeType != TYP_STRUCT)
15840             {
15841                 // compRetNativeType is not TYP_STRUCT.
15842                 // This implies it could be either a scalar type or SIMD vector type or
15843                 // a struct type that can be normalized to a scalar type.
15844
15845                 if (varTypeIsStruct(info.compRetType))
15846                 {
15847                     noway_assert(info.compRetBuffArg == BAD_VAR_NUM);
15848                     // adjust the type away from struct to integral,
15849                     // without normalizing
15850                     op2 = impFixupStructReturnType(op2, retClsHnd);
15851                 }
15852                 else
15853                 {
15854                     // Do we have to normalize?
15855                     var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType);
15856                     if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) &&
15857                         fgCastNeeded(op2, fncRealRetType))
15858                     {
15859                         // Small-typed return values are normalized by the callee
15860                         op2 = gtNewCastNode(TYP_INT, op2, fncRealRetType);
15861                     }
15862                 }
15863
15864                 if (fgNeedReturnSpillTemp())
15865                 {
15866                     assert(info.compRetNativeType != TYP_VOID &&
15867                            (fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals()));
15868
15869                     // If this method returns a ref type, track the actual types seen
15870                     // in the returns.
15871                     if (info.compRetType == TYP_REF)
15872                     {
15873                         bool                 isExact      = false;
15874                         bool                 isNonNull    = false;
15875                         CORINFO_CLASS_HANDLE returnClsHnd = gtGetClassHandle(op2, &isExact, &isNonNull);
15876
15877                         if (impInlineInfo->retExpr == nullptr)
15878                         {
15879                             // This is the first return, so best known type is the type
15880                             // of this return value.
15881                             impInlineInfo->retExprClassHnd        = returnClsHnd;
15882                             impInlineInfo->retExprClassHndIsExact = isExact;
15883                         }
15884                         else if (impInlineInfo->retExprClassHnd != returnClsHnd)
15885                         {
15886                             // This return site type differs from earlier seen sites,
15887                             // so reset the info and we'll fall back to using the method's
15888                             // declared return type for the return spill temp.
15889                             impInlineInfo->retExprClassHnd        = nullptr;
15890                             impInlineInfo->retExprClassHndIsExact = false;
15891                         }
15892                     }
15893
15894                     // This is a bit of a workaround...
15895                     // If we are inlining a call that returns a struct, where the actual "native" return type is
15896                     // not a struct (for example, the struct is composed of exactly one int, and the native
15897                     // return type is thus an int), and the inlinee has multiple return blocks (thus,
15898                     // fgNeedReturnSpillTemp() == true, and lvaInlineeReturnSpillTemp is the index of a local var that is set
15899                     // to the *native* return type), and at least one of the return blocks is the result of
15900                     // a call, then we have a problem. The situation is like this (from a failed test case):
15901                     //
15902                     // inliner:
15903                     //      // Note: valuetype plinq_devtests.LazyTests/LIX is a struct with only a single int
15904                     //      call !!0 [mscorlib]System.Threading.LazyInitializer::EnsureInitialized<valuetype
15905                     //      plinq_devtests.LazyTests/LIX>(!!0&, bool&, object&, class [mscorlib]System.Func`1<!!0>)
15906                     //
15907                     // inlinee:
15908                     //      ...
15909                     //      ldobj      !!T                 // this gets bashed to a GT_LCL_FLD, type TYP_INT
15910                     //      ret
15911                     //      ...
15912                     //      call       !!0 System.Threading.LazyInitializer::EnsureInitializedCore<!!0>(!!0&, bool&,
15913                     //      object&, class System.Func`1<!!0>)
15914                     //      ret
15915                     //
15916                     // In the code above, when we call impFixupStructReturnType(), we will change the op2 return type
15917                     // of the inlinee return node, but we don't do that for GT_CALL nodes, which we delay until
15918                     // morphing when we call fgFixupStructReturn(). We do this, apparently, to handle nested
15919                     // inlining properly by leaving the correct type on the GT_CALL node through importing.
15920                     //
15921                     // To fix this, for this case, we temporarily change the GT_CALL node type to the
15922                     // native return type, which is what it will be set to eventually. We generate the
15923                     // assignment to the return temp, using the correct type, and then restore the GT_CALL
15924                     // node type. During morphing, the GT_CALL will get the correct, final, native return type.
15925
15926                     bool restoreType = false;
15927                     if ((op2->OperGet() == GT_CALL) && (info.compRetType == TYP_STRUCT))
15928                     {
15929                         noway_assert(op2->TypeGet() == TYP_STRUCT);
15930                         op2->gtType = info.compRetNativeType;
15931                         restoreType = true;
15932                     }
15933
15934                     impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15935                                      (unsigned)CHECK_SPILL_ALL);
15936
15937                     GenTree* tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, op2->TypeGet());
15938
15939                     if (restoreType)
15940                     {
15941                         op2->gtType = TYP_STRUCT; // restore it to what it was
15942                     }
15943
15944                     op2 = tmpOp2;
15945
15946 #ifdef DEBUG
15947                     if (impInlineInfo->retExpr)
15948                     {
15949                         // Some other block(s) have seen the CEE_RET first.
15950                         // Better they spilled to the same temp.
15951                         assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR);
15952                         assert(impInlineInfo->retExpr->gtLclVarCommon.gtLclNum == op2->gtLclVarCommon.gtLclNum);
15953                     }
15954 #endif
15955                 }
15956
15957 #ifdef DEBUG
15958                 if (verbose)
15959                 {
15960                     printf("\n\n    Inlinee Return expression (after normalization) =>\n");
15961                     gtDispTree(op2);
15962                 }
15963 #endif
15964
15965                 // Report the return expression
15966                 impInlineInfo->retExpr = op2;
15967             }
15968             else
15969             {
15970                 // compRetNativeType is TYP_STRUCT.
15971                 // This implies the struct is returned via a RetBuf arg or as a multi-reg struct return.
15972
15973                 GenTreeCall* iciCall = impInlineInfo->iciCall->AsCall();
15974
15975                 // Assign the inlinee return into a spill temp.
15976                 // spill temp only exists if there are multiple return points
15977                 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15978                 {
15979                     // in this case we have to insert multiple struct copies to the temp
15980                     // and the retexpr is just the temp.
15981                     assert(info.compRetNativeType != TYP_VOID);
15982                     assert(fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals());
15983
15984                     impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15985                                      (unsigned)CHECK_SPILL_ALL);
15986                 }
15987
15988 #if defined(_TARGET_ARM_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15989 #if defined(_TARGET_ARM_)
15990                 // TODO-ARM64-NYI: HFA
15991                 // TODO-AMD64-Unix and TODO-ARM: once the ARM64 functionality is implemented, the
15992                 // next ifdefs could be refactored into a single method with the ifdef inside.
15993                 if (IsHfa(retClsHnd))
15994                 {
15995 // Same as !IsHfa but just don't bother with impAssignStructPtr.
15996 #else  // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15997                 ReturnTypeDesc retTypeDesc;
15998                 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15999                 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
16000
16001                 if (retRegCount != 0)
16002                 {
16003                     // If single eightbyte, the return type would have been normalized and there won't be a temp var.
16004                     // This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes -
16005                     // max allowed.)
16006                     assert(retRegCount == MAX_RET_REG_COUNT);
16007                     // Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr.
16008                     CLANG_FORMAT_COMMENT_ANCHOR;
16009 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
16010
16011                     if (fgNeedReturnSpillTemp())
16012                     {
16013                         if (!impInlineInfo->retExpr)
16014                         {
16015 #if defined(_TARGET_ARM_)
16016                             impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType);
16017 #else  // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
16018                             // The inlinee compiler has figured out the type of the temp already. Use it here.
16019                             impInlineInfo->retExpr =
16020                                 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
16021 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
16022                         }
16023                     }
16024                     else
16025                     {
16026                         impInlineInfo->retExpr = op2;
16027                     }
16028                 }
16029                 else
16030 #elif defined(_TARGET_ARM64_)
16031                 ReturnTypeDesc retTypeDesc;
16032                 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
16033                 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
16034
16035                 if (retRegCount != 0)
16036                 {
16037                     assert(!iciCall->HasRetBufArg());
16038                     assert(retRegCount >= 2);
16039                     if (fgNeedReturnSpillTemp())
16040                     {
16041                         if (!impInlineInfo->retExpr)
16042                         {
16043                             // The inlinee compiler has figured out the type of the temp already. Use it here.
16044                             impInlineInfo->retExpr =
16045                                 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
16046                         }
16047                     }
16048                     else
16049                     {
16050                         impInlineInfo->retExpr = op2;
16051                     }
16052                 }
16053                 else
16054 #endif // defined(_TARGET_ARM64_)
16055                 {
16056                     assert(iciCall->HasRetBufArg());
16057                     GenTree* dest = gtCloneExpr(iciCall->gtCallArgs->gtOp.gtOp1);
16058                     // spill temp only exists if there are multiple return points
16059                     if (fgNeedReturnSpillTemp())
16060                     {
16061                         // if this is the first return we have seen set the retExpr
16062                         if (!impInlineInfo->retExpr)
16063                         {
16064                             impInlineInfo->retExpr =
16065                                 impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType),
16066                                                    retClsHnd, (unsigned)CHECK_SPILL_ALL);
16067                         }
16068                     }
16069                     else
16070                     {
16071                         impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
16072                     }
16073                 }
16074             }
16075         }
16076     }
16077
16078     if (compIsForInlining())
16079     {
16080         return true;
16081     }
16082
16083     if (info.compRetType == TYP_VOID)
16084     {
16085         // return void
16086         op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
16087     }
16088     else if (info.compRetBuffArg != BAD_VAR_NUM)
16089     {
16090         // Assign value to return buff (first param)
16091         GenTree* retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF, impCurStmtOffs);
16092
16093         op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
16094         impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
16095
16096         // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX).
16097         CLANG_FORMAT_COMMENT_ANCHOR;
16098
16099 #if defined(_TARGET_AMD64_)
16100
16101         // The x64 (System V and Win64) calling conventions require the
16102         // implicit return buffer to be returned explicitly (in RAX).
16103         // Change the return type to be BYREF.
16104         op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
16105 #else  // !defined(_TARGET_AMD64_)
16106         // On non-AMD64 targets, the profiler hook requires returning the implicit RetBuf explicitly (in RAX).
16107         // In that case the return value of the function is changed to BYREF.
16108         // If the profiler hook is not needed, the return type of the function is TYP_VOID.
16109         if (compIsProfilerHookNeeded())
16110         {
16111             op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
16112         }
16113         else
16114         {
16115             // return void
16116             op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
16117         }
16118 #endif // !defined(_TARGET_AMD64_)
16119     }
16120     else if (varTypeIsStruct(info.compRetType))
16121     {
16122 #if !FEATURE_MULTIREG_RET
16123         // For both ARM architectures the HFA native types are maintained as structs.
16124         // Also on System V AMD64 the multireg structs returns are also left as structs.
16125         noway_assert(info.compRetNativeType != TYP_STRUCT);
16126 #endif
16127         op2 = impFixupStructReturnType(op2, retClsHnd);
16128         // return op2
16129         op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetNativeType), op2);
16130     }
16131     else
16132     {
16133         // return op2
16134         op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2);
16135     }
16136
16137     // We must have imported a tailcall and jumped to RET
16138     if (prefixFlags & PREFIX_TAILCALL)
16139     {
16140 #if defined(FEATURE_CORECLR) || !defined(_TARGET_AMD64_)
16141         // Jit64 compat:
16142         // This cannot be asserted on Amd64 since we permit the following IL pattern:
16143         //      tail.call
16144         //      pop
16145         //      ret
16146         assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode));
16147 #endif // FEATURE_CORECLR || !_TARGET_AMD64_
16148
16149         opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES
16150
16151         // impImportCall() would have already appended TYP_VOID calls
16152         if (info.compRetType == TYP_VOID)
16153         {
16154             return true;
16155         }
16156     }
16157
16158     impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
16159 #ifdef DEBUG
16160     // Remember at which BC offset the tree was finished
16161     impNoteLastILoffs();
16162 #endif
16163     return true;
16164 }
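// Illustrative summary of the GT_RETURN shapes built above (a sketch, for exposition):
//
//     void return:        GT_RETURN(TYP_VOID)
//     ret-buffer return:  <copy op2 into the ret buf>; then GT_RETURN(TYP_BYREF retBufAddr) or GT_RETURN(TYP_VOID)
//     struct return:      GT_RETURN(genActualType(info.compRetNativeType), impFixupStructReturnType(op2))
//     scalar return:      GT_RETURN(genActualType(info.compRetType), op2)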
16165
16166 /*****************************************************************************
16167  *  Mark the block as unimported.
16168  *  Note that the caller is responsible for calling impImportBlockPending(),
16169  *  with the appropriate stack-state
16170  */
16171
16172 inline void Compiler::impReimportMarkBlock(BasicBlock* block)
16173 {
16174 #ifdef DEBUG
16175     if (verbose && (block->bbFlags & BBF_IMPORTED))
16176     {
16177         printf("\nBB%02u will be reimported\n", block->bbNum);
16178     }
16179 #endif
16180
16181     block->bbFlags &= ~BBF_IMPORTED;
16182 }
16183
16184 /*****************************************************************************
16185  *  Mark the successors of the given block as unimported.
16186  *  Note that the caller is responsible for calling impImportBlockPending()
16187  *  for all the successors, with the appropriate stack-state.
16188  */
16189
16190 void Compiler::impReimportMarkSuccessors(BasicBlock* block)
16191 {
16192     const unsigned numSuccs = block->NumSucc();
16193     for (unsigned i = 0; i < numSuccs; i++)
16194     {
16195         impReimportMarkBlock(block->GetSucc(i));
16196     }
16197 }
16198
16199 /*****************************************************************************
16200  *
16201  *  Exception filter wrapper: handle only the verification exception code
16202  *  (SEH_VERIFICATION_EXCEPTION) and let any other exception continue the handler search.
16203  */
16204
16205 LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
16206 {
16207     if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION)
16208     {
16209         return EXCEPTION_EXECUTE_HANDLER;
16210     }
16211
16212     return EXCEPTION_CONTINUE_SEARCH;
16213 }
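// Usage sketch: this filter guards block importation below (see impImportBlock), roughly:
//
//     PAL_TRY(FilterVerificationExceptionsParam*, pParam, &param)
//     {
//         pParam->pThis->impImportBlockCode(pParam->block);
//     }
//     PAL_EXCEPT_FILTER(FilterVerificationExceptions)
//     {
//         verHandleVerificationFailure(block DEBUGARG(false));
//     }
//     PAL_ENDTRY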
16214
16215 void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
16216 {
16217     assert(block->hasTryIndex());
16218     assert(!compIsForInlining());
16219
16220     unsigned  tryIndex = block->getTryIndex();
16221     EHblkDsc* HBtab    = ehGetDsc(tryIndex);
16222
16223     if (isTryStart)
16224     {
16225         assert(block->bbFlags & BBF_TRY_BEG);
16226
16227         // The Stack must be empty
16228         //
16229         if (block->bbStkDepth != 0)
16230         {
16231             BADCODE("Evaluation stack must be empty on entry into a try block");
16232         }
16233     }
16234
16235     // Save the stack contents, we'll need to restore it later
16236     //
16237     SavedStack blockState;
16238     impSaveStackState(&blockState, false);
16239
16240     while (HBtab != nullptr)
16241     {
16242         if (isTryStart)
16243         {
16244             // Are we verifying that an instance constructor properly initializes its 'this' pointer once?
16245             //  We do not allow the 'this' pointer to be uninitialized when entering most kinds of try regions
16246             //
16247             if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
16248             {
16249                 // We trigger an invalid program exception here unless we have a try/fault region.
16250                 //
16251                 if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter())
16252                 {
16253                     BADCODE(
16254                         "The 'this' pointer of an instance constructor is not initialized upon entry to a try region");
16255                 }
16256                 else
16257                 {
16258                     // Allow a try/fault region to proceed.
16259                     assert(HBtab->HasFaultHandler());
16260                 }
16261             }
16262
16263             /* Recursively process the handler block */
16264             BasicBlock* hndBegBB = HBtab->ebdHndBeg;
16265
16266             //  Construct the proper verification stack state
16267             //   either empty or one that contains just
16268             //   the Exception Object that we are dealing with
16269             //
16270             verCurrentState.esStackDepth = 0;
16271
16272             if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp))
16273             {
16274                 CORINFO_CLASS_HANDLE clsHnd;
16275
16276                 if (HBtab->HasFilter())
16277                 {
16278                     clsHnd = impGetObjectClass();
16279                 }
16280                 else
16281                 {
16282                     CORINFO_RESOLVED_TOKEN resolvedToken;
16283
16284                     resolvedToken.tokenContext = impTokenLookupContextHandle;
16285                     resolvedToken.tokenScope   = info.compScopeHnd;
16286                     resolvedToken.token        = HBtab->ebdTyp;
16287                     resolvedToken.tokenType    = CORINFO_TOKENKIND_Class;
16288                     info.compCompHnd->resolveToken(&resolvedToken);
16289
16290                     clsHnd = resolvedToken.hClass;
16291                 }
16292
16293                 // push the catch arg on the stack, spill to a temp if necessary
16294                 // Note: can update HBtab->ebdHndBeg!
16295                 hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd, false);
16296             }
16297
16298             // Queue up the handler for importing
16299             //
16300             impImportBlockPending(hndBegBB);
16301
16302             if (HBtab->HasFilter())
16303             {
16304                 /* @VERIFICATION : Ideally the end of filter state should get
16305                    propagated to the catch handler; this is an incompleteness,
16306                    but is not a security/compliance issue, since the only
16307                    interesting state is the 'thisInit' state.
16308                    */
16309
16310                 verCurrentState.esStackDepth = 0;
16311
16312                 BasicBlock* filterBB = HBtab->ebdFilter;
16313
16314                 // push the catch arg on the stack, spill to a temp if necessary
16315                 // Note: can update HBtab->ebdFilter!
16316                 const bool isSingleBlockFilter = (filterBB->bbNext == hndBegBB);
16317                 filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass(), isSingleBlockFilter);
16318
16319                 impImportBlockPending(filterBB);
16320             }
16321         }
16322         else if (verTrackObjCtorInitState && HBtab->HasFaultHandler())
16323         {
16324             /* Recursively process the handler block */
16325
16326             verCurrentState.esStackDepth = 0;
16327
16328             // Queue up the fault handler for importing
16329             //
16330             impImportBlockPending(HBtab->ebdHndBeg);
16331         }
16332
16333         // Now process our enclosing try index (if any)
16334         //
16335         tryIndex = HBtab->ebdEnclosingTryIndex;
16336         if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
16337         {
16338             HBtab = nullptr;
16339         }
16340         else
16341         {
16342             HBtab = ehGetDsc(tryIndex);
16343         }
16344     }
16345
16346     // Restore the stack contents
16347     impRestoreStackState(&blockState);
16348 }
16349
16350 //***************************************************************
16351 // Import the instructions for the given basic block.  Perform
16352 // verification, throwing an exception on failure.  Push any successor blocks that are enabled for the first
16353 // time, or whose verification pre-state is changed.
16354
16355 #ifdef _PREFAST_
16356 #pragma warning(push)
16357 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
16358 #endif
16359 void Compiler::impImportBlock(BasicBlock* block)
16360 {
16361     // BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to
16362     // handle them specially. In particular, there is no IL to import for them, but we do need
16363     // to mark them as imported and put their successors on the pending import list.
16364     if (block->bbFlags & BBF_INTERNAL)
16365     {
16366         JITDUMP("Marking BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", block->bbNum);
16367         block->bbFlags |= BBF_IMPORTED;
16368
16369         const unsigned numSuccs = block->NumSucc();
16370         for (unsigned i = 0; i < numSuccs; i++)
16371         {
16372             impImportBlockPending(block->GetSucc(i));
16373         }
16374
16375         return;
16376     }
16377
16378     bool markImport;
16379
16380     assert(block);
16381
16382     /* Make the block globally available */
16383
16384     compCurBB = block;
16385
16386 #ifdef DEBUG
16387     /* Initialize the debug variables */
16388     impCurOpcName = "unknown";
16389     impCurOpcOffs = block->bbCodeOffs;
16390 #endif
16391
16392     /* Set the current stack state to the merged result */
16393     verResetCurrentState(block, &verCurrentState);
16394
16395     /* Now walk the code and import the IL into GenTrees */
16396
16397     struct FilterVerificationExceptionsParam
16398     {
16399         Compiler*   pThis;
16400         BasicBlock* block;
16401     };
16402     FilterVerificationExceptionsParam param;
16403
16404     param.pThis = this;
16405     param.block = block;
16406
16407     PAL_TRY(FilterVerificationExceptionsParam*, pParam, &param)
16408     {
16409         /* @VERIFICATION : For now, the only state propagation from try
16410            to its handler is "thisInit" state (stack is empty at start of try).
16411            In general, for state that we track in verification, we need to
16412            model the possibility that an exception might happen at any IL
16413            instruction, so we really need to merge all states that obtain
16414            between IL instructions in a try block into the start states of
16415            all handlers.
16416
16417            However we do not allow the 'this' pointer to be uninitialized when
16418            entering most kinds of try regions (only try/fault are allowed to have
16419            an uninitialized this pointer on entry to the try)
16420
16421            Fortunately, the stack is thrown away when an exception
16422            leads to a handler, so we don't have to worry about that.
16423            We DO, however, have to worry about the "thisInit" state.
16424            But only for the try/fault case.
16425
16426            The only allowed transition is from TIS_Uninit to TIS_Init.
16427
16428            So for a try/fault region for the fault handler block
16429            we will merge the start state of the try begin
16430            and the post-state of each block that is part of this try region
16431         */
16432
16433         // merge the start state of the try begin
16434         //
16435         if (pParam->block->bbFlags & BBF_TRY_BEG)
16436         {
16437             pParam->pThis->impVerifyEHBlock(pParam->block, true);
16438         }
16439
16440         pParam->pThis->impImportBlockCode(pParam->block);
16441
16442         // As discussed above:
16443         // merge the post-state of each block that is part of this try region
16444         //
16445         if (pParam->block->hasTryIndex())
16446         {
16447             pParam->pThis->impVerifyEHBlock(pParam->block, false);
16448         }
16449     }
16450     PAL_EXCEPT_FILTER(FilterVerificationExceptions)
16451     {
16452         verHandleVerificationFailure(block DEBUGARG(false));
16453     }
16454     PAL_ENDTRY
16455
16456     if (compDonotInline())
16457     {
16458         return;
16459     }
16460
16461     assert(!compDonotInline());
16462
16463     markImport = false;
16464
16465 SPILLSTACK:
16466
16467     unsigned    baseTmp             = NO_BASE_TMP; // input temps assigned to successor blocks
16468     bool        reimportSpillClique = false;
16469     BasicBlock* tgtBlock            = nullptr;
16470
16471     /* If the stack is non-empty, we might have to spill its contents */
16472
16473     if (verCurrentState.esStackDepth != 0)
16474     {
16475         impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something
16476                                   // on the stack, its lifetime is hard to determine, simply
16477                                   // don't reuse such temps.
16478
16479         GenTree* addStmt = nullptr;
16480
16481         /* Do the successors of 'block' have any other predecessors ?
16482            We do not want to do some of the optimizations related to multiRef
16483            if we can reimport blocks */
16484
16485         unsigned multRef = impCanReimport ? unsigned(~0) : 0;
16486
16487         switch (block->bbJumpKind)
16488         {
16489             case BBJ_COND:
16490
16491                 /* Temporarily remove the 'jtrue' from the end of the tree list */
16492
16493                 assert(impTreeLast);
16494                 assert(impTreeLast->gtOper == GT_STMT);
16495                 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_JTRUE);
16496
16497                 addStmt     = impTreeLast;
16498                 impTreeLast = impTreeLast->gtPrev;
16499
16500                 /* Note if the next block has more than one ancestor */
16501
16502                 multRef |= block->bbNext->bbRefs;
16503
16504                 /* Does the next block have temps assigned? */
16505
16506                 baseTmp  = block->bbNext->bbStkTempsIn;
16507                 tgtBlock = block->bbNext;
16508
16509                 if (baseTmp != NO_BASE_TMP)
16510                 {
16511                     break;
16512                 }
16513
16514                 /* Try the target of the jump then */
16515
16516                 multRef |= block->bbJumpDest->bbRefs;
16517                 baseTmp  = block->bbJumpDest->bbStkTempsIn;
16518                 tgtBlock = block->bbJumpDest;
16519                 break;
16520
16521             case BBJ_ALWAYS:
16522                 multRef |= block->bbJumpDest->bbRefs;
16523                 baseTmp  = block->bbJumpDest->bbStkTempsIn;
16524                 tgtBlock = block->bbJumpDest;
16525                 break;
16526
16527             case BBJ_NONE:
16528                 multRef |= block->bbNext->bbRefs;
16529                 baseTmp  = block->bbNext->bbStkTempsIn;
16530                 tgtBlock = block->bbNext;
16531                 break;
16532
16533             case BBJ_SWITCH:
16534
16535                 BasicBlock** jmpTab;
16536                 unsigned     jmpCnt;
16537
16538                 /* Temporarily remove the GT_SWITCH from the end of the tree list */
16539
16540                 assert(impTreeLast);
16541                 assert(impTreeLast->gtOper == GT_STMT);
16542                 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_SWITCH);
16543
16544                 addStmt     = impTreeLast;
16545                 impTreeLast = impTreeLast->gtPrev;
16546
16547                 jmpCnt = block->bbJumpSwt->bbsCount;
16548                 jmpTab = block->bbJumpSwt->bbsDstTab;
16549
16550                 do
16551                 {
16552                     tgtBlock = (*jmpTab);
16553
16554                     multRef |= tgtBlock->bbRefs;
16555
16556                     // Thanks to spill cliques, we should have assigned all or none
16557                     assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn));
16558                     baseTmp = tgtBlock->bbStkTempsIn;
16559                     if (multRef > 1)
16560                     {
16561                         break;
16562                     }
16563                 } while (++jmpTab, --jmpCnt);
16564
16565                 break;
16566
16567             case BBJ_CALLFINALLY:
16568             case BBJ_EHCATCHRET:
16569             case BBJ_RETURN:
16570             case BBJ_EHFINALLYRET:
16571             case BBJ_EHFILTERRET:
16572             case BBJ_THROW:
16573                 NO_WAY("can't have 'unreached' end of BB with non-empty stack");
16574                 break;
16575
16576             default:
16577                 noway_assert(!"Unexpected bbJumpKind");
16578                 break;
16579         }
16580
16581         assert(multRef >= 1);
16582
16583         /* Do we have a base temp number? */
16584
16585         bool newTemps = (baseTmp == NO_BASE_TMP);
16586
16587         if (newTemps)
16588         {
16589             /* Grab enough temps for the whole stack */
16590             baseTmp = impGetSpillTmpBase(block);
16591         }
16592
16593         /* Spill all stack entries into temps */
16594         unsigned level, tempNum;
16595
16596         JITDUMP("\nSpilling stack entries into temps\n");
16597         for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++)
16598         {
16599             GenTree* tree = verCurrentState.esStack[level].val;
16600
16601             /* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from
16602                the other. This should merge to a byref in unverifiable code.
16603                However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the
16604                successor would be imported assuming there was a TYP_I_IMPL on
16605                the stack. Thus the value would not get GC-tracked. Hence,
16606                change the temp to TYP_BYREF and reimport the successors.
16607                Note: We should only allow this in unverifiable code.
16608             */
16609             if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL && !verNeedsVerification())
16610             {
16611                 lvaTable[tempNum].lvType = TYP_BYREF;
16612                 impReimportMarkSuccessors(block);
16613                 markImport = true;
16614             }
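            // Illustrative IL for the byref/int merge described above (hypothetical, for exposition):
            //
            //     branch 1:   ldloca.s 0    // leaves a byref on the stack
            //     branch 2:   ldc.i4.0      // leaves an int on the stack
            //
            // Both branches reach the same successor, so the shared spill temp is retyped to TYP_BYREF
            // and the successors are re-imported, keeping the value GC-tracked.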
16615
16616 #ifdef _TARGET_64BIT_
16617             if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT)
16618             {
16619                 if (tiVerificationNeeded && tgtBlock->bbEntryState != nullptr &&
16620                     (tgtBlock->bbFlags & BBF_FAILED_VERIFICATION) == 0)
16621                 {
16622                     // Merge the current state into the entry state of block;
16623                     // the call to verMergeEntryStates must have changed
16624                     // the entry state of the block by merging the int local var
16625                     // and the native-int stack entry.
16626                     bool changed = false;
16627                     if (verMergeEntryStates(tgtBlock, &changed))
16628                     {
16629                         impRetypeEntryStateTemps(tgtBlock);
16630                         impReimportBlockPending(tgtBlock);
16631                         assert(changed);
16632                     }
16633                     else
16634                     {
16635                         tgtBlock->bbFlags |= BBF_FAILED_VERIFICATION;
16636                         break;
16637                     }
16638                 }
16639
16640                 // Some other block in the spill clique set this to "int", but now we have "native int".
16641                 // Change the type and go back to re-import any blocks that used the wrong type.
16642                 lvaTable[tempNum].lvType = TYP_I_IMPL;
16643                 reimportSpillClique      = true;
16644             }
16645             else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL)
16646             {
16647                 // Spill clique has decided this should be "native int", but this block only pushes an "int".
16648                 // Insert a sign-extension to "native int" so we match the clique.
16649                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
16650             }
16651
16652             // Consider the case where one branch left a 'byref' on the stack and the other leaves
16653             // an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same
16654             // size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64
16655             // behavior instead of asserting and then generating bad code (where we save/restore the
16656             // low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been
16657             // imported already, we need to change the type of the local and reimport the spill clique.
16658             // If the 'byref' side has imported, we insert a cast from int to 'native int' to match
16659             // the 'byref' size.
16660             if (!tiVerificationNeeded)
16661             {
16662                 if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT)
16663                 {
16664                     // Some other block in the spill clique set this to "int", but now we have "byref".
16665                     // Change the type and go back to re-import any blocks that used the wrong type.
16666                     lvaTable[tempNum].lvType = TYP_BYREF;
16667                     reimportSpillClique      = true;
16668                 }
16669                 else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF)
16670                 {
16671                     // Spill clique has decided this should be "byref", but this block only pushes an "int".
16672                     // Insert a sign-extension to "native int" so we match the clique size.
16673                     verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
16674                 }
16675             }
16676 #endif // _TARGET_64BIT_
16677
16678 #if FEATURE_X87_DOUBLES
16679             // X87 stack doesn't differentiate between float/double
16680             // so promoting is no big deal.
16681             // For everybody else keep it as float until we have a collision and then promote
16682             // Just like for x64's TYP_INT<->TYP_I_IMPL
16683
16684             if (multRef > 1 && tree->gtType == TYP_FLOAT)
16685             {
16686                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
16687             }
16688
16689 #else // !FEATURE_X87_DOUBLES
16690
16691             if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT)
16692             {
16693                 // Some other block in the spill clique set this to "float", but now we have "double".
16694                 // Change the type and go back to re-import any blocks that used the wrong type.
16695                 lvaTable[tempNum].lvType = TYP_DOUBLE;
16696                 reimportSpillClique      = true;
16697             }
16698             else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE)
16699             {
16700                 // Spill clique has decided this should be "double", but this block only pushes a "float".
16701                 // Insert a cast to "double" so we match the clique.
16702                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
16703             }
16704
16705 #endif // FEATURE_X87_DOUBLES
16706
16707             /* If addStmt has a reference to tempNum (can only happen if we
16708                are spilling to the temps already used by a previous block),
16709                we need to spill addStmt */
16710
16711             if (addStmt && !newTemps && gtHasRef(addStmt->gtStmt.gtStmtExpr, tempNum, false))
16712             {
16713                 GenTree* addTree = addStmt->gtStmt.gtStmtExpr;
16714
16715                 if (addTree->gtOper == GT_JTRUE)
16716                 {
16717                     GenTree* relOp = addTree->gtOp.gtOp1;
16718                     assert(relOp->OperIsCompare());
16719
16720                     var_types type = genActualType(relOp->gtOp.gtOp1->TypeGet());
16721
16722                     if (gtHasRef(relOp->gtOp.gtOp1, tempNum, false))
16723                     {
16724                         unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1"));
16725                         impAssignTempGen(temp, relOp->gtOp.gtOp1, level);
16726                         type              = genActualType(lvaTable[temp].TypeGet());
16727                         relOp->gtOp.gtOp1 = gtNewLclvNode(temp, type);
16728                     }
16729
16730                     if (gtHasRef(relOp->gtOp.gtOp2, tempNum, false))
16731                     {
16732                         unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2"));
16733                         impAssignTempGen(temp, relOp->gtOp.gtOp2, level);
16734                         type              = genActualType(lvaTable[temp].TypeGet());
16735                         relOp->gtOp.gtOp2 = gtNewLclvNode(temp, type);
16736                     }
16737                 }
16738                 else
16739                 {
16740                     assert(addTree->gtOper == GT_SWITCH && genActualTypeIsIntOrI(addTree->gtOp.gtOp1->TypeGet()));
16741
16742                     unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH"));
16743                     impAssignTempGen(temp, addTree->gtOp.gtOp1, level);
16744                     addTree->gtOp.gtOp1 = gtNewLclvNode(temp, genActualType(addTree->gtOp.gtOp1->TypeGet()));
16745                 }
16746             }
16747
16748             /* Spill the stack entry, and replace with the temp */
16749
16750             if (!impSpillStackEntry(level, tempNum
16751 #ifdef DEBUG
16752                                     ,
16753                                     true, "Spill Stack Entry"
16754 #endif
16755                                     ))
16756             {
16757                 if (markImport)
16758                 {
16759                     BADCODE("bad stack state");
16760                 }
16761
16762                 // Oops. Something went wrong when spilling. Bad code.
16763                 verHandleVerificationFailure(block DEBUGARG(true));
16764
16765                 goto SPILLSTACK;
16766             }
16767         }
16768
16769         /* Put back the 'jtrue'/'switch' if we removed it earlier */
16770
16771         if (addStmt)
16772         {
16773             impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE);
16774         }
16775     }
16776
16777     // Some of the append/spill logic works on compCurBB
16778
16779     assert(compCurBB == block);
16780
16781     /* Save the tree list in the block */
16782     impEndTreeList(block);
16783
16784     // impEndTreeList sets BBF_IMPORTED on the block
16785     // We do *NOT* want to set it later than this because
16786     // impReimportSpillClique might clear it if this block is both a
16787     // predecessor and successor in the current spill clique
16788     assert(block->bbFlags & BBF_IMPORTED);
16789
16790     // If we had a int/native int, or float/double collision, we need to re-import
16791     if (reimportSpillClique)
16792     {
16793         // This will re-import all the successors of block (as well as each of their predecessors)
16794         impReimportSpillClique(block);
16795
16796         // For blocks that haven't been imported yet, we still need to mark them as pending import.
16797         const unsigned numSuccs = block->NumSucc();
16798         for (unsigned i = 0; i < numSuccs; i++)
16799         {
16800             BasicBlock* succ = block->GetSucc(i);
16801             if ((succ->bbFlags & BBF_IMPORTED) == 0)
16802             {
16803                 impImportBlockPending(succ);
16804             }
16805         }
16806     }
16807     else // the normal case
16808     {
16809         // otherwise just import the successors of block
16810
16811         /* Does this block jump to any other blocks? */
16812         const unsigned numSuccs = block->NumSucc();
16813         for (unsigned i = 0; i < numSuccs; i++)
16814         {
16815             impImportBlockPending(block->GetSucc(i));
16816         }
16817     }
16818 }
16819 #ifdef _PREFAST_
16820 #pragma warning(pop)
16821 #endif
16822
16823 /*****************************************************************************/
16824 //
16825 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16826 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
16827 // impPendingBlockMembers).  Merges the current verification state into the verification state of "block"
16828 // (its "pre-state").
16829
16830 void Compiler::impImportBlockPending(BasicBlock* block)
16831 {
16832 #ifdef DEBUG
16833     if (verbose)
16834     {
16835         printf("\nimpImportBlockPending for BB%02u\n", block->bbNum);
16836     }
16837 #endif
16838
16839     // We will add a block to the pending set if it has not already been imported (or needs to be re-imported),
16840     // or if it has, but merging in a predecessor's post-state changes the block's pre-state.
16841     // (When we're doing verification, we always attempt the merge to detect verification errors.)
16842
16843     // If the block has not been imported, add to pending set.
16844     bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0);
16845
16846     // Initialize bbEntryState just the first time we try to add this block to the pending list
16847     // Just because bbEntryState is NULL doesn't mean the pre-state wasn't previously set;
16848     // we use NULL to indicate the 'common' state to avoid memory allocation.
16849     if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) &&
16850         (impGetPendingBlockMember(block) == 0))
16851     {
16852         verInitBBEntryState(block, &verCurrentState);
16853         assert(block->bbStkDepth == 0);
16854         block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth);
16855         assert(addToPending);
16856         assert(impGetPendingBlockMember(block) == 0);
16857     }
16858     else
16859     {
16860         // The stack should have the same height on entry to the block from all its predecessors.
16861         if (block->bbStkDepth != verCurrentState.esStackDepth)
16862         {
16863 #ifdef DEBUG
16864             char buffer[400];
16865             sprintf_s(buffer, sizeof(buffer),
16866                       "Block at offset %4.4x to %4.4x in %s entered with different stack depths.\n"
16867                       "Previous depth was %d, current depth is %d",
16868                       block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth,
16869                       verCurrentState.esStackDepth);
16870             buffer[400 - 1] = 0;
16871             NO_WAY(buffer);
16872 #else
16873             NO_WAY("Block entered with different stack depths");
16874 #endif
16875         }
16876
16877         // Additionally, if we need to verify, merge the verification state.
16878         if (tiVerificationNeeded)
16879         {
16880             // Merge the current state into the entry state of block; if this does not change the entry state
16881             // by merging, do not add the block to the pending-list.
16882             bool changed = false;
16883             if (!verMergeEntryStates(block, &changed))
16884             {
16885                 block->bbFlags |= BBF_FAILED_VERIFICATION;
16886                 addToPending = true; // We will pop it off, and check the flag set above.
16887             }
16888             else if (changed)
16889             {
16890                 addToPending = true;
16891
16892                 JITDUMP("Adding BB%02u to pending set due to new merge result\n", block->bbNum);
16893             }
16894         }
16895
16896         if (!addToPending)
16897         {
16898             return;
16899         }
16900
16901         if (block->bbStkDepth > 0)
16902         {
16903             // We need to fix the types of any spill temps that might have changed:
16904             //   int->native int, float->double, int->byref, etc.
16905             impRetypeEntryStateTemps(block);
16906         }
16907
16908         // OK, we must add to the pending list, if it's not already in it.
16909         if (impGetPendingBlockMember(block) != 0)
16910         {
16911             return;
16912         }
16913     }
16914
16915     // Get an entry to add to the pending list
16916
16917     PendingDsc* dsc;
16918
16919     if (impPendingFree)
16920     {
16921         // We can reuse one of the freed up dscs.
16922         dsc            = impPendingFree;
16923         impPendingFree = dsc->pdNext;
16924     }
16925     else
16926     {
16927         // We have to create a new dsc
16928         dsc = new (this, CMK_Unknown) PendingDsc;
16929     }
16930
16931     dsc->pdBB                 = block;
16932     dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth;
16933     dsc->pdThisPtrInit        = verCurrentState.thisInitialized;
16934
16935     // Save the stack trees for later
16936
16937     if (verCurrentState.esStackDepth)
16938     {
16939         impSaveStackState(&dsc->pdSavedStack, false);
16940     }
16941
16942     // Add the entry to the pending list
16943
16944     dsc->pdNext    = impPendingList;
16945     impPendingList = dsc;
16946     impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16947
16948     // Various assertions require us to now consider the block as not imported (at least for
16949     // the final time...)
16950     block->bbFlags &= ~BBF_IMPORTED;
16951
16952 #ifdef DEBUG
16953     if (verbose && 0)
16954     {
16955         printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16956     }
16957 #endif
16958 }
16959
16960 /*****************************************************************************/
16961 //
16962 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16963 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
16964 // impPendingBlockMembers).  Does *NOT* change the existing "pre-state" of the block.
16965
16966 void Compiler::impReimportBlockPending(BasicBlock* block)
16967 {
16968     JITDUMP("\nimpReimportBlockPending for BB%02u", block->bbNum);
16969
16970     assert(block->bbFlags & BBF_IMPORTED);
16971
16972     // OK, we must add to the pending list, if it's not already in it.
16973     if (impGetPendingBlockMember(block) != 0)
16974     {
16975         return;
16976     }
16977
16978     // Get an entry to add to the pending list
16979
16980     PendingDsc* dsc;
16981
16982     if (impPendingFree)
16983     {
16984         // We can reuse one of the freed up dscs.
16985         dsc            = impPendingFree;
16986         impPendingFree = dsc->pdNext;
16987     }
16988     else
16989     {
16990         // We have to create a new dsc
16991         dsc = new (this, CMK_ImpStack) PendingDsc;
16992     }
16993
16994     dsc->pdBB = block;
16995
16996     if (block->bbEntryState)
16997     {
16998         dsc->pdThisPtrInit        = block->bbEntryState->thisInitialized;
16999         dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth;
17000         dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack;
17001     }
17002     else
17003     {
17004         dsc->pdThisPtrInit        = TIS_Bottom;
17005         dsc->pdSavedStack.ssDepth = 0;
17006         dsc->pdSavedStack.ssTrees = nullptr;
17007     }
17008
17009     // Add the entry to the pending list
17010
17011     dsc->pdNext    = impPendingList;
17012     impPendingList = dsc;
17013     impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
17014
17015     // Various assertions require us to now consider the block as not imported (at least for
17016     // the final time...)
17017     block->bbFlags &= ~BBF_IMPORTED;
17018
17019 #ifdef DEBUG
17020     if (verbose && 0)
17021     {
17022         printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
17023     }
17024 #endif
17025 }
17026
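// BlockListNode is allocated from the compiler's memory and recycled through
// impBlockListNodeFreeList, so the spill-clique walk below can build and tear down
// its work lists without per-node heap churn.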
17027 void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp)
17028 {
17029     if (comp->impBlockListNodeFreeList == nullptr)
17030     {
17031         return (BlockListNode*)comp->compGetMem(sizeof(BlockListNode), CMK_BasicBlock);
17032     }
17033     else
17034     {
17035         BlockListNode* res             = comp->impBlockListNodeFreeList;
17036         comp->impBlockListNodeFreeList = res->m_next;
17037         return res;
17038     }
17039 }
17040
17041 void Compiler::FreeBlockListNode(Compiler::BlockListNode* node)
17042 {
17043     node->m_next             = impBlockListNodeFreeList;
17044     impBlockListNodeFreeList = node;
17045 }
17046
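// Walk the spill clique that contains "block" as a predecessor. Starting from the
// successors of "block", the walk alternates: it adds the successors of every
// predecessor-side member and the (cheap) predecessors of every successor-side member,
// calling callback->Visit() exactly once per block per side, until no new members are found.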
17047 void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback)
17048 {
17049     bool toDo = true;
17050
17051     noway_assert(!fgComputePredsDone);
17052     if (!fgCheapPredsValid)
17053     {
17054         fgComputeCheapPreds();
17055     }
17056
17057     BlockListNode* succCliqueToDo = nullptr;
17058     BlockListNode* predCliqueToDo = new (this) BlockListNode(block);
17059     while (toDo)
17060     {
17061         toDo = false;
17062         // Look at the successors of every member of the predecessor to-do list.
17063         while (predCliqueToDo != nullptr)
17064         {
17065             BlockListNode* node = predCliqueToDo;
17066             predCliqueToDo      = node->m_next;
17067             BasicBlock* blk     = node->m_blk;
17068             FreeBlockListNode(node);
17069
17070             const unsigned numSuccs = blk->NumSucc();
17071             for (unsigned succNum = 0; succNum < numSuccs; succNum++)
17072             {
17073                 BasicBlock* succ = blk->GetSucc(succNum);
17074                 // If it's not already in the clique, add it, and also add it
17075                 // as a member of the successor "toDo" set.
17076                 if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0)
17077                 {
17078                     callback->Visit(SpillCliqueSucc, succ);
17079                     impSpillCliqueSetMember(SpillCliqueSucc, succ, 1);
17080                     succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo);
17081                     toDo           = true;
17082                 }
17083             }
17084         }
17085         // Look at the predecessors of every member of the successor to-do list.
17086         while (succCliqueToDo != nullptr)
17087         {
17088             BlockListNode* node = succCliqueToDo;
17089             succCliqueToDo      = node->m_next;
17090             BasicBlock* blk     = node->m_blk;
17091             FreeBlockListNode(node);
17092
17093             for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next)
17094             {
17095                 BasicBlock* predBlock = pred->block;
17096                 // If it's not already in the clique, add it, and also add it
17097                 // as a member of the predecessor "toDo" set.
17098                 if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0)
17099                 {
17100                     callback->Visit(SpillCliquePred, predBlock);
17101                     impSpillCliqueSetMember(SpillCliquePred, predBlock, 1);
17102                     predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo);
17103                     toDo           = true;
17104                 }
17105             }
17106         }
17107     }
17108
17109     // If this fails, it means we didn't walk the spill clique properly and somehow managed
17110     // to miss walking back to include the predecessor we started from.
17111     // The most likely cause is missing or out-of-date bbPreds.
17112     assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0);
17113 }
17114
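// Callback used by impGetSpillTmpBase: records the chosen base temp for each clique member,
// as bbStkTempsIn for successor-side members and bbStkTempsOut for predecessor-side members.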
17115 void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
17116 {
17117     if (predOrSucc == SpillCliqueSucc)
17118     {
17119         assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor.
17120         blk->bbStkTempsIn = m_baseTmp;
17121     }
17122     else
17123     {
17124         assert(predOrSucc == SpillCliquePred);
17125         assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor.
17126         blk->bbStkTempsOut = m_baseTmp;
17127     }
17128 }
17129
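// Callback used by impReimportSpillClique: marks clique members for re-import where needed.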
17130 void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
17131 {
17132     // For Preds we could be a little smarter and just find the existing store
17133     // and re-type it/add a cast, but that is complicated and hopefully very rare, so
17134     // just re-import the whole block (just like we do for successors)
17135
17136     if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0))
17137     {
17138         // If we haven't imported this block and we're not going to (because it isn't on
17139         // the pending list) then just ignore it for now.
17140
17141         // This block has either never been imported (EntryState == NULL) or it failed
17142         // verification. Neither state requires us to force it to be imported now.
17143         assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION));
17144         return;
17145     }
17146
17147     // For successors we have a valid verCurrentState, so just mark them for reimport
17148     // the 'normal' way.
17149     // Unlike predecessors, we *DO* need to reimport the current block because the
17150     // initial import had the wrong entry state types.
17151     // Similarly, blocks that are currently on the pending list still need to call
17152     // impImportBlockPending to fix up their entry state.
17153     if (predOrSucc == SpillCliqueSucc)
17154     {
17155         m_pComp->impReimportMarkBlock(blk);
17156
17157         // Set the current stack state to that of the blk->bbEntryState
17158         m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState);
17159         assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry());
17160
17161         m_pComp->impImportBlockPending(blk);
17162     }
17163     else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0))
17164     {
17165         // As described above, we are only visiting predecessors so they can
17166         // add the appropriate casts, since we have already done that for the current
17167         // block, it does not need to be reimported.
17168         // Nor do we need to reimport blocks that are still pending, but not yet
17169         // imported.
17170         //
17171         // For predecessors, we have no state to seed the EntryState, so we just have
17172         // to assume the existing one is correct.
17173         // If the block is also a successor, it will get the EntryState properly
17174         // updated when it is visited as a successor in the above "if" block.
17175         assert(predOrSucc == SpillCliquePred);
17176         m_pComp->impReimportBlockPending(blk);
17177     }
17178 }
17179
17180 // Re-type the incoming lclVar nodes to match the varDsc.
17181 void Compiler::impRetypeEntryStateTemps(BasicBlock* blk)
17182 {
17183     if (blk->bbEntryState != nullptr)
17184     {
17185         EntryState* es = blk->bbEntryState;
17186         for (unsigned level = 0; level < es->esStackDepth; level++)
17187         {
17188             GenTree* tree = es->esStack[level].val;
17189             if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD))
17190             {
17191                 unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
17192                 noway_assert(lclNum < lvaCount);
17193                 LclVarDsc* varDsc              = lvaTable + lclNum;
17194                 es->esStack[level].val->gtType = varDsc->TypeGet();
17195             }
17196         }
17197     }
17198 }
17199
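// Return the base local number of the spill temps used for the stack values that are live
// on exit from "block". If the spill clique containing "block" (as a predecessor) has not
// yet been assigned a base, grab enough temps for the current stack depth and propagate
// the base to every clique member via SetSpillTempsBase.
//
// A non-empty stack at a block boundary arises, for example (illustrative sketch, not a
// specific test case), from a conditional expression such as C# "x = cond ? a : b;": each
// arm pushes a value that the join block consumes, so all predecessors and the successor
// must agree on which temp holds it.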
17200 unsigned Compiler::impGetSpillTmpBase(BasicBlock* block)
17201 {
17202     if (block->bbStkTempsOut != NO_BASE_TMP)
17203     {
17204         return block->bbStkTempsOut;
17205     }
17206
17207 #ifdef DEBUG
17208     if (verbose)
17209     {
17210         printf("\n*************** In impGetSpillTmpBase(BB%02u)\n", block->bbNum);
17211     }
17212 #endif // DEBUG
17213
17214     // Otherwise, choose one, and propagate to all members of the spill clique.
17215     // Grab enough temps for the whole stack.
17216     unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries"));
17217     SetSpillTempsBase callback(baseTmp);
17218
17219     // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor
17220     // to one spill clique, and similarly can only be the successor to one spill clique.
17221     impWalkSpillCliqueFromPred(block, &callback);
17222
17223     return baseTmp;
17224 }
17225
17226 void Compiler::impReimportSpillClique(BasicBlock* block)
17227 {
17228 #ifdef DEBUG
17229     if (verbose)
17230     {
17231         printf("\n*************** In impReimportSpillClique(BB%02u)\n", block->bbNum);
17232     }
17233 #endif // DEBUG
17234
17235     // If we get here, it is because this block is already part of a spill clique
17236     // and one predecessor had an outgoing live stack slot of type int, and this
17237     // block has an outgoing live stack slot of type native int.
17238     // We need to reset these before traversal because they have already been set
17239     // by the previous walk to determine all the members of the spill clique.
17240     impInlineRoot()->impSpillCliquePredMembers.Reset();
17241     impInlineRoot()->impSpillCliqueSuccMembers.Reset();
17242
17243     ReimportSpillClique callback(this);
17244
17245     impWalkSpillCliqueFromPred(block, &callback);
17246 }
17247
17248 // Set the pre-state of "block" (which should not have a pre-state allocated) to
17249 // a copy of "srcState", cloning tree pointers as required.
17250 void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState)
17251 {
17252     if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom)
17253     {
17254         block->bbEntryState = nullptr;
17255         return;
17256     }
17257
17258     block->bbEntryState = (EntryState*)compGetMem(sizeof(EntryState));
17259
17260     // block->bbEntryState.esRefcount = 1;
17261
17262     block->bbEntryState->esStackDepth    = srcState->esStackDepth;
17263     block->bbEntryState->thisInitialized = TIS_Bottom;
17264
17265     if (srcState->esStackDepth > 0)
17266     {
17267         block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]);
17268         unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry);
17269
17270         memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize);
17271         for (unsigned level = 0; level < srcState->esStackDepth; level++)
17272         {
17273             GenTree* tree                           = srcState->esStack[level].val;
17274             block->bbEntryState->esStack[level].val = gtCloneExpr(tree);
17275         }
17276     }
17277
17278     if (verTrackObjCtorInitState)
17279     {
17280         verSetThisInit(block, srcState->thisInitialized);
17281     }
17282
17283     return;
17284 }
17285
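// Record the 'this' initialization state in the block's entry state, allocating an
// EntryState if the block does not have one yet.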
17286 void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis)
17287 {
17288     assert(tis != TIS_Bottom); // Precondition.
17289     if (block->bbEntryState == nullptr)
17290     {
17291         block->bbEntryState = new (this, CMK_Unknown) EntryState();
17292     }
17293
17294     block->bbEntryState->thisInitialized = tis;
17295 }
17296
17297 /*
17298  * Resets the current state to the state at the start of the basic block
17299  */
17300 void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState)
17301 {
17302
17303     if (block->bbEntryState == nullptr)
17304     {
17305         destState->esStackDepth    = 0;
17306         destState->thisInitialized = TIS_Bottom;
17307         return;
17308     }
17309
17310     destState->esStackDepth = block->bbEntryState->esStackDepth;
17311
17312     if (destState->esStackDepth > 0)
17313     {
17314         unsigned stackSize = destState->esStackDepth * sizeof(StackEntry);
17315
17316         memcpy(destState->esStack, block->bbStackOnEntry(), stackSize);
17317     }
17318
17319     destState->thisInitialized = block->bbThisOnEntry();
17320
17321     return;
17322 }
17323
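// Accessors for the importer entry state attached to a BasicBlock; a null bbEntryState
// represents the 'common' state (empty stack, TIS_Bottom).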
17324 ThisInitState BasicBlock::bbThisOnEntry()
17325 {
17326     return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom;
17327 }
17328
17329 unsigned BasicBlock::bbStackDepthOnEntry()
17330 {
17331     return (bbEntryState ? bbEntryState->esStackDepth : 0);
17332 }
17333
17334 void BasicBlock::bbSetStack(void* stackBuffer)
17335 {
17336     assert(bbEntryState);
17337     assert(stackBuffer);
17338     bbEntryState->esStack = (StackEntry*)stackBuffer;
17339 }
17340
17341 StackEntry* BasicBlock::bbStackOnEntry()
17342 {
17343     assert(bbEntryState);
17344     return bbEntryState->esStack;
17345 }
17346
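// Initialize verCurrentState for the start of the method: empty stack, with 'this' tracked
// as uninitialized only when verifying a constructor whose 'this' is an object ref. The
// resulting state is then copied into the entry state of the first basic block.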
17347 void Compiler::verInitCurrentState()
17348 {
17349     verTrackObjCtorInitState        = FALSE;
17350     verCurrentState.thisInitialized = TIS_Bottom;
17351
17352     if (tiVerificationNeeded)
17353     {
17354         // Track this ptr initialization
17355         if (!info.compIsStatic && (info.compFlags & CORINFO_FLG_CONSTRUCTOR) && lvaTable[0].lvVerTypeInfo.IsObjRef())
17356         {
17357             verTrackObjCtorInitState        = TRUE;
17358             verCurrentState.thisInitialized = TIS_Uninit;
17359         }
17360     }
17361
17362     // initialize stack info
17363
17364     verCurrentState.esStackDepth = 0;
17365     assert(verCurrentState.esStack != nullptr);
17366
17367     // copy current state to entry state of first BB
17368     verInitBBEntryState(fgFirstBB, &verCurrentState);
17369 }
17370
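// Return the root compiler instance of the inlining tree ('this' when not inlining).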
17371 Compiler* Compiler::impInlineRoot()
17372 {
17373     if (impInlineInfo == nullptr)
17374     {
17375         return this;
17376     }
17377     else
17378     {
17379         return impInlineInfo->InlineRoot;
17380     }
17381 }
17382
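// Get/set membership of "blk" in the predecessor- or successor-side spill clique set;
// the membership tables live on the root of the inlining tree.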
17383 BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk)
17384 {
17385     if (predOrSucc == SpillCliquePred)
17386     {
17387         return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd());
17388     }
17389     else
17390     {
17391         assert(predOrSucc == SpillCliqueSucc);
17392         return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd());
17393     }
17394 }
17395
17396 void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val)
17397 {
17398     if (predOrSucc == SpillCliquePred)
17399     {
17400         impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val);
17401     }
17402     else
17403     {
17404         assert(predOrSucc == SpillCliqueSucc);
17405         impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val);
17406     }
17407 }
17408
17409 /*****************************************************************************
17410  *
17411  *  Convert the instrs ("import") into our internal format (trees). The
17412  *  basic flowgraph has already been constructed and is passed in.
17413  */
17414
17415 void Compiler::impImport(BasicBlock* method)
17416 {
17417 #ifdef DEBUG
17418     if (verbose)
17419     {
17420         printf("*************** In impImport() for %s\n", info.compFullName);
17421     }
17422 #endif
17423
17424     /* Allocate the stack contents */
17425
17426     if (info.compMaxStack <= _countof(impSmallStack))
17427     {
17428         /* Use local variable, don't waste time allocating on the heap */
17429
17430         impStkSize              = _countof(impSmallStack);
17431         verCurrentState.esStack = impSmallStack;
17432     }
17433     else
17434     {
17435         impStkSize              = info.compMaxStack;
17436         verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
17437     }
17438
17439     // initialize the entry state at start of method
17440     verInitCurrentState();
17441
17442     // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase).
17443     Compiler* inlineRoot = impInlineRoot();
17444     if (this == inlineRoot) // These are only used on the root of the inlining tree.
17445     {
17446         // We have initialized these previously, but to size 0.  Make them larger.
17447         impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2);
17448         impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2);
17449         impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2);
17450     }
17451     inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2);
17452     inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2);
17453     inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2);
17454     impBlockListNodeFreeList = nullptr;
17455
17456 #ifdef DEBUG
17457     impLastILoffsStmt   = nullptr;
17458     impNestedStackSpill = false;
17459 #endif
17460     impBoxTemp = BAD_VAR_NUM;
17461
17462     impPendingList = impPendingFree = nullptr;
17463
17464     /* Add the entry-point to the worker-list */
17465
17466     // Skip leading internal blocks. There can be one as a leading scratch BB, and more
17467     // from EH normalization.
17468     // NOTE: It might be possible to always just put fgFirstBB on the pending list, and let everything else just fall
17469     // out.
17470     for (; method->bbFlags & BBF_INTERNAL; method = method->bbNext)
17471     {
17472         // Treat these as imported.
17473         assert(method->bbJumpKind == BBJ_NONE); // We assume all the leading ones are fallthrough.
17474         JITDUMP("Marking leading BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", method->bbNum);
17475         method->bbFlags |= BBF_IMPORTED;
17476     }
17477
17478     impImportBlockPending(method);
17479
17480     /* Import blocks in the worker-list until there are no more */
17481
17482     while (impPendingList)
17483     {
17484         /* Remove the entry at the front of the list */
17485
17486         PendingDsc* dsc = impPendingList;
17487         impPendingList  = impPendingList->pdNext;
17488         impSetPendingBlockMember(dsc->pdBB, 0);
17489
17490         /* Restore the stack state */
17491
17492         verCurrentState.thisInitialized = dsc->pdThisPtrInit;
17493         verCurrentState.esStackDepth    = dsc->pdSavedStack.ssDepth;
17494         if (verCurrentState.esStackDepth)
17495         {
17496             impRestoreStackState(&dsc->pdSavedStack);
17497         }
17498
17499         /* Add the entry to the free list for reuse */
17500
17501         dsc->pdNext    = impPendingFree;
17502         impPendingFree = dsc;
17503
17504         /* Now import the block */
17505
17506         if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION)
17507         {
17508
17509 #ifdef _TARGET_64BIT_
17510             // On AMD64, during verification we have to match JIT64 behavior since the VM is very tightly
17511             // coupled with the JIT64 IL Verification logic.  Look inside verHandleVerificationFailure
17512             // method for further explanation on why we raise this exception instead of making the jitted
17513             // code throw the verification exception during execution.
17514             if (tiVerificationNeeded && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
17515             {
17516                 BADCODE("Basic block marked as not verifiable");
17517             }
17518             else
17519 #endif // _TARGET_64BIT_
17520             {
17521                 verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true));
17522                 impEndTreeList(dsc->pdBB);
17523             }
17524         }
17525         else
17526         {
17527             impImportBlock(dsc->pdBB);
17528
17529             if (compDonotInline())
17530             {
17531                 return;
17532             }
17533             if (compIsForImportOnly() && !tiVerificationNeeded)
17534             {
17535                 return;
17536             }
17537         }
17538     }
17539
17540 #ifdef DEBUG
17541     if (verbose && info.compXcptnsCount)
17542     {
17543         printf("\nAfter impImport() added block for try,catch,finally");
17544         fgDispBasicBlocks();
17545         printf("\n");
17546     }
17547
17548     // Used in impImportBlockPending() for STRESS_CHK_REIMPORT
17549     for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
17550     {
17551         block->bbFlags &= ~BBF_VISITED;
17552     }
17553 #endif
17554
17555     assert(!compIsForInlining() || !tiVerificationNeeded);
17556 }
17557
17558 // Checks if a typeinfo (usually stored in the type stack) is a struct.
17559 // The invariant here is that if it's not a ref or a method and has a class handle,
17560 // it's a valuetype.
17561 bool Compiler::impIsValueType(typeInfo* pTypeInfo)
17562 {
17563     if (pTypeInfo && pTypeInfo->IsValueClassWithClsHnd())
17564     {
17565         return true;
17566     }
17567     else
17568     {
17569         return false;
17570     }
17571 }
17572
17573 /*****************************************************************************
17574  *  Check to see if the tree is the address of a local or
17575     the address of a field in a local.
17576
17577     *lclVarTreeOut will contain the GT_LCL_VAR tree when it returns TRUE.
17578
17579  */
17580
17581 BOOL Compiler::impIsAddressInLocal(GenTree* tree, GenTree** lclVarTreeOut)
17582 {
17583     if (tree->gtOper != GT_ADDR)
17584     {
17585         return FALSE;
17586     }
17587
17588     GenTree* op = tree->gtOp.gtOp1;
17589     while (op->gtOper == GT_FIELD)
17590     {
17591         op = op->gtField.gtFldObj;
17592         if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL.
17593         {
17594             op = op->gtOp.gtOp1;
17595         }
17596         else
17597         {
17598             return false;
17599         }
17600     }
17601
17602     if (op->gtOper == GT_LCL_VAR)
17603     {
17604         *lclVarTreeOut = op;
17605         return TRUE;
17606     }
17607     else
17608     {
17609         return FALSE;
17610     }
17611 }
17612
17613 //------------------------------------------------------------------------
17614 // impMakeDiscretionaryInlineObservations: make observations that help
17615 // determine the profitability of a discretionary inline
17616 //
17617 // Arguments:
17618 //    pInlineInfo -- InlineInfo for the inline, or null for the prejit root
17619 //    inlineResult -- InlineResult accumulating information about this inline
17620 //
17621 // Notes:
17622 //    If inlining or prejitting the root, this method also makes
17623 //    various observations about the method that factor into inline
17624 //    decisions. It sets `compNativeSizeEstimate` as a side effect.
17625
17626 void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult)
17627 {
17628     assert(pInlineInfo != nullptr && compIsForInlining() || // Perform the actual inlining.
17629            pInlineInfo == nullptr && !compIsForInlining()   // Calculate the static inlining hint for ngen.
17630            );
17631
17632     // If we're really inlining, we should just have one result in play.
17633     assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult));
17634
17635     // If this is a "forceinline" method, the JIT probably shouldn't have gone
17636     // to the trouble of estimating the native code size. Even if it did, it
17637     // shouldn't be relying on the result of this method.
17638     assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE);
17639
17640     // Note if the caller contains NEWOBJ or NEWARR.
17641     Compiler* rootCompiler = impInlineRoot();
17642
17643     if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0)
17644     {
17645         inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY);
17646     }
17647
17648     if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0)
17649     {
17650         inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ);
17651     }
17652
17653     bool calleeIsStatic  = (info.compFlags & CORINFO_FLG_STATIC) != 0;
17654     bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0;
17655
17656     if (isSpecialMethod)
17657     {
17658         if (calleeIsStatic)
17659         {
17660             inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR);
17661         }
17662         else
17663         {
17664             inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR);
17665         }
17666     }
17667     else if (!calleeIsStatic)
17668     {
17669         // Callee is an instance method.
17670         //
17671         // Check if the callee has the same 'this' as the root.
17672         if (pInlineInfo != nullptr)
17673         {
17674             GenTree* thisArg = pInlineInfo->iciCall->gtCall.gtCallObjp;
17675             assert(thisArg);
17676             bool isSameThis = impIsThis(thisArg);
17677             inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis);
17678         }
17679     }
17680
17681     // Note if the callee's class is a promotable struct
17682     if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0)
17683     {
17684         lvaStructPromotionInfo structPromotionInfo;
17685         lvaCanPromoteStructType(info.compClassHnd, &structPromotionInfo, false);
17686         if (structPromotionInfo.canPromote)
17687         {
17688             inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE);
17689         }
17690     }
17691
17692 #ifdef FEATURE_SIMD
17693
17694     // Note if this method has SIMD args or a SIMD return value
17695     if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn)
17696     {
17697         inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD);
17698     }
17699
17700 #endif // FEATURE_SIMD
17701
17702     // Roughly classify callsite frequency.
17703     InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED;
17704
17705     // If this is a prejit root, or a maximally hot block...
17706     if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->bbWeight >= BB_MAX_WEIGHT))
17707     {
17708         frequency = InlineCallsiteFrequency::HOT;
17709     }
17710     // No training data.  Look for loop-like things.
17711     // We consider a recursive call loop-like.  Do not give the inlining boost to the method itself.
17712     // However, give it to things nearby.
17713     else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) &&
17714              (pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle))
17715     {
17716         frequency = InlineCallsiteFrequency::LOOP;
17717     }
17718     else if (pInlineInfo->iciBlock->hasProfileWeight() && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT))
17719     {
17720         frequency = InlineCallsiteFrequency::WARM;
17721     }
17722     // Now modify the multiplier based on where we're called from.
17723     else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR))
17724     {
17725         frequency = InlineCallsiteFrequency::RARE;
17726     }
17727     else
17728     {
17729         frequency = InlineCallsiteFrequency::BORING;
17730     }
17731
17732     // Also capture the block weight of the call site.  In the prejit
17733     // root case, assume there's some hot call site for this method.
17734     unsigned weight = 0;
17735
17736     if (pInlineInfo != nullptr)
17737     {
17738         weight = pInlineInfo->iciBlock->bbWeight;
17739     }
17740     else
17741     {
17742         weight = BB_MAX_WEIGHT;
17743     }
17744
17745     inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency));
17746     inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, static_cast<int>(weight));
17747 }
17748
17749 /*****************************************************************************
17750  This method makes STATIC inlining decision based on the IL code.
17751  It should not make any inlining decision based on the context.
17752  If forceInline is true, then the inlining decision should not depend on
17753  performance heuristics (code size, etc.).
17754  */
17755
17756 void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
17757                               CORINFO_METHOD_INFO*  methInfo,
17758                               bool                  forceInline,
17759                               InlineResult*         inlineResult)
17760 {
17761     unsigned codeSize = methInfo->ILCodeSize;
17762
17763     // We shouldn't have made up our minds yet...
17764     assert(!inlineResult->IsDecided());
17765
17766     if (methInfo->EHcount)
17767     {
17768         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH);
17769         return;
17770     }
17771
17772     if ((methInfo->ILCode == nullptr) || (codeSize == 0))
17773     {
17774         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY);
17775         return;
17776     }
17777
17778     // For now we don't inline varargs (import code can't handle it)
17779
17780     if (methInfo->args.isVarArg())
17781     {
17782         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
17783         return;
17784     }
17785
17786     // Reject if it has too many locals.
17787     // This is currently an implementation limit due to fixed-size arrays in the
17788     // inline info, rather than a performance heuristic.
17789
17790     inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs);
17791
17792     if (methInfo->locals.numArgs > MAX_INL_LCLS)
17793     {
17794         inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS);
17795         return;
17796     }
17797
17798     // Make sure there aren't too many arguments.
17799     // This is currently an implementation limit due to fixed-size arrays in the
17800     // inline info, rather than a performance heuristic.
17801
17802     inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs);
17803
17804     if (methInfo->args.numArgs > MAX_INL_ARGS)
17805     {
17806         inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS);
17807         return;
17808     }
17809
17810     // Note force inline state
17811
17812     inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline);
17813
17814     // Note IL code size
17815
17816     inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize);
17817
17818     if (inlineResult->IsFailure())
17819     {
17820         return;
17821     }
17822
17823     // Make sure maxstack is not too big
17824
17825     inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack);
17826
17827     if (inlineResult->IsFailure())
17828     {
17829         return;
17830     }
17831 }
17832
17833 /*****************************************************************************
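 *  impCheckCanInline: run, under an EE error trap, the checks that decide whether the
 *  call to fncHandle may be considered for inlining; on success an InlineCandidateInfo
 *  is allocated and returned via ppInlineCandidateInfo, otherwise the failure is
 *  recorded in inlineResult.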
17834  */
17835
17836 void Compiler::impCheckCanInline(GenTree*               call,
17837                                  CORINFO_METHOD_HANDLE  fncHandle,
17838                                  unsigned               methAttr,
17839                                  CORINFO_CONTEXT_HANDLE exactContextHnd,
17840                                  InlineCandidateInfo**  ppInlineCandidateInfo,
17841                                  InlineResult*          inlineResult)
17842 {
17843     // Either EE or JIT might throw exceptions below.
17844     // If that happens, just don't inline the method.
17845
17846     struct Param
17847     {
17848         Compiler*              pThis;
17849         GenTree*               call;
17850         CORINFO_METHOD_HANDLE  fncHandle;
17851         unsigned               methAttr;
17852         CORINFO_CONTEXT_HANDLE exactContextHnd;
17853         InlineResult*          result;
17854         InlineCandidateInfo**  ppInlineCandidateInfo;
17855     } param;
17856     memset(&param, 0, sizeof(param));
17857
17858     param.pThis                 = this;
17859     param.call                  = call;
17860     param.fncHandle             = fncHandle;
17861     param.methAttr              = methAttr;
17862     param.exactContextHnd       = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle);
17863     param.result                = inlineResult;
17864     param.ppInlineCandidateInfo = ppInlineCandidateInfo;
17865
17866     bool success = eeRunWithErrorTrap<Param>(
17867         [](Param* pParam) {
17868             DWORD                  dwRestrictions = 0;
17869             CorInfoInitClassResult initClassResult;
17870
17871 #ifdef DEBUG
17872             const char* methodName;
17873             const char* className;
17874             methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className);
17875
17876             if (JitConfig.JitNoInline())
17877             {
17878                 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE);
17879                 goto _exit;
17880             }
17881 #endif
17882
17883             /* Try to get the code address/size for the method */
17884
17885             CORINFO_METHOD_INFO methInfo;
17886             if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo))
17887             {
17888                 pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO);
17889                 goto _exit;
17890             }
17891
17892             bool forceInline;
17893             forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE);
17894
17895             pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result);
17896
17897             if (pParam->result->IsFailure())
17898             {
17899                 assert(pParam->result->IsNever());
17900                 goto _exit;
17901             }
17902
17903             // Speculatively check if initClass() can be done.
17904             // If it can be done, we will try to inline the method. If inlining
17905             // succeeds, then we will do the non-speculative initClass() and commit it.
17906             // If this speculative call to initClass() fails, there is no point
17907             // trying to inline this method.
17908             initClassResult =
17909                 pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */,
17910                                                            pParam->exactContextHnd /* context */,
17911                                                            TRUE /* speculative */);
17912
17913             if (initClassResult & CORINFO_INITCLASS_DONT_INLINE)
17914             {
17915                 pParam->result->NoteFatal(InlineObservation::CALLSITE_CLASS_INIT_FAILURE_SPEC);
17916                 goto _exit;
17917             }
17918
17919             // Give the EE the final say in whether to inline or not.
17920             // This should be last since for verifiable code, this can be expensive
17921
17922             /* VM Inline check also ensures that the method is verifiable if needed */
17923             CorInfoInline vmResult;
17924             vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle,
17925                                                                   &dwRestrictions);
17926
17927             if (vmResult == INLINE_FAIL)
17928             {
17929                 pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE);
17930             }
17931             else if (vmResult == INLINE_NEVER)
17932             {
17933                 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE);
17934             }
17935
17936             if (pParam->result->IsFailure())
17937             {
17938                 // Make sure not to report this one.  It was already reported by the VM.
17939                 pParam->result->SetReported();
17940                 goto _exit;
17941             }
17942
17943             // check for unsupported inlining restrictions
17944             assert((dwRestrictions & ~(INLINE_RESPECT_BOUNDARY | INLINE_NO_CALLEE_LDSTR | INLINE_SAME_THIS)) == 0);
17945
17946             if (dwRestrictions & INLINE_SAME_THIS)
17947             {
17948                 GenTree* thisArg = pParam->call->gtCall.gtCallObjp;
17949                 assert(thisArg);
17950
17951                 if (!pParam->pThis->impIsThis(thisArg))
17952                 {
17953                     pParam->result->NoteFatal(InlineObservation::CALLSITE_REQUIRES_SAME_THIS);
17954                     goto _exit;
17955                 }
17956             }
17957
17958             /* Get the method properties */
17959
17960             CORINFO_CLASS_HANDLE clsHandle;
17961             clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle);
17962             unsigned clsAttr;
17963             clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle);
17964
17965             /* Get the return type */
17966
17967             var_types fncRetType;
17968             fncRetType = pParam->call->TypeGet();
17969
17970 #ifdef DEBUG
17971             var_types fncRealRetType;
17972             fncRealRetType = JITtype2varType(methInfo.args.retType);
17973
17974             assert((genActualType(fncRealRetType) == genActualType(fncRetType)) ||
17975                    // <BUGNUM> VSW 288602 </BUGNUM>
17976                    // In case of IJW, we allow to assign a native pointer to a BYREF.
17977                    (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) ||
17978                    (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT)));
17979 #endif
17980
17981             //
17982             // Allocate an InlineCandidateInfo structure
17983             //
17984             InlineCandidateInfo* pInfo;
17985             pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo;
17986
17987             pInfo->dwRestrictions  = dwRestrictions;
17988             pInfo->methInfo        = methInfo;
17989             pInfo->methAttr        = pParam->methAttr;
17990             pInfo->clsHandle       = clsHandle;
17991             pInfo->clsAttr         = clsAttr;
17992             pInfo->fncRetType      = fncRetType;
17993             pInfo->exactContextHnd = pParam->exactContextHnd;
17994             pInfo->ilCallerHandle  = pParam->pThis->info.compMethodHnd;
17995             pInfo->initClassResult = initClassResult;
17996
17997             *(pParam->ppInlineCandidateInfo) = pInfo;
17998
17999         _exit:;
18000         },
18001         &param);
18002     if (!success)
18003     {
18004         param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR);
18005     }
18006 }
18007
18008 //------------------------------------------------------------------------
18009 // impInlineRecordArgInfo: record information about an inline candidate argument
18010 //
18011 // Arguments:
18012 //   pInlineInfo - inline info for the inline candidate
18013 //   curArgVal - tree for the caller actual argument value
18014 //   argNum - logical index of this argument
18015 //   inlineResult - result of ongoing inline evaluation
18016 //
18017 // Notes:
18018 //
18019 //   Checks for various inline blocking conditions and makes notes in
18020 //   the inline info arg table about the properties of the actual. These
18021 //   properties are used later by impFetchArg to determine how best to
18022 //   pass the argument into the inlinee.
18023
18024 void Compiler::impInlineRecordArgInfo(InlineInfo*   pInlineInfo,
18025                                       GenTree*      curArgVal,
18026                                       unsigned      argNum,
18027                                       InlineResult* inlineResult)
18028 {
18029     InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum];
18030
18031     if (curArgVal->gtOper == GT_MKREFANY)
18032     {
18033         inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY);
18034         return;
18035     }
18036
18037     inlCurArgInfo->argNode = curArgVal;
18038
18039     GenTree* lclVarTree;
18040     if (impIsAddressInLocal(curArgVal, &lclVarTree) && varTypeIsStruct(lclVarTree))
18041     {
18042         inlCurArgInfo->argIsByRefToStructLocal = true;
18043 #ifdef FEATURE_SIMD
18044         if (lvaTable[lclVarTree->AsLclVarCommon()->gtLclNum].lvSIMDType)
18045         {
18046             pInlineInfo->hasSIMDTypeArgLocalOrReturn = true;
18047         }
18048 #endif // FEATURE_SIMD
18049     }
18050
18051     if (curArgVal->gtFlags & GTF_ALL_EFFECT)
18052     {
18053         inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0;
18054         inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0;
18055     }
18056
18057     if (curArgVal->gtOper == GT_LCL_VAR)
18058     {
18059         inlCurArgInfo->argIsLclVar = true;
18060
18061         /* Remember the "original" argument number */
18062         curArgVal->gtLclVar.gtLclILoffs = argNum;
18063     }
18064
18065     if ((curArgVal->OperKind() & GTK_CONST) ||
18066         ((curArgVal->gtOper == GT_ADDR) && (curArgVal->gtOp.gtOp1->gtOper == GT_LCL_VAR)))
18067     {
18068         inlCurArgInfo->argIsInvariant = true;
18069         if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->gtIntCon.gtIconVal == 0))
18070         {
18071             // Abort inlining at this call site
18072             inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS);
18073             return;
18074         }
18075     }
18076
18077     // If the arg is a local that is address-taken, we can't safely
18078     // directly substitute it into the inlinee.
18079     //
18080     // Previously we'd accomplish this by setting "argHasLdargaOp" but
18081     // that has a stronger meaning: that the arg value can change in
18082     // the method body. Using that flag prevents type propagation,
18083     // which is safe in this case.
18084     //
18085     // Instead mark the arg as having a caller local ref.
18086     if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal))
18087     {
18088         inlCurArgInfo->argHasCallerLocalRef = true;
18089     }
18090
18091 #ifdef DEBUG
18092     if (verbose)
18093     {
18094         if (inlCurArgInfo->argIsThis)
18095         {
18096             printf("thisArg:");
18097         }
18098         else
18099         {
18100             printf("\nArgument #%u:", argNum);
18101         }
18102         if (inlCurArgInfo->argIsLclVar)
18103         {
18104             printf(" is a local var");
18105         }
18106         if (inlCurArgInfo->argIsInvariant)
18107         {
18108             printf(" is a constant");
18109         }
18110         if (inlCurArgInfo->argHasGlobRef)
18111         {
18112             printf(" has global refs");
18113         }
18114         if (inlCurArgInfo->argHasCallerLocalRef)
18115         {
18116             printf(" has caller local ref");
18117         }
18118         if (inlCurArgInfo->argHasSideEff)
18119         {
18120             printf(" has side effects");
18121         }
18122         if (inlCurArgInfo->argHasLdargaOp)
18123         {
18124             printf(" has ldarga effect");
18125         }
18126         if (inlCurArgInfo->argHasStargOp)
18127         {
18128             printf(" has starg effect");
18129         }
18130         if (inlCurArgInfo->argIsByRefToStructLocal)
18131         {
18132             printf(" is byref to a struct local");
18133         }
18134
18135         printf("\n");
18136         gtDispTree(curArgVal);
18137         printf("\n");
18138     }
18139 #endif
18140 }
18141
18142 //------------------------------------------------------------------------
18143 // impInlineInitVars: setup inline information for inlinee args and locals
18144 //
18145 // Arguments:
18146 //    pInlineInfo - inline info for the inline candidate
18147 //
18148 // Notes:
18149 //    This method primarily adds caller-supplied info to the inlArgInfo
18150 //    and sets up the lclVarInfo table.
18151 //
18152 //    For args, the inlArgInfo records properties of the actual argument
18153 //    including the tree node that produces the arg value. This node is
18154 //    usually the tree node present at the call, but may also differ in
18155 //    various ways:
18156 //    - when the call arg is a GT_RET_EXPR, we search back through the ret
18157 //      expr chain for the actual node. Note this will either be the original
18158 //      call (which will be a failed inline by this point), or the return
18159 //      expression from some set of inlines.
18160 //    - when argument type casting is needed the necessary casts are added
18161 //      around the argument node.
18162     //    - if an argument can be simplified by folding then the node here is the
18163 //      folded value.
18164 //
18165 //   The method may make observations that lead to marking this candidate as
18166 //   a failed inline. If this happens the initialization is abandoned immediately
18167 //   to try and reduce the jit time cost for a failed inline.
18168
18169 void Compiler::impInlineInitVars(InlineInfo* pInlineInfo)
18170 {
18171     assert(!compIsForInlining());
18172
18173     GenTree*             call         = pInlineInfo->iciCall;
18174     CORINFO_METHOD_INFO* methInfo     = &pInlineInfo->inlineCandidateInfo->methInfo;
18175     unsigned             clsAttr      = pInlineInfo->inlineCandidateInfo->clsAttr;
18176     InlArgInfo*          inlArgInfo   = pInlineInfo->inlArgInfo;
18177     InlLclVarInfo*       lclVarInfo   = pInlineInfo->lclVarInfo;
18178     InlineResult*        inlineResult = pInlineInfo->inlineResult;
18179
18180     const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo);
18181
18182     /* init the argument struct */
18183
18184     memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0]));
18185
18186     /* Get hold of the 'this' pointer and the argument list proper */
18187
18188     GenTree* thisArg = call->gtCall.gtCallObjp;
18189     GenTree* argList = call->gtCall.gtCallArgs;
18190     unsigned argCnt  = 0; // Count of the arguments
18191
18192     assert((methInfo->args.hasThis()) == (thisArg != nullptr));
18193
18194     if (thisArg)
18195     {
18196         inlArgInfo[0].argIsThis = true;
18197         GenTree* actualThisArg  = thisArg->gtRetExprVal();
18198         impInlineRecordArgInfo(pInlineInfo, actualThisArg, argCnt, inlineResult);
18199
18200         if (inlineResult->IsFailure())
18201         {
18202             return;
18203         }
18204
18205         /* Increment the argument count */
18206         argCnt++;
18207     }
18208
18209     /* Record some information about each of the arguments */
18210     bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0;
18211
18212 #if USER_ARGS_COME_LAST
18213     unsigned typeCtxtArg = thisArg ? 1 : 0;
18214 #else  // USER_ARGS_COME_LAST
18215     unsigned typeCtxtArg = methInfo->args.totalILArgs();
18216 #endif // USER_ARGS_COME_LAST
18217
18218     for (GenTree* argTmp = argList; argTmp; argTmp = argTmp->gtOp.gtOp2)
18219     {
18220         if (argTmp == argList && hasRetBuffArg)
18221         {
18222             continue;
18223         }
18224
18225         // Ignore the type context argument
18226         if (hasTypeCtxtArg && (argCnt == typeCtxtArg))
18227         {
18228             pInlineInfo->typeContextArg = typeCtxtArg;
18229             typeCtxtArg                 = 0xFFFFFFFF;
18230             continue;
18231         }
18232
18233         assert(argTmp->gtOper == GT_LIST);
18234         GenTree* arg       = argTmp->gtOp.gtOp1;
18235         GenTree* actualArg = arg->gtRetExprVal();
18236         impInlineRecordArgInfo(pInlineInfo, actualArg, argCnt, inlineResult);
18237
18238         if (inlineResult->IsFailure())
18239         {
18240             return;
18241         }
18242
18243         /* Increment the argument count */
18244         argCnt++;
18245     }
18246
18247     /* Make sure we got the arg number right */
18248     assert(argCnt == methInfo->args.totalILArgs());
18249
18250 #ifdef FEATURE_SIMD
18251     bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn;
18252 #endif // FEATURE_SIMD
18253
18254     /* We have typeless opcodes, get type information from the signature */
18255
18256     if (thisArg)
18257     {
18258         var_types sigType;
18259
18260         if (clsAttr & CORINFO_FLG_VALUECLASS)
18261         {
18262             sigType = TYP_BYREF;
18263         }
18264         else
18265         {
18266             sigType = TYP_REF;
18267         }
18268
18269         lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle);
18270         lclVarInfo[0].lclHasLdlocaOp = false;
18271
18272 #ifdef FEATURE_SIMD
18273         // We always want to check isSIMDorHWSIMDClass, since we want to set foundSIMDType (to increase
18274         // the inlining multiplier) for anything in that assembly.
18275         // But we only need to normalize it if it is a TYP_STRUCT
18276         // (which we need to do even if we have already set foundSIMDType).
18277         if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
18278         {
18279             if (sigType == TYP_STRUCT)
18280             {
18281                 sigType = impNormStructType(lclVarInfo[0].lclVerTypeInfo.GetClassHandle());
18282             }
18283             foundSIMDType = true;
18284         }
18285 #endif // FEATURE_SIMD
18286         lclVarInfo[0].lclTypeInfo = sigType;
18287
18288         assert(varTypeIsGC(thisArg->gtType) ||   // "this" is managed
18289                (thisArg->gtType == TYP_I_IMPL && // "this" is unmanaged but the method's class doesn't care
18290                 (clsAttr & CORINFO_FLG_VALUECLASS)));
18291
18292         if (genActualType(thisArg->gtType) != genActualType(sigType))
18293         {
18294             if (sigType == TYP_REF)
18295             {
18296                 /* The argument cannot be bashed into a ref (see bug 750871) */
18297                 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF);
18298                 return;
18299             }
18300
18301             /* This can only happen with byrefs <-> ints/shorts */
18302
18303             assert(genActualType(sigType) == TYP_I_IMPL || sigType == TYP_BYREF);
18304             assert(genActualType(thisArg->gtType) == TYP_I_IMPL || thisArg->gtType == TYP_BYREF);
18305
18306             if (sigType == TYP_BYREF)
18307             {
18308                 lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18309             }
18310             else if (thisArg->gtType == TYP_BYREF)
18311             {
18312                 assert(sigType == TYP_I_IMPL);
18313
18314                 /* If possible change the BYREF to an int */
18315                 if (thisArg->IsVarAddr())
18316                 {
18317                     thisArg->gtType              = TYP_I_IMPL;
18318                     lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18319                 }
18320                 else
18321                 {
18322                     /* Arguments 'int <- byref' cannot be bashed */
18323                     inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
18324                     return;
18325                 }
18326             }
18327         }
18328     }
18329
18330     /* Init the types of the arguments and make sure the types
18331      * from the trees match the types in the signature */
18332
18333     CORINFO_ARG_LIST_HANDLE argLst;
18334     argLst = methInfo->args.args;
18335
18336     unsigned i;
18337     for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst))
18338     {
18339         var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args);
18340
18341         lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst);
18342
18343 #ifdef FEATURE_SIMD
18344         if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
18345         {
18346             // If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've
18347             // found a SIMD type, even if this may not be a type we recognize (the assumption is that
18348             // it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier).
18349             foundSIMDType = true;
18350             if (sigType == TYP_STRUCT)
18351             {
18352                 var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle());
18353                 sigType              = structType;
18354             }
18355         }
18356 #endif // FEATURE_SIMD
18357
18358         lclVarInfo[i].lclTypeInfo    = sigType;
18359         lclVarInfo[i].lclHasLdlocaOp = false;
18360
18361         /* Does the tree type match the signature type? */
18362
18363         GenTree* inlArgNode = inlArgInfo[i].argNode;
18364
18365         if (sigType != inlArgNode->gtType)
18366         {
18367             /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints,
18368                but in bad IL cases with caller-callee signature mismatches we can see other types.
18369                Intentionally reject such mismatches (rather than asserting) so the jit degrades
18370                gracefully when encountering bad IL. */
18371
18372             bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) ||
18373                                         (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) ||
18374                                         (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType));
18375
18376             if (!isPlausibleTypeMatch)
18377             {
18378                 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE);
18379                 return;
18380             }
18381
18382             /* Is it a narrowing or widening cast?
18383              * Widening casts are ok since the value computed is already
18384              * normalized to an int (on the IL stack) */
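            // For illustration only (hypothetical): if the callee signature declares a
            // 'short' parameter but the caller supplies an int-typed tree, the code below
            // inserts a cast node to the signature type and, for an invariant argument,
            // folds it back to a constant via gtFoldExprConst.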
18385
18386             if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType))
18387             {
18388                 if (sigType == TYP_BYREF)
18389                 {
18390                     lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18391                 }
18392                 else if (inlArgNode->gtType == TYP_BYREF)
18393                 {
18394                     assert(varTypeIsIntOrI(sigType));
18395
18396                     /* If possible bash the BYREF to an int */
18397                     if (inlArgNode->IsVarAddr())
18398                     {
18399                         inlArgNode->gtType           = TYP_I_IMPL;
18400                         lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18401                     }
18402                     else
18403                     {
18404                         /* Arguments 'int <- byref' cannot be changed */
18405                         inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
18406                         return;
18407                     }
18408                 }
18409                 else if (genTypeSize(sigType) < EA_PTRSIZE)
18410                 {
18411                     /* Narrowing cast */
18412
18413                     if (inlArgNode->gtOper == GT_LCL_VAR &&
18414                         !lvaTable[inlArgNode->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad() &&
18415                         sigType == lvaGetRealType(inlArgNode->gtLclVarCommon.gtLclNum))
18416                     {
18417                         /* We don't need to insert a cast here as the variable
18418                            was assigned a normalized value of the right type */
18419
18420                         continue;
18421                     }
18422
18423                     inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(TYP_INT, inlArgNode, sigType);
18424
18425                     inlArgInfo[i].argIsLclVar = false;
18426
18427                     /* Try to fold the node in case we have constant arguments */
18428
18429                     if (inlArgInfo[i].argIsInvariant)
18430                     {
18431                         inlArgNode            = gtFoldExprConst(inlArgNode);
18432                         inlArgInfo[i].argNode = inlArgNode;
18433                         assert(inlArgNode->OperIsConst());
18434                     }
18435                 }
18436 #ifdef _TARGET_64BIT_
18437                 else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType))
18438                 {
18439                     // This should only happen for int -> native int widening
18440                     inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(genActualType(sigType), inlArgNode, sigType);
18441
18442                     inlArgInfo[i].argIsLclVar = false;
18443
18444                     /* Try to fold the node in case we have constant arguments */
18445
18446                     if (inlArgInfo[i].argIsInvariant)
18447                     {
18448                         inlArgNode            = gtFoldExprConst(inlArgNode);
18449                         inlArgInfo[i].argNode = inlArgNode;
18450                         assert(inlArgNode->OperIsConst());
18451                     }
18452                 }
18453 #endif // _TARGET_64BIT_
18454             }
18455         }
18456     }
18457
18458     /* Init the types of the local variables */
18459
18460     CORINFO_ARG_LIST_HANDLE localsSig;
18461     localsSig = methInfo->locals.args;
18462
18463     for (i = 0; i < methInfo->locals.numArgs; i++)
18464     {
18465         bool      isPinned;
18466         var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned);
18467
18468         lclVarInfo[i + argCnt].lclHasLdlocaOp = false;
18469         lclVarInfo[i + argCnt].lclIsPinned    = isPinned;
18470         lclVarInfo[i + argCnt].lclTypeInfo    = type;
18471
18472         if (varTypeIsGC(type))
18473         {
18474             pInlineInfo->numberOfGcRefLocals++;
18475         }
18476
18477         if (isPinned)
18478         {
18479             // Pinned locals may cause inlines to fail.
18480             inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS);
18481             if (inlineResult->IsFailure())
18482             {
18483                 return;
18484             }
18485         }
18486
18487         lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig);
18488
18489         // If this local is a struct type with GC fields, inform the inliner. It may choose to bail
18490         // out on the inline.
18491         if (type == TYP_STRUCT)
18492         {
18493             CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle();
18494             DWORD                typeFlags = info.compCompHnd->getClassAttribs(lclHandle);
18495             if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
18496             {
18497                 inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
18498                 if (inlineResult->IsFailure())
18499                 {
18500                     return;
18501                 }
18502
18503                 // Do further notification in the case where the call site is rare; some policies do
18504                 // not track the relative hotness of call sites for "always" inline cases.
18505                 if (pInlineInfo->iciBlock->isRunRarely())
18506                 {
18507                     inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
18508                     if (inlineResult->IsFailure())
18509                     {
18510
18511                         return;
18512                     }
18513                 }
18514             }
18515         }
18516
18517         localsSig = info.compCompHnd->getArgNext(localsSig);
18518
18519 #ifdef FEATURE_SIMD
18520         if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo)))
18521         {
18522             foundSIMDType = true;
18523             if (featureSIMD && type == TYP_STRUCT)
18524             {
18525                 var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle());
18526                 lclVarInfo[i + argCnt].lclTypeInfo = structType;
18527             }
18528         }
18529 #endif // FEATURE_SIMD
18530     }
18531
18532 #ifdef FEATURE_SIMD
18533     if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDorHWSIMDClass(call->AsCall()->gtRetClsHnd))
18534     {
18535         foundSIMDType = true;
18536     }
18537     pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType;
18538 #endif // FEATURE_SIMD
18539 }
18540
18541 //------------------------------------------------------------------------
18542 // impInlineFetchLocal: get a local var that represents an inlinee local
18543 //
18544 // Arguments:
18545 //    lclNum -- number of the inlinee local
18546 //    reason -- debug string describing purpose of the local var
18547 //
18548 // Returns:
18549 //    Number of the local to use
18550 //
18551 // Notes:
18552 //    This method is invoked only for locals actually used in the
18553 //    inlinee body.
18554 //
18555 //    Allocates a new temp if necessary, and copies key properties
18556 //    over from the inlinee local var info.
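//
//    For illustration only (hypothetical): the first time the inlinee's IL local
//    V_2 (say an 'int' local) is referenced, a fresh caller-side temp is grabbed
//    via lvaGrabTemp and cached in impInlineInfo->lclTmpNum[2]; later references
//    to V_2 simply reuse that temp number.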
18557
18558 unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason))
18559 {
18560     assert(compIsForInlining());
18561
18562     unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum];
18563
18564     if (tmpNum == BAD_VAR_NUM)
18565     {
18566         const InlLclVarInfo& inlineeLocal = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt];
18567         const var_types      lclTyp       = inlineeLocal.lclTypeInfo;
18568
18569         // The lifetime of this local might span multiple BBs.
18570         // So it is a long lifetime local.
18571         impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason));
18572
18573         // Copy over key info
18574         lvaTable[tmpNum].lvType                 = lclTyp;
18575         lvaTable[tmpNum].lvHasLdAddrOp          = inlineeLocal.lclHasLdlocaOp;
18576         lvaTable[tmpNum].lvPinned               = inlineeLocal.lclIsPinned;
18577         lvaTable[tmpNum].lvHasILStoreOp         = inlineeLocal.lclHasStlocOp;
18578         lvaTable[tmpNum].lvHasMultipleILStoreOp = inlineeLocal.lclHasMultipleStlocOp;
18579
18580         // Copy over class handle for ref types. Note this may be a
18581         // shared type -- someday perhaps we can get the exact
18582         // signature and pass in a more precise type.
18583         if (lclTyp == TYP_REF)
18584         {
18585             lvaSetClass(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandleForObjRef());
18586         }
18587
18588         if (inlineeLocal.lclVerTypeInfo.IsStruct())
18589         {
18590             if (varTypeIsStruct(lclTyp))
18591             {
18592                 lvaSetStruct(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */);
18593             }
18594             else
18595             {
18596                 // This is a wrapped primitive.  Make sure the verstate knows that
18597                 lvaTable[tmpNum].lvVerTypeInfo = inlineeLocal.lclVerTypeInfo;
18598             }
18599         }
18600
18601 #ifdef DEBUG
18602         // Sanity check that we're properly prepared for gc ref locals.
18603         if (varTypeIsGC(lclTyp))
18604         {
18605             // Since there are gc locals we should have seen them earlier
18606             // and if there was a return value, set up the spill temp.
18607             assert(impInlineInfo->HasGcRefLocals());
18608             assert((info.compRetNativeType == TYP_VOID) || fgNeedReturnSpillTemp());
18609         }
18610         else
18611         {
18612             // Make sure all pinned locals count as gc refs.
18613             assert(!inlineeLocal.lclIsPinned);
18614         }
18615 #endif // DEBUG
18616     }
18617
18618     return tmpNum;
18619 }
18620
18621 //------------------------------------------------------------------------
18622 // impInlineFetchArg: return tree node for argument value in an inlinee
18623 //
18624 // Arguments:
18625 //    lclNum -- argument number in inlinee IL
18626 //    inlArgInfo -- argument info for inlinee
18627 //    lclVarInfo -- var info for inlinee
18628 //
18629 // Returns:
18630 //    Tree for the argument's value. Often an inlinee-scoped temp
18631 //    GT_LCL_VAR but can be other tree kinds, if the argument
18632 //    expression from the caller can be directly substituted into the
18633 //    inlinee body.
18634 //
18635 // Notes:
18636 //    Must be used only for arguments -- use impInlineFetchLocal for
18637 //    inlinee locals.
18638 //
18639 //    Direct substitution is performed when the formal argument cannot
18640 //    change value in the inlinee body (no starg or ldarga), and the
18641 //    actual argument expression's value cannot be changed if it is
18642 //    substituted into the inlinee body.
18643 //
18644 //    Even if an inlinee-scoped temp is returned here, it may later be
18645 //    "bashed" to a caller-supplied tree when arguments are actually
18646 //    passed (see fgInlinePrependStatements). Bashing can happen if
18647 //    the argument ends up being single use and other conditions are
18648 //    met. So the contents of the tree returned here may not end up
18649 //    being the ones ultimately used for the argument.
18650 //
18651 //    This method will side effect inlArgInfo. It should only be called
18652 //    for actual uses of the argument in the inlinee.
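//
//    For illustration only (hypothetical callee 'void Use(int x)'):
//      - Use(5)         constant arg: each use clones and substitutes the constant
//      - Use(lcl)       unaliased, unmodified caller local: referenced directly
//      - Use(F() + 1)   complex or side-effecting arg: evaluated into a temp first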
18653
18654 GenTree* Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo)
18655 {
18656     // Cache the relevant arg and lcl info for this argument.
18657     // We will modify argInfo but not lclVarInfo.
18658     InlArgInfo&          argInfo          = inlArgInfo[lclNum];
18659     const InlLclVarInfo& lclInfo          = lclVarInfo[lclNum];
18660     const bool           argCanBeModified = argInfo.argHasLdargaOp || argInfo.argHasStargOp;
18661     const var_types      lclTyp           = lclInfo.lclTypeInfo;
18662     GenTree*             op1              = nullptr;
18663
18664     if (argInfo.argIsInvariant && !argCanBeModified)
18665     {
18666         // Directly substitute constants or addresses of locals
18667         //
18668         // Clone the constant. Note that we cannot directly use
18669         // argNode in the trees even if !argInfo.argIsUsed as this
18670         // would introduce aliasing between inlArgInfo[].argNode and
18671         // impInlineExpr. Then gtFoldExpr() could change it, causing
18672         // further references to the argument working off of the
18673         // bashed copy.
18674         op1 = gtCloneExpr(argInfo.argNode);
18675         PREFIX_ASSUME(op1 != nullptr);
18676         argInfo.argTmpNum = BAD_VAR_NUM;
18677
18678         // We may need to retype to ensure we match the callee's view of the type.
18679         // Otherwise callee pass-throughs of arguments can create return type
18680         // mismatches that block inlining.
18681         //
18682         // Note argument type mismatches that prevent inlining should
18683         // have been caught in impInlineInitVars.
18684         if (op1->TypeGet() != lclTyp)
18685         {
18686             op1->gtType = genActualType(lclTyp);
18687         }
18688     }
18689     else if (argInfo.argIsLclVar && !argCanBeModified && !argInfo.argHasCallerLocalRef)
18690     {
18691         // Directly substitute unaliased caller locals for args that cannot be modified
18692         //
18693         // Use the caller-supplied node if this is the first use.
18694         op1               = argInfo.argNode;
18695         argInfo.argTmpNum = op1->gtLclVarCommon.gtLclNum;
18696
18697         // Use an equivalent copy if this is the second or subsequent
18698         // use, or if we need to retype.
18699         //
18700         // Note argument type mismatches that prevent inlining should
18701         // have been caught in impInlineInitVars.
18702         if (argInfo.argIsUsed || (op1->TypeGet() != lclTyp))
18703         {
18704             assert(op1->gtOper == GT_LCL_VAR);
18705             assert(lclNum == op1->gtLclVar.gtLclILoffs);
18706
18707             var_types newTyp = lclTyp;
18708
18709             if (!lvaTable[op1->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad())
18710             {
18711                 newTyp = genActualType(lclTyp);
18712             }
18713
18714             // Create a new lcl var node - remember the argument lclNum
18715             op1 = gtNewLclvNode(op1->gtLclVarCommon.gtLclNum, newTyp, op1->gtLclVar.gtLclILoffs);
18716         }
18717     }
18718     else if (argInfo.argIsByRefToStructLocal && !argInfo.argHasStargOp)
18719     {
18720         /* Argument is a by-ref address to a struct, a normed struct, or its field.
18721            In these cases, don't spill the byref to a local, simply clone the tree and use it.
18722            This way we will increase the chance for this byref to be optimized away by
18723            a subsequent "dereference" operation.
18724
18725            From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree
18726            (in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal.
18727            For example, if the caller is:
18728                 ldloca.s   V_1  // V_1 is a local struct
18729                 call       void Test.ILPart::RunLdargaOnPointerArg(int32*)
18730            and the callee being inlined has:
18731                 .method public static void  RunLdargaOnPointerArg(int32* ptrToInts) cil managed
18732                     ldarga.s   ptrToInts
18733                     call       void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**)
18734            then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll
18735            soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR.
18736         */
18737         assert(argInfo.argNode->TypeGet() == TYP_BYREF || argInfo.argNode->TypeGet() == TYP_I_IMPL);
18738         op1 = gtCloneExpr(argInfo.argNode);
18739     }
18740     else
18741     {
18742         /* Argument is a complex expression - it must be evaluated into a temp */
18743
18744         if (argInfo.argHasTmp)
18745         {
18746             assert(argInfo.argIsUsed);
18747             assert(argInfo.argTmpNum < lvaCount);
18748
18749             /* Create a new lcl var node - remember the argument lclNum */
18750             op1 = gtNewLclvNode(argInfo.argTmpNum, genActualType(lclTyp));
18751
18752             /* This is the second or later use of this argument,
18753             so we have to use the temp (instead of the actual arg) */
18754             argInfo.argBashTmpNode = nullptr;
18755         }
18756         else
18757         {
18758             /* First time use */
18759             assert(!argInfo.argIsUsed);
18760
18761             /* Reserve a temp for the expression.
18762             * Use a large size node as we may change it later */
18763
18764             const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg"));
18765
18766             lvaTable[tmpNum].lvType = lclTyp;
18767
18768             // For ref types, determine the type of the temp.
18769             if (lclTyp == TYP_REF)
18770             {
18771                 if (!argCanBeModified)
18772                 {
18773                     // If the arg can't be modified in the method
18774                     // body, use the type of the value, if
18775                     // known. Otherwise, use the declared type.
18776                     lvaSetClass(tmpNum, argInfo.argNode, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef());
18777                 }
18778                 else
18779                 {
18780                     // Arg might be modified, use the declared type of
18781                     // the argument.
18782                     lvaSetClass(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef());
18783                 }
18784             }
18785
18786             assert(lvaTable[tmpNum].lvAddrExposed == 0);
18787             if (argInfo.argHasLdargaOp)
18788             {
18789                 lvaTable[tmpNum].lvHasLdAddrOp = 1;
18790             }
18791
18792             if (lclInfo.lclVerTypeInfo.IsStruct())
18793             {
18794                 if (varTypeIsStruct(lclTyp))
18795                 {
18796                     lvaSetStruct(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */);
18797                 }
18798                 else
18799                 {
18800                     // This is a wrapped primitive.  Make sure the verstate knows that
18801                     lvaTable[tmpNum].lvVerTypeInfo = lclInfo.lclVerTypeInfo;
18802                 }
18803             }
18804
18805             argInfo.argHasTmp = true;
18806             argInfo.argTmpNum = tmpNum;
18807
18808             // If we require strict exception order, then arguments must
18809             // be evaluated in sequence before the body of the inlined method.
18810             // So we need to evaluate them to a temp.
18811             // Also, if arguments have global or local references, we need to
18812             // evaluate them to a temp before the inlined body as the
18813             // inlined body may be modifying the global ref.
18814             // TODO-1stClassStructs: We currently do not reuse an existing lclVar
18815             // if it is a struct, because it requires some additional handling.
18816
18817             if (!varTypeIsStruct(lclTyp) && !argInfo.argHasSideEff && !argInfo.argHasGlobRef &&
18818                 !argInfo.argHasCallerLocalRef)
18819             {
18820                 /* Get a *LARGE* LCL_VAR node */
18821                 op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp), lclNum);
18822
18823                 /* Record op1 as the very first use of this argument.
18824                 If there are no further uses of the arg, we may be
18825                 able to use the actual arg node instead of the temp.
18826                 If we do see any further uses, we will clear this. */
18827                 argInfo.argBashTmpNode = op1;
18828             }
18829             else
18830             {
18831                 /* Get a small LCL_VAR node */
18832                 op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp));
18833                 /* No bashing of this argument */
18834                 argInfo.argBashTmpNode = nullptr;
18835             }
18836         }
18837     }
18838
18839     // Mark this argument as used.
18840     argInfo.argIsUsed = true;
18841
18842     return op1;
18843 }
18844
18845 /******************************************************************************
18846  Is this the original "this" argument to the call being inlined?
18847
18848  Note that we do not inline methods with "starg 0", and so we do not need to
18849  worry about it.
18850 */
18851
18852 BOOL Compiler::impInlineIsThis(GenTree* tree, InlArgInfo* inlArgInfo)
18853 {
18854     assert(compIsForInlining());
18855     return (tree->gtOper == GT_LCL_VAR && tree->gtLclVarCommon.gtLclNum == inlArgInfo[0].argTmpNum);
18856 }
18857
18858 //-----------------------------------------------------------------------------
18859 // This function checks if a dereference in the inlinee can guarantee that
18860 // the "this" is non-NULL.
18861 // If we haven't hit a branch or a side effect, and we are dereferencing
18862 // from 'this' to access a field or make a GTF_CALL_NULLCHECK call,
18863 // then we can avoid a separate null pointer check.
18864 //
18865 // "additionalTreesToBeEvaluatedBefore"
18866 // is the set of pending trees that have not yet been added to the statement list,
18867 // and which have been removed from verCurrentState.esStack[]
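//
// For illustration only (hypothetical): when inlining 'int Get() { return m_value; }',
// the field load off 'this' happens in the first block before any branch or side
// effect, so that access is guaranteed to fault on a null 'this' and the inliner
// can omit a separate explicit null check.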
18868
18869 BOOL Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTree*    additionalTreesToBeEvaluatedBefore,
18870                                                                   GenTree*    variableBeingDereferenced,
18871                                                                   InlArgInfo* inlArgInfo)
18872 {
18873     assert(compIsForInlining());
18874     assert(opts.OptEnabled(CLFLG_INLINING));
18875
18876     BasicBlock* block = compCurBB;
18877
18878     GenTree* stmt;
18879     GenTree* expr;
18880
18881     if (block != fgFirstBB)
18882     {
18883         return FALSE;
18884     }
18885
18886     if (!impInlineIsThis(variableBeingDereferenced, inlArgInfo))
18887     {
18888         return FALSE;
18889     }
18890
18891     if (additionalTreesToBeEvaluatedBefore &&
18892         GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTreesToBeEvaluatedBefore->gtFlags))
18893     {
18894         return FALSE;
18895     }
18896
18897     for (stmt = impTreeList->gtNext; stmt; stmt = stmt->gtNext)
18898     {
18899         expr = stmt->gtStmt.gtStmtExpr;
18900
18901         if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags))
18902         {
18903             return FALSE;
18904         }
18905     }
18906
18907     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
18908     {
18909         unsigned stackTreeFlags = verCurrentState.esStack[level].val->gtFlags;
18910         if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags))
18911         {
18912             return FALSE;
18913         }
18914     }
18915
18916     return TRUE;
18917 }
18918
18919 //------------------------------------------------------------------------
18920 // impMarkInlineCandidate: determine if this call can be subsequently inlined
18921 //
18922 // Arguments:
18923 //    callNode -- call under scrutiny
18924 //    exactContextHnd -- context handle for inlining
18925 //    exactContextNeedsRuntimeLookup -- true if context required runtime lookup
18926 //    callInfo -- call info from VM
18927 //
18928 // Notes:
18929 //    If callNode is an inline candidate, this method sets the flag
18930 //    GTF_CALL_INLINE_CANDIDATE, and ensures that helper methods have
18931 //    filled in the associated InlineCandidateInfo.
18932 //
18933 //    If callNode is not an inline candidate, and the reason is
18934 //    something that is inherent to the method being called, the
18935 //    method may be marked as "noinline" to short-circuit any
18936 //    future assessments of calls to this method.
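//
//    For illustration only (hypothetical): a small direct call such as
//    'return Helper(a, b);' that passes the checks below is flagged with
//    GTF_CALL_INLINE_CANDIDATE and is expanded (or finally rejected) by the later
//    inlining phase, whereas a call to a synchronized method fails here with
//    CALLEE_IS_SYNCHRONIZED and is never marked as a candidate.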
18937
18938 void Compiler::impMarkInlineCandidate(GenTree*               callNode,
18939                                       CORINFO_CONTEXT_HANDLE exactContextHnd,
18940                                       bool                   exactContextNeedsRuntimeLookup,
18941                                       CORINFO_CALL_INFO*     callInfo)
18942 {
18943     // Let the strategy know there's another call
18944     impInlineRoot()->m_inlineStrategy->NoteCall();
18945
18946     if (!opts.OptEnabled(CLFLG_INLINING))
18947     {
18948         /* XXX Mon 8/18/2008
18949          * This assert is misleading.  The caller does not ensure that we have CLFLG_INLINING set before
18950          * calling impMarkInlineCandidate.  However, if this assert trips it means that we're an inlinee and
18951          * CLFLG_MINOPT is set.  That doesn't make a lot of sense.  If you hit this assert, work back and
18952          * figure out why we did not set MAXOPT for this compile.
18953          */
18954         assert(!compIsForInlining());
18955         return;
18956     }
18957
18958     if (compIsForImportOnly())
18959     {
18960         // Don't bother creating the inline candidate during verification.
18961         // Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification
18962         // that leads to the creation of multiple instances of Compiler.
18963         return;
18964     }
18965
18966     GenTreeCall* call = callNode->AsCall();
18967     InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate");
18968
18969     // Don't inline if not optimizing root method
18970     if (opts.compDbgCode)
18971     {
18972         inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN);
18973         return;
18974     }
18975
18976     // Don't inline if inlining into root method is disabled.
18977     if (InlineStrategy::IsNoInline(info.compCompHnd, info.compMethodHnd))
18978     {
18979         inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE);
18980         return;
18981     }
18982
18983     // Inlining candidate determination needs to honor only IL tail prefix.
18984     // Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive).
18985     if (call->IsTailPrefixedCall())
18986     {
18987         inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX);
18988         return;
18989     }
18990
18991     // Tail recursion elimination takes precedence over inlining.
18992     // TODO: We may want to do some of the additional checks from fgMorphCall
18993     // here to reduce the chance we don't inline a call that won't be optimized
18994     // as a fast tail call or turned into a loop.
18995     if (gtIsRecursiveCall(call) && call->IsImplicitTailCall())
18996     {
18997         inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL);
18998         return;
18999     }
19000
19001     if (call->IsVirtual())
19002     {
19003         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT);
19004         return;
19005     }
19006
19007     /* Ignore helper calls */
19008
19009     if (call->gtCallType == CT_HELPER)
19010     {
19011         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER);
19012         return;
19013     }
19014
19015     /* Ignore indirect calls */
19016     if (call->gtCallType == CT_INDIRECT)
19017     {
19018         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED);
19019         return;
19020     }
19021
19022     /* I removed the check for BBJ_THROW.  BBJ_THROW is usually marked as rarely run.  This more or less
19023      * restricts the inliner to non-expanding inlines.  I removed the check to allow for non-expanding
19024      * inlining in throw blocks.  I should consider the same thing for catch and filter regions. */
19025
19026     CORINFO_METHOD_HANDLE fncHandle = call->gtCallMethHnd;
19027     unsigned              methAttr;
19028
19029     // Reuse method flags from the original callInfo if possible
19030     if (fncHandle == callInfo->hMethod)
19031     {
19032         methAttr = callInfo->methodFlags;
19033     }
19034     else
19035     {
19036         methAttr = info.compCompHnd->getMethodAttribs(fncHandle);
19037     }
19038
19039 #ifdef DEBUG
19040     if (compStressCompile(STRESS_FORCE_INLINE, 0))
19041     {
19042         methAttr |= CORINFO_FLG_FORCEINLINE;
19043     }
19044 #endif
19045
19046     // Check for COMPlus_AggressiveInlining
19047     if (compDoAggressiveInlining)
19048     {
19049         methAttr |= CORINFO_FLG_FORCEINLINE;
19050     }
19051
19052     if (!(methAttr & CORINFO_FLG_FORCEINLINE))
19053     {
19054         /* Don't bother inlining blocks that are in the catch handler or filter regions */
19055         if (bbInCatchHandlerILRange(compCurBB))
19056         {
19057 #ifdef DEBUG
19058             if (verbose)
19059             {
19060                 printf("\nWill not inline blocks that are in the catch handler region\n");
19061             }
19062
19063 #endif
19064
19065             inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH);
19066             return;
19067         }
19068
19069         if (bbInFilterILRange(compCurBB))
19070         {
19071 #ifdef DEBUG
19072             if (verbose)
19073             {
19074                 printf("\nWill not inline blocks that are in the filter region\n");
19075             }
19076 #endif
19077
19078             inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER);
19079             return;
19080         }
19081     }
19082
19083     /* If the caller's stack frame is marked, then we can't do any inlining. Period. */
19084
19085     if (opts.compNeedSecurityCheck)
19086     {
19087         inlineResult.NoteFatal(InlineObservation::CALLER_NEEDS_SECURITY_CHECK);
19088         return;
19089     }
19090
19091     /* Check if we tried to inline this method before */
19092
19093     if (methAttr & CORINFO_FLG_DONT_INLINE)
19094     {
19095         inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE);
19096         return;
19097     }
19098
19099     /* Cannot inline synchronized methods */
19100
19101     if (methAttr & CORINFO_FLG_SYNCH)
19102     {
19103         inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED);
19104         return;
19105     }
19106
19107     /* Do not inline if callee needs security checks (since they would then mark the wrong frame) */
19108
19109     if (methAttr & CORINFO_FLG_SECURITYCHECK)
19110     {
19111         inlineResult.NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
19112         return;
19113     }
19114
19115     InlineCandidateInfo* inlineCandidateInfo = nullptr;
19116     impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult);
19117
19118     if (inlineResult.IsFailure())
19119     {
19120         return;
19121     }
19122
19123     // The old value should be NULL
19124     assert(call->gtInlineCandidateInfo == nullptr);
19125
19126     // The new value should not be NULL.
19127     assert(inlineCandidateInfo != nullptr);
19128     inlineCandidateInfo->exactContextNeedsRuntimeLookup = exactContextNeedsRuntimeLookup;
19129
19130     call->gtInlineCandidateInfo = inlineCandidateInfo;
19131
19132     // Mark the call node as inline candidate.
19133     call->gtFlags |= GTF_CALL_INLINE_CANDIDATE;
19134
19135     // Let the strategy know there's another candidate.
19136     impInlineRoot()->m_inlineStrategy->NoteCandidate();
19137
19138     // Since we're not actually inlining yet, and this call site is
19139     // still just an inline candidate, there's nothing to report.
19140     inlineResult.SetReported();
19141 }
19142
19143 /******************************************************************************/
19144 // Returns true if the given intrinsic will be implemented by target-specific
19145 // instructions
19146
19147 bool Compiler::IsTargetIntrinsic(CorInfoIntrinsics intrinsicId)
19148 {
19149 #if defined(_TARGET_AMD64_) || (defined(_TARGET_X86_) && !defined(LEGACY_BACKEND))
19150     switch (intrinsicId)
19151     {
19152         // AMD64/x86 has SSE2 instructions to directly compute sqrt/abs and SSE4.1
19153         // instructions to directly compute round/ceiling/floor.
19154         //
19155         // TODO: Because the x86 backend only targets SSE for floating-point code,
19156         //       it does not treat Sine, Cosine, or Round as intrinsics (JIT32
19157         //       implemented those intrinsics as x87 instructions). If this poses
19158         //       a CQ problem, it may be necessary to change the implementation of
19159         //       the helper calls to decrease call overhead or switch back to the
19160         //       x87 instructions. This is tracked by #7097.
19161         case CORINFO_INTRINSIC_Sqrt:
19162         case CORINFO_INTRINSIC_Abs:
19163             return true;
19164
19165         case CORINFO_INTRINSIC_Round:
19166         case CORINFO_INTRINSIC_Ceiling:
19167         case CORINFO_INTRINSIC_Floor:
19168             return compSupports(InstructionSet_SSE41);
19169
19170         default:
19171             return false;
19172     }
19173 #elif defined(_TARGET_ARM64_)
19174     switch (intrinsicId)
19175     {
19176         case CORINFO_INTRINSIC_Sqrt:
19177         case CORINFO_INTRINSIC_Abs:
19178         case CORINFO_INTRINSIC_Round:
19179         case CORINFO_INTRINSIC_Floor:
19180         case CORINFO_INTRINSIC_Ceiling:
19181             return true;
19182
19183         default:
19184             return false;
19185     }
19186 #elif defined(_TARGET_ARM_)
19187     switch (intrinsicId)
19188     {
19189         case CORINFO_INTRINSIC_Sqrt:
19190         case CORINFO_INTRINSIC_Abs:
19191         case CORINFO_INTRINSIC_Round:
19192             return true;
19193
19194         default:
19195             return false;
19196     }
19197 #elif defined(_TARGET_X86_)
19198     switch (intrinsicId)
19199     {
19200         case CORINFO_INTRINSIC_Sin:
19201         case CORINFO_INTRINSIC_Cos:
19202         case CORINFO_INTRINSIC_Sqrt:
19203         case CORINFO_INTRINSIC_Abs:
19204         case CORINFO_INTRINSIC_Round:
19205             return true;
19206
19207         default:
19208             return false;
19209     }
19210 #else
19211     // TODO: This portion of logic is not implemented for other architectures.
19212     // The reason for returning true is that on all other architectures the only
19213     // intrinsics enabled are target intrinsics.
19214     return true;
19215 #endif //_TARGET_AMD64_
19216 }
19217
19218 /******************************************************************************/
19219 // Returns true if the given intrinsic will be implemented by calling System.Math
19220 // methods.
19221
19222 bool Compiler::IsIntrinsicImplementedByUserCall(CorInfoIntrinsics intrinsicId)
19223 {
19224     // Currently, if a math intrinsic is not implemented by target-specific
19225     // instructions, it will be implemented by a System.Math call. In the
19226     // future, if we turn to implementing some of them with helper calls,
19227     // this predicate needs to be revisited.
19228     return !IsTargetIntrinsic(intrinsicId);
19229 }
19230
19231 bool Compiler::IsMathIntrinsic(CorInfoIntrinsics intrinsicId)
19232 {
19233     switch (intrinsicId)
19234     {
19235         case CORINFO_INTRINSIC_Sin:
19236         case CORINFO_INTRINSIC_Cbrt:
19237         case CORINFO_INTRINSIC_Sqrt:
19238         case CORINFO_INTRINSIC_Abs:
19239         case CORINFO_INTRINSIC_Cos:
19240         case CORINFO_INTRINSIC_Round:
19241         case CORINFO_INTRINSIC_Cosh:
19242         case CORINFO_INTRINSIC_Sinh:
19243         case CORINFO_INTRINSIC_Tan:
19244         case CORINFO_INTRINSIC_Tanh:
19245         case CORINFO_INTRINSIC_Asin:
19246         case CORINFO_INTRINSIC_Asinh:
19247         case CORINFO_INTRINSIC_Acos:
19248         case CORINFO_INTRINSIC_Acosh:
19249         case CORINFO_INTRINSIC_Atan:
19250         case CORINFO_INTRINSIC_Atan2:
19251         case CORINFO_INTRINSIC_Atanh:
19252         case CORINFO_INTRINSIC_Log10:
19253         case CORINFO_INTRINSIC_Pow:
19254         case CORINFO_INTRINSIC_Exp:
19255         case CORINFO_INTRINSIC_Ceiling:
19256         case CORINFO_INTRINSIC_Floor:
19257             return true;
19258         default:
19259             return false;
19260     }
19261 }
19262
19263 bool Compiler::IsMathIntrinsic(GenTree* tree)
19264 {
19265     return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->gtIntrinsic.gtIntrinsicId);
19266 }
19267
19268 //------------------------------------------------------------------------
19269 // impDevirtualizeCall: Attempt to change a virtual vtable call into a
19270 //   normal call
19271 //
19272 // Arguments:
19273 //     call -- the call node to examine/modify
19274 //     method   -- [IN/OUT] the method handle for call. Updated iff call devirtualized.
19275 //     methodFlags -- [IN/OUT] flags for the method to call. Updated iff call devirtualized.
19276 //     contextHandle -- [IN/OUT] context handle for the call. Updated iff call devirtualized.
19277 //     exactContextHnd -- [OUT] updated context handle iff call devirtualized
19278 //
19279 // Notes:
19280 //     Virtual calls in IL will always "invoke" the base class method.
19281 //
19282 //     This transformation looks for evidence that the type of 'this'
19283 //     in the call is exactly known, is a final class or would invoke
19284 //     a final method, and if that and other safety checks pan out,
19285 //     modifies the call and the call info to create a direct call.
19286 //
19287 //     This transformation is initially done in the importer and not
19288 //     in some subsequent optimization pass because we want it to be
19289 //     upstream of inline candidate identification.
19290 //
19291 //     However, later phases may supply improved type information that
19292 //     can enable further devirtualization. We currently reinvoke this
19293 //     code after inlining, if the return value of the inlined call is
19294 //     the 'this obj' of a subsequent virtual call.
19295 //
19296 //     If devirtualization succeeds and the call's this object is the
19297 //     result of a box, the jit will ask the EE for the unboxed entry
19298 //     point. If this exists, the jit will see if it can rework the box
19299 //     to instead make a local copy. If that is doable, the call is
19300 //     updated to invoke the unboxed entry on the local copy.
19301 //
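//
//     For illustration only (hypothetical C# source, not from this codebase):
//
//         sealed class Widget : Base { public override int Size() { return 4; } }
//         Base b = new Widget();
//         int s = b.Size();          // IL: callvirt Base::Size()
//
//     Because the type of 'this' is known exactly (and Widget is final), the
//     callvirt can be rewritten as a direct call to Widget::Size(), which also
//     makes it eligible for inlining.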
19302 void Compiler::impDevirtualizeCall(GenTreeCall*            call,
19303                                    CORINFO_METHOD_HANDLE*  method,
19304                                    unsigned*               methodFlags,
19305                                    CORINFO_CONTEXT_HANDLE* contextHandle,
19306                                    CORINFO_CONTEXT_HANDLE* exactContextHandle)
19307 {
19308     assert(call != nullptr);
19309     assert(method != nullptr);
19310     assert(methodFlags != nullptr);
19311     assert(contextHandle != nullptr);
19312
19313     // This should be a virtual vtable or virtual stub call.
19314     assert(call->IsVirtual());
19315
19316     // Bail if not optimizing
19317     if (opts.MinOpts())
19318     {
19319         return;
19320     }
19321
19322     // Bail if debuggable codegen
19323     if (opts.compDbgCode)
19324     {
19325         return;
19326     }
19327
19328 #if defined(DEBUG)
19329     // Bail if devirt is disabled.
19330     if (JitConfig.JitEnableDevirtualization() == 0)
19331     {
19332         return;
19333     }
19334
19335     const bool doPrint = JitConfig.JitPrintDevirtualizedMethods() == 1;
19336 #endif // DEBUG
19337
19338     // Fetch information about the virtual method we're calling.
19339     CORINFO_METHOD_HANDLE baseMethod        = *method;
19340     unsigned              baseMethodAttribs = *methodFlags;
19341
19342     if (baseMethodAttribs == 0)
19343     {
19344         // For late devirt we may not have method attributes, so fetch them.
19345         baseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod);
19346     }
19347     else
19348     {
19349 #if defined(DEBUG)
19350         // Validate that callInfo has up to date method flags
19351         const DWORD freshBaseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod);
19352
19353         // All the base method attributes should agree, save that
19354         // CORINFO_FLG_DONT_INLINE may have changed from 0 to 1
19355         // because of concurrent jitting activity.
19356         //
19357         // Note we don't look at this particular flag bit below, and
19358         // later on (if we do try and inline) we will rediscover why
19359         // the method can't be inlined, so there's no danger here in
19360         // seeing this particular flag bit in different states between
19361         // the cached and fresh values.
19362         if ((freshBaseMethodAttribs & ~CORINFO_FLG_DONT_INLINE) != (baseMethodAttribs & ~CORINFO_FLG_DONT_INLINE))
19363         {
19364             assert(!"mismatched method attributes");
19365         }
19366 #endif // DEBUG
19367     }
19368
19369     // In R2R mode, we might see virtual stub calls to
19370     // non-virtuals. For instance cases where the non-virtual method
19371     // is in a different assembly but is called via CALLVIRT. For
19372     // version resilience we must allow for the fact that the method
19373     // might become virtual in some update.
19374     //
19375     // In non-R2R modes CALLVIRT <nonvirtual> will be turned into a
19376     // regular call+nullcheck upstream, so we won't reach this
19377     // point.
19378     if ((baseMethodAttribs & CORINFO_FLG_VIRTUAL) == 0)
19379     {
19380         assert(call->IsVirtualStub());
19381         assert(opts.IsReadyToRun());
19382         JITDUMP("\nimpDevirtualizeCall: [R2R] base method not virtual, sorry\n");
19383         return;
19384     }
19385
19386     // See what we know about the type of 'this' in the call.
19387     GenTree*             thisObj       = call->gtCallObjp->gtEffectiveVal(false);
19388     GenTree*             actualThisObj = nullptr;
19389     bool                 isExact       = false;
19390     bool                 objIsNonNull  = false;
19391     CORINFO_CLASS_HANDLE objClass      = gtGetClassHandle(thisObj, &isExact, &objIsNonNull);
19392
19393     // See if we have special knowledge that can get us a type or a better type.
19394     if ((objClass == nullptr) || !isExact)
19395     {
19396         actualThisObj = thisObj;
19397
19398         // Walk back through any return expression placeholders
19399         while (actualThisObj->OperGet() == GT_RET_EXPR)
19400         {
19401             actualThisObj = actualThisObj->gtRetExpr.gtInlineCandidate;
19402         }
19403
19404         // See if we landed on a call to a special intrinsic method
19405         if (actualThisObj->IsCall())
19406         {
19407             GenTreeCall* thisObjCall = actualThisObj->AsCall();
19408             if ((thisObjCall->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0)
19409             {
19410                 assert(thisObjCall->gtCallType == CT_USER_FUNC);
19411                 CORINFO_METHOD_HANDLE specialIntrinsicHandle = thisObjCall->gtCallMethHnd;
19412                 CORINFO_CLASS_HANDLE  specialObjClass = impGetSpecialIntrinsicExactReturnType(specialIntrinsicHandle);
19413                 if (specialObjClass != nullptr)
19414                 {
19415                     objClass     = specialObjClass;
19416                     isExact      = true;
19417                     objIsNonNull = true;
19418                 }
19419             }
19420         }
19421     }
19422
19423     // Bail if we know nothing.
19424     if (objClass == nullptr)
19425     {
19426         JITDUMP("\nimpDevirtualizeCall: no type available (op=%s)\n", GenTree::OpName(thisObj->OperGet()));
19427         return;
19428     }
19429
19430     // Fetch information about the class that introduced the virtual method.
19431     CORINFO_CLASS_HANDLE baseClass        = info.compCompHnd->getMethodClass(baseMethod);
19432     const DWORD          baseClassAttribs = info.compCompHnd->getClassAttribs(baseClass);
19433
19434 #if !defined(FEATURE_CORECLR)
19435     // If base class is not beforefieldinit then devirtualizing may
19436     // cause us to miss a base class init trigger. Spec says we don't
19437     // need a trigger for ref class callvirts but desktop seems to
19438     // have one anyway. So defer.
19439     if ((baseClassAttribs & CORINFO_FLG_BEFOREFIELDINIT) == 0)
19440     {
19441         JITDUMP("\nimpDevirtualizeCall: base class has precise initialization, sorry\n");
19442         return;
19443     }
19444 #endif // FEATURE_CORECLR
19445
19446     // Is the call an interface call?
19447     const bool isInterface = (baseClassAttribs & CORINFO_FLG_INTERFACE) != 0;
19448
19449     // If the objClass is sealed (final), then we may be able to devirtualize.
19450     const DWORD objClassAttribs = info.compCompHnd->getClassAttribs(objClass);
19451     const bool  objClassIsFinal = (objClassAttribs & CORINFO_FLG_FINAL) != 0;
19452
19453 #if defined(DEBUG)
19454     const char* callKind       = isInterface ? "interface" : "virtual";
19455     const char* objClassNote   = "[?]";
19456     const char* objClassName   = "?objClass";
19457     const char* baseClassName  = "?baseClass";
19458     const char* baseMethodName = "?baseMethod";
19459
19460     if (verbose || doPrint)
19461     {
19462         objClassNote   = isExact ? " [exact]" : objClassIsFinal ? " [final]" : "";
19463         objClassName   = info.compCompHnd->getClassName(objClass);
19464         baseClassName  = info.compCompHnd->getClassName(baseClass);
19465         baseMethodName = eeGetMethodName(baseMethod, nullptr);
19466
19467         if (verbose)
19468         {
19469             printf("\nimpDevirtualizeCall: Trying to devirtualize %s call:\n"
19470                    "    class for 'this' is %s%s (attrib %08x)\n"
19471                    "    base method is %s::%s\n",
19472                    callKind, objClassName, objClassNote, objClassAttribs, baseClassName, baseMethodName);
19473         }
19474     }
19475 #endif // defined(DEBUG)
19476
19477     // Bail if obj class is an interface.
19478     // See for instance System.ValueTuple`8::GetHashCode, where lcl 0 is System.IValueTupleInternal
19479     //   IL_021d:  ldloc.0
19480     //   IL_021e:  callvirt   instance int32 System.Object::GetHashCode()
19481     if ((objClassAttribs & CORINFO_FLG_INTERFACE) != 0)
19482     {
19483         JITDUMP("--- obj class is interface, sorry\n");
19484         return;
19485     }
19486
19487     if (isInterface)
19488     {
19489         assert(call->IsVirtualStub());
19490         JITDUMP("--- base class is interface\n");
19491     }
19492
19493     // Fetch the method that would be called based on the declared type of 'this'
19494     CORINFO_CONTEXT_HANDLE ownerType     = *contextHandle;
19495     CORINFO_METHOD_HANDLE  derivedMethod = info.compCompHnd->resolveVirtualMethod(baseMethod, objClass, ownerType);
19496
19497     // If we failed to get a handle, we can't devirtualize.  This can
19498     // happen when prejitting, if the devirtualization crosses
19499     // servicing bubble boundaries.
19500     if (derivedMethod == nullptr)
19501     {
19502         JITDUMP("--- no derived method, sorry\n");
19503         return;
19504     }
19505
19506     // Fetch method attributes to see if method is marked final.
19507     DWORD      derivedMethodAttribs = info.compCompHnd->getMethodAttribs(derivedMethod);
19508     const bool derivedMethodIsFinal = ((derivedMethodAttribs & CORINFO_FLG_FINAL) != 0);
19509
19510 #if defined(DEBUG)
19511     const char* derivedClassName  = "?derivedClass";
19512     const char* derivedMethodName = "?derivedMethod";
19513
19514     const char* note = "speculative";
19515     if (isExact)
19516     {
19517         note = "exact";
19518     }
19519     else if (objClassIsFinal)
19520     {
19521         note = "final class";
19522     }
19523     else if (derivedMethodIsFinal)
19524     {
19525         note = "final method";
19526     }
19527
19528     if (verbose || doPrint)
19529     {
19530         derivedMethodName = eeGetMethodName(derivedMethod, &derivedClassName);
19531         if (verbose)
19532         {
19533             printf("    devirt to %s::%s -- %s\n", derivedClassName, derivedMethodName, note);
19534             gtDispTree(call);
19535         }
19536     }
19537 #endif // defined(DEBUG)
19538
19539     if (!isExact && !objClassIsFinal && !derivedMethodIsFinal)
19540     {
19541         // Type is not exact, and neither the class nor the method is final.
19542         //
19543         // We could speculatively devirtualize, but there's no
19544         // reason to believe the derived method is the one that
19545         // is likely to be invoked.
19546         //
19547         // If there's currently no further overriding (that is, at
19548         // the time of jitting, objClass has no subclasses that
19549         // override this method), then perhaps we'd be willing to
19550         // make a bet...?
19551         JITDUMP("    Class not final or exact, method not final, no devirtualization\n");
19552         return;
19553     }
19554
19555     // For interface calls we must have an exact type or final class.
19556     if (isInterface && !isExact && !objClassIsFinal)
19557     {
19558         JITDUMP("    Class not final or exact for interface, no devirtualization\n");
19559         return;
19560     }
19561
19562     JITDUMP("    %s; can devirtualize\n", note);
19563
19564     // Make the updates.
19565     call->gtFlags &= ~GTF_CALL_VIRT_VTABLE;
19566     call->gtFlags &= ~GTF_CALL_VIRT_STUB;
19567     call->gtCallMethHnd = derivedMethod;
19568     call->gtCallType    = CT_USER_FUNC;
19569
19570     // Virtual calls include an implicit null check, which we may
19571     // now need to make explicit.
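          // (The vtable or stub dispatch sequence dereferences 'this' and so faults on a
          // null receiver; a direct call does not, hence the explicit check below unless
          // the receiver is already known to be non-null.)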
19572     if (!objIsNonNull)
19573     {
19574         call->gtFlags |= GTF_CALL_NULLCHECK;
19575     }
19576
19577     // Clear the inline candidate info (may be non-null since
19578     // it's a union field used for other things by virtual
19579     // stubs)
19580     call->gtInlineCandidateInfo = nullptr;
19581
19582 #if defined(DEBUG)
19583     if (verbose)
19584     {
19585         printf("... after devirt...\n");
19586         gtDispTree(call);
19587     }
19588
19589     if (doPrint)
19590     {
19591         printf("Devirtualized %s call to %s:%s; now direct call to %s:%s [%s]\n", callKind, baseClassName,
19592                baseMethodName, derivedClassName, derivedMethodName, note);
19593     }
19594 #endif // defined(DEBUG)
19595
19596     // If the 'this' object is a box, see if we can find the unboxed entry point for the call.
19597     if (thisObj->IsBoxedValue())
19598     {
19599         JITDUMP("Now have direct call to boxed entry point, looking for unboxed entry point\n");
19600
19601         // Note for some shared methods the unboxed entry point requires an extra parameter.
19602         bool                  requiresInstMethodTableArg = false;
19603         CORINFO_METHOD_HANDLE unboxedEntryMethod =
19604             info.compCompHnd->getUnboxedEntry(derivedMethod, &requiresInstMethodTableArg);
19605
19606         if (unboxedEntryMethod != nullptr)
19607         {
19608             // Since the call is the only consumer of the box, and the callee receives only
19609             // an interior pointer to it, we know the box can't escape.
19610             //
19611             // So, revise the box to simply create a local copy, use the address of that copy
19612             // as the this pointer, and update the entry point to the unboxed entry.
19613             //
19614             // Ideally, we then inline the boxed method and, if it turns out not to modify
19615             // the copy, we can undo the copy too.
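                  //
                  // Note: the unboxed entry typically needs this extra argument when the method's
                  // code is shared across generic instantiations; the exact type is then supplied
                  // via the method table handle passed here rather than read from the box.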
19616             if (requiresInstMethodTableArg)
19617             {
19618                 // Perform a trial box removal and ask for the type handle tree.
19619                 JITDUMP("Unboxed entry needs method table arg...\n");
19620                 GenTree* methodTableArg = gtTryRemoveBoxUpstreamEffects(thisObj, BR_DONT_REMOVE_WANT_TYPE_HANDLE);
19621
19622                 if (methodTableArg != nullptr)
19623                 {
19624                     // If that worked, turn the box into a copy to a local var
19625                     JITDUMP("Found suitable method table arg tree [%06u]\n", dspTreeID(methodTableArg));
19626                     GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY);
19627
19628                     if (localCopyThis != nullptr)
19629                     {
19630                         // Pass the local var as this and the type handle as a new arg
19631                         JITDUMP("Success! invoking unboxed entry point on local copy, and passing method table arg\n");
19632                         call->gtCallObjp = localCopyThis;
19633
19634                         // Prepend for R2L arg passing or empty L2R passing
19635                         if ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (call->gtCallArgs == nullptr))
19636                         {
19637                             call->gtCallArgs = gtNewListNode(methodTableArg, call->gtCallArgs);
19638                         }
19639                         // Append for non-empty L2R
19640                         else
19641                         {
19642                             GenTreeArgList* beforeArg = call->gtCallArgs;
19643                             while (beforeArg->Rest() != nullptr)
19644                             {
19645                                 beforeArg = beforeArg->Rest();
19646                             }
19647
19648                             beforeArg->Rest() = gtNewListNode(methodTableArg, nullptr);
19649                         }
19650
19651                         call->gtCallMethHnd = unboxedEntryMethod;
19652                         derivedMethod       = unboxedEntryMethod;
19653
19654                         // Method attributes will differ because unboxed entry point is shared
19655                         const DWORD unboxedMethodAttribs = info.compCompHnd->getMethodAttribs(unboxedEntryMethod);
19656                         JITDUMP("Updating method attribs from 0x%08x to 0x%08x\n", derivedMethodAttribs,
19657                                 unboxedMethodAttribs);
19658                         derivedMethodAttribs = unboxedMethodAttribs;
19659                     }
19660                     else
19661                     {
19662                         JITDUMP("Sorry, failed to undo the box -- can't convert to local copy\n");
19663                     }
19664                 }
19665                 else
19666                 {
19667                     JITDUMP("Sorry, failed to undo the box -- can't find method table arg\n");
19668                 }
19669             }
19670             else
19671             {
19672                 JITDUMP("Found unboxed entry point, trying to simplify box to a local copy\n");
19673                 GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY);
19674
19675                 if (localCopyThis != nullptr)
19676                 {
19677                     JITDUMP("Success! invoking unboxed entry point on local copy\n");
19678                     call->gtCallObjp    = localCopyThis;
19679                     call->gtCallMethHnd = unboxedEntryMethod;
19680                     derivedMethod       = unboxedEntryMethod;
19681                 }
19682                 else
19683                 {
19684                     JITDUMP("Sorry, failed to undo the box\n");
19685                 }
19686             }
19687         }
19688         else
19689         {
19690             // Many of the low-level methods on value classes won't have unboxed entries,
19691             // as they need access to the type of the object.
19692             //
19693             // Note this may be a cue for us to stack allocate the boxed object, since
19694             // we probably know that these objects don't escape.
19695             JITDUMP("Sorry, failed to find unboxed entry point\n");
19696         }
19697     }
19698
19699     // Fetch the class that introduced the derived method.
19700     //
19701     // Note this may not equal objClass, if there is a
19702     // final method that objClass inherits.
19703     CORINFO_CLASS_HANDLE derivedClass = info.compCompHnd->getMethodClass(derivedMethod);
19704
19705     // Need to update call info too. This is fragile
19706     // but hopefully the derived method conforms to
19707     // the base in most other ways.
19708     *method        = derivedMethod;
19709     *methodFlags   = derivedMethodAttribs;
19710     *contextHandle = MAKE_METHODCONTEXT(derivedMethod);
19711
19712     // Update context handle.
19713     if ((exactContextHandle != nullptr) && (*exactContextHandle != nullptr))
19714     {
19715         *exactContextHandle = MAKE_METHODCONTEXT(derivedMethod);
19716     }
19717
19718 #ifdef FEATURE_READYTORUN_COMPILER
19719     if (opts.IsReadyToRun())
19720     {
19721         // For R2R, getCallInfo triggers bookkeeping on the zap
19722         // side so we need to call it here.
19723         //
19724         // First, cons up a suitable resolved token.
19725         CORINFO_RESOLVED_TOKEN derivedResolvedToken = {};
19726
19727         derivedResolvedToken.tokenScope   = info.compScopeHnd;
19728         derivedResolvedToken.tokenContext = *contextHandle;
19729         derivedResolvedToken.token        = info.compCompHnd->getMethodDefFromMethod(derivedMethod);
19730         derivedResolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
19731         derivedResolvedToken.hClass       = derivedClass;
19732         derivedResolvedToken.hMethod      = derivedMethod;
19733
19734         // Look up the new call info.
19735         CORINFO_CALL_INFO derivedCallInfo;
19736         eeGetCallInfo(&derivedResolvedToken, nullptr, addVerifyFlag(CORINFO_CALLINFO_ALLOWINSTPARAM), &derivedCallInfo);
19737
19738         // Update the call.
19739         call->gtCallMoreFlags &= ~GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
19740         call->gtCallMoreFlags &= ~GTF_CALL_M_R2R_REL_INDIRECT;
19741         call->setEntryPoint(derivedCallInfo.codePointerLookup.constLookup);
19742     }
19743 #endif // FEATURE_READYTORUN_COMPILER
19744 }
19745
19746 //------------------------------------------------------------------------
19747 // impGetSpecialIntrinsicExactReturnType: Look for special cases where a call
19748 //   to an intrinsic returns an exact type
19749 //
19750 // Arguments:
19751 //     methodHnd -- handle for the special intrinsic method
19752 //
19753 // Returns:
19754 //     Exact class handle returned by the intrinsic call, if known.
19755 //     Nullptr if not known, or not likely to lead to beneficial optimization.
19756
19757 CORINFO_CLASS_HANDLE Compiler::impGetSpecialIntrinsicExactReturnType(CORINFO_METHOD_HANDLE methodHnd)
19758 {
19759     JITDUMP("Special intrinsic: looking for exact type returned by %s\n", eeGetMethodFullName(methodHnd));
19760
19761     CORINFO_CLASS_HANDLE result = nullptr;
19762
19763     // See what intrinsic we have...
19764     const NamedIntrinsic ni = lookupNamedIntrinsic(methodHnd);
19765     switch (ni)
19766     {
19767         case NI_System_Collections_Generic_EqualityComparer_get_Default:
19768         {
19769             // Expect a single class generic parameter; fetch it.
19770             CORINFO_SIG_INFO sig;
19771             info.compCompHnd->getMethodSig(methodHnd, &sig);
19772             assert(sig.sigInst.classInstCount == 1);
19773             CORINFO_CLASS_HANDLE typeHnd = sig.sigInst.classInst[0];
19774             assert(typeHnd != nullptr);
19775
19776             // Lookup can be incorrect when we have __Canon, as it won't appear
19777             // to implement any interface types.
19778             //
19779             // And if we do not have a final type, devirt & inlining is
19780             // unlikely to result in much simplification.
19781             //
19782             // We can use CORINFO_FLG_FINAL to screen out both of these cases.
19783             const DWORD typeAttribs = info.compCompHnd->getClassAttribs(typeHnd);
19784             const bool  isFinalType = ((typeAttribs & CORINFO_FLG_FINAL) != 0);
19785
19786             if (isFinalType)
19787             {
19788                 result = info.compCompHnd->getDefaultEqualityComparerClass(typeHnd);
19789                 JITDUMP("Special intrinsic for type %s: return type is %s\n", eeGetClassName(typeHnd),
19790                         result != nullptr ? eeGetClassName(result) : "unknown");
19791             }
19792             else
19793             {
19794                 JITDUMP("Special intrinsic for type %s: type not final, so deferring opt\n", eeGetClassName(typeHnd));
19795             }
19796
19797             break;
19798         }
19799
19800         default:
19801         {
19802             JITDUMP("This special intrinsic not handled, sorry...\n");
19803             break;
19804         }
19805     }
19806
19807     return result;
19808 }
19809
19810 //------------------------------------------------------------------------
19811 // impAllocateToken: create a CORINFO_RESOLVED_TOKEN in jit-allocated memory and initialize it.
19812 //
19813 // Arguments:
19814 //    token - init value for the allocated token.
19815 //
19816 // Return Value:
19817 //    pointer to the token in jit-allocated memory.
19818 CORINFO_RESOLVED_TOKEN* Compiler::impAllocateToken(CORINFO_RESOLVED_TOKEN token)
19819 {
19820     CORINFO_RESOLVED_TOKEN* memory = (CORINFO_RESOLVED_TOKEN*)compGetMem(sizeof(token));
19821     *memory                        = token;
19822     return memory;
19823 }
19824
19825 //------------------------------------------------------------------------
19826 // SpillRetExprHelper: iterate through the argument trees and spill ret_exprs to local variables.
19827 //
19828 class SpillRetExprHelper
19829 {
19830 public:
19831     SpillRetExprHelper(Compiler* comp) : comp(comp)
19832     {
19833     }
19834
19835     void StoreRetExprResultsInArgs(GenTreeCall* call)
19836     {
19837         GenTree* args = call->gtCallArgs;
19838         if (args != nullptr)
19839         {
19840             comp->fgWalkTreePre(&args, SpillRetExprVisitor, this);
19841         }
19842         GenTree* thisArg = call->gtCallObjp;
19843         if (thisArg != nullptr)
19844         {
19845             comp->fgWalkTreePre(&thisArg, SpillRetExprVisitor, this);
19846         }
19847     }
19848
19849 private:
19850     static Compiler::fgWalkResult SpillRetExprVisitor(GenTree** pTree, Compiler::fgWalkData* fgWalkPre)
19851     {
19852         assert((pTree != nullptr) && (*pTree != nullptr));
19853         GenTree* tree = *pTree;
19854         if ((tree->gtFlags & GTF_CALL) == 0)
19855         {
19856             // Trees that contain a GT_RET_EXPR are marked with GTF_CALL, so this subtree can be skipped.
19857             return Compiler::WALK_SKIP_SUBTREES;
19858         }
19859         if (tree->OperGet() == GT_RET_EXPR)
19860         {
19861             SpillRetExprHelper* walker = static_cast<SpillRetExprHelper*>(fgWalkPre->pCallbackData);
19862             walker->StoreRetExprAsLocalVar(pTree);
19863         }
19864         return Compiler::WALK_CONTINUE;
19865     }
19866
19867     void StoreRetExprAsLocalVar(GenTree** pRetExpr)
19868     {
19869         GenTree* retExpr = *pRetExpr;
19870         assert(retExpr->OperGet() == GT_RET_EXPR);
19871         JITDUMP("Store return expression %u  as a local var.\n", retExpr->gtTreeID);
19872         unsigned tmp = comp->lvaGrabTemp(true DEBUGARG("spilling ret_expr"));
19873         comp->impAssignTempGen(tmp, retExpr, (unsigned)Compiler::CHECK_SPILL_NONE);
19874         *pRetExpr = comp->gtNewLclvNode(tmp, retExpr->TypeGet());
19875     }
19876
19877 private:
19878     Compiler* comp;
19879 };
19880
19881 //------------------------------------------------------------------------
19882 // addFatPointerCandidate: mark the call and the method as having a fat pointer candidate.
19883 //                         Spill ret_exprs in the call's trees, because they can't be cloned.
19884 //
19885 // Arguments:
19886 //    call - fat calli candidate
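      //
      // Notes:
      //    Fat calli candidates are expanded later into a runtime check with two call forms,
      //    which can clone the call's trees; GT_RET_EXPR placeholders must not be duplicated,
      //    so they are spilled to locals here first.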
19887 //
19888 void Compiler::addFatPointerCandidate(GenTreeCall* call)
19889 {
19890     setMethodHasFatPointer();
19891     call->SetFatPointerCandidate();
19892     SpillRetExprHelper helper(this);
19893     helper.StoreRetExprResultsInArgs(call);
19894 }